Dataset schema (column — type — value stats):

  status           stringclasses          (1 value)
  repo_name        stringclasses          (31 values)
  repo_url         stringclasses          (31 values)
  issue_id         int64                  (1 to 104k)
  title            stringlengths          (4 to 233)
  body             stringlengths          (0 to 186k)
  issue_url        stringlengths          (38 to 56)
  pull_url         stringlengths          (37 to 54)
  before_fix_sha   stringlengths          (40 to 40)
  after_fix_sha    stringlengths          (40 to 40)
  report_datetime  timestamp[us, tz=UTC]
  language         stringclasses          (5 values)
  commit_datetime  timestamp[us, tz=UTC]
  updated_file     stringlengths          (7 to 188)
  chunk_content    stringlengths          (1 to 1.03M)
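The rows below follow this schema: each row pairs one GitHub issue with a single code chunk from a file changed by the fixing PR, so the issue-level fields (title, body, URLs, commit SHAs) repeat on every chunk row of the same issue. As a minimal sketch of consuming such rows — assuming they have been exported to a local JSONL file; the file name `issues_chunks.jsonl` and the helper functions are illustrative, not part of the dataset — one way to regroup chunks by issue and derive a GitHub compare link from the two SHA columns:

```python
import json
from collections import defaultdict

# Hypothetical local export of the rows shown below; the path is an
# assumption for illustration, not part of the dataset itself.
ROWS_PATH = "issues_chunks.jsonl"

# Issue-level columns that repeat on every chunk row of the same issue.
ISSUE_FIELDS = (
    "repo_name", "repo_url", "title", "issue_url", "pull_url",
    "before_fix_sha", "after_fix_sha", "report_datetime", "language",
)

def load_rows(path):
    """Yield one dict per line, with keys matching the schema above."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            if line.strip():
                yield json.loads(line)

def group_chunks_by_issue(rows):
    """Collect every (updated_file, chunk_content) pair under its issue_id."""
    issues = defaultdict(lambda: {"meta": None, "chunks": []})
    for row in rows:
        entry = issues[row["issue_id"]]
        if entry["meta"] is None:
            entry["meta"] = {key: row[key] for key in ISSUE_FIELDS}
        entry["chunks"].append((row["updated_file"], row["chunk_content"]))
    return issues

def compare_url(meta):
    """Build a GitHub compare link between the pre-fix and post-fix commits."""
    return (f"{meta['repo_url']}/compare/"
            f"{meta['before_fix_sha']}...{meta['after_fix_sha']}")

if __name__ == "__main__":
    for issue_id, entry in group_chunks_by_issue(load_rows(ROWS_PATH)).items():
        meta = entry["meta"]
        print(issue_id, meta["title"])
        print("  diff :", compare_url(meta))
        print("  files:", sorted({path for path, _ in entry["chunks"]}))
```

Grouping on `issue_id` yields one record per issue, and the compare link built from `before_fix_sha` and `after_fix_sha` is a convenient way to inspect the corresponding change on GitHub.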
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9569
[Bug] [Master] process pause and recover fail
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened process pause and recover fail ### What you expected to happen process can pause and recover successfully ### How to reproduce create a process with two task, task A sleep 1m. run process, and pause. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9569
https://github.com/apache/dolphinscheduler/pull/9568
32f76e487f1d022d248e494c6bc624f09091761b
63638601b0e1e5768fae142b419a4b965bf33d2b
2022-04-18T12:42:23Z
java
2022-04-19T02:23:56Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
public boolean acquireTaskGroupAgain(TaskGroupQueue taskGroupQueue) { return robTaskGroupResouce(taskGroupQueue); } @Override public void releaseAllTaskGroup(int processInstanceId) { List<TaskInstance> taskInstances = this.taskInstanceMapper.loadAllInfosNoRelease(processInstanceId, TaskGroupQueueStatus.ACQUIRE_SUCCESS.getCode()); for (TaskInstance info : taskInstances) { releaseTaskGroup(info); } } /** * release the TGQ resource when the corresponding task is finished. * * @return the result code and msg */ @Override public TaskInstance releaseTaskGroup(TaskInstance taskInstance) { TaskGroup taskGroup = taskGroupMapper.selectById(taskInstance.getTaskGroupId()); if (taskGroup == null) { return null; } TaskGroupQueue thisTaskGroupQueue = this.taskGroupQueueMapper.queryByTaskId(taskInstance.getId()); if (thisTaskGroupQueue.getStatus() == TaskGroupQueueStatus.RELEASE) { return null; } try { while (taskGroupMapper.releaseTaskGroupResource(taskGroup.getId(), taskGroup.getUseSize() , thisTaskGroupQueue.getId(), TaskGroupQueueStatus.ACQUIRE_SUCCESS.getCode()) != 1) { thisTaskGroupQueue = this.taskGroupQueueMapper.queryByTaskId(taskInstance.getId()); if (thisTaskGroupQueue.getStatus() == TaskGroupQueueStatus.RELEASE) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9569
[Bug] [Master] process pause and recover fail
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened process pause and recover fail ### What you expected to happen process can pause and recover successfully ### How to reproduce create a process with two task, task A sleep 1m. run process, and pause. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9569
https://github.com/apache/dolphinscheduler/pull/9568
32f76e487f1d022d248e494c6bc624f09091761b
63638601b0e1e5768fae142b419a4b965bf33d2b
2022-04-18T12:42:23Z
java
2022-04-19T02:23:56Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
return null; } taskGroup = taskGroupMapper.selectById(taskInstance.getTaskGroupId()); } } catch (Exception e) { logger.error("release the task group error", e); } logger.info("updateTask:{}", taskInstance.getName()); changeTaskGroupQueueStatus(taskInstance.getId(), TaskGroupQueueStatus.RELEASE); TaskGroupQueue taskGroupQueue = this.taskGroupQueueMapper.queryTheHighestPriorityTasks(taskGroup.getId(), TaskGroupQueueStatus.WAIT_QUEUE.getCode(), Flag.NO.getCode(), Flag.NO.getCode()); if (taskGroupQueue == null) { return null; } while (this.taskGroupQueueMapper.updateInQueueCAS(Flag.NO.getCode(), Flag.YES.getCode(), taskGroupQueue.getId()) != 1) { taskGroupQueue = this.taskGroupQueueMapper.queryTheHighestPriorityTasks(taskGroup.getId(), TaskGroupQueueStatus.WAIT_QUEUE.getCode(), Flag.NO.getCode(), Flag.NO.getCode()); if (taskGroupQueue == null) { return null; } } return this.taskInstanceMapper.selectById(taskGroupQueue.getTaskId()); } /** * release the TGQ resource when the corresponding task is finished. * * @param taskId task id * @return the result code and msg */ @Override
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9569
[Bug] [Master] process pause and recover fail
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened process pause and recover fail ### What you expected to happen process can pause and recover successfully ### How to reproduce create a process with two task, task A sleep 1m. run process, and pause. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9569
https://github.com/apache/dolphinscheduler/pull/9568
32f76e487f1d022d248e494c6bc624f09091761b
63638601b0e1e5768fae142b419a4b965bf33d2b
2022-04-18T12:42:23Z
java
2022-04-19T02:23:56Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
public void changeTaskGroupQueueStatus(int taskId, TaskGroupQueueStatus status) { TaskGroupQueue taskGroupQueue = taskGroupQueueMapper.queryByTaskId(taskId); taskGroupQueue.setStatus(status); taskGroupQueue.setUpdateTime(new Date(System.currentTimeMillis())); taskGroupQueueMapper.updateById(taskGroupQueue); } /** * insert into task group queue * * @param taskId task id * @param taskName task name * @param groupId group id * @param processId process id * @param priority priority * @return result and msg code */ @Override public TaskGroupQueue insertIntoTaskGroupQueue(Integer taskId, String taskName, Integer groupId, Integer processId, Integer priority, TaskGroupQueueStatus status) { TaskGroupQueue taskGroupQueue = new TaskGroupQueue(taskId, taskName, groupId, processId, priority, status); taskGroupQueue.setCreateTime(new Date()); taskGroupQueue.setUpdateTime(new Date()); taskGroupQueueMapper.insert(taskGroupQueue); return taskGroupQueue; } @Override public int updateTaskGroupQueueStatus(Integer taskId, int status) { return taskGroupQueueMapper.updateStatusByTaskId(taskId, status); }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9569
[Bug] [Master] process pause and recover fail
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened process pause and recover fail ### What you expected to happen process can pause and recover successfully ### How to reproduce create a process with two task, task A sleep 1m. run process, and pause. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9569
https://github.com/apache/dolphinscheduler/pull/9568
32f76e487f1d022d248e494c6bc624f09091761b
63638601b0e1e5768fae142b419a4b965bf33d2b
2022-04-18T12:42:23Z
java
2022-04-19T02:23:56Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
@Override public int updateTaskGroupQueue(TaskGroupQueue taskGroupQueue) { return taskGroupQueueMapper.updateById(taskGroupQueue); } @Override public TaskGroupQueue loadTaskGroupQueue(int taskId) { return this.taskGroupQueueMapper.queryByTaskId(taskId); } @Override public void sendStartTask2Master(ProcessInstance processInstance, int taskId, org.apache.dolphinscheduler.remote.command.CommandType taskType) { String host = processInstance.getHost(); String address = host.split(":")[0]; int port = Integer.parseInt(host.split(":")[1]); TaskEventChangeCommand taskEventChangeCommand = new TaskEventChangeCommand( processInstance.getId(), taskId ); stateEventCallbackService.sendResult(address, port, taskEventChangeCommand.convert2Command(taskType)); } @Override public ProcessInstance loadNextProcess4Serial(long code, int state) { return this.processInstanceMapper.loadNextProcess4Serial(code, state); } protected void deleteCommandWithCheck(int commandId) { int delete = this.commandMapper.deleteById(commandId); if (delete != 1) { throw new ServiceException("delete command fail, id:" + commandId); } } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataAnalysisService.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service; import org.apache.dolphinscheduler.dao.entity.User; import java.util.Map; /** * data analysis service */ public interface DataAnalysisService {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataAnalysisService.java
/** * statistical task instance status data * * @param loginUser login user * @param projectCode project code * @param startDate start date * @param endDate end date * @return task state count data */ Map<String, Object> countTaskStateByProject(User loginUser, long projectCode, String startDate, String endDate); /** * statistical process instance status data * * @param loginUser login user * @param projectCode project code * @param startDate start date
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataAnalysisService.java
* @param endDate end date * @return process instance state count data */ Map<String, Object> countProcessInstanceStateByProject(User loginUser, long projectCode, String startDate, String endDate); /** * statistics the process definition quantities of a certain person * * We only need projects which users have permission to see to determine whether the definition belongs to the user or not. * * @param loginUser login user * @param projectCode project code * @return definition count data */ Map<String, Object> countDefinitionByUser(User loginUser, long projectCode); /** * statistical command status data * * @param loginUser login user * @return command state count data */ Map<String, Object> countCommandState(User loginUser); /** * count queue state * * @param loginUser login user * @return queue state count data */ Map<String, Object> countQueueState(User loginUser); }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/DataAnalysisServiceImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service.impl; import org.apache.dolphinscheduler.api.dto.CommandStateCount; import org.apache.dolphinscheduler.api.dto.DefineUserDto; import org.apache.dolphinscheduler.api.dto.TaskCountDto; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.service.DataAnalysisService; import org.apache.dolphinscheduler.api.service.ProjectService; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.UserType; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.TriFunction;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/DataAnalysisServiceImpl.java
import org.apache.dolphinscheduler.dao.entity.CommandCount; import org.apache.dolphinscheduler.dao.entity.DefinitionGroupByUser; import org.apache.dolphinscheduler.dao.entity.ExecuteStatusCount; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.CommandMapper; import org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper; import org.apache.dolphinscheduler.plugin.task.api.enums.ExecutionStatus; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.commons.lang.StringUtils; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; /** * data analysis service impl */ @Service
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/DataAnalysisServiceImpl.java
public class DataAnalysisServiceImpl extends BaseServiceImpl implements DataAnalysisService { @Autowired private ProjectMapper projectMapper; @Autowired private ProjectService projectService; @Autowired private ProcessInstanceMapper processInstanceMapper; @Autowired private ProcessDefinitionMapper processDefinitionMapper; @Autowired private CommandMapper commandMapper; @Autowired private ErrorCommandMapper errorCommandMapper; @Autowired
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/DataAnalysisServiceImpl.java
private TaskInstanceMapper taskInstanceMapper; @Autowired private ProcessService processService; /** * statistical task instance status data * * @param loginUser login user * @param projectCode project code * @param startDate start date * @param endDate end date * @return task state count data */ @Override public Map<String, Object> countTaskStateByProject(User loginUser, long projectCode, String startDate, String endDate) { return countStateByProject( loginUser, projectCode, startDate, endDate, (start, end, projectCodes) -> this.taskInstanceMapper.countTaskInstanceStateByProjectCodes(start, end, projectCodes)); } /** * statistical process instance status data * * @param loginUser login user * @param projectCode project code * @param startDate start date * @param endDate end date * @return process instance state count data */
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/DataAnalysisServiceImpl.java
@Override public Map<String, Object> countProcessInstanceStateByProject(User loginUser, long projectCode, String startDate, String endDate) { Map<String, Object> result = this.countStateByProject( loginUser, projectCode, startDate, endDate, (start, end, projectCodes) -> this.processInstanceMapper.countInstanceStateByProjectCodes(start, end, projectCodes)); if (result.containsKey(Constants.STATUS) && result.get(Constants.STATUS).equals(Status.SUCCESS)) { ((TaskCountDto)result.get(Constants.DATA_LIST)).removeStateFromCountList(ExecutionStatus.FORCED_SUCCESS); } return result; } /** * Wrapper function of counting process instance state and task state * * @param loginUser login user * @param projectCode project code * @param startDate start date * @param endDate end date */ private Map<String, Object> countStateByProject(User loginUser, long projectCode, String startDate, String endDate , TriFunction<Date, Date, Long[], List<ExecuteStatusCount>> instanceStateCounter) { Map<String, Object> result = new HashMap<>(); if (projectCode != 0) { Project project = projectMapper.queryByCode(projectCode); result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/DataAnalysisServiceImpl.java
} } Date start = null; Date end = null; if (!StringUtils.isEmpty(startDate) && !StringUtils.isEmpty(endDate)) { start = DateUtils.getScheduleDate(startDate); end = DateUtils.getScheduleDate(endDate); if (Objects.isNull(start) || Objects.isNull(end)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, Constants.START_END_DATE); return result; } } List<ExecuteStatusCount> processInstanceStateCounts = new ArrayList<>(); Long[] projectCodeArray = projectCode == 0 ? getProjectCodesArrays(loginUser) : new Long[] {projectCode}; if (projectCodeArray.length != 0 || loginUser.getUserType() == UserType.ADMIN_USER) { processInstanceStateCounts = instanceStateCounter.apply(start, end, projectCodeArray); } if (processInstanceStateCounts != null) { TaskCountDto taskCountResult = new TaskCountDto(processInstanceStateCounts); result.put(Constants.DATA_LIST, taskCountResult); putMsg(result, Status.SUCCESS); } return result; } /** * statistics the process definition quantities of a certain person * <p> * We only need projects which users have permission to see to determine whether the definition belongs to the user or not. *
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/DataAnalysisServiceImpl.java
* @param loginUser login user * @param projectCode project code * @return definition count data */ @Override public Map<String, Object> countDefinitionByUser(User loginUser, long projectCode) { Map<String, Object> result = new HashMap<>(); if (projectCode != 0) { Project project = projectMapper.queryByCode(projectCode); result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } } List<DefinitionGroupByUser> defineGroupByUsers = new ArrayList<>(); Long[] projectCodeArray = projectCode == 0 ? getProjectCodesArrays(loginUser) : new Long[] {projectCode}; if (projectCodeArray.length != 0 || loginUser.getUserType() == UserType.ADMIN_USER) { defineGroupByUsers = processDefinitionMapper.countDefinitionByProjectCodes(projectCodeArray); } DefineUserDto dto = new DefineUserDto(defineGroupByUsers); result.put(Constants.DATA_LIST, dto); putMsg(result, Status.SUCCESS); return result; } /** * statistical command status data * * @param loginUser login user * @return command state count data
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/DataAnalysisServiceImpl.java
*/ @Override public Map<String, Object> countCommandState(User loginUser) { Map<String, Object> result = new HashMap<>(); /** * find all the task lists in the project under the user * statistics based on task status execution, failure, completion, wait, total */ Date start = null; Date end = null; Long[] projectCodeArray = getProjectCodesArrays(loginUser); int userId = loginUser.getUserType() == UserType.ADMIN_USER ? 0 : loginUser.getId(); Map<CommandType, Integer> normalCountCommandCounts = commandMapper.countCommandState(userId, start, end, projectCodeArray) .stream() .collect(Collectors.toMap(CommandCount::getCommandType, CommandCount::getCount)); Map<CommandType, Integer> errorCommandCounts = errorCommandMapper.countCommandState(userId, start, end, projectCodeArray) .stream() .collect(Collectors.toMap(CommandCount::getCommandType, CommandCount::getCount)); List<CommandStateCount> list = Arrays.stream(CommandType.values()) .map(commandType -> new CommandStateCount( errorCommandCounts.getOrDefault(commandType, 0), normalCountCommandCounts.getOrDefault(commandType, 0), commandType) ).collect(Collectors.toList()); result.put(Constants.DATA_LIST, list); putMsg(result, Status.SUCCESS); return result;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/DataAnalysisServiceImpl.java
} private Long[] getProjectCodesArrays(User loginUser) { List<Project> projectList = projectMapper.queryRelationProjectListByUserId( loginUser.getUserType() == UserType.ADMIN_USER ? 0 : loginUser.getId()); Set<Long> projectCodes = new HashSet<>(); projectList.forEach(project -> projectCodes.add(project.getCode())); if (loginUser.getUserType() == UserType.GENERAL_USER) { List<Project> createProjects = projectMapper.queryProjectCreatedByUser(loginUser.getId()); createProjects.forEach(project -> projectCodes.add(project.getCode())); } return projectCodes.toArray(new Long[0]); } /** * count queue state * * @return queue state count data */ @Override public Map<String, Object> countQueueState(User loginUser) { Map<String, Object> result = new HashMap<>(); Map<String, Integer> dataMap = new HashMap<>(); dataMap.put("taskQueue", 0); dataMap.put("taskKill", 0); result.put(Constants.DATA_LIST, dataMap); putMsg(result, Status.SUCCESS); return result; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/TaskInstanceMapper.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.dao.mapper; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.dao.entity.ExecuteStatusCount; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.plugin.task.api.enums.ExecutionStatus; import org.apache.ibatis.annotations.Param; import java.util.Date; import java.util.List; import com.baomidou.mybatisplus.core.mapper.BaseMapper; import com.baomidou.mybatisplus.core.metadata.IPage; /** * task instance mapper interface */ public interface TaskInstanceMapper extends BaseMapper<TaskInstance> {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/TaskInstanceMapper.java
List<Integer> queryTaskByProcessIdAndState(@Param("processInstanceId") Integer processInstanceId, @Param("state") Integer state); List<TaskInstance> findValidTaskListByProcessId(@Param("processInstanceId") Integer processInstanceId, @Param("flag") Flag flag); List<TaskInstance> queryByHostAndStatus(@Param("host") String host, @Param("states") int[] stateArray); int setFailoverByHostAndStateArray(@Param("host") String host, @Param("states") int[] stateArray, @Param("destStatus") ExecutionStatus destStatus); TaskInstance queryByInstanceIdAndName(@Param("processInstanceId") int processInstanceId, @Param("name") String name); TaskInstance queryByInstanceIdAndCode(@Param("processInstanceId") int processInstanceId, @Param("taskCode") Long taskCode);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/TaskInstanceMapper.java
Integer countTask(@Param("projectCodes") Long[] projectCodes, @Param("taskIds") int[] taskIds); /** * Statistics task instance group by given project codes list * <p> * We only need project codes to determine whether the task instance belongs to the user or not. * * @param startTime Statistics start time * @param endTime Statistics end time * @param projectCodes Project codes list to filter * @return List of ExecuteStatusCount */ List<ExecuteStatusCount> countTaskInstanceStateByProjectCodes(@Param("startTime") Date startTime, @Param("endTime") Date endTime, @Param("projectCodes") Long[] projectCodes); IPage<TaskInstance> queryTaskInstanceListPaging(IPage<TaskInstance> page, @Param("projectCode") Long projectCode, @Param("processInstanceId") Integer processInstanceId, @Param("processInstanceName") String processInstanceName, @Param("searchVal") String searchVal, @Param("taskName") String taskName, @Param("executorId") int executorId, @Param("states") int[] statusArray, @Param("host") String host, @Param("startTime") Date startTime, @Param("endTime") Date endTime ); List<TaskInstance> loadAllInfosNoRelease(@Param("processInstanceId") int processInstanceId,@Param("status") int status); }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-task-plugin/dolphinscheduler-task-api/src/main/java/org/apache/dolphinscheduler/plugin/task/api/enums/ExecutionStatus.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS,
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-task-plugin/dolphinscheduler-task-api/src/main/java/org/apache/dolphinscheduler/plugin/task/api/enums/ExecutionStatus.java
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.plugin.task.api.enums; import java.util.HashMap; import com.baomidou.mybatisplus.annotation.EnumValue; /** * running status for workflow and task nodes */ public enum ExecutionStatus { /** * status: * 0 submit success * 1 running * 2 ready pause * 3 pause * 4 ready stop * 5 stop * 6 failure * 7 success * 8 need fault tolerance * 9 kill * 10 waiting thread * 11 waiting depend node complete * 12 delay execution * 13 forced success * 14 serial wait * 15 ready block * 16 block
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-task-plugin/dolphinscheduler-task-api/src/main/java/org/apache/dolphinscheduler/plugin/task/api/enums/ExecutionStatus.java
*/ SUBMITTED_SUCCESS(0, "submit success"), RUNNING_EXECUTION(1, "running"), READY_PAUSE(2, "ready pause"), PAUSE(3, "pause"), READY_STOP(4, "ready stop"), STOP(5, "stop"), FAILURE(6, "failure"), SUCCESS(7, "success"), NEED_FAULT_TOLERANCE(8, "need fault tolerance"), KILL(9, "kill"), WAITING_THREAD(10, "waiting thread"), WAITING_DEPEND(11, "waiting depend node complete"), DELAY_EXECUTION(12, "delay execution"), FORCED_SUCCESS(13, "forced success"), SERIAL_WAIT(14, "serial wait"), READY_BLOCK(15, "ready block"), BLOCK(16, "block"), DISPATCH(17, "dispatch"), ; ExecutionStatus(int code, String descp) { this.code = code; this.descp = descp; } @EnumValue private final int code; private final String descp; private static HashMap<Integer, ExecutionStatus> EXECUTION_STATUS_MAP = new HashMap<>(); static { for (ExecutionStatus executionStatus : ExecutionStatus.values()) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-task-plugin/dolphinscheduler-task-api/src/main/java/org/apache/dolphinscheduler/plugin/task/api/enums/ExecutionStatus.java
EXECUTION_STATUS_MAP.put(executionStatus.code, executionStatus); } } /** * status is success * * @return status */ public boolean typeIsSuccess() { return this == SUCCESS || this == FORCED_SUCCESS; } /** * status is failure * * @return status */ public boolean typeIsFailure() { return this == FAILURE || this == NEED_FAULT_TOLERANCE; } /** * status is finished * * @return status */ public boolean typeIsFinished() { return typeIsSuccess() || typeIsFailure() || typeIsCancel() || typeIsPause() || typeIsStop() || typeIsBlock(); } /** * status is waiting thread
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-task-plugin/dolphinscheduler-task-api/src/main/java/org/apache/dolphinscheduler/plugin/task/api/enums/ExecutionStatus.java
* * @return status */ public boolean typeIsWaitingThread() { return this == WAITING_THREAD; } /** * status is pause * * @return status */ public boolean typeIsPause() { return this == PAUSE; } /** * status is pause * * @return status */ public boolean typeIsStop() { return this == STOP; } /** * status is running * * @return status */ public boolean typeIsRunning() { return this == RUNNING_EXECUTION || this == WAITING_DEPEND || this == DELAY_EXECUTION; }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9558
[Bug] [API] home page task instance didn't show dispatch states count
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ![image](https://user-images.githubusercontent.com/36755957/163785906-150b715c-30c9-49a2-b63b-68409b830e80.png) the program does not count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### What you expected to happen the program should count 0 submit success 8 need fault tolerance 9 kill 17 dispatch states task instance. ### How to reproduce start a new process and check ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9558
https://github.com/apache/dolphinscheduler/pull/9559
efe04863a09c2b951fedd05f4268d988c6bbfe43
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
2022-04-18T09:11:57Z
java
2022-04-19T07:23:57Z
dolphinscheduler-task-plugin/dolphinscheduler-task-api/src/main/java/org/apache/dolphinscheduler/plugin/task/api/enums/ExecutionStatus.java
/** * status is block * * @return status */ public boolean typeIsBlock() { return this == BLOCK; } /** * status is cancel * * @return status */ public boolean typeIsCancel() { return this == KILL || this == STOP; } public int getCode() { return code; } public String getDescp() { return descp; } public static ExecutionStatus of(int status) { if (EXECUTION_STATUS_MAP.containsKey(status)) { return EXECUTION_STATUS_MAP.get(status); } throw new IllegalArgumentException("invalid status : " + status); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9579
[Bug] [Worker] task kill fail
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened task kill fail due to netty channel had be removed by `TaskKillProcessor` and task response fail. ### What you expected to happen task kill successfully when I click kill task. ### How to reproduce create a workflow with a long time shell, run it, and kill. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9579
https://github.com/apache/dolphinscheduler/pull/9578
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
b4017d0afd29f2ccd497713d8a448125baf93313
2022-04-19T06:39:48Z
java
2022-04-19T07:26:12Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskCallbackService.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.worker.processor; import static org.apache.dolphinscheduler.common.Constants.SLEEP_TIME_MILLIS; import org.apache.dolphinscheduler.common.enums.Event; import org.apache.dolphinscheduler.plugin.task.api.TaskConstants; import org.apache.dolphinscheduler.plugin.task.api.TaskExecutionContext; import org.apache.dolphinscheduler.remote.NettyRemotingClient;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,579
[Bug] [Worker] task kill fail
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The task kill fails because the netty channel had been removed by `TaskKillProcessor`, so the task response also fails. ### What you expected to happen The task should be killed successfully when I click kill task. ### How to reproduce create a workflow with a long-running shell task, run it, and kill it. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9579
https://github.com/apache/dolphinscheduler/pull/9578
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
b4017d0afd29f2ccd497713d8a448125baf93313
2022-04-19T06:39:48Z
java
2022-04-19T07:26:12Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskCallbackService.java
import org.apache.dolphinscheduler.remote.command.Command; import org.apache.dolphinscheduler.remote.command.CommandType; import org.apache.dolphinscheduler.remote.command.TaskExecuteResponseCommand; import org.apache.dolphinscheduler.remote.command.TaskExecuteRunningCommand; import org.apache.dolphinscheduler.remote.command.TaskKillResponseCommand; import org.apache.dolphinscheduler.remote.config.NettyClientConfig; import org.apache.dolphinscheduler.remote.processor.NettyRemoteChannel; import org.apache.dolphinscheduler.server.worker.cache.ResponseCache; import java.util.Arrays; import java.util.concurrent.ConcurrentHashMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelFutureListener; /** * task callback service */ @Service public class TaskCallbackService { private final Logger logger = LoggerFactory.getLogger(TaskCallbackService.class); private static final int[] RETRY_BACKOFF = {1, 2, 3, 5, 10, 20, 40, 100, 100, 100, 100, 200, 200, 200}; @Autowired private TaskExecuteResponseAckProcessor taskExecuteRunningProcessor; @Autowired private TaskExecuteResponseAckProcessor taskExecuteResponseAckProcessor; /** * remote channels
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,579
[Bug] [Worker] task kill fail
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The task kill fails because the netty channel had been removed by `TaskKillProcessor`, so the task response also fails. ### What you expected to happen The task should be killed successfully when I click kill task. ### How to reproduce create a workflow with a long-running shell task, run it, and kill it. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9579
https://github.com/apache/dolphinscheduler/pull/9578
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
b4017d0afd29f2ccd497713d8a448125baf93313
2022-04-19T06:39:48Z
java
2022-04-19T07:26:12Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskCallbackService.java
*/ private static final ConcurrentHashMap<Integer, NettyRemoteChannel> REMOTE_CHANNELS = new ConcurrentHashMap<>(); /** * netty remoting client */ private final NettyRemotingClient nettyRemotingClient; public TaskCallbackService() { final NettyClientConfig clientConfig = new NettyClientConfig(); this.nettyRemotingClient = new NettyRemotingClient(clientConfig); this.nettyRemotingClient.registerProcessor(CommandType.TASK_EXECUTE_RUNNING_ACK, taskExecuteRunningProcessor); this.nettyRemotingClient.registerProcessor(CommandType.TASK_EXECUTE_RESPONSE_ACK, taskExecuteResponseAckProcessor); } /** * add callback channel * * @param taskInstanceId taskInstanceId * @param channel channel */ public void addRemoteChannel(int taskInstanceId, NettyRemoteChannel channel) { REMOTE_CHANNELS.put(taskInstanceId, channel); } /** * change remote channel */ public void changeRemoteChannel(int taskInstanceId, NettyRemoteChannel channel) { if (REMOTE_CHANNELS.containsKey(taskInstanceId)) { REMOTE_CHANNELS.remove(taskInstanceId); } REMOTE_CHANNELS.put(taskInstanceId, channel); }
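A small aside on changeRemoteChannel in the chunk above: ConcurrentHashMap.put already replaces an existing mapping, so the containsKey/remove step is not needed for the replacement itself. A trivial standalone demonstration:

```java
import java.util.concurrent.ConcurrentHashMap;

public class ChannelReplaceSketch {

    public static void main(String[] args) {
        ConcurrentHashMap<Integer, String> channels = new ConcurrentHashMap<>();
        channels.put(1, "old-channel");

        // put() overwrites the existing mapping for key 1 in a single call.
        channels.put(1, "new-channel");
        System.out.println(channels.get(1)); // new-channel
    }
}
```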
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,579
[Bug] [Worker] task kill fail
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The task kill fails because the netty channel had been removed by `TaskKillProcessor`, so the task response also fails. ### What you expected to happen The task should be killed successfully when I click kill task. ### How to reproduce create a workflow with a long-running shell task, run it, and kill it. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9579
https://github.com/apache/dolphinscheduler/pull/9578
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
b4017d0afd29f2ccd497713d8a448125baf93313
2022-04-19T06:39:48Z
java
2022-04-19T07:26:12Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskCallbackService.java
/** * get callback channel * * @param taskInstanceId taskInstanceId * @return callback channel */ private NettyRemoteChannel getRemoteChannel(int taskInstanceId) { Channel newChannel; NettyRemoteChannel nettyRemoteChannel = REMOTE_CHANNELS.get(taskInstanceId); if (nettyRemoteChannel != null) { if (nettyRemoteChannel.isActive()) { return nettyRemoteChannel; } newChannel = nettyRemotingClient.getChannel(nettyRemoteChannel.getHost()); if (newChannel != null) { return getRemoteChannel(newChannel, nettyRemoteChannel.getOpaque(), taskInstanceId); } } return null; } public int pause(int ntries) { return SLEEP_TIME_MILLIS * RETRY_BACKOFF[ntries % RETRY_BACKOFF.length]; } private NettyRemoteChannel getRemoteChannel(Channel newChannel, long opaque, int taskInstanceId) { NettyRemoteChannel remoteChannel = new NettyRemoteChannel(newChannel, opaque); addRemoteChannel(taskInstanceId, remoteChannel); return remoteChannel; } private NettyRemoteChannel getRemoteChannel(Channel newChannel, int taskInstanceId) { NettyRemoteChannel remoteChannel = new NettyRemoteChannel(newChannel);
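The pause(int ntries) method in this chunk derives a retry delay from the RETRY_BACKOFF table. A standalone sketch of the resulting delay sequence, assuming SLEEP_TIME_MILLIS is 1000 ms (the real value comes from Constants and may differ):

```java
public class BackoffSketch {

    // Assumed base interval; the real value is Constants.SLEEP_TIME_MILLIS.
    private static final int SLEEP_TIME_MILLIS = 1000;
    private static final int[] RETRY_BACKOFF = {1, 2, 3, 5, 10, 20, 40, 100, 100, 100, 100, 200, 200, 200};

    static int pause(int ntries) {
        // Same formula as the chunk: the delay grows with the retry count and wraps around the table.
        return SLEEP_TIME_MILLIS * RETRY_BACKOFF[ntries % RETRY_BACKOFF.length];
    }

    public static void main(String[] args) {
        for (int i = 0; i < 6; i++) {
            System.out.println("retry " + i + " -> sleep " + pause(i) + " ms");
        }
    }
}
```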
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,579
[Bug] [Worker] task kill fail
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The task kill fails because the netty channel had been removed by `TaskKillProcessor`, so the task response also fails. ### What you expected to happen The task should be killed successfully when I click kill task. ### How to reproduce create a workflow with a long-running shell task, run it, and kill it. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9579
https://github.com/apache/dolphinscheduler/pull/9578
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
b4017d0afd29f2ccd497713d8a448125baf93313
2022-04-19T06:39:48Z
java
2022-04-19T07:26:12Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskCallbackService.java
addRemoteChannel(taskInstanceId, remoteChannel); return remoteChannel; } /** * remove callback channels * * @param taskInstanceId taskInstanceId */ public static void remove(int taskInstanceId) { REMOTE_CHANNELS.remove(taskInstanceId); } /** * send result * * @param taskInstanceId taskInstanceId * @param command command */ public void send(int taskInstanceId, Command command) { NettyRemoteChannel nettyRemoteChannel = getRemoteChannel(taskInstanceId); if (nettyRemoteChannel != null) { nettyRemoteChannel.writeAndFlush(command).addListener(new ChannelFutureListener() { @Override public void operationComplete(ChannelFuture future) throws Exception { if (future.isSuccess()) { return; } } }); }
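The send method above attaches a ChannelFutureListener to writeAndFlush. A minimal, self-contained way to try that listener pattern is Netty's in-memory EmbeddedChannel (assuming Netty 4.x on the classpath); the string payload is only a placeholder for a real Command.

```java
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.embedded.EmbeddedChannel;

public class SendListenerSketch {

    public static void main(String[] args) {
        // In-memory channel, handy for exercising write listeners without a network.
        EmbeddedChannel channel = new EmbeddedChannel();

        ChannelFuture future = channel.writeAndFlush("placeholder-command");
        future.addListener((ChannelFutureListener) f -> {
            if (f.isSuccess()) {
                System.out.println("write ok");
            } else {
                System.out.println("write failed: " + f.cause());
            }
        });

        channel.finish();
    }
}
```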
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,579
[Bug] [Worker] task kill fail
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The task kill fails because the netty channel had been removed by `TaskKillProcessor`, so the task response also fails. ### What you expected to happen The task should be killed successfully when I click kill task. ### How to reproduce create a workflow with a long-running shell task, run it, and kill it. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9579
https://github.com/apache/dolphinscheduler/pull/9578
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
b4017d0afd29f2ccd497713d8a448125baf93313
2022-04-19T06:39:48Z
java
2022-04-19T07:26:12Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskCallbackService.java
} /** * build task execute running command * * @param taskExecutionContext taskExecutionContext * @return TaskExecuteAckCommand */ private TaskExecuteRunningCommand buildTaskExecuteRunningCommand(TaskExecutionContext taskExecutionContext) { TaskExecuteRunningCommand command = new TaskExecuteRunningCommand(); command.setTaskInstanceId(taskExecutionContext.getTaskInstanceId()); command.setProcessInstanceId(taskExecutionContext.getProcessInstanceId()); command.setStatus(taskExecutionContext.getCurrentExecutionStatus().getCode()); command.setLogPath(taskExecutionContext.getLogPath()); command.setHost(taskExecutionContext.getHost()); command.setStartTime(taskExecutionContext.getStartTime()); command.setExecutePath(taskExecutionContext.getExecutePath()); return command; } /** * build task execute response command * * @param taskExecutionContext taskExecutionContext * @return TaskExecuteResponseCommand */ private TaskExecuteResponseCommand buildTaskExecuteResponseCommand(TaskExecutionContext taskExecutionContext) { TaskExecuteResponseCommand command = new TaskExecuteResponseCommand(); command.setProcessInstanceId(taskExecutionContext.getProcessInstanceId()); command.setTaskInstanceId(taskExecutionContext.getTaskInstanceId()); command.setStatus(taskExecutionContext.getCurrentExecutionStatus().getCode()); command.setLogPath(taskExecutionContext.getLogPath());
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,579
[Bug] [Worker] task kill fail
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The task kill fails because the netty channel had been removed by `TaskKillProcessor`, so the task response also fails. ### What you expected to happen The task should be killed successfully when I click kill task. ### How to reproduce create a workflow with a long-running shell task, run it, and kill it. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9579
https://github.com/apache/dolphinscheduler/pull/9578
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
b4017d0afd29f2ccd497713d8a448125baf93313
2022-04-19T06:39:48Z
java
2022-04-19T07:26:12Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskCallbackService.java
command.setExecutePath(taskExecutionContext.getExecutePath()); command.setAppIds(taskExecutionContext.getAppIds()); command.setProcessId(taskExecutionContext.getProcessId()); command.setHost(taskExecutionContext.getHost()); command.setStartTime(taskExecutionContext.getStartTime()); command.setEndTime(taskExecutionContext.getEndTime()); command.setVarPool(taskExecutionContext.getVarPool()); command.setExecutePath(taskExecutionContext.getExecutePath()); return command; } /** * build TaskKillResponseCommand * * @param taskExecutionContext taskExecutionContext * @return build TaskKillResponseCommand */ private TaskKillResponseCommand buildKillTaskResponseCommand(TaskExecutionContext taskExecutionContext) { TaskKillResponseCommand taskKillResponseCommand = new TaskKillResponseCommand(); taskKillResponseCommand.setStatus(taskExecutionContext.getCurrentExecutionStatus().getCode()); taskKillResponseCommand.setAppIds(Arrays.asList(taskExecutionContext.getAppIds().split(TaskConstants.COMMA))); taskKillResponseCommand.setTaskInstanceId(taskExecutionContext.getTaskInstanceId()); taskKillResponseCommand.setHost(taskExecutionContext.getHost()); taskKillResponseCommand.setProcessId(taskExecutionContext.getProcessId()); return taskKillResponseCommand; } /** * send task execute running command * todo unified callback command */ public void sendTaskExecuteRunningCommand(TaskExecutionContext taskExecutionContext) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,579
[Bug] [Worker] task kill fail
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The task kill fails because the netty channel had been removed by `TaskKillProcessor`, so the task response also fails. ### What you expected to happen The task should be killed successfully when I click kill task. ### How to reproduce create a workflow with a long-running shell task, run it, and kill it. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9579
https://github.com/apache/dolphinscheduler/pull/9578
c5b7e5adff40988f3dfd4f75dddb0b597509fff4
b4017d0afd29f2ccd497713d8a448125baf93313
2022-04-19T06:39:48Z
java
2022-04-19T07:26:12Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskCallbackService.java
TaskExecuteRunningCommand command = buildTaskExecuteRunningCommand(taskExecutionContext); ResponseCache.get().cache(taskExecutionContext.getTaskInstanceId(), command.convert2Command(), Event.RUNNING); send(taskExecutionContext.getTaskInstanceId(), command.convert2Command()); } /** * send task execute delay command * todo unified callback command */ public void sendTaskExecuteDelayCommand(TaskExecutionContext taskExecutionContext) { TaskExecuteRunningCommand command = buildTaskExecuteRunningCommand(taskExecutionContext); send(taskExecutionContext.getTaskInstanceId(), command.convert2Command()); } /** * send task execute response command * todo unified callback command */ public void sendTaskExecuteResponseCommand(TaskExecutionContext taskExecutionContext) { TaskExecuteResponseCommand command = buildTaskExecuteResponseCommand(taskExecutionContext); ResponseCache.get().cache(taskExecutionContext.getTaskInstanceId(), command.convert2Command(), Event.RESULT); send(taskExecutionContext.getTaskInstanceId(), command.convert2Command()); } public void sendTaskKillResponseCommand(TaskExecutionContext taskExecutionContext) { TaskKillResponseCommand taskKillResponseCommand = buildKillTaskResponseCommand(taskExecutionContext); send(taskExecutionContext.getTaskInstanceId(), taskKillResponseCommand.convert2Command()); TaskCallbackService.remove(taskExecutionContext.getTaskInstanceId()); } }
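sendTaskKillResponseCommand above removes the cached channel right after sending the kill response, which is the behaviour the linked issue blames for later responses failing. A toy sketch of that ordering problem, using a plain map of fake channel names instead of real Netty channels:

```java
import java.util.concurrent.ConcurrentHashMap;

public class ChannelRemovalSketch {

    private static final ConcurrentHashMap<Integer, String> REMOTE_CHANNELS = new ConcurrentHashMap<>();

    public static void main(String[] args) {
        int taskInstanceId = 42;
        REMOTE_CHANNELS.put(taskInstanceId, "channel-to-master");

        // Kill path: send the kill response, then drop the cached channel.
        System.out.println("kill response sent via " + REMOTE_CHANNELS.get(taskInstanceId));
        REMOTE_CHANNELS.remove(taskInstanceId);

        // A later response for the same task instance no longer finds a channel to send on.
        System.out.println("later response channel: " + REMOTE_CHANNELS.get(taskInstanceId)); // null
    }
}
```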
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node is then created in the workflow to call the created file; running the workflow fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manager. 2. create a shell task in the workflow. 3. select the created shell file as the resource in the task configuration. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.common.utils; import com.fasterxml.jackson.databind.node.ObjectNode; import com.google.common.cache.CacheBuilder; import com.google.common.cache.CacheLoader; import com.google.common.cache.LoadingCache; import org.apache.commons.io.IOUtils; import org.apache.commons.lang.StringUtils;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node is then created in the workflow to call the created file; running the workflow fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manager. 2. create a shell task in the workflow. 3. select the created shell file as the resource in the task configuration. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.ResUploadType; import org.apache.dolphinscheduler.common.exception.BaseException; import org.apache.dolphinscheduler.common.storage.StorageOperate; import org.apache.dolphinscheduler.plugin.task.api.enums.ExecutionStatus; import org.apache.dolphinscheduler.spi.enums.ResourceType; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.*; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.client.cli.RMAdminCLI; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.*; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.security.PrivilegedExceptionAction; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.Stream; import static org.apache.dolphinscheduler.common.Constants.*; /** * hadoop utils * single instance */ public class HadoopUtils implements Closeable, StorageOperate {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node is then created in the workflow to call the created file; running the workflow fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manager. 2. create a shell task in the workflow. 3. select the created shell file as the resource in the task configuration. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
private static final Logger logger = LoggerFactory.getLogger(HadoopUtils.class); private String hdfsUser = PropertyUtils.getString(Constants.HDFS_ROOT_USER); public static final String RM_HA_IDS = PropertyUtils.getString(Constants.YARN_RESOURCEMANAGER_HA_RM_IDS); public static final String APP_ADDRESS = PropertyUtils.getString(Constants.YARN_APPLICATION_STATUS_ADDRESS); public static final String JOB_HISTORY_ADDRESS = PropertyUtils.getString(Constants.YARN_JOB_HISTORY_STATUS_ADDRESS); public static final int HADOOP_RESOURCE_MANAGER_HTTP_ADDRESS_PORT_VALUE = PropertyUtils.getInt(Constants.HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT, 8088); private static final String HADOOP_UTILS_KEY = "HADOOP_UTILS_KEY"; private static final LoadingCache<String, HadoopUtils> cache = CacheBuilder .newBuilder() .expireAfterWrite(PropertyUtils.getInt(Constants.KERBEROS_EXPIRE_TIME, 2), TimeUnit.HOURS) .build(new CacheLoader<String, HadoopUtils>() { @Override public HadoopUtils load(String key) throws Exception { return new HadoopUtils(); } }); private volatile boolean yarnEnabled = false; private Configuration configuration; private FileSystem fs; private HadoopUtils() { hdfsUser = PropertyUtils.getString(Constants.HDFS_ROOT_USER); init(); initHdfsPath(); } public static HadoopUtils getInstance() { return cache.getUnchecked(HADOOP_UTILS_KEY); }
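HadoopUtils in this chunk is a singleton served from a Guava LoadingCache, so the instance can be rebuilt once the entry expires (for example to refresh Kerberos state). A stripped-down sketch of that pattern, assuming Guava is on the classpath; the two-hour expiry mirrors the default in the chunk.

```java
import java.util.concurrent.TimeUnit;

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

public class ExpiringSingletonSketch {

    private static final String KEY = "SINGLETON_KEY";

    // One cached instance that Guava rebuilds via load() after the entry expires.
    private static final LoadingCache<String, ExpiringSingletonSketch> CACHE = CacheBuilder.newBuilder()
            .expireAfterWrite(2, TimeUnit.HOURS)
            .build(new CacheLoader<String, ExpiringSingletonSketch>() {
                @Override
                public ExpiringSingletonSketch load(String key) {
                    return new ExpiringSingletonSketch();
                }
            });

    public static ExpiringSingletonSketch getInstance() {
        return CACHE.getUnchecked(KEY);
    }

    public static void main(String[] args) {
        System.out.println(getInstance() == getInstance()); // true until the entry expires
    }
}
```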
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node is then created in the workflow to call the created file; running the workflow fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manager. 2. create a shell task in the workflow. 3. select the created shell file as the resource in the task configuration. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
/** * init dolphinscheduler root path in hdfs */ private void initHdfsPath() { Path path = new Path(RESOURCE_UPLOAD_PATH); try { if (!fs.exists(path)) { fs.mkdirs(path); } } catch (Exception e) { logger.error(e.getMessage(), e); } } /** * init hadoop configuration */ private void init() throws NullPointerException { try { configuration = new HdfsConfiguration(); if (CommonUtils.loadKerberosConf(configuration)) { hdfsUser = ""; } String defaultFS = configuration.get(Constants.FS_DEFAULT_FS); if (defaultFS.startsWith("file")) { String defaultFSProp = PropertyUtils.getString(Constants.FS_DEFAULT_FS); if (StringUtils.isNotBlank(defaultFSProp)) { Map<String, String> fsRelatedProps = PropertyUtils.getPrefixedProperties("fs."); configuration.set(Constants.FS_DEFAULT_FS, defaultFSProp);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node is then created in the workflow to call the created file; running the workflow fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manager. 2. create a shell task in the workflow. 3. select the created shell file as the resource in the task configuration. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
fsRelatedProps.forEach((key, value) -> configuration.set(key, value)); } else { logger.error("property:{} can not to be empty, please set!", Constants.FS_DEFAULT_FS); throw new NullPointerException( String.format("property: %s can not to be empty, please set!", Constants.FS_DEFAULT_FS) ); } } else { logger.info("get property:{} -> {}, from core-site.xml hdfs-site.xml ", Constants.FS_DEFAULT_FS, defaultFS); } if (StringUtils.isNotEmpty(hdfsUser)) { UserGroupInformation ugi = UserGroupInformation.createRemoteUser(hdfsUser); ugi.doAs((PrivilegedExceptionAction<Boolean>) () -> { fs = FileSystem.get(configuration); return true; }); } else { logger.warn("hdfs.root.user is not set value!"); fs = FileSystem.get(configuration); } } catch (Exception e) { logger.error(e.getMessage(), e); } } /** * @return Configuration */ public Configuration getConfiguration() { return configuration; }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node is then created in the workflow to call the created file; running the workflow fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manager. 2. create a shell task in the workflow. 3. select the created shell file as the resource in the task configuration. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
/** * @return DefaultFS */ public String getDefaultFS() { return getConfiguration().get(Constants.FS_DEFAULT_FS); } /** * get application url * if rmHaIds contains xx, it signs not use resourcemanager * otherwise: * if rmHaIds is empty, single resourcemanager enabled * if rmHaIds not empty: resourcemanager HA enabled * * @param applicationId application id * @return url of application */ public String getApplicationUrl(String applicationId) throws BaseException { yarnEnabled = true; String appUrl = StringUtils.isEmpty(RM_HA_IDS) ? APP_ADDRESS : getAppAddress(APP_ADDRESS, RM_HA_IDS); if (StringUtils.isBlank(appUrl)) { throw new BaseException("yarn application url generation failed"); } if (logger.isDebugEnabled()) { logger.debug("yarn application url:{}, applicationId:{}", appUrl, applicationId); } return String.format(appUrl, HADOOP_RESOURCE_MANAGER_HTTP_ADDRESS_PORT_VALUE, applicationId); } public String getJobHistoryUrl(String applicationId) { String jobId = applicationId.replace("application", "job");
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node is then created in the workflow to call the created file; running the workflow fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manager. 2. create a shell task in the workflow. 3. select the created shell file as the resource in the task configuration. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
return String.format(JOB_HISTORY_ADDRESS, jobId); } /** * cat file on hdfs * * @param hdfsFilePath hdfs file path * @return byte[] byte array * @throws IOException errors */ public byte[] catFile(String hdfsFilePath) throws IOException { if (StringUtils.isBlank(hdfsFilePath)) { logger.error("hdfs file path:{} is blank", hdfsFilePath); return new byte[0]; } try (FSDataInputStream fsDataInputStream = fs.open(new Path(hdfsFilePath))) { return IOUtils.toByteArray(fsDataInputStream); } } /** * cat file on hdfs * * @param hdfsFilePath hdfs file path * @param skipLineNums skip line numbers * @param limit read how many lines * @return content of file * @throws IOException errors */ public List<String> catFile(String hdfsFilePath, int skipLineNums, int limit) throws IOException { if (StringUtils.isBlank(hdfsFilePath)) { logger.error("hdfs file path:{} is blank", hdfsFilePath);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node is then created in the workflow to call the created file; running the workflow fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manager. 2. create a shell task in the workflow. 3. select the created shell file as the resource in the task configuration. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
return Collections.emptyList(); } try (FSDataInputStream in = fs.open(new Path(hdfsFilePath))) { BufferedReader br = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8)); Stream<String> stream = br.lines().skip(skipLineNums).limit(limit); return stream.collect(Collectors.toList()); } } @Override public List<String> vimFile(String bucketName, String hdfsFilePath, int skipLineNums, int limit) throws IOException { return catFile(hdfsFilePath, skipLineNums, limit); } @Override public void createTenantDirIfNotExists(String tenantCode) throws IOException { getInstance().mkdir(tenantCode, getHdfsResDir(tenantCode)); getInstance().mkdir(tenantCode, getHdfsUdfDir(tenantCode)); } @Override public String getResDir(String tenantCode) { return getHdfsResDir(tenantCode); } @Override public String getUdfDir(String tenantCode) { return getHdfsUdfDir(tenantCode); } /** * make the given file and all non-existent parents into * directories. Has the semantics of Unix 'mkdir -p'. * Existence of the directory hierarchy is not an error. *
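catFile(hdfsFilePath, skipLineNums, limit) in this chunk pages through a file with Stream.skip/limit. The same idea in a fully self-contained form, reading from an in-memory reader instead of an HDFS stream:

```java
import java.io.BufferedReader;
import java.io.StringReader;
import java.util.List;
import java.util.stream.Collectors;

public class SkipLimitReadSketch {

    public static void main(String[] args) {
        BufferedReader br = new BufferedReader(new StringReader("line1\nline2\nline3\nline4\nline5"));

        // Skip one line, then take at most two, as catFile does with skipLineNums and limit.
        List<String> lines = br.lines().skip(1).limit(2).collect(Collectors.toList());
        System.out.println(lines); // [line2, line3]
    }
}
```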
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node is then created in the workflow to call the created file; running the workflow fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manager. 2. create a shell task in the workflow. 3. select the created shell file as the resource in the task configuration. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
* @param hdfsPath path to create * @return mkdir result * @throws IOException errors */ @Override public boolean mkdir(String bucketName, String hdfsPath) throws IOException { return fs.mkdirs(new Path(hdfsPath)); } @Override public String getResourceFileName(String tenantCode, String fullName) { return getHdfsResourceFileName(tenantCode, fullName); } @Override public String getFileName(ResourceType resourceType, String tenantCode, String fileName) { return getHdfsFileName(resourceType, tenantCode, fileName); } @Override public void download(String bucketName, String srcHdfsFilePath, String dstFile, boolean deleteSource, boolean overwrite) throws IOException { copyHdfsToLocal(srcHdfsFilePath, dstFile, deleteSource, overwrite); } /** * copy files between FileSystems * * @param srcPath source hdfs path * @param dstPath destination hdfs path * @param deleteSource whether to delete the src * @param overwrite whether to overwrite an existing file * @return if success or not * @throws IOException errors */
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node is then created in the workflow to call the created file; running the workflow fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manager. 2. create a shell task in the workflow. 3. select the created shell file as the resource in the task configuration. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
@Override public boolean copy(String srcPath, String dstPath, boolean deleteSource, boolean overwrite) throws IOException { return FileUtil.copy(fs, new Path(srcPath), fs, new Path(dstPath), deleteSource, overwrite, fs.getConf()); } /** * the src file is on the local disk. Add it to FS at * the given dst name. * * @param srcFile local file * @param dstHdfsPath destination hdfs path * @param deleteSource whether to delete the src * @param overwrite whether to overwrite an existing file * @return if success or not * @throws IOException errors */ public boolean copyLocalToHdfs(String srcFile, String dstHdfsPath, boolean deleteSource, boolean overwrite) throws IOException { Path srcPath = new Path(srcFile); Path dstPath = new Path(dstHdfsPath); fs.copyFromLocalFile(deleteSource, overwrite, srcPath, dstPath); return true; } @Override public boolean upload(String buckName, String srcFile, String dstPath, boolean deleteSource, boolean overwrite) throws IOException { return copyLocalToHdfs(srcFile, dstPath, deleteSource, overwrite); } /* * copy hdfs file to local * * @param srcHdfsFilePath source hdfs file path * @param dstFile destination file
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node is then created in the workflow to call the created file; running the workflow fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manager. 2. create a shell task in the workflow. 3. select the created shell file as the resource in the task configuration. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
* @param deleteSource delete source * @param overwrite overwrite * @return result of copy hdfs file to local * @throws IOException errors */ public boolean copyHdfsToLocal(String srcHdfsFilePath, String dstFile, boolean deleteSource, boolean overwrite) throws IOException { Path srcPath = new Path(srcHdfsFilePath); File dstPath = new File(dstFile); if (dstPath.exists()) { if (dstPath.isFile()) { if (overwrite) { Files.delete(dstPath.toPath()); } } else { logger.error("destination file must be a file"); } } if (!dstPath.getParentFile().exists() && !dstPath.getParentFile().mkdirs()) { return false; } return FileUtil.copy(fs, srcPath, dstPath, deleteSource, fs.getConf()); } /** * delete a file * * @param hdfsFilePath the path to delete. * @param recursive if path is a directory and set to * true, the directory is deleted else throws an exception. In * case of a file the recursive can be set to either true or false. * @return true if delete is successful else false.
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node is then created in the workflow to call the created file; running the workflow fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manager. 2. create a shell task in the workflow. 3. select the created shell file as the resource in the task configuration. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
* @throws IOException errors */ @Override public boolean delete(String tenantCode, String hdfsFilePath, boolean recursive) throws IOException { return fs.delete(new Path(hdfsFilePath), recursive); } /** * check if exists * * @param hdfsFilePath source file path * @return result of exists or not * @throws IOException errors */ @Override public boolean exists(String tenantCode, String hdfsFilePath) throws IOException { return fs.exists(new Path(hdfsFilePath)); } /** * Gets a list of files in the directory * * @param filePath file path * @return {@link FileStatus} file status * @throws IOException errors */ public FileStatus[] listFileStatus(String filePath) throws IOException { try { return fs.listStatus(new Path(filePath)); } catch (IOException e) { logger.error("Get file list exception", e); throw new IOException("Get file list exception", e);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node is then created in the workflow to call the created file; running the workflow fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manager. 2. create a shell task in the workflow. 3. select the created shell file as the resource in the task configuration. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
} } /** * Renames Path src to Path dst. Can take place on local fs * or remote DFS. * * @param src path to be renamed * @param dst new path after rename * @return true if rename is successful * @throws IOException on failure */ public boolean rename(String src, String dst) throws IOException { return fs.rename(new Path(src), new Path(dst)); } /** * hadoop resourcemanager enabled or not * * @return result */ public boolean isYarnEnabled() { return yarnEnabled; } /** * get the state of an application * * @param applicationId application id * @return the return may be null or there may be other parse exceptions */ public ExecutionStatus getApplicationStatus(String applicationId) throws BaseException { if (StringUtils.isEmpty(applicationId)) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node is then created in the workflow to call the created file; running the workflow fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manager. 2. create a shell task in the workflow. 3. select the created shell file as the resource in the task configuration. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
return null; } String result; String applicationUrl = getApplicationUrl(applicationId); if (logger.isDebugEnabled()) { logger.debug("generate yarn application url, applicationUrl={}", applicationUrl); } String responseContent = Boolean.TRUE.equals(PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false)) ? KerberosHttpClient.get(applicationUrl) : HttpUtils.get(applicationUrl); if (responseContent != null) { ObjectNode jsonObject = JSONUtils.parseObject(responseContent); if (!jsonObject.has("app")) { return ExecutionStatus.FAILURE; } result = jsonObject.path("app").path("finalStatus").asText(); } else { String jobHistoryUrl = getJobHistoryUrl(applicationId); if (logger.isDebugEnabled()) { logger.debug("generate yarn job history application url, jobHistoryUrl={}", jobHistoryUrl); } responseContent = Boolean.TRUE.equals(PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false)) ? KerberosHttpClient.get(jobHistoryUrl) : HttpUtils.get(jobHistoryUrl); if (null != responseContent) { ObjectNode jsonObject = JSONUtils.parseObject(responseContent); if (!jsonObject.has("job")) { return ExecutionStatus.FAILURE; } result = jsonObject.path("job").path("state").asText(); } else { return ExecutionStatus.FAILURE; }
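getApplicationStatus above parses the ResourceManager REST response and reads app.finalStatus. A self-contained sketch of just that parsing step with Jackson (assumed to be on the classpath); the JSON literal is trimmed to the single field the chunk actually reads.

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class YarnStatusParseSketch {

    public static void main(String[] args) throws Exception {
        // Assumed shape of the ResourceManager response, reduced to the field the chunk reads.
        String responseContent = "{\"app\":{\"finalStatus\":\"SUCCEEDED\"}}";

        JsonNode root = new ObjectMapper().readTree(responseContent);
        if (!root.has("app")) {
            System.out.println("no app node -> treat as FAILURE");
            return;
        }

        String finalStatus = root.path("app").path("finalStatus").asText();
        System.out.println("finalStatus = " + finalStatus); // SUCCEEDED
    }
}
```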
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node is then created in the workflow to call the created file; running the workflow fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manager. 2. create a shell task in the workflow. 3. select the created shell file as the resource in the task configuration. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
} return getExecutionStatus(result); } private ExecutionStatus getExecutionStatus(String result) { switch (result) { case Constants.ACCEPTED: return ExecutionStatus.SUBMITTED_SUCCESS; case Constants.SUCCEEDED: case Constants.ENDED: return ExecutionStatus.SUCCESS; case Constants.NEW: case Constants.NEW_SAVING: case Constants.SUBMITTED: case Constants.FAILED: return ExecutionStatus.FAILURE; case Constants.KILLED: return ExecutionStatus.KILL; case Constants.RUNNING: default: return ExecutionStatus.RUNNING_EXECUTION; } } /** * get data hdfs path * * @return data hdfs path */ public static String getHdfsDataBasePath() { if (FOLDER_SEPARATOR.equals(RESOURCE_UPLOAD_PATH)) { return "";
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node is then created in the workflow to call the created file; running the workflow fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manager. 2. create a shell task in the workflow. 3. select the created shell file as the resource in the task configuration. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
} else { return RESOURCE_UPLOAD_PATH; } } /** * hdfs resource dir * * @param tenantCode tenant code * @param resourceType resource type * @return hdfs resource dir */ public static String getHdfsDir(ResourceType resourceType, String tenantCode) { String hdfsDir = ""; if (resourceType.equals(ResourceType.FILE)) { hdfsDir = getHdfsResDir(tenantCode); } else if (resourceType.equals(ResourceType.UDF)) { hdfsDir = getHdfsUdfDir(tenantCode); } return hdfsDir; } @Override public String getDir(ResourceType resourceType, String tenantCode) { return getHdfsDir(resourceType, tenantCode); } /** * hdfs resource dir * * @param tenantCode tenant code * @return hdfs resource dir */
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node is then created in the workflow to call the created file; running the workflow fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manager. 2. create a shell task in the workflow. 3. select the created shell file as the resource in the task configuration. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
public static String getHdfsResDir(String tenantCode) { return String.format("%s/" + RESOURCE_TYPE_FILE, getHdfsTenantDir(tenantCode)); } /** * hdfs udf dir * * @param tenantCode tenant code * @return get udf dir on hdfs */ public static String getHdfsUdfDir(String tenantCode) { return String.format("%s/" + RESOURCE_TYPE_UDF, getHdfsTenantDir(tenantCode)); } /** * get hdfs file name * * @param resourceType resource type * @param tenantCode tenant code * @param fileName file name * @return hdfs file name */ public static String getHdfsFileName(ResourceType resourceType, String tenantCode, String fileName) { if (fileName.startsWith(FOLDER_SEPARATOR)) { fileName = fileName.replaceFirst(FOLDER_SEPARATOR, ""); } return String.format(FORMAT_S_S, getHdfsDir(resourceType, tenantCode), fileName); } /** * get absolute path and name for resource file on hdfs * * @param tenantCode tenant code
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
* @param fileName file name * @return get absolute path and name for file on hdfs */ public static String getHdfsResourceFileName(String tenantCode, String fileName) { if (fileName.startsWith(FOLDER_SEPARATOR)) { fileName = fileName.replaceFirst(FOLDER_SEPARATOR, ""); } return String.format(FORMAT_S_S, getHdfsResDir(tenantCode), fileName); } /** * get absolute path and name for udf file on hdfs * * @param tenantCode tenant code * @param fileName file name * @return get absolute path and name for udf file on hdfs */ public static String getHdfsUdfFileName(String tenantCode, String fileName) { if (fileName.startsWith(FOLDER_SEPARATOR)) { fileName = fileName.replaceFirst(FOLDER_SEPARATOR, ""); } return String.format(FORMAT_S_S, getHdfsUdfDir(tenantCode), fileName); } /** * @param tenantCode tenant code * @return file directory of tenants on hdfs */ public static String getHdfsTenantDir(String tenantCode) { return String.format(FORMAT_S_S, getHdfsDataBasePath(), tenantCode); } /**
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
* getAppAddress * * @param appAddress app address * @param rmHa resource manager ha * @return app address */ public static String getAppAddress(String appAddress, String rmHa) { String activeRM = YarnHAAdminUtils.getActiveRMName(rmHa); if (StringUtils.isEmpty(activeRM)) { return null; } String[] split1 = appAddress.split(Constants.DOUBLE_SLASH); if (split1.length != 2) { return null; } String start = split1[0] + Constants.DOUBLE_SLASH; String[] split2 = split1[1].split(Constants.COLON); if (split2.length != 2) { return null; } String end = Constants.COLON + split2[1]; return start + activeRM + end; } @Override public void close() throws IOException { if (fs != null) { try { fs.close(); } catch (IOException e) {
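getAppAddress(...) above only rewrites the host part of a YARN application URL so that it points at the active ResourceManager. A small sketch of that string handling, with the active RM id hard-coded as an assumption (the real method discovers it by polling each RM's /ws/v1/cluster/info endpoint):

```java
// Sketch of the string handling in getAppAddress(...) above. The active
// resource-manager id is hard-coded here; the real method looks it up at runtime.
public class AppAddressSketch {
    public static void main(String[] args) {
        String appAddress = "http://ds1:8088/ws/v1/cluster/apps/%s";
        String activeRM = "192.168.1.12";  // assumed result of getActiveRMName(...)

        String[] split1 = appAddress.split("//");   // ["http:", "ds1:8088/ws/..."]
        String start = split1[0] + "//";
        String[] split2 = split1[1].split(":");     // ["ds1", "8088/ws/..."]
        String end = ":" + split2[1];
        System.out.println(start + activeRM + end);
        // -> http://192.168.1.12:8088/ws/v1/cluster/apps/%s
    }
}
```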
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
logger.error("Close HadoopUtils instance failed", e); throw new IOException("Close HadoopUtils instance failed", e); } } } /** * yarn ha admin utils */ private static final class YarnHAAdminUtils extends RMAdminCLI { /** * get active resourcemanager */ public static String getActiveRMName(String rmIds) { String[] rmIdArr = rmIds.split(Constants.COMMA); String yarnUrl = "http:%s:" + HADOOP_RESOURCE_MANAGER_HTTP_ADDRESS_PORT_VALUE + "/ws/v1/cluster/info"; try { /** * send http get request to rm */ for (String rmId : rmIdArr) { String state = getRMState(String.format(yarnUrl, rmId)); if (Constants.HADOOP_RM_STATE_ACTIVE.equals(state)) { return rmId; } } } catch (Exception e) { logger.error("yarn ha application url generation failed, message:{}", e.getMessage()); } return null; }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
/** * get ResourceManager state */ public static String getRMState(String url) { String retStr = Boolean.TRUE.equals(PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false)) ? KerberosHttpClient.get(url) : HttpUtils.get(url); if (StringUtils.isEmpty(retStr)) { return null; } ObjectNode jsonObject = JSONUtils.parseObject(retStr); if (!jsonObject.has("clusterInfo")) { return null; } return jsonObject.get("clusterInfo").path("haState").asText(); } } @Override public void deleteTenant(String tenantCode) throws Exception { String tenantPath = getHdfsDataBasePath() + FOLDER_SEPARATOR + tenantCode; if (exists(tenantCode, tenantPath)) { delete(tenantCode, tenantPath, true); } } @Override public ResUploadType returnStorageType() { return ResUploadType.HDFS; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/S3Utils.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/S3Utils.java
package org.apache.dolphinscheduler.common.utils; import com.amazonaws.AmazonServiceException; import com.amazonaws.auth.AWSStaticCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.client.builder.AwsClientBuilder; import com.amazonaws.regions.Regions; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.amazonaws.services.s3.model.*; import com.amazonaws.services.s3.transfer.MultipleFileDownload; import com.amazonaws.services.s3.transfer.TransferManager; import com.amazonaws.services.s3.transfer.TransferManagerBuilder; import org.apache.commons.lang.StringUtils; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.ResUploadType; import org.apache.dolphinscheduler.common.storage.StorageOperate; import org.apache.dolphinscheduler.spi.enums.ResourceType; import org.jets3t.service.ServiceException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.*; import java.util.Collections; import java.util.List; import java.util.stream.Collectors; import java.util.stream.Stream; import static org.apache.dolphinscheduler.common.Constants.*; public class S3Utils implements Closeable, StorageOperate { private static final Logger logger = LoggerFactory.getLogger(S3Utils.class); public static final String ACCESS_KEY_ID = PropertyUtils.getString(Constants.AWS_ACCESS_KEY_ID); public static final String SECRET_KEY_ID = PropertyUtils.getString(Constants.AWS_SECRET_ACCESS_KEY);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/S3Utils.java
public static final String REGION = PropertyUtils.getString(Constants.AWS_REGION); private AmazonS3 s3Client = null; private S3Utils() { if (PropertyUtils.getString(RESOURCE_STORAGE_TYPE).equals(STORAGE_S3)) { if (!StringUtils.isEmpty(PropertyUtils.getString(AWS_END_POINT))) { s3Client = AmazonS3ClientBuilder .standard() .withPathStyleAccessEnabled(true) .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(PropertyUtils.getString(AWS_END_POINT), Regions.fromName(REGION).getName())) .withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(ACCESS_KEY_ID, SECRET_KEY_ID))) .build(); } else { s3Client = AmazonS3ClientBuilder .standard() .withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(ACCESS_KEY_ID, SECRET_KEY_ID))) .withRegion(Regions.fromName(REGION)) .build(); } checkBucketNameIfNotPresent(BUCKET_NAME); } } /** * S3Utils single */ private enum S3Singleton { INSTANCE; private final S3Utils instance; S3Singleton() { instance = new S3Utils(); }
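The constructor above has two branches: a custom endpoint (for S3-compatible stores such as MinIO) and a plain AWS region. A minimal standalone sketch of the custom-endpoint branch; endpoint, region and credentials are placeholders, not project defaults:

```java
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

// Minimal sketch of the custom-endpoint branch above.
// The endpoint, region and credentials below are placeholders.
public class S3ClientSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.standard()
                .withPathStyleAccessEnabled(true)  // path-style access for S3-compatible stores
                .withEndpointConfiguration(
                        new AwsClientBuilder.EndpointConfiguration("http://localhost:9000", "us-east-1"))
                .withCredentials(new AWSStaticCredentialsProvider(
                        new BasicAWSCredentials("minioadmin", "minioadmin")))
                .build();
        System.out.println(s3.doesBucketExistV2("dolphinscheduler-test"));
    }
}
```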
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/S3Utils.java
private S3Utils getInstance() { return instance; } } public static S3Utils getInstance() { return S3Singleton.INSTANCE.getInstance(); } @Override public void close() throws IOException { s3Client.shutdown(); } @Override public void createTenantDirIfNotExists(String tenantCode) throws ServiceException { createFolder(tenantCode+ FOLDER_SEPARATOR +RESOURCE_TYPE_UDF); createFolder(tenantCode+ FOLDER_SEPARATOR +RESOURCE_TYPE_FILE); } @Override public String getResDir(String tenantCode) { return tenantCode+ FOLDER_SEPARATOR +RESOURCE_TYPE_FILE+FOLDER_SEPARATOR; } @Override public String getUdfDir(String tenantCode) { return tenantCode+ FOLDER_SEPARATOR +RESOURCE_TYPE_UDF+FOLDER_SEPARATOR; } @Override public boolean mkdir(String tenantCode, String path) throws IOException { createFolder(path); return true; } @Override
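A short usage note for the directory helpers above: unlike the HDFS variant, the S3 keys are relative (no leading slash). The sketch below assumes the conventional values `resources` and `udfs` for the resource-type constants:

```java
// Usage sketch of getResDir(...) / getUdfDir(...) above; the sub-directory
// names "resources" and "udfs" are assumed values of the type constants.
public class S3DirSketch {
    public static void main(String[] args) {
        String tenantCode = "default";
        String resDir = tenantCode + "/resources/";   // getResDir(tenantCode)
        String udfDir = tenantCode + "/udfs/";        // getUdfDir(tenantCode)
        System.out.println(resDir);                   // default/resources/
        System.out.println(udfDir);                   // default/udfs/
    }
}
```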
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/S3Utils.java
public String getResourceFileName(String tenantCode, String fileName) { if (fileName.startsWith(FOLDER_SEPARATOR)) { fileName = fileName.replaceFirst(FOLDER_SEPARATOR, ""); } return String.format(FORMAT_S_S, tenantCode+FOLDER_SEPARATOR+RESOURCE_TYPE_FILE, fileName); } @Override public String getFileName(ResourceType resourceType, String tenantCode, String fileName) { if (fileName.startsWith(FOLDER_SEPARATOR)) { fileName = fileName.replaceFirst(FOLDER_SEPARATOR, ""); } return getDir(resourceType, tenantCode)+fileName; } @Override public void download(String tenantCode, String srcFilePath, String dstFile, boolean deleteSource, boolean overwrite) throws IOException { S3Object o = s3Client.getObject(BUCKET_NAME, srcFilePath); try (S3ObjectInputStream s3is = o.getObjectContent(); FileOutputStream fos = new FileOutputStream(new File(dstFile))) { byte[] readBuf = new byte[1024]; int readLen = 0; while ((readLen = s3is.read(readBuf)) > 0) { fos.write(readBuf, 0, readLen); } } catch (AmazonServiceException e) { logger.error("the resource can`t be downloaded,the bucket is {},and the src is {}", tenantCode, srcFilePath); throw new IOException(e.getMessage()); } catch (FileNotFoundException e) { logger.error("the file isn`t exists"); throw new IOException("the file isn`t exists"); }
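A hypothetical caller of the `download(...)` method above, fetching one resource object into a local file on the worker; the tenant code, object key and destination path are placeholders:

```java
import org.apache.dolphinscheduler.common.storage.StorageOperate;
import org.apache.dolphinscheduler.common.utils.S3Utils;

// Hypothetical usage of download(...) as declared above. The tenant code,
// object key and local path are example values only.
public class ResourceDownloadUsageSketch {
    public static void main(String[] args) throws Exception {
        StorageOperate storage = S3Utils.getInstance();
        String tenantCode = "default";
        String key = "default/resources/demo.sh";   // object key in the bucket
        String localFile = "/tmp/exec/demo.sh";     // destination on the worker
        storage.download(tenantCode, key, localFile, false, true);
    }
}
```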
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/S3Utils.java
} @Override public boolean exists(String tenantCode, String fileName) throws IOException { return s3Client.doesObjectExist(BUCKET_NAME, fileName); } @Override public boolean delete(String tenantCode, String filePath, boolean recursive) throws IOException { try { s3Client.deleteObject(BUCKET_NAME, filePath); return true; } catch (AmazonServiceException e) { logger.error("delete the object error,the resource path is {}", filePath); return false; } } @Override public boolean copy(String srcPath, String dstPath, boolean deleteSource, boolean overwrite) throws IOException { s3Client.copyObject(BUCKET_NAME, srcPath, BUCKET_NAME, dstPath); s3Client.deleteObject(BUCKET_NAME, srcPath); return true; } @Override public String getDir(ResourceType resourceType, String tenantCode) { switch (resourceType) { case UDF: return getUdfDir(tenantCode); case FILE: return getResDir(tenantCode); default: return tenantCode+ FOLDER_SEPARATOR ;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/S3Utils.java
} } @Override public boolean upload(String tenantCode, String srcFile, String dstPath, boolean deleteSource, boolean overwrite) throws IOException { try { s3Client.putObject(BUCKET_NAME, dstPath, new File(srcFile)); return true; } catch (AmazonServiceException e) { logger.error("upload failed,the bucketName is {},the dstPath is {}", BUCKET_NAME, tenantCode+ FOLDER_SEPARATOR +dstPath); return false; } } @Override public List<String> vimFile(String tenantCode,String filePath, int skipLineNums, int limit) throws IOException { if (StringUtils.isBlank(filePath)) { logger.error("file path:{} is blank", filePath); return Collections.emptyList(); } S3Object s3Object=s3Client.getObject(BUCKET_NAME,filePath); try(BufferedReader bufferedReader=new BufferedReader(new InputStreamReader(s3Object.getObjectContent()))){ Stream<String> stream = bufferedReader.lines().skip(skipLineNums).limit(limit); return stream.collect(Collectors.toList()); } } private void createFolder( String folderName) { if (!s3Client.doesObjectExist(BUCKET_NAME, folderName + FOLDER_SEPARATOR)) { ObjectMetadata metadata = new ObjectMetadata(); metadata.setContentLength(0); InputStream emptyContent = new ByteArrayInputStream(new byte[0]);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/S3Utils.java
PutObjectRequest putObjectRequest = new PutObjectRequest(BUCKET_NAME, folderName + FOLDER_SEPARATOR, emptyContent, metadata); s3Client.putObject(putObjectRequest); } } @Override public void deleteTenant(String tenantCode) throws Exception { deleteTenantCode(tenantCode); } private void deleteTenantCode(String tenantCode) { deleteDirectory(getResDir(tenantCode)); deleteDirectory(getUdfDir(tenantCode)); } /** * xxx untest * upload local directory to S3 * @param tenantCode * @param keyPrefix the name of directory * @param strPath */ private void uploadDirectory(String tenantCode, String keyPrefix, String strPath) { s3Client.putObject(BUCKET_NAME, tenantCode+ FOLDER_SEPARATOR +keyPrefix, new File(strPath)); } /** * xxx untest * download S3 Directory to local * @param tenantCode * @param keyPrefix the name of directory * @param srcPath */ private void downloadDirectory(String tenantCode, String keyPrefix, String srcPath){
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/S3Utils.java
TransferManager tm= TransferManagerBuilder.standard().withS3Client(s3Client).build(); try{ MultipleFileDownload download = tm.downloadDirectory(BUCKET_NAME, tenantCode + FOLDER_SEPARATOR + keyPrefix, new File(srcPath)); download.waitForCompletion(); } catch (AmazonS3Exception | InterruptedException e) { logger.error("download the directory failed with the bucketName is {} and the keyPrefix is {}", BUCKET_NAME, tenantCode + FOLDER_SEPARATOR + keyPrefix); Thread.currentThread().interrupt(); } finally { tm.shutdownNow(); } } public void checkBucketNameIfNotPresent(String bucketName) { if (!s3Client.doesBucketExistV2(bucketName)) { logger.info("the current regionName is {}", s3Client.getRegionName()); s3Client.createBucket(bucketName); } } /* only delete the object of directory ,it`s better to delete the files in it -r */ private void deleteDirectory(String directoryName) { if (s3Client.doesObjectExist(BUCKET_NAME, directoryName)) { s3Client.deleteObject(BUCKET_NAME, directoryName); } } @Override public ResUploadType returnStorageType() { return ResUploadType.S3; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.worker.processor; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.utils.CommonUtils;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java
import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.FileUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.NetUtils; import org.apache.dolphinscheduler.common.utils.OSUtils; import org.apache.dolphinscheduler.plugin.task.api.TaskExecutionContext; import org.apache.dolphinscheduler.plugin.task.api.TaskExecutionContextCacheManager; import org.apache.dolphinscheduler.plugin.task.api.enums.ExecutionStatus; import org.apache.dolphinscheduler.remote.command.Command; import org.apache.dolphinscheduler.remote.command.CommandType; import org.apache.dolphinscheduler.remote.command.TaskExecuteRequestCommand; import org.apache.dolphinscheduler.remote.processor.NettyRemoteChannel; import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor; import org.apache.dolphinscheduler.server.utils.LogUtils; import org.apache.dolphinscheduler.server.worker.config.WorkerConfig; import org.apache.dolphinscheduler.server.worker.runner.TaskExecuteThread; import org.apache.dolphinscheduler.server.worker.runner.WorkerManagerThread; import org.apache.dolphinscheduler.service.alert.AlertClientService; import org.apache.dolphinscheduler.service.task.TaskPluginManager; import java.util.Date; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; import com.google.common.base.Preconditions; import io.netty.channel.Channel; /** * worker request processor */ @Component
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java
public class TaskExecuteProcessor implements NettyRequestProcessor { private static final Logger logger = LoggerFactory.getLogger(TaskExecuteProcessor.class); /** * worker config */ @Autowired private WorkerConfig workerConfig; /** * task callback service */ @Autowired private TaskCallbackService taskCallbackService; /** * alert client service */ @Autowired private AlertClientService alertClientService; @Autowired private TaskPluginManager taskPluginManager;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java
/** * task execute manager */ @Autowired private WorkerManagerThread workerManager; @Override public void process(Channel channel, Command command) { Preconditions.checkArgument(CommandType.TASK_EXECUTE_REQUEST == command.getType(), String.format("invalid command type : %s", command.getType())); TaskExecuteRequestCommand taskRequestCommand = JSONUtils.parseObject( command.getBody(), TaskExecuteRequestCommand.class); if (taskRequestCommand == null) { logger.error("task execute request command is null"); return; } logger.info("task execute request command : {}", taskRequestCommand); String contextJson = taskRequestCommand.getTaskExecutionContext(); TaskExecutionContext taskExecutionContext = JSONUtils.parseObject(contextJson, TaskExecutionContext.class); if (taskExecutionContext == null) { logger.error("task execution context is null"); return; } TaskExecutionContextCacheManager.cacheTaskExecutionContext(taskExecutionContext); taskExecutionContext.setHost(NetUtils.getAddr(workerConfig.getListenPort())); taskExecutionContext.setLogPath(LogUtils.getTaskLogPath(taskExecutionContext)); if (Constants.DRY_RUN_FLAG_NO == taskExecutionContext.getDryRun()) { if (CommonUtils.isSudoEnable() && workerConfig.isTenantAutoCreate()) { OSUtils.createUserIfAbsent(taskExecutionContext.getTenantCode());
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java
} if (!OSUtils.getUserList().contains(taskExecutionContext.getTenantCode())) { logger.error("tenantCode: {} does not exist, taskInstanceId: {}", taskExecutionContext.getTenantCode(), taskExecutionContext.getTaskInstanceId()); TaskExecutionContextCacheManager.removeByTaskInstanceId(taskExecutionContext.getTaskInstanceId()); taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.FAILURE); taskExecutionContext.setEndTime(new Date()); taskCallbackService.sendTaskExecuteResponseCommand(taskExecutionContext); return; } String execLocalPath = getExecLocalPath(taskExecutionContext); logger.info("task instance local execute path : {}", execLocalPath); taskExecutionContext.setExecutePath(execLocalPath); try { FileUtils.createWorkDirIfAbsent(execLocalPath); } catch (Throwable ex) { logger.error("create execLocalPath fail, path: {}, taskInstanceId: {}", execLocalPath, taskExecutionContext.getTaskInstanceId()); logger.error("create executeLocalPath fail", ex); TaskExecutionContextCacheManager.removeByTaskInstanceId(taskExecutionContext.getTaskInstanceId()); taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.FAILURE); taskCallbackService.sendTaskExecuteResponseCommand(taskExecutionContext); return; } } taskCallbackService.addRemoteChannel(taskExecutionContext.getTaskInstanceId(), new NettyRemoteChannel(channel, command.getOpaque())); long remainTime = DateUtils.getRemainTime(taskExecutionContext.getFirstSubmitTime(), taskExecutionContext.getDelayTime() * 60L);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/processor/TaskExecuteProcessor.java
if (remainTime > 0) { logger.info("delay the execution of task instance {}, delay time: {} s", taskExecutionContext.getTaskInstanceId(), remainTime); taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.DELAY_EXECUTION); taskExecutionContext.setStartTime(null); taskCallbackService.sendTaskExecuteDelayCommand(taskExecutionContext); } boolean offer = workerManager.offer(new TaskExecuteThread(taskExecutionContext, taskCallbackService, alertClientService, taskPluginManager)); if (!offer) { logger.error("submit task to manager error, queue is full, queue size is {}, taskInstanceId: {}", workerManager.getDelayQueueSize(), taskExecutionContext.getTaskInstanceId()); taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.FAILURE); taskCallbackService.sendTaskExecuteResponseCommand(taskExecutionContext); } } /** * get execute local path * * @param taskExecutionContext taskExecutionContext * @return execute local path */ private String getExecLocalPath(TaskExecutionContext taskExecutionContext) { return FileUtils.getProcessExecDir(taskExecutionContext.getProjectCode(), taskExecutionContext.getProcessDefineCode(), taskExecutionContext.getProcessDefineVersion(), taskExecutionContext.getProcessInstanceId(), taskExecutionContext.getTaskInstanceId()); } }
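The delay branch above postpones execution when the remaining delay is positive. A sketch of the arithmetic, assuming `DateUtils.getRemainTime(...)` returns the configured delay in seconds minus the time elapsed since first submission (that semantic is an assumption, not shown in this chunk):

```java
import java.util.Date;

// Sketch of the delay check above; the getRemainTime semantics are assumed.
public class DelaySketch {
    static long remainTime(Date firstSubmitTime, long delaySeconds) {
        long elapsed = (System.currentTimeMillis() - firstSubmitTime.getTime()) / 1000;
        return delaySeconds - elapsed;
    }

    public static void main(String[] args) {
        Date firstSubmit = new Date(System.currentTimeMillis() - 30_000); // submitted 30s ago
        long delayMinutes = 2;
        long remain = remainTime(firstSubmit, delayMinutes * 60L);
        System.out.println(remain > 0
                ? "delay execution for another " + remain + " s"
                : "run immediately");
    }
}
```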
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskExecuteThread.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.worker.runner; import static org.apache.dolphinscheduler.common.Constants.SINGLE_SLASH; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.WarningType; import org.apache.dolphinscheduler.common.storage.StorageOperate; import org.apache.dolphinscheduler.common.utils.CommonUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.LoggerUtils; import org.apache.dolphinscheduler.plugin.task.api.AbstractTask;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskExecuteThread.java
import org.apache.dolphinscheduler.plugin.task.api.TaskChannel; import org.apache.dolphinscheduler.plugin.task.api.TaskExecutionContext; import org.apache.dolphinscheduler.plugin.task.api.TaskExecutionContextCacheManager; import org.apache.dolphinscheduler.plugin.task.api.enums.ExecutionStatus; import org.apache.dolphinscheduler.plugin.task.api.model.Property; import org.apache.dolphinscheduler.plugin.task.api.model.TaskAlertInfo; import org.apache.dolphinscheduler.server.utils.ProcessUtils; import org.apache.dolphinscheduler.server.worker.processor.TaskCallbackService; import org.apache.dolphinscheduler.service.alert.AlertClientService; import org.apache.dolphinscheduler.service.exceptions.ServiceException; import org.apache.dolphinscheduler.service.task.TaskPluginManager; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang.StringUtils; import java.io.File; import java.io.IOException; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.Delayed; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * task scheduler thread */ public class TaskExecuteThread implements Runnable, Delayed { /**
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskExecuteThread.java
* logger */ private final Logger logger = LoggerFactory.getLogger(TaskExecuteThread.class); /** * task instance */ private TaskExecutionContext taskExecutionContext; public StorageOperate getStorageOperate() { return storageOperate; } public void setStorageOperate(StorageOperate storageOperate) { this.storageOperate = storageOperate; } private StorageOperate storageOperate; /** * abstract task */ private AbstractTask task; /** * task callback service */ private TaskCallbackService taskCallbackService; /** * alert client server */ private AlertClientService alertClientService; private TaskPluginManager taskPluginManager; /** * constructor *
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskExecuteThread.java
* @param taskExecutionContext taskExecutionContext * @param taskCallbackService taskCallbackService */ public TaskExecuteThread(TaskExecutionContext taskExecutionContext, TaskCallbackService taskCallbackService, AlertClientService alertClientService) { this.taskExecutionContext = taskExecutionContext; this.taskCallbackService = taskCallbackService; this.alertClientService = alertClientService; } public TaskExecuteThread(TaskExecutionContext taskExecutionContext, TaskCallbackService taskCallbackService, AlertClientService alertClientService, TaskPluginManager taskPluginManager) { this.taskExecutionContext = taskExecutionContext; this.taskCallbackService = taskCallbackService; this.alertClientService = alertClientService; this.taskPluginManager = taskPluginManager; } @Override public void run() { if (Constants.DRY_RUN_FLAG_YES == taskExecutionContext.getDryRun()) { taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.SUCCESS); taskExecutionContext.setStartTime(new Date()); taskExecutionContext.setEndTime(new Date()); TaskExecutionContextCacheManager.removeByTaskInstanceId(taskExecutionContext.getTaskInstanceId()); taskCallbackService.sendTaskExecuteResponseCommand(taskExecutionContext); return; } try {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. A shell node that calls the created file is then added to the workflow, and running it fails. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. Create a shell file in the File Manager. 2. Create a shell task in the workflow. 3. In the task configuration, select the created shell file as the resource. ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskExecuteThread.java
        logger.info("script path : {}", taskExecutionContext.getExecutePath());
        if (taskExecutionContext.getStartTime() == null) {
            taskExecutionContext.setStartTime(new Date());
        }
        logger.info("the task begins to execute. task instance id: {}", taskExecutionContext.getTaskInstanceId());
        taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.RUNNING_EXECUTION);
        taskCallbackService.sendTaskExecuteRunningCommand(taskExecutionContext);
        downloadResource(taskExecutionContext.getExecutePath(), taskExecutionContext.getResources(), logger);
        taskExecutionContext.setEnvFile(CommonUtils.getSystemEnvPath());
        taskExecutionContext.setDefinedParams(getGlobalParamsMap());
        taskExecutionContext.setTaskAppId(String.format("%s_%s",
                taskExecutionContext.getProcessInstanceId(),
                taskExecutionContext.getTaskInstanceId()));
        preBuildBusinessParams();
        TaskChannel taskChannel = taskPluginManager.getTaskChannelMap().get(taskExecutionContext.getTaskType());
        if (null == taskChannel) {
            throw new ServiceException(String.format("%s Task Plugin Not Found,Please Check Config File.", taskExecutionContext.getTaskType()));
        }
        String taskLogName = LoggerUtils.buildTaskId(taskExecutionContext.getFirstSubmitTime(),
                taskExecutionContext.getProcessDefineCode(),
                taskExecutionContext.getProcessDefineVersion(),
                taskExecutionContext.getProcessInstanceId(),
                taskExecutionContext.getTaskInstanceId());
        taskExecutionContext.setTaskLogName(taskLogName);
        Thread.currentThread().setName(taskLogName);
        task = taskChannel.createTask(taskExecutionContext);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. The shell node is then created in the workflow to recall the created file and run it to failure. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manage. 2. create a shell task in the workflow. 3. the resource in the task configuration selects the created shell file. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskExecuteThread.java
        this.task.init();
        this.task.getParameters().setVarPool(taskExecutionContext.getVarPool());
        this.task.handle();
        if (this.task.getNeedAlert()) {
            sendAlert(this.task.getTaskAlertInfo(), this.task.getExitStatus().getCode());
        }
        taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.of(this.task.getExitStatus().getCode()));
        taskExecutionContext.setEndTime(DateUtils.getCurrentDate());
        taskExecutionContext.setProcessId(this.task.getProcessId());
        taskExecutionContext.setAppIds(this.task.getAppIds());
        taskExecutionContext.setVarPool(JSONUtils.toJsonString(this.task.getParameters().getVarPool()));
        logger.info("task instance id : {},task final status : {}", taskExecutionContext.getTaskInstanceId(), this.task.getExitStatus());
    } catch (Throwable e) {
        logger.error("task scheduler failure", e);
        kill();
        taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.FAILURE);
        taskExecutionContext.setEndTime(DateUtils.getCurrentDate());
        taskExecutionContext.setProcessId(this.task.getProcessId());
        taskExecutionContext.setAppIds(this.task.getAppIds());
    } finally {
        TaskExecutionContextCacheManager.removeByTaskInstanceId(taskExecutionContext.getTaskInstanceId());
        taskCallbackService.sendTaskExecuteResponseCommand(taskExecutionContext);
        clearTaskExecPath();
    }
}

private void sendAlert(TaskAlertInfo taskAlertInfo, int status) {
    int strategy = status == ExecutionStatus.SUCCESS.getCode() ? WarningType.SUCCESS.getCode() : WarningType.FAILURE.getCode();
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. The shell node is then created in the workflow to recall the created file and run it to failure. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manage. 2. create a shell task in the workflow. 3. the resource in the task configuration selects the created shell file. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskExecuteThread.java
    alertClientService.sendAlert(taskAlertInfo.getAlertGroupId(), taskAlertInfo.getTitle(), taskAlertInfo.getContent(), strategy);
}

/**
 * when task finish, clear execute path.
 */
private void clearTaskExecPath() {
    logger.info("develop mode is: {}", CommonUtils.isDevelopMode());
    if (!CommonUtils.isDevelopMode()) {
        String execLocalPath = taskExecutionContext.getExecutePath();
        if (StringUtils.isEmpty(execLocalPath)) {
            logger.warn("task: {} exec local path is empty.", taskExecutionContext.getTaskName());
            return;
        }
        if (SINGLE_SLASH.equals(execLocalPath)) {
            logger.warn("task: {} exec local path is '/', direct deletion is not allowed", taskExecutionContext.getTaskName());
            return;
        }
        try {
            org.apache.commons.io.FileUtils.deleteDirectory(new File(execLocalPath));
            logger.info("exec local path: {} cleared.", execLocalPath);
        } catch (IOException e) {
            logger.error("delete exec dir failed : {}", e.getMessage(), e);
        }
    }
}

/**
 * get global paras map
 *
 * @return map
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. The shell node is then created in the workflow to recall the created file and run it to failure. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manage. 2. create a shell task in the workflow. 3. the resource in the task configuration selects the created shell file. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskExecuteThread.java
 */
private Map<String, String> getGlobalParamsMap() {
    Map<String, String> globalParamsMap = new HashMap<>(16);
    String globalParamsStr = taskExecutionContext.getGlobalParams();
    if (globalParamsStr != null) {
        List<Property> globalParamsList = JSONUtils.toList(globalParamsStr, Property.class);
        globalParamsMap.putAll(globalParamsList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue)));
    }
    return globalParamsMap;
}

/**
 * kill task
 */
public void kill() {
    if (task != null) {
        try {
            task.cancelApplication(true);
            ProcessUtils.killYarnJob(taskExecutionContext);
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
    }
}

/**
 * download resource file
 *
 * @param execLocalPath execLocalPath
 * @param projectRes projectRes
 * @param logger logger
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. The shell node is then created in the workflow to recall the created file and run it to failure. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manage. 2. create a shell task in the workflow. 3. the resource in the task configuration selects the created shell file. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskExecuteThread.java
 */
private void downloadResource(String execLocalPath, Map<String, String> projectRes, Logger logger) {
    if (MapUtils.isEmpty(projectRes)) {
        return;
    }
    Set<Map.Entry<String, String>> resEntries = projectRes.entrySet();
    for (Map.Entry<String, String> resource : resEntries) {
        String fullName = resource.getKey();
        String tenantCode = resource.getValue();
        File resFile = new File(execLocalPath, fullName);
        if (!resFile.exists()) {
            try {
                String resHdfsPath = storageOperate.getResourceFileName(tenantCode, fullName);
                logger.info("get resource file from hdfs :{}", resHdfsPath);
                storageOperate.download(tenantCode, resHdfsPath, execLocalPath + File.separator + fullName, false, true);
            } catch (Exception e) {
                logger.error(e.getMessage(), e);
                throw new ServiceException(e.getMessage());
            }
        } else {
            logger.info("file : {} exists ", resFile.getName());
        }
    }
}

/**
 * get current TaskExecutionContext
 *
 * @return TaskExecutionContext
 */
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,593
[BUG-BD] Files from the Resource Center are not working properly in the workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Resource Center's File Manager, create a shell file. The shell node is then created in the workflow to recall the created file and run it to failure. ### What you expected to happen Files from the Resource Center can be used normally in the workflow. ### How to reproduce 1. create a shell file in the file manage. 2. create a shell task in the workflow. 3. the resource in the task configuration selects the created shell file. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9593
https://github.com/apache/dolphinscheduler/pull/9594
930d12031aa1b7ad7ce141fd97be906dccad53aa
9964c4c1e18d7abf9b116fdb8b3deef49156c0fd
2022-04-19T12:04:46Z
java
2022-04-20T01:58:37Z
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskExecuteThread.java
public TaskExecutionContext getTaskExecutionContext() {
    return this.taskExecutionContext;
}

@Override
public long getDelay(TimeUnit unit) {
    return unit.convert(DateUtils.getRemainTime(taskExecutionContext.getFirstSubmitTime(),
            taskExecutionContext.getDelayTime() * 60L), TimeUnit.SECONDS);
}

@Override
public int compareTo(Delayed o) {
    if (o == null) {
        return 1;
    }
    return Long.compare(this.getDelay(TimeUnit.MILLISECONDS), o.getDelay(TimeUnit.MILLISECONDS));
}

private void preBuildBusinessParams() {
    Map<String, Property> paramsMap = new HashMap<>();
    if (taskExecutionContext.getScheduleTime() != null) {
        Date date = taskExecutionContext.getScheduleTime();
        String dateTime = DateUtils.format(date, Constants.PARAMETER_FORMAT_TIME, null);
        Property p = new Property();
        p.setValue(dateTime);
        p.setProp(Constants.PARAMETER_DATETIME);
        paramsMap.put(Constants.PARAMETER_DATETIME, p);
    }
    taskExecutionContext.setParamsMap(paramsMap);
}
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.service.impl;

import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODES;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_PARAMS;
import static org.apache.dolphinscheduler.common.Constants.MAX_TASK_TIMEOUT;

import org.apache.dolphinscheduler.api.enums.ExecuteType;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.ExecutorService;
import org.apache.dolphinscheduler.api.service.MonitorService;
import org.apache.dolphinscheduler.api.service.ProjectService;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.ComplementDependentMode;
import org.apache.dolphinscheduler.common.enums.CycleEnum;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
import org.apache.dolphinscheduler.common.enums.RunMode;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.TaskGroupQueueStatus;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.plugin.task.api.TaskConstants;
import org.apache.dolphinscheduler.plugin.task.api.enums.ExecutionStatus;
import org.apache.dolphinscheduler.remote.command.StateEventChangeCommand;
import org.apache.dolphinscheduler.remote.processor.StateEventCallbackService;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;

import org.apache.commons.beanutils.BeanUtils;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.StringUtils;

import java.util.*;
import java.util.stream.Collectors;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import com.fasterxml.jackson.core.type.TypeReference;

/**
 * executor service impl
 */
@Service
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
public class ExecutorServiceImpl extends BaseServiceImpl implements ExecutorService {

    private static final Logger logger = LoggerFactory.getLogger(ExecutorServiceImpl.class);

    @Autowired
    private ProjectMapper projectMapper;

    @Autowired
    private ProjectService projectService;

    @Autowired
    private ProcessDefinitionMapper processDefinitionMapper;

    @Autowired
    private MonitorService monitorService;

    @Autowired
    private ProcessInstanceMapper processInstanceMapper;

    @Autowired
    private ProcessService processService;

    @Autowired
    StateEventCallbackService stateEventCallbackService;

    @Autowired
    private TaskDefinitionMapper taskDefinitionMapper;

    @Autowired
    private ProcessTaskRelationMapper processTaskRelationMapper;

    /**
     * execute process instance
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param processDefinitionCode process definition code
     * @param cronTime cron time
     * @param commandType command type
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
     * @param failureStrategy failure strategy
     * @param startNodeList start nodelist
     * @param taskDependType node dependency type
     * @param warningType warning type
     * @param warningGroupId notify group id
     * @param processInstancePriority process instance priority
     * @param workerGroup worker group name
     * @param environmentCode environment code
     * @param runMode run mode
     * @param timeout timeout
     * @param startParams the global param values which pass to new process instance
     * @param expectedParallelismNumber the expected parallelism number when execute complement in parallel mode
     * @return execute process instance code
     */
    @Override
    public Map<String, Object> execProcessInstance(User loginUser, long projectCode,
                                                   long processDefinitionCode, String cronTime, CommandType commandType,
                                                   FailureStrategy failureStrategy, String startNodeList,
                                                   TaskDependType taskDependType, WarningType warningType, int warningGroupId,
                                                   RunMode runMode, Priority processInstancePriority,
                                                   String workerGroup, Long environmentCode, Integer timeout,
                                                   Map<String, String> startParams, Integer expectedParallelismNumber,
                                                   int dryRun, ComplementDependentMode complementDependentMode) {
        Project project = projectMapper.queryByCode(projectCode);
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
        if (timeout <= 0 || timeout > MAX_TASK_TIMEOUT) {
            putMsg(result, Status.TASK_TIMEOUT_PARAMS_ERROR);
            return result;
        }
        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(processDefinitionCode);
        result = checkProcessDefinitionValid(projectCode, processDefinition, processDefinitionCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        if (!checkTenantSuitable(processDefinition)) {
            logger.error("there is not any valid tenant for the process definition: id:{},name:{}, ",
                    processDefinition.getId(), processDefinition.getName());
            putMsg(result, Status.TENANT_NOT_SUITABLE);
            return result;
        }
        if (!checkMasterExists(result)) {
            return result;
        }

        /**
         * create command
         */
        int create = this.createCommand(commandType, processDefinition.getCode(),
                taskDependType, failureStrategy, startNodeList, cronTime, warningType, loginUser.getId(),
                warningGroupId, runMode, processInstancePriority, workerGroup, environmentCode, startParams,
                expectedParallelismNumber, dryRun, complementDependentMode);

        if (create > 0) {
            processDefinition.setWarningGroupId(warningGroupId);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
            processDefinitionMapper.updateById(processDefinition);
            putMsg(result, Status.SUCCESS);
        } else {
            putMsg(result, Status.START_PROCESS_INSTANCE_ERROR);
        }
        return result;
    }

    /**
     * check whether master exists
     *
     * @param result result
     * @return master exists return true , otherwise return false
     */
    private boolean checkMasterExists(Map<String, Object> result) {
        List<Server> masterServers = monitorService.getServerListFromRegistry(true);
        if (masterServers.isEmpty()) {
            putMsg(result, Status.MASTER_NOT_EXISTS);
            return false;
        }
        return true;
    }

    /**
     * check whether the process definition can be executed
     *
     * @param projectCode project code
     * @param processDefinition process definition
     * @param processDefineCode process definition code
     * @return check result code
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
     */
    @Override
    public Map<String, Object> checkProcessDefinitionValid(long projectCode, ProcessDefinition processDefinition, long processDefineCode) {
        Map<String, Object> result = new HashMap<>();
        if (processDefinition == null || projectCode != processDefinition.getProjectCode()) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processDefineCode);
        } else if (processDefinition.getReleaseState() != ReleaseState.ONLINE) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_RELEASE, processDefineCode);
        } else if (!checkSubProcessDefinitionValid(processDefinition)) {
            putMsg(result, Status.SUB_PROCESS_DEFINE_NOT_RELEASE);
        } else {
            result.put(Constants.STATUS, Status.SUCCESS);
        }
        return result;
    }

    /**
     * check if the current process has subprocesses and all subprocesses are valid
     * @param processDefinition
     * @return check result
     */
    @Override
    public boolean checkSubProcessDefinitionValid(ProcessDefinition processDefinition) {
        List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryDownstreamByProcessDefinitionCode(processDefinition.getCode());
        if (processTaskRelations.isEmpty()) {
            return true;
        }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
        Set<Long> relationCodes = processTaskRelations.stream().map(ProcessTaskRelation::getPostTaskCode).collect(Collectors.toSet());
        List<TaskDefinition> taskDefinitions = taskDefinitionMapper.queryByCodeList(relationCodes);
        Set<Long> processDefinitionCodeSet = new HashSet<>();
        taskDefinitions.stream()
                .filter(task -> TaskConstants.TASK_TYPE_SUB_PROCESS.equalsIgnoreCase(task.getTaskType()))
                .forEach(taskDefinition -> processDefinitionCodeSet.add(Long.valueOf(
                        JSONUtils.getNodeString(taskDefinition.getTaskParams(), Constants.CMD_PARAM_SUB_PROCESS_DEFINE_CODE))));
        List<ProcessDefinition> processDefinitions = processDefinitionMapper.queryByCodes(processDefinitionCodeSet);
        return processDefinitions.stream().filter(definition -> definition.getReleaseState().equals(ReleaseState.OFFLINE)).collect(Collectors.toSet()).isEmpty();
    }

    /**
     * do action to process instance:pause, stop, repeat, recover from pause, recover from stop
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param processInstanceId process instance id
     * @param executeType execute type
     * @return execute result code
     */
    @Override
    public Map<String, Object> execute(User loginUser, long projectCode, Integer processInstanceId, ExecuteType executeType) {
        Project project = projectMapper.queryByCode(projectCode);
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        if (!checkMasterExists(result)) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
            return result;
        }

        ProcessInstance processInstance = processService.findProcessInstanceDetailById(processInstanceId);
        if (processInstance == null) {
            putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceId);
            return result;
        }

        ProcessDefinition processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(),
                processInstance.getProcessDefinitionVersion());
        if (executeType != ExecuteType.STOP && executeType != ExecuteType.PAUSE) {
            result = checkProcessDefinitionValid(projectCode, processDefinition, processInstance.getProcessDefinitionCode());
            if (result.get(Constants.STATUS) != Status.SUCCESS) {
                return result;
            }
        }

        result = checkExecuteType(processInstance, executeType);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        if (!checkTenantSuitable(processDefinition)) {
            logger.error("there is not any valid tenant for the process definition: id:{},name:{}, ",
                    processDefinition.getId(), processDefinition.getName());
            putMsg(result, Status.TENANT_NOT_SUITABLE);
        }

        //
        Map<String, Object> commandMap = JSONUtils.parseObject(processInstance.getCommandParam(), new TypeReference<Map<String, Object>>() {});
        String startParams = null;
        if (MapUtils.isNotEmpty(commandMap) && executeType == ExecuteType.REPEAT_RUNNING) {
            Object startParamsJson = commandMap.get(Constants.CMD_PARAM_START_PARAMS);
            if (startParamsJson != null) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
                startParams = startParamsJson.toString();
            }
        }

        switch (executeType) {
            case REPEAT_RUNNING:
                result = insertCommand(loginUser, processInstanceId, processDefinition.getCode(), processDefinition.getVersion(), CommandType.REPEAT_RUNNING, startParams);
                break;
            case RECOVER_SUSPENDED_PROCESS:
                result = insertCommand(loginUser, processInstanceId, processDefinition.getCode(), processDefinition.getVersion(), CommandType.RECOVER_SUSPENDED_PROCESS, startParams);
                break;
            case START_FAILURE_TASK_PROCESS:
                result = insertCommand(loginUser, processInstanceId, processDefinition.getCode(), processDefinition.getVersion(), CommandType.START_FAILURE_TASK_PROCESS, startParams);
                break;
            case STOP:
                if (processInstance.getState() == ExecutionStatus.READY_STOP) {
                    putMsg(result, Status.PROCESS_INSTANCE_ALREADY_CHANGED, processInstance.getName(), processInstance.getState());
                } else {
                    result = updateProcessInstancePrepare(processInstance, CommandType.STOP, ExecutionStatus.READY_STOP);
                }
                break;
            case PAUSE:
                if (processInstance.getState() == ExecutionStatus.READY_PAUSE) {
                    putMsg(result, Status.PROCESS_INSTANCE_ALREADY_CHANGED, processInstance.getName(), processInstance.getState());
                } else {
                    result = updateProcessInstancePrepare(processInstance, CommandType.PAUSE, ExecutionStatus.READY_PAUSE);
                }
                break;
            default:
                logger.error("unknown execute type : {}", executeType);
                putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "unknown execute type");
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
                break;
        }
        return result;
    }

    /**
     * check tenant suitable
     *
     * @param processDefinition process definition
     * @return true if tenant suitable, otherwise return false
     */
    private boolean checkTenantSuitable(ProcessDefinition processDefinition) {
        Tenant tenant = processService.getTenantForProcess(processDefinition.getTenantId(),
                processDefinition.getUserId());
        return tenant != null;
    }

    /**
     * Check the state of process instance and the type of operation match
     *
     * @param processInstance process instance
     * @param executeType execute type
     * @return check result code
     */
    private Map<String, Object> checkExecuteType(ProcessInstance processInstance, ExecuteType executeType) {
        Map<String, Object> result = new HashMap<>();
        ExecutionStatus executionStatus = processInstance.getState();
        boolean checkResult = false;
        switch (executeType) {
            case PAUSE:
            case STOP:
                if (executionStatus.typeIsRunning()) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
                    checkResult = true;
                }
                break;
            case REPEAT_RUNNING:
                if (executionStatus.typeIsFinished()) {
                    checkResult = true;
                }
                break;
            case START_FAILURE_TASK_PROCESS:
                if (executionStatus.typeIsFailure()) {
                    checkResult = true;
                }
                break;
            case RECOVER_SUSPENDED_PROCESS:
                if (executionStatus.typeIsPause() || executionStatus.typeIsCancel()) {
                    checkResult = true;
                }
                break;
            default:
                break;
        }
        if (!checkResult) {
            putMsg(result, Status.PROCESS_INSTANCE_STATE_OPERATION_ERROR, processInstance.getName(), executionStatus.toString(), executeType.toString());
        } else {
            putMsg(result, Status.SUCCESS);
        }
        return result;
    }

    /**
     * prepare to update process instance command type and status
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
     *
     * @param processInstance process instance
     * @param commandType command type
     * @param executionStatus execute status
     * @return update result
     */
    private Map<String, Object> updateProcessInstancePrepare(ProcessInstance processInstance, CommandType commandType, ExecutionStatus executionStatus) {
        Map<String, Object> result = new HashMap<>();

        processInstance.setCommandType(commandType);
        processInstance.addHistoryCmd(commandType);
        processInstance.setState(executionStatus);
        int update = processService.updateProcessInstance(processInstance);

        //
        if (update > 0) {
            String host = processInstance.getHost();
            String address = host.split(":")[0];
            int port = Integer.parseInt(host.split(":")[1]);
            StateEventChangeCommand stateEventChangeCommand = new StateEventChangeCommand(
                    processInstance.getId(), 0, processInstance.getState(), processInstance.getId(), 0
            );
            stateEventCallbackService.sendResult(address, port, stateEventChangeCommand.convert2Command());
            putMsg(result, Status.SUCCESS);
        } else {
            putMsg(result, Status.EXECUTE_PROCESS_INSTANCE_ERROR);
        }
        return result;
    }

    /**
     * prepare to update process instance command type and status
     *
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
     * @param processInstance process instance
     * @return update result
     */
    private Map<String, Object> forceStartTaskInstance(ProcessInstance processInstance, int taskId) {
        Map<String, Object> result = new HashMap<>();
        TaskGroupQueue taskGroupQueue = processService.loadTaskGroupQueue(taskId);
        if (taskGroupQueue.getStatus() != TaskGroupQueueStatus.WAIT_QUEUE) {
            putMsg(result, Status.TASK_GROUP_QUEUE_ALREADY_START);
            return result;
        }
        taskGroupQueue.setForceStart(Flag.YES.getCode());
        processService.updateTaskGroupQueue(taskGroupQueue);
        processService.sendStartTask2Master(processInstance, taskId,
                org.apache.dolphinscheduler.remote.command.CommandType.TASK_FORCE_STATE_EVENT_REQUEST);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * insert command, used in the implementation of the page, re run, recovery (pause / failure) execution
     *
     * @param loginUser login user
     * @param instanceId instance id
     * @param processDefinitionCode process definition code
     * @param processVersion
     * @param commandType command type
     * @return insert result code
     */
    private Map<String, Object> insertCommand(User loginUser, Integer instanceId, long processDefinitionCode, int processVersion, CommandType commandType, String startParams) {
        Map<String, Object> result = new HashMap<>();

        //
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
        Map<String, Object> cmdParam = new HashMap<>();
        cmdParam.put(CMD_PARAM_RECOVER_PROCESS_ID_STRING, instanceId);
        if (!StringUtils.isEmpty(startParams)) {
            cmdParam.put(CMD_PARAM_START_PARAMS, startParams);
        }

        Command command = new Command();
        command.setCommandType(commandType);
        command.setProcessDefinitionCode(processDefinitionCode);
        command.setCommandParam(JSONUtils.toJsonString(cmdParam));
        command.setExecutorId(loginUser.getId());
        command.setProcessDefinitionVersion(processVersion);
        command.setProcessInstanceId(instanceId);

        if (!processService.verifyIsNeedCreateCommand(command)) {
            putMsg(result, Status.PROCESS_INSTANCE_EXECUTING_COMMAND, processDefinitionCode);
            return result;
        }

        int create = processService.createCommand(command);
        if (create > 0) {
            putMsg(result, Status.SUCCESS);
        } else {
            putMsg(result, Status.EXECUTE_PROCESS_INSTANCE_ERROR);
        }
        return result;
    }

    /**
     * check if sub processes are offline before starting process definition
     *
     * @param processDefinitionCode process definition code
     * @return check result code
     */
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
    @Override
    public Map<String, Object> startCheckByProcessDefinedCode(long processDefinitionCode) {
        Map<String, Object> result = new HashMap<>();

        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(processDefinitionCode);
        if (processDefinition == null) {
            logger.error("process definition is not found");
            putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "processDefinitionCode");
            return result;
        }

        List<Long> codes = new ArrayList<>();
        processService.recurseFindSubProcess(processDefinition.getCode(), codes);
        if (!codes.isEmpty()) {
            List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(codes);
            if (processDefinitionList != null) {
                for (ProcessDefinition processDefinitionTmp : processDefinitionList) {
                    /**
                     * if there is no online process, exit directly
                     */
                    if (processDefinitionTmp.getReleaseState() != ReleaseState.ONLINE) {
                        putMsg(result, Status.PROCESS_DEFINE_NOT_RELEASE, processDefinitionTmp.getName());
                        logger.info("not release process definition id: {} , name : {}", processDefinitionTmp.getId(), processDefinitionTmp.getName());
                        return result;
                    }
                }
            }
        }
        putMsg(result, Status.SUCCESS);
        return result;
    }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
    /**
     * create command
     *
     * @param commandType commandType
     * @param processDefineCode processDefineCode
     * @param nodeDep nodeDep
     * @param failureStrategy failureStrategy
     * @param startNodeList startNodeList
     * @param schedule schedule
     * @param warningType warningType
     * @param executorId executorId
     * @param warningGroupId warningGroupId
     * @param runMode runMode
     * @param processInstancePriority processInstancePriority
     * @param workerGroup workerGroup
     * @param environmentCode environmentCode
     * @return command id
     */
    private int createCommand(CommandType commandType, long processDefineCode,
                              TaskDependType nodeDep, FailureStrategy failureStrategy, String startNodeList,
                              String schedule, WarningType warningType, int executorId, int warningGroupId,
                              RunMode runMode, Priority processInstancePriority, String workerGroup, Long environmentCode,
                              Map<String, String> startParams, Integer expectedParallelismNumber, int dryRun,
                              ComplementDependentMode complementDependentMode) {

        /**
         * instantiate command schedule instance
         */
        Command command = new Command();

        Map<String, String> cmdParam = new HashMap<>();
        if (commandType == null) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
            command.setCommandType(CommandType.START_PROCESS);
        } else {
            command.setCommandType(commandType);
        }
        command.setProcessDefinitionCode(processDefineCode);
        if (nodeDep != null) {
            command.setTaskDependType(nodeDep);
        }
        if (failureStrategy != null) {
            command.setFailureStrategy(failureStrategy);
        }
        if (!StringUtils.isEmpty(startNodeList)) {
            cmdParam.put(CMD_PARAM_START_NODES, startNodeList);
        }
        if (warningType != null) {
            command.setWarningType(warningType);
        }
        if (startParams != null && startParams.size() > 0) {
            cmdParam.put(CMD_PARAM_START_PARAMS, JSONUtils.toJsonString(startParams));
        }
        command.setCommandParam(JSONUtils.toJsonString(cmdParam));
        command.setExecutorId(executorId);
        command.setWarningGroupId(warningGroupId);
        command.setProcessInstancePriority(processInstancePriority);
        command.setWorkerGroup(workerGroup);
        command.setEnvironmentCode(environmentCode);
        command.setDryRun(dryRun);
        ProcessDefinition processDefinition = processService.findProcessDefinitionByCode(processDefineCode);
        if (processDefinition != null) {
            command.setProcessDefinitionVersion(processDefinition.getVersion());
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
    }
    command.setProcessInstanceId(0);

    Date start = null;
    Date end = null;
    if (!StringUtils.isEmpty(schedule)) {
        String[] interval = schedule.split(",");
        if (interval.length == 2) {
            start = DateUtils.getScheduleDate(interval[0]);
            end = DateUtils.getScheduleDate(interval[1]);
            if (start.after(end)) {
                logger.info("complement data error, wrong date start:{} and end date:{} ", start, end);
                return 0;
            }
        }
    }

    // complement data run: build one or more complement commands for the schedule date range
    if (commandType == CommandType.COMPLEMENT_DATA) {
        if (start == null || end == null) {
            return 0;
        }
        return createComplementCommandList(start, end, runMode, command, expectedParallelismNumber,
                complementDependentMode);
    } else {
        command.setCommandParam(JSONUtils.toJsonString(cmdParam));
        return processService.createCommand(command);
    }
}

/**
 * create complement command
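The schedule argument is expected to be a comma-separated "start,end" pair, and a reversed range short-circuits with a return value of 0. A minimal, standalone sketch of that parse-and-validate step, using java.time and an assumed "yyyy-MM-dd HH:mm:ss" pattern instead of the project's DateUtils, could look like this:

import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;

public class ScheduleRangeSketch {
    // Assumed format; the real parsing goes through DateUtils.getScheduleDate(...)
    private static final DateTimeFormatter FMT = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");

    public static void main(String[] args) {
        String schedule = "2022-04-01 00:00:00,2022-04-07 00:00:00"; // hypothetical input

        String[] interval = schedule.split(",");
        if (interval.length != 2) {
            System.out.println("invalid schedule, expected 'start,end'");
            return;
        }
        LocalDateTime start = LocalDateTime.parse(interval[0], FMT);
        LocalDateTime end = LocalDateTime.parse(interval[1], FMT);
        if (start.isAfter(end)) {
            // mirrors the "return 0" guard above: a reversed range creates no command
            System.out.println("start date is after end date, nothing to do");
            return;
        }
        System.out.println("complement range: " + start + " -> " + end);
    }
}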
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,610
[Bug] Sub-workflow status check beyond what should be checked
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If there is no subtask flow, it will prompt that the status of subtasks is offline ### What you expected to happen normal execution ### How to reproduce Create a task that is not a subtask flow component and run it ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9610
https://github.com/apache/dolphinscheduler/pull/9611
a3bf10c88d63a27d0682834f8a81c0dc352f4907
e2ec489042d59e4f63c181345da0da3d23f2a642
2022-04-20T06:26:40Z
java
2022-04-20T07:04:27Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
 * close left and close right
 *
 * @param start
 * @param end
 * @param runMode
 * @return
 */
protected int createComplementCommandList(Date start, Date end, RunMode runMode, Command command,
                                          Integer expectedParallelismNumber,
                                          ComplementDependentMode complementDependentMode) {
    int createCount = 0;
    int dependentProcessDefinitionCreateCount = 0;
    runMode = (runMode == null) ? RunMode.RUN_MODE_SERIAL : runMode;
    Map<String, String> cmdParam = JSONUtils.toMap(command.getCommandParam());
    switch (runMode) {
        case RUN_MODE_SERIAL: {
            if (start.after(end)) {
                logger.warn("The startDate {} is later than the endDate {}", start, end);
                break;
            }
            cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(start));
            cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(end));
            command.setCommandParam(JSONUtils.toJsonString(cmdParam));
            createCount = processService.createCommand(command);

            // decide whether dependent complement commands are also needed for the released schedules
            List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionCode(command.getProcessDefinitionCode());
            if (schedules.isEmpty() || complementDependentMode == ComplementDependentMode.OFF_MODE) {
                logger.info("process code: {} complement dependent in off mode or schedule's size is 0, skip "
                        + "dependent complement data", command.getProcessDefinitionCode());
            } else {
                dependentProcessDefinitionCreateCount += createComplementDependentCommand(schedules, command);
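The serial branch above keeps the whole [start, end] range in a single command whose parameters carry the complement start and end dates. For contrast, the sketch below illustrates the general idea behind a parallel run mode: splitting the range into at most expectedParallelismNumber contiguous sub-ranges. It is only an illustration; the project's actual parallel branch derives the candidate run dates from the workflow's schedule rather than iterating calendar days.

import java.time.LocalDate;
import java.util.ArrayList;
import java.util.List;

public class ParallelSplitSketch {
    /**
     * Split [start, end] (inclusive, day granularity) into at most expectedParallelismNumber
     * contiguous sub-ranges. Simplified illustration only; not the real createComplementCommandList logic.
     */
    static List<LocalDate[]> split(LocalDate start, LocalDate end, int expectedParallelismNumber) {
        List<LocalDate> days = new ArrayList<>();
        for (LocalDate d = start; !d.isAfter(end); d = d.plusDays(1)) {
            days.add(d);
        }
        int groups = Math.min(expectedParallelismNumber, days.size());
        int per = (int) Math.ceil(days.size() / (double) groups);
        List<LocalDate[]> ranges = new ArrayList<>();
        for (int i = 0; i < days.size(); i += per) {
            int last = Math.min(i + per, days.size()) - 1;
            ranges.add(new LocalDate[]{days.get(i), days.get(last)});
        }
        return ranges;
    }

    public static void main(String[] args) {
        // Hypothetical 7-day range split across 3 parallel commands
        for (LocalDate[] r : split(LocalDate.of(2022, 4, 1), LocalDate.of(2022, 4, 7), 3)) {
            System.out.println(r[0] + " -> " + r[1]);
        }
    }
}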