Dataset columns:

- status: string (1 distinct value)
- repo_name: string (31 distinct values)
- repo_url: string (31 distinct values)
- issue_id: int64 (1 to 104k)
- title: string (length 4 to 233)
- body: string (length 0 to 186k, nullable)
- issue_url: string (length 38 to 56)
- pull_url: string (length 37 to 54)
- before_fix_sha: string (length 40)
- after_fix_sha: string (length 40)
- report_datetime: timestamp[us, tz=UTC]
- language: string (5 distinct values)
- commit_datetime: timestamp[us, tz=UTC]
- updated_file: string (length 7 to 188)
- chunk_content: string (length 1 to 1.03M)
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,709 |
[Bug] [Dependent] Dependent triggers wrong version downstream
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
The historical version of the workflow is triggered downstream when dependency mode complement is enabled upstream
### What you expected to happen
Use the version of the day of the run.
### How to reproduce




### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [x] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11709
|
https://github.com/apache/dolphinscheduler/pull/11734
|
2e61c76c225ccfd39a7860fb6fcd6653ef86b9a7
|
37325b4c3410c2fe2f025c16363ea0c3a157647e
| 2022-08-31T07:15:08Z |
java
| 2022-09-08T07:08:10Z |
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
|
dependentProcessDefinition.setWorkerGroup(workerGroup);
}
validDependentProcessDefinitionList.add(dependentProcessDefinition);
}
}
return validDependentProcessDefinitionList;
}
/**
* @param schedule
* @return check error return 0, otherwise 1
*/
private boolean isValidateScheduleTime(String schedule) {
Map<String, String> scheduleResult = JSONUtils.toMap(schedule);
if (scheduleResult == null) {
return false;
}
if (scheduleResult.containsKey(CMDPARAM_COMPLEMENT_DATA_SCHEDULE_DATE_LIST)) {
if (scheduleResult.get(CMDPARAM_COMPLEMENT_DATA_SCHEDULE_DATE_LIST) == null) {
return false;
}
}
if (scheduleResult.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) {
String startDate = scheduleResult.get(CMDPARAM_COMPLEMENT_DATA_START_DATE);
String endDate = scheduleResult.get(CMDPARAM_COMPLEMENT_DATA_END_DATE);
if (startDate == null || endDate == null) {
return false;
}
try {
ZonedDateTime start = DateUtils.stringToZoneDateTime(startDate);
ZonedDateTime end = DateUtils.stringToZoneDateTime(endDate);
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,709 |
[Bug] [Dependent] Dependent triggers wrong version downstream
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
The historical version of the workflow is triggered downstream when dependency mode complement is enabled upstream
### What you expected to happen
Use the version of the day of the run.
### How to reproduce




### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [x] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11709
|
https://github.com/apache/dolphinscheduler/pull/11734
|
2e61c76c225ccfd39a7860fb6fcd6653ef86b9a7
|
37325b4c3410c2fe2f025c16363ea0c3a157647e
| 2022-08-31T07:15:08Z |
java
| 2022-09-08T07:08:10Z |
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
|
if (start == null || end == null) {
return false;
}
if (start.isAfter(end)) {
logger.error("complement data error, wrong date start:{} and end date:{} ", start, end);
return false;
}
} catch (Exception ex) {
logger.warn("Parse schedule time error, startDate: {}, endDate: {}", startDate, endDate);
return false;
}
}
return true;
}
/**
* @param scheduleTimeList
* @return remove duplicate date list
*/
private String removeDuplicates(String scheduleTimeList) {
if (StringUtils.isNotEmpty(scheduleTimeList)) {
Set<String> dateSet =
Arrays.stream(scheduleTimeList.split(COMMA)).map(String::trim).collect(Collectors.toSet());
return String.join(COMMA, dateSet);
}
return null;
}
/**
* query executing data of processInstance by master
* @param processInstanceId
* @return
|
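The two chunks above complete `isValidateScheduleTime`: the complement schedule JSON must carry a non-null schedule date list or a start/end pair, and the start must not be after the end. Below is a minimal, self-contained sketch of that date-range check using plain `java.time` on a pre-parsed map; the key names are hypothetical stand-ins for the CMDPARAM_COMPLEMENT_* constants, and the real code goes through DolphinScheduler's JSONUtils and DateUtils.

```java
import java.time.ZonedDateTime;
import java.time.format.DateTimeParseException;
import java.util.Map;

// Minimal sketch of the start/end check in isValidateScheduleTime, assuming the
// schedule JSON was already parsed into a Map. The key names below are
// hypothetical stand-ins for the CMDPARAM_COMPLEMENT_* constants.
public class ScheduleTimeCheck {

    static boolean isValidRange(Map<String, String> schedule) {
        String startDate = schedule.get("complementStartDate"); // hypothetical key
        String endDate = schedule.get("complementEndDate");     // hypothetical key
        if (startDate == null || endDate == null) {
            return false;
        }
        try {
            ZonedDateTime start = ZonedDateTime.parse(startDate);
            ZonedDateTime end = ZonedDateTime.parse(endDate);
            return !start.isAfter(end); // an inverted range is rejected
        } catch (DateTimeParseException e) {
            return false;               // unparseable dates are rejected as well
        }
    }

    public static void main(String[] args) {
        System.out.println(isValidRange(Map.of(
                "complementStartDate", "2022-09-01T00:00:00Z",
                "complementEndDate", "2022-09-07T00:00:00Z")));   // true
        System.out.println(isValidRange(Map.of(
                "complementStartDate", "2022-09-07T00:00:00Z",
                "complementEndDate", "2022-09-01T00:00:00Z")));   // false
    }
}
```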
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,709 |
[Bug] [Dependent] Dependent triggers wrong version downstream
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
The historical version of the workflow is triggered downstream when dependency mode complement is enabled upstream
### What you expected to happen
Use the version of the day of the run.
### How to reproduce




### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [x] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11709
|
https://github.com/apache/dolphinscheduler/pull/11734
|
2e61c76c225ccfd39a7860fb6fcd6653ef86b9a7
|
37325b4c3410c2fe2f025c16363ea0c3a157647e
| 2022-08-31T07:15:08Z |
java
| 2022-09-08T07:08:10Z |
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
|
*/
@Override
public WorkflowExecuteDto queryExecutingWorkflowByProcessInstanceId(Integer processInstanceId) {
ProcessInstance processInstance = processService.findProcessInstanceDetailById(processInstanceId);
if (processInstance == null) {
return null;
}
Host host = new Host(processInstance.getHost());
WorkflowExecutingDataRequestCommand requestCommand = new WorkflowExecutingDataRequestCommand();
requestCommand.setProcessInstanceId(processInstanceId);
org.apache.dolphinscheduler.remote.command.Command command =
stateEventCallbackService.sendSync(host, requestCommand.convert2Command());
if (command == null) {
return null;
}
WorkflowExecutingDataResponseCommand responseCommand =
JSONUtils.parseObject(command.getBody(), WorkflowExecutingDataResponseCommand.class);
return responseCommand.getWorkflowExecuteDto();
}
@Override
public Map<String, Object> execStreamTaskInstance(User loginUser, long projectCode, long taskDefinitionCode,
int taskDefinitionVersion,
int warningGroupId, String workerGroup, Long environmentCode,
Map<String, String> startParams, int dryRun) {
Project project = projectMapper.queryByCode(projectCode);
Map<String, Object> result =
projectService.checkProjectAndAuth(loginUser, project, projectCode, WORKFLOW_START);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,709 |
[Bug] [Dependent] Dependent triggers wrong version downstream
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
The historical version of the workflow is triggered downstream when dependency mode complement is enabled upstream
### What you expected to happen
Use the version of the day of the run.
### How to reproduce




### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [x] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11709
|
https://github.com/apache/dolphinscheduler/pull/11734
|
2e61c76c225ccfd39a7860fb6fcd6653ef86b9a7
|
37325b4c3410c2fe2f025c16363ea0c3a157647e
| 2022-08-31T07:15:08Z |
java
| 2022-09-08T07:08:10Z |
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
|
}
if (!checkMasterExists(result)) {
return result;
}
// t
List<Server> masterServerList = monitorService.getServerListFromRegistry(true);
Host host = new Host(masterServerList.get(0).getHost(), masterServerList.get(0).getPort());
TaskExecuteStartCommand taskExecuteStartCommand = new TaskExecuteStartCommand();
taskExecuteStartCommand.setExecutorId(loginUser.getId());
taskExecuteStartCommand.setExecutorName(loginUser.getUserName());
taskExecuteStartCommand.setProjectCode(projectCode);
taskExecuteStartCommand.setTaskDefinitionCode(taskDefinitionCode);
taskExecuteStartCommand.setTaskDefinitionVersion(taskDefinitionVersion);
taskExecuteStartCommand.setWorkerGroup(workerGroup);
taskExecuteStartCommand.setWarningGroupId(warningGroupId);
taskExecuteStartCommand.setEnvironmentCode(environmentCode);
taskExecuteStartCommand.setStartParams(startParams);
taskExecuteStartCommand.setDryRun(dryRun);
org.apache.dolphinscheduler.remote.command.Command response =
stateEventCallbackService.sendSync(host, taskExecuteStartCommand.convert2Command());
if (response != null) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.START_TASK_INSTANCE_ERROR);
}
return result;
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,709 |
[Bug] [Dependent] Dependent triggers wrong version downstream
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
The historical version of the workflow is triggered downstream when dependency mode complement is enabled upstream
### What you expected to happen
Use the version of the day of the run.
### How to reproduce




### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [x] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11709
|
https://github.com/apache/dolphinscheduler/pull/11734
|
2e61c76c225ccfd39a7860fb6fcd6653ef86b9a7
|
37325b4c3410c2fe2f025c16363ea0c3a157647e
| 2022-08-31T07:15:08Z |
java
| 2022-09-08T07:08:10Z |
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/DependentProcessDefinition.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.entity;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CycleEnum;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.plugin.task.api.model.DependentItem;
import org.apache.dolphinscheduler.plugin.task.api.model.DependentTaskModel;
import org.apache.dolphinscheduler.plugin.task.api.parameters.DependentParameters;
import java.util.List;
/**
* dependent process definition
*/
public class DependentProcessDefinition {
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,709 |
[Bug] [Dependent] Dependent triggers wrong version downstream
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
The historical version of the workflow is triggered downstream when dependency mode complement is enabled upstream
### What you expected to happen
Use the version of the day of the run.
### How to reproduce




### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [x] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11709
|
https://github.com/apache/dolphinscheduler/pull/11734
|
2e61c76c225ccfd39a7860fb6fcd6653ef86b9a7
|
37325b4c3410c2fe2f025c16363ea0c3a157647e
| 2022-08-31T07:15:08Z |
java
| 2022-09-08T07:08:10Z |
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/DependentProcessDefinition.java
|
/**
* process definition code
*/
private long processDefinitionCode;
/**
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,709 |
[Bug] [Dependent] Dependent triggers wrong version downstream
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
The historical version of the workflow is triggered downstream when dependency mode complement is enabled upstream
### What you expected to happen
Use the version of the day of the run.
### How to reproduce




### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [x] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11709
|
https://github.com/apache/dolphinscheduler/pull/11734
|
2e61c76c225ccfd39a7860fb6fcd6653ef86b9a7
|
37325b4c3410c2fe2f025c16363ea0c3a157647e
| 2022-08-31T07:15:08Z |
java
| 2022-09-08T07:08:10Z |
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/DependentProcessDefinition.java
|
* process definition name
*/
private String processDefinitionName;
/**
* task definition name
*/
private long taskDefinitionCode;
/**
* task definition params
*/
private String taskParams;
/**
* schedule worker group
*/
private String workerGroup;
/**
* get dependent cycle
* @return CycleEnum
*/
public CycleEnum getDependentCycle() {
DependentParameters dependentParameters = this.getDependentParameters();
List<DependentTaskModel> dependentTaskModelList = dependentParameters.getDependTaskList();
for (DependentTaskModel dependentTaskModel : dependentTaskModelList) {
List<DependentItem> dependentItemList = dependentTaskModel.getDependItemList();
for (DependentItem dependentItem : dependentItemList) {
if (this.getProcessDefinitionCode() == dependentItem.getDefinitionCode()) {
return cycle2CycleEnum(dependentItem.getCycle());
}
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,709 |
[Bug] [Dependent] Dependent triggers wrong version downstream
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
The historical version of the workflow is triggered downstream when dependency mode complement is enabled upstream
### What you expected to happen
Use the version of the day of the run.
### How to reproduce




### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [x] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11709
|
https://github.com/apache/dolphinscheduler/pull/11734
|
2e61c76c225ccfd39a7860fb6fcd6653ef86b9a7
|
37325b4c3410c2fe2f025c16363ea0c3a157647e
| 2022-08-31T07:15:08Z |
java
| 2022-09-08T07:08:10Z |
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/DependentProcessDefinition.java
|
return CycleEnum.DAY;
}
public CycleEnum cycle2CycleEnum(String cycle) {
CycleEnum cycleEnum = null;
switch (cycle) {
case "day":
cycleEnum = CycleEnum.DAY;
break;
case "hour":
cycleEnum = CycleEnum.HOUR;
break;
case "week":
cycleEnum = CycleEnum.WEEK;
break;
case "month":
cycleEnum = CycleEnum.MONTH;
break;
default:
break;
}
return cycleEnum;
}
public DependentParameters getDependentParameters() {
return JSONUtils.parseObject(getDependence(), DependentParameters.class);
}
public String getDependence() {
return JSONUtils.getNodeString(this.taskParams, Constants.DEPENDENCE);
}
public String getProcessDefinitionName() {
return this.processDefinitionName;
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,709 |
[Bug] [Dependent] Dependent triggers wrong version downstream
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
The historical version of the workflow is triggered downstream when dependency mode complement is enabled upstream
### What you expected to happen
Use the version of the day of the run.
### How to reproduce




### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [x] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11709
|
https://github.com/apache/dolphinscheduler/pull/11734
|
2e61c76c225ccfd39a7860fb6fcd6653ef86b9a7
|
37325b4c3410c2fe2f025c16363ea0c3a157647e
| 2022-08-31T07:15:08Z |
java
| 2022-09-08T07:08:10Z |
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/DependentProcessDefinition.java
|
}
public void setProcessDefinitionName(String name) {
this.processDefinitionName = name;
}
public long getProcessDefinitionCode() {
return this.processDefinitionCode;
}
public void setProcessDefinitionCode(long code) {
this.processDefinitionCode = code;
}
public long getTaskDefinitionCode() {
return this.taskDefinitionCode;
}
public void setTaskDefinitionCode(long code) {
this.taskDefinitionCode = code;
}
public String getTaskParams() {
return this.taskParams;
}
public void setTaskParams(String taskParams) {
this.taskParams = taskParams;
}
public String getWorkerGroup() {
return this.workerGroup;
}
public void setWorkerGroup(String workerGroup) {
this.workerGroup = workerGroup;
}
}
|
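`DependentProcessDefinition` derives its cycle by parsing the `dependence` node out of `taskParams` and looking for the dependent item whose `definitionCode` matches its own. The sketch below walks a payload of that shape with Jackson (which DolphinScheduler's JSONUtils wraps); the payload itself is an illustrative assumption, not copied from the project.

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

// Walks a taskParams payload the way getDependence()/getDependentCycle() do:
// take the "dependence" node, then dependTaskList -> dependItemList, and use the
// cycle of the item whose definitionCode matches the upstream definition.
// The payload below is an illustrative assumption, not copied from the project.
public class DependenceWalkDemo {

    public static void main(String[] args) throws Exception {
        long upstreamDefinitionCode = 123456789L; // hypothetical code
        String taskParams = "{\"dependence\":{\"dependTaskList\":[{\"dependItemList\":"
                + "[{\"definitionCode\":123456789,\"cycle\":\"day\",\"dateValue\":\"today\"}]}]}}";

        JsonNode dependence = new ObjectMapper().readTree(taskParams).path("dependence");
        for (JsonNode task : dependence.path("dependTaskList")) {
            for (JsonNode item : task.path("dependItemList")) {
                if (item.path("definitionCode").asLong() == upstreamDefinitionCode) {
                    System.out.println("dependent cycle = " + item.path("cycle").asText()); // day
                }
            }
        }
    }
}
```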
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,815 |
[Bug] [Datax] error when ck is dtType and column names contain special characters
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
<img width="1285" alt="image" src="https://user-images.githubusercontent.com/33984497/188775144-256ee91d-1a17-41be-b9c2-460454bf40e8.png">
<img width="1358" alt="image" src="https://user-images.githubusercontent.com/33984497/188775208-53077d96-356e-4032-bc9c-b0581320a14d.png">
### What you expected to happen
can explain special characters
### How to reproduce
fix the explain logic in ds
### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11815
|
https://github.com/apache/dolphinscheduler/pull/11818
|
37325b4c3410c2fe2f025c16363ea0c3a157647e
|
e101a5cb2c1a273eb4f2b29230c769f69f698148
| 2022-09-07T02:33:35Z |
java
| 2022-09-08T07:09:17Z |
dolphinscheduler-task-plugin/dolphinscheduler-task-datax/src/main/java/org/apache/dolphinscheduler/plugin/task/datax/DataxUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.plugin.task.datax;
import org.apache.dolphinscheduler.spi.enums.DbType;
import com.alibaba.druid.sql.dialect.clickhouse.parser.ClickhouseStatementParser;
import com.alibaba.druid.sql.dialect.hive.parser.HiveStatementParser;
import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser;
import com.alibaba.druid.sql.dialect.oracle.parser.OracleStatementParser;
import com.alibaba.druid.sql.dialect.postgresql.parser.PGSQLStatementParser;
import com.alibaba.druid.sql.dialect.sqlserver.parser.SQLServerStatementParser;
import com.alibaba.druid.sql.parser.SQLStatementParser;
public class DataxUtils {
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,815 |
[Bug] [Datax] error when ck is dtType and column names contain special characters
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
<img width="1285" alt="image" src="https://user-images.githubusercontent.com/33984497/188775144-256ee91d-1a17-41be-b9c2-460454bf40e8.png">
<img width="1358" alt="image" src="https://user-images.githubusercontent.com/33984497/188775208-53077d96-356e-4032-bc9c-b0581320a14d.png">
### What you expected to happen
can explain special characters
### How to reproduce
fix the explain logic in ds
### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11815
|
https://github.com/apache/dolphinscheduler/pull/11818
|
37325b4c3410c2fe2f025c16363ea0c3a157647e
|
e101a5cb2c1a273eb4f2b29230c769f69f698148
| 2022-09-07T02:33:35Z |
java
| 2022-09-08T07:09:17Z |
dolphinscheduler-task-plugin/dolphinscheduler-task-datax/src/main/java/org/apache/dolphinscheduler/plugin/task/datax/DataxUtils.java
|
public static final String DATAX_READER_PLUGIN_MYSQL = "mysqlreader";
public static final String DATAX_READER_PLUGIN_POSTGRESQL = "postgresqlreader";
public static final String DATAX_READER_PLUGIN_ORACLE = "oraclereader";
public static final String DATAX_READER_PLUGIN_SQLSERVER = "sqlserverreader";
public static final String DATAX_READER_PLUGIN_CLICKHOUSE = "clickhousereader";
public static final String DATAX_READER_PLUGIN_HIVE = "rdbmsreader";
public static final String DATAX_WRITER_PLUGIN_MYSQL = "mysqlwriter";
public static final String DATAX_WRITER_PLUGIN_POSTGRESQL = "postgresqlwriter";
public static final String DATAX_WRITER_PLUGIN_ORACLE = "oraclewriter";
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,815 |
[Bug] [Datax] error when ck is dtType and column names contain special characters
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
<img width="1285" alt="image" src="https://user-images.githubusercontent.com/33984497/188775144-256ee91d-1a17-41be-b9c2-460454bf40e8.png">
<img width="1358" alt="image" src="https://user-images.githubusercontent.com/33984497/188775208-53077d96-356e-4032-bc9c-b0581320a14d.png">
### What you expected to happen
can explain special characters
### How to reproduce
fix the explain logic in ds
### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11815
|
https://github.com/apache/dolphinscheduler/pull/11818
|
37325b4c3410c2fe2f025c16363ea0c3a157647e
|
e101a5cb2c1a273eb4f2b29230c769f69f698148
| 2022-09-07T02:33:35Z |
java
| 2022-09-08T07:09:17Z |
dolphinscheduler-task-plugin/dolphinscheduler-task-datax/src/main/java/org/apache/dolphinscheduler/plugin/task/datax/DataxUtils.java
|
public static final String DATAX_WRITER_PLUGIN_SQLSERVER = "sqlserverwriter";
public static final String DATAX_WRITER_PLUGIN_CLICKHOUSE = "clickhousewriter";
public static final String DATAX_WRITER_PLUGIN_HIVE = "rdbmswriter";
public static String getReaderPluginName(DbType dbType) {
switch (dbType) {
case MYSQL:
return DATAX_READER_PLUGIN_MYSQL;
case POSTGRESQL:
return DATAX_READER_PLUGIN_POSTGRESQL;
case ORACLE:
return DATAX_READER_PLUGIN_ORACLE;
case SQLSERVER:
return DATAX_READER_PLUGIN_SQLSERVER;
case CLICKHOUSE:
return DATAX_READER_PLUGIN_CLICKHOUSE;
case HIVE:
return DATAX_READER_PLUGIN_HIVE;
default:
return null;
}
}
public static String getWriterPluginName(DbType dbType) {
switch (dbType) {
case MYSQL:
return DATAX_WRITER_PLUGIN_MYSQL;
case POSTGRESQL:
return DATAX_WRITER_PLUGIN_POSTGRESQL;
case ORACLE:
return DATAX_WRITER_PLUGIN_ORACLE;
case SQLSERVER:
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,815 |
[Bug] [Datax] error when ck is dtType and column names contain special characters
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
<img width="1285" alt="image" src="https://user-images.githubusercontent.com/33984497/188775144-256ee91d-1a17-41be-b9c2-460454bf40e8.png">
<img width="1358" alt="image" src="https://user-images.githubusercontent.com/33984497/188775208-53077d96-356e-4032-bc9c-b0581320a14d.png">
### What you expected to happen
can explain special characters
### How to reproduce
fix the explain logic in ds
### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11815
|
https://github.com/apache/dolphinscheduler/pull/11818
|
37325b4c3410c2fe2f025c16363ea0c3a157647e
|
e101a5cb2c1a273eb4f2b29230c769f69f698148
| 2022-09-07T02:33:35Z |
java
| 2022-09-08T07:09:17Z |
dolphinscheduler-task-plugin/dolphinscheduler-task-datax/src/main/java/org/apache/dolphinscheduler/plugin/task/datax/DataxUtils.java
|
return DATAX_WRITER_PLUGIN_SQLSERVER;
case CLICKHOUSE:
return DATAX_WRITER_PLUGIN_CLICKHOUSE;
case HIVE:
return DATAX_WRITER_PLUGIN_HIVE;
default:
return null;
}
}
public static SQLStatementParser getSqlStatementParser(DbType dbType, String sql) {
switch (dbType) {
case MYSQL:
return new MySqlStatementParser(sql);
case POSTGRESQL:
return new PGSQLStatementParser(sql);
case ORACLE:
return new OracleStatementParser(sql);
case SQLSERVER:
return new SQLServerStatementParser(sql);
case CLICKHOUSE:
return new ClickhouseStatementParser(sql);
case HIVE:
return new HiveStatementParser(sql);
default:
return null;
}
}
public static String[] convertKeywordsColumns(DbType dbType, String[] columns) {
if (columns == null) {
return null;
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,815 |
[Bug] [Datax] error when ck is dtType and column names contain special characters
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
<img width="1285" alt="image" src="https://user-images.githubusercontent.com/33984497/188775144-256ee91d-1a17-41be-b9c2-460454bf40e8.png">
<img width="1358" alt="image" src="https://user-images.githubusercontent.com/33984497/188775208-53077d96-356e-4032-bc9c-b0581320a14d.png">
### What you expected to happen
can explain special characters
### How to reproduce
fix the explain logic in ds
### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11815
|
https://github.com/apache/dolphinscheduler/pull/11818
|
37325b4c3410c2fe2f025c16363ea0c3a157647e
|
e101a5cb2c1a273eb4f2b29230c769f69f698148
| 2022-09-07T02:33:35Z |
java
| 2022-09-08T07:09:17Z |
dolphinscheduler-task-plugin/dolphinscheduler-task-datax/src/main/java/org/apache/dolphinscheduler/plugin/task/datax/DataxUtils.java
|
}
String[] toColumns = new String[columns.length];
for (int i = 0; i < columns.length; i++) {
toColumns[i] = doConvertKeywordsColumn(dbType, columns[i]);
}
return toColumns;
}
public static String doConvertKeywordsColumn(DbType dbType, String column) {
if (column == null) {
return column;
}
column = column.trim();
column = column.replace("`", "");
column = column.replace("\"", "");
column = column.replace("'", "");
switch (dbType) {
case MYSQL:
return String.format("`%s`", column);
case POSTGRESQL:
return String.format("\"%s\"", column);
case ORACLE:
return String.format("\"%s\"", column);
case SQLSERVER:
return String.format("`%s`", column);
default:
return column;
}
}
}
|
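In this pre-fix chunk, `doConvertKeywordsColumn` strips any existing quotes and re-quotes the column for MySQL, PostgreSQL, Oracle and SQL Server, while ClickHouse falls through to the default branch and is returned unquoted, which is the behaviour the issue reports for column names with special characters. A self-contained copy of that logic, runnable on its own, is sketched below.

```java
// Self-contained copy of the doConvertKeywordsColumn logic in this chunk: strip
// any existing quoting, then re-quote for the target database. Db is a local
// stand-in for the project's DbType enum.
public class KeywordColumnDemo {

    enum Db { MYSQL, POSTGRESQL, ORACLE, SQLSERVER, CLICKHOUSE }

    static String quote(Db db, String column) {
        String c = column.trim().replace("`", "").replace("\"", "").replace("'", "");
        switch (db) {
            case MYSQL:
            case SQLSERVER:         // mirrors the chunk, which backtick-quotes SQL Server too
                return "`" + c + "`";
            case POSTGRESQL:
            case ORACLE:
                return "\"" + c + "\"";
            default:
                return c;           // ClickHouse hits this branch in the pre-fix code
        }
    }

    public static void main(String[] args) {
        System.out.println(quote(Db.MYSQL, "`order`"));    // `order`
        System.out.println(quote(Db.POSTGRESQL, "user"));  // "user"
        System.out.println(quote(Db.CLICKHOUSE, "a.b"));   // a.b, returned unquoted
    }
}
```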
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,768 |
[Improvement][Monitor] Support monitor h2 database in monitor page
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
Right now, we only support monitor mysql/pgsql in monitor page, when we start at standalone mode, we cannot get the db health in monitor page, it's better to support h2.
### Are you willing to submit a PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11768
|
https://github.com/apache/dolphinscheduler/pull/11813
|
e101a5cb2c1a273eb4f2b29230c769f69f698148
|
e20f17a7b72404bce3ce5c642668c428a5e768c0
| 2022-09-05T01:07:12Z |
java
| 2022-09-09T01:43:56Z |
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/MonitorDBDao.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,768 |
[Improvement][Monitor] Support monitor h2 database in monitor page
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
Right now, we only support monitor mysql/pgsql in monitor page, when we start at standalone mode, we cannot get the db health in monitor page, it's better to support h2.
### Are you willing to submit a PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11768
|
https://github.com/apache/dolphinscheduler/pull/11813
|
e101a5cb2c1a273eb4f2b29230c769f69f698148
|
e20f17a7b72404bce3ce5c642668c428a5e768c0
| 2022-09-05T01:07:12Z |
java
| 2022-09-09T01:43:56Z |
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/MonitorDBDao.java
|
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao;
import org.apache.dolphinscheduler.dao.entity.MonitorRecord;
import org.apache.dolphinscheduler.dao.utils.MySQLPerformance;
import org.apache.dolphinscheduler.dao.utils.PostgreSQLPerformance;
import org.apache.dolphinscheduler.spi.enums.DbType;
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.ArrayList;
import java.util.List;
import javax.sql.DataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
@Component
public class MonitorDBDao {
private static final Logger logger = LoggerFactory.getLogger(MonitorDBDao.class);
public static final String VARIABLE_NAME = "variable_name";
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,768 |
[Improvement][Monitor] Support monitor h2 database in monitor page
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
Right now, we only support monitor mysql/pgsql in monitor page, when we start at standalone mode, we cannot get the db health in monitor page, it's better to support h2.
### Are you willing to submit a PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11768
|
https://github.com/apache/dolphinscheduler/pull/11813
|
e101a5cb2c1a273eb4f2b29230c769f69f698148
|
e20f17a7b72404bce3ce5c642668c428a5e768c0
| 2022-09-05T01:07:12Z |
java
| 2022-09-09T01:43:56Z |
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/MonitorDBDao.java
|
@Autowired
private DataSource dataSource;
private MonitorRecord getCurrentDbPerformance() {
try (final Connection conn = dataSource.getConnection()) {
String driverClassName = DriverManager.getDriver(conn.getMetaData().getURL()).getClass().getName();
if (driverClassName.contains(DbType.MYSQL.toString().toLowerCase())) {
return new MySQLPerformance().getMonitorRecord(conn);
} else if (driverClassName.contains(DbType.POSTGRESQL.toString().toLowerCase())) {
return new PostgreSQLPerformance().getMonitorRecord(conn);
}
} catch (Exception e) {
logger.error("SQLException: {}", e.getMessage(), e);
}
return null;
}
/**
* query database state
*
* @return MonitorRecord list
*/
public List<MonitorRecord> queryDatabaseState() {
List<MonitorRecord> list = new ArrayList<>(1);
MonitorRecord monitorRecord = getCurrentDbPerformance();
if (monitorRecord != null) {
list.add(monitorRecord);
}
return list;
}
}
|
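`getCurrentDbPerformance` picks a performance collector by inspecting the JDBC driver class name, so on an H2-backed standalone deployment neither branch matches and the monitor page gets no record, which is what the issue asks to improve. The sketch below shows the same dispatch idea against standard JDBC metadata with an H2 branch added; it is an illustration only, not the change made in the linked pull request, and the `H2Performance` handler it mentions is hypothetical.

```java
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.SQLException;

// Dispatch sketch using standard JDBC metadata instead of the driver class name,
// with an H2 branch added as the issue requests. Illustration only; the
// H2Performance handler named in the comment is hypothetical.
public class DbTypeProbe {

    static String probe(Connection conn) throws SQLException {
        DatabaseMetaData meta = conn.getMetaData();
        String product = meta.getDatabaseProductName().toLowerCase();
        if (product.contains("mysql")) {
            return "mysql";        // would route to MySQLPerformance
        } else if (product.contains("postgresql")) {
            return "postgresql";   // would route to PostgreSQLPerformance
        } else if (product.contains("h2")) {
            return "h2";           // would route to a hypothetical H2Performance
        }
        return "unsupported";
    }

    public static void main(String[] args) throws Exception {
        // assumes the H2 driver is on the classpath, as it is in standalone mode
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:monitor")) {
            System.out.println(probe(conn)); // h2
        }
    }
}
```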
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,768 |
[Improvement][Monitor] Support monitor h2 database in monitor page
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
Right now, we only support monitor mysql/pgsql in monitor page, when we start at standalone mode, we cannot get the db health in monitor page, it's better to support h2.
### Are you willing to submit a PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11768
|
https://github.com/apache/dolphinscheduler/pull/11813
|
e101a5cb2c1a273eb4f2b29230c769f69f698148
|
e20f17a7b72404bce3ce5c642668c428a5e768c0
| 2022-09-05T01:07:12Z |
java
| 2022-09-09T01:43:56Z |
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/utils/MySQLPerformance.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.utils;
import static org.apache.dolphinscheduler.dao.MonitorDBDao.VARIABLE_NAME;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.dao.entity.MonitorRecord;
import org.apache.dolphinscheduler.spi.enums.DbType;
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,768 |
[Improvement][Monitor] Support monitor h2 database in monitor page
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
Right now, we only support monitor mysql/pgsql in monitor page, when we start at standalone mode, we cannot get the db health in monitor page, it's better to support h2.
### Are you willing to submit a PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11768
|
https://github.com/apache/dolphinscheduler/pull/11813
|
e101a5cb2c1a273eb4f2b29230c769f69f698148
|
e20f17a7b72404bce3ce5c642668c428a5e768c0
| 2022-09-05T01:07:12Z |
java
| 2022-09-09T01:43:56Z |
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/utils/MySQLPerformance.java
|
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Date;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* MySQL performance
*/
public class MySQLPerformance extends BaseDBPerformance {
private static Logger logger = LoggerFactory.getLogger(MySQLPerformance.class);
/**
* get monitor record
* @param conn connection
* @return MonitorRecord
*/
@Override
public MonitorRecord getMonitorRecord(Connection conn) {
MonitorRecord monitorRecord = new MonitorRecord();
monitorRecord.setDate(new Date());
monitorRecord.setDbType(DbType.MYSQL);
monitorRecord.setState(Flag.YES);
Statement pstmt= null;
try{
pstmt = conn.createStatement();
try (ResultSet rs1 = pstmt.executeQuery("show global variables")) {
while(rs1.next()){
if("MAX_CONNECTIONS".equalsIgnoreCase(rs1.getString(VARIABLE_NAME))){
monitorRecord.setMaxConnections( Long.parseLong(rs1.getString("value")));
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,768 |
[Improvement][Monitor] Support monitor h2 database in monitor page
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
Right now, we only support monitor mysql/pgsql in monitor page, when we start at standalone mode, we cannot get the db health in monitor page, it's better to support h2.
### Are you willing to submit a PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11768
|
https://github.com/apache/dolphinscheduler/pull/11813
|
e101a5cb2c1a273eb4f2b29230c769f69f698148
|
e20f17a7b72404bce3ce5c642668c428a5e768c0
| 2022-09-05T01:07:12Z |
java
| 2022-09-09T01:43:56Z |
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/utils/MySQLPerformance.java
|
}
}
}
try (ResultSet rs2 = pstmt.executeQuery("show global status")) {
while(rs2.next()){
if("MAX_USED_CONNECTIONS".equalsIgnoreCase(rs2.getString(VARIABLE_NAME))){
monitorRecord.setMaxUsedConnections(Long.parseLong(rs2.getString("value")));
}else if("THREADS_CONNECTED".equalsIgnoreCase(rs2.getString(VARIABLE_NAME))){
monitorRecord.setThreadsConnections(Long.parseLong(rs2.getString("value")));
}else if("THREADS_RUNNING".equalsIgnoreCase(rs2.getString(VARIABLE_NAME))){
monitorRecord.setThreadsRunningConnections(Long.parseLong(rs2.getString("value")));
}
}
}
}catch (Exception e) {
monitorRecord.setState(Flag.NO);
logger.error("SQLException ", e);
}finally {
try {
if (pstmt != null) {
pstmt.close();
}
}catch (SQLException e) {
logger.error("SQLException ", e);
}
}
return monitorRecord;
}
}
|
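The MySQL collector above closes its `Statement` in a manual `finally` block. A variant of the same `show global variables` / `show global status` polling with the statement also under try-with-resources is sketched below; it compiles against `java.sql` only and expects the caller to supply a live MySQL connection and handle the exception.

```java
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

// Same "show global variables" / "show global status" polling as the chunk, but
// with the Statement managed by try-with-resources instead of a manual finally.
// The caller supplies a live MySQL connection and handles SQLException.
public class MySqlConnectionStats {

    static long maxConnections(Connection conn) throws SQLException {
        try (Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("show global variables like 'max_connections'")) {
            return rs.next() ? Long.parseLong(rs.getString("value")) : -1L;
        }
    }

    static long threadsConnected(Connection conn) throws SQLException {
        try (Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("show global status like 'threads_connected'")) {
            return rs.next() ? Long.parseLong(rs.getString("value")) : -1L;
        }
    }
}
```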
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,768 |
[Improvement][Monitor] Support monitor h2 database in monitor page
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
Right now, we only support monitor mysql/pgsql in monitor page, when we start at standalone mode, we cannot get the db health in monitor page, it's better to support h2.
### Are you willing to submit a PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11768
|
https://github.com/apache/dolphinscheduler/pull/11813
|
e101a5cb2c1a273eb4f2b29230c769f69f698148
|
e20f17a7b72404bce3ce5c642668c428a5e768c0
| 2022-09-05T01:07:12Z |
java
| 2022-09-09T01:43:56Z |
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/utils/PostgreSQLPerformance.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.utils;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.dao.entity.MonitorRecord;
import org.apache.dolphinscheduler.spi.enums.DbType;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Date;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class PostgreSQLPerformance extends BaseDBPerformance {
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,768 |
[Improvement][Monitor] Support monitor h2 database in monitor page
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
Right now, we only support monitor mysql/pgsql in monitor page, when we start at standalone mode, we cannot get the db health in monitor page, it's better to support h2.
### Are you willing to submit a PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11768
|
https://github.com/apache/dolphinscheduler/pull/11813
|
e101a5cb2c1a273eb4f2b29230c769f69f698148
|
e20f17a7b72404bce3ce5c642668c428a5e768c0
| 2022-09-05T01:07:12Z |
java
| 2022-09-09T01:43:56Z |
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/utils/PostgreSQLPerformance.java
|
private static final Logger logger = LoggerFactory.getLogger(PostgreSQLPerformance.class);
/**
* get monitor record
*
* @param conn connection
* @return MonitorRecord
*/
@Override
public MonitorRecord getMonitorRecord(Connection conn) {
MonitorRecord monitorRecord = new MonitorRecord();
monitorRecord.setDate(new Date());
monitorRecord.setState(Flag.YES);
monitorRecord.setDbType(DbType.POSTGRESQL);
Statement pstmt = null;
try {
pstmt = conn.createStatement();
try (ResultSet rs1 = pstmt.executeQuery("select count(*) from pg_stat_activity;")) {
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,768 |
[Improvement][Monitor] Support monitor h2 database in monitor page
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.
### Description
Right now, we only support monitor mysql/pgsql in monitor page, when we start at standalone mode, we cannot get the db health in monitor page, it's better to support h2.
### Are you willing to submit a PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11768
|
https://github.com/apache/dolphinscheduler/pull/11813
|
e101a5cb2c1a273eb4f2b29230c769f69f698148
|
e20f17a7b72404bce3ce5c642668c428a5e768c0
| 2022-09-05T01:07:12Z |
java
| 2022-09-09T01:43:56Z |
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/utils/PostgreSQLPerformance.java
|
if (rs1.next()) {
monitorRecord.setThreadsConnections(rs1.getInt("count"));
}
}
try (ResultSet rs2 = pstmt.executeQuery("show max_connections")) {
if (rs2.next()) {
monitorRecord.setMaxConnections(rs2.getInt("max_connections"));
}
}
try (ResultSet rs3 = pstmt.executeQuery("select count(*) from pg_stat_activity pg where pg.state = 'active';")) {
if (rs3.next()) {
monitorRecord.setThreadsRunningConnections(rs3.getInt("count"));
}
}
} catch (Exception e) {
monitorRecord.setState(Flag.NO);
logger.error("SQLException ", e);
} finally {
try {
if (pstmt != null) {
pstmt.close();
}
} catch (SQLException e) {
logger.error("SQLException ", e);
}
}
return monitorRecord;
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,869 |
[Bug] [Monitor] MemoryUsage and DiskAvailable not show on monitor page (Both Worker and Master)
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Disk available and Memory usage are not correct

### What you expected to happen
Disk available and Memory can show the real value
### How to reproduce
* Start standalone server and ui
* login
* click monitor tab (Worker and Master)
### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11869
|
https://github.com/apache/dolphinscheduler/pull/11870
|
c3a8dd5ca7cd2fa9bf97d745ad176a6f683c6ce3
|
3ca9680b20a4e5b3c0aff65abbcf4faf3379cd3f
| 2022-09-09T02:25:11Z |
java
| 2022-09-11T11:11:03Z |
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/model/MasterHeartBeat.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.model;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
@Data
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,869 |
[Bug] [Monitor] MemoryUsage and DiskAvailable not show on monitor page (Both Worker and Master)
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Disk available and Memory usage are not correct

### What you expected to happen
Disk available and Memory can show the real value
### How to reproduce
* Start standalone server and ui
* login
* click monitor tab (Worker and Master)
### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11869
|
https://github.com/apache/dolphinscheduler/pull/11870
|
c3a8dd5ca7cd2fa9bf97d745ad176a6f683c6ce3
|
3ca9680b20a4e5b3c0aff65abbcf4faf3379cd3f
| 2022-09-09T02:25:11Z |
java
| 2022-09-11T11:11:03Z |
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/model/MasterHeartBeat.java
|
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class MasterHeartBeat implements HeartBeat {
private long startupTime;
private long reportTime;
private double cpuUsage;
private double memoryUsage;
private double loadAverage;
private double availablePhysicalMemorySize;
private double maxCpuloadAvg;
private double reservedMemory;
private int processId;
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,869 |
[Bug] [Monitor] MemoryUsage and DiskAvailable not show on monitor page (Both Worker and Master)
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Disk available and Memory usage are not correct

### What you expected to happen
Disk available and Memory can show the real value
### How to reproduce
* Start standalone server and ui
* login
* click monitor tab (Worker and Master)
### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11869
|
https://github.com/apache/dolphinscheduler/pull/11870
|
c3a8dd5ca7cd2fa9bf97d745ad176a6f683c6ce3
|
3ca9680b20a4e5b3c0aff65abbcf4faf3379cd3f
| 2022-09-09T02:25:11Z |
java
| 2022-09-11T11:11:03Z |
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/model/WorkerHeartBeat.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.model;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
@Data
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,869 |
[Bug] [Monitor] MemoryUsage and DiskAvailable not show on monitor page (Both Worker and Master)
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Disk available and Memory usage are not correct

### What you expected to happen
Disk available and Memory can show the real value
### How to reproduce
* Start standalone server and ui
* login
* click monitor tab (Worker and Master)
### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11869
|
https://github.com/apache/dolphinscheduler/pull/11870
|
c3a8dd5ca7cd2fa9bf97d745ad176a6f683c6ce3
|
3ca9680b20a4e5b3c0aff65abbcf4faf3379cd3f
| 2022-09-09T02:25:11Z |
java
| 2022-09-11T11:11:03Z |
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/model/WorkerHeartBeat.java
|
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class WorkerHeartBeat implements HeartBeat {
private long startupTime;
private long reportTime;
private double cpuUsage;
private double memoryUsage;
private double loadAverage;
private double availablePhysicalMemorySize;
private double maxCpuloadAvg;
private double reservedMemory;
private int serverStatus;
private int processId;
private int workerHostWeight;
private int workerWaitingTaskCount;
private int workerExecThreadCount;
}
|
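Both heartbeat beans carry the CPU, memory and disk style metrics that the monitor page renders. As a point of reference, the snippet below samples the two values the issue is about (memory usage and disk available) with JDK APIs alone on a HotSpot JVM; DolphinScheduler collects them through its own OSUtils, and the heartbeat field names remain the project's.

```java
import java.io.File;
import java.lang.management.ManagementFactory;

// Samples memory usage and disk available with JDK APIs only. The cast below
// works on HotSpot-based JVMs; newer JDKs prefer getTotalMemorySize()/
// getFreeMemorySize(). DolphinScheduler gathers these through its own OSUtils.
public class HostMetricsDemo {

    public static void main(String[] args) {
        // usable space of the root filesystem, in gigabytes
        double diskAvailableGb = new File("/").getUsableSpace() / 1024.0 / 1024 / 1024;

        com.sun.management.OperatingSystemMXBean os =
                (com.sun.management.OperatingSystemMXBean) ManagementFactory.getOperatingSystemMXBean();
        double total = os.getTotalPhysicalMemorySize();
        double free = os.getFreePhysicalMemorySize();
        double memoryUsage = (total - free) / total;

        System.out.printf("diskAvailable=%.2fG memoryUsage=%.2f%n", diskAvailableGb, memoryUsage);
    }
}
```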
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,869 |
[Bug] [Monitor] MemoryUsage and DiskAvailable not show on monitor page (Both Worker and Master)
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Disk available and Memory usage are not correct

### What you expected to happen
Disk available and Memory can show the real value
### How to reproduce
* Start standalone server and ui
* login
* click monitor tab (Worker and Master)
### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11869
|
https://github.com/apache/dolphinscheduler/pull/11870
|
c3a8dd5ca7cd2fa9bf97d745ad176a6f683c6ce3
|
3ca9680b20a4e5b3c0aff65abbcf4faf3379cd3f
| 2022-09-09T02:25:11Z |
java
| 2022-09-11T11:11:03Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/task/MasterHeartBeatTask.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.task;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.apache.dolphinscheduler.common.lifecycle.ServerLifeCycleManager;
import org.apache.dolphinscheduler.common.model.BaseHeartBeatTask;
import org.apache.dolphinscheduler.common.model.MasterHeartBeat;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.service.registry.RegistryClient;
@Slf4j
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,869 |
[Bug] [Monitor] MemoryUsage and DiskAvailable not show on monitor page (Both Worker and Master)
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Disk available and Memory usage are not correct

### What you expected to happen
Disk available and Memory usage should show the real values
### How to reproduce
* Start standalone server and ui
* login
* click monitor tab (Worker and Master)
### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11869
|
https://github.com/apache/dolphinscheduler/pull/11870
|
c3a8dd5ca7cd2fa9bf97d745ad176a6f683c6ce3
|
3ca9680b20a4e5b3c0aff65abbcf4faf3379cd3f
| 2022-09-09T02:25:11Z |
java
| 2022-09-11T11:11:03Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/task/MasterHeartBeatTask.java
|
public class MasterHeartBeatTask extends BaseHeartBeatTask<MasterHeartBeat> {
private final MasterConfig masterConfig;
private final RegistryClient registryClient;
private final String heartBeatPath;
private final int processId;
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,869 |
[Bug] [Monitor] MemoryUsage and DiskAvailable not show on monitor page (Both Worker and Master)
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Disk available and Memory usage are not correct

### What you expected to happen
Disk available and Memory usage should show the real values
### How to reproduce
* Start standalone server and ui
* login
* click monitor tab (Worker and Master)
### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11869
|
https://github.com/apache/dolphinscheduler/pull/11870
|
c3a8dd5ca7cd2fa9bf97d745ad176a6f683c6ce3
|
3ca9680b20a4e5b3c0aff65abbcf4faf3379cd3f
| 2022-09-09T02:25:11Z |
java
| 2022-09-11T11:11:03Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/task/MasterHeartBeatTask.java
|
public MasterHeartBeatTask(@NonNull MasterConfig masterConfig,
@NonNull RegistryClient registryClient) {
super("MasterHeartBeatTask", masterConfig.getHeartbeatInterval().toMillis());
this.masterConfig = masterConfig;
this.registryClient = registryClient;
this.heartBeatPath = masterConfig.getMasterRegistryPath();
this.processId = OSUtils.getProcessID();
}
@Override
public MasterHeartBeat getHeartBeat() {
return MasterHeartBeat.builder()
.startupTime(ServerLifeCycleManager.getServerStartupTime())
.reportTime(System.currentTimeMillis())
.cpuUsage(OSUtils.cpuUsage())
.loadAverage(OSUtils.loadAverage())
.availablePhysicalMemorySize(OSUtils.availablePhysicalMemorySize())
.maxCpuloadAvg(masterConfig.getMaxCpuLoadAvg())
.reservedMemory(masterConfig.getReservedMemory())
.processId(processId)
.build();
}
@Override
public void writeHeartBeat(MasterHeartBeat masterHeartBeat) {
String masterHeartBeatJson = JSONUtils.toJsonString(masterHeartBeat);
registryClient.persistEphemeral(heartBeatPath, masterHeartBeatJson);
log.info("Success write master heartBeatInfo into registry, masterRegistryPath: {}, heartBeatInfo: {}",
heartBeatPath, masterHeartBeatJson);
}
}
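Hedged aside on the issue this chunk belongs to: the builder in `getHeartBeat()` above never fills `memoryUsage` or any disk figure, so the monitor page has nothing to render for those columns. A minimal sketch of how the heartbeat could be extended follows; `OSUtils.memoryUsage()`, `OSUtils.diskAvailable()` and the corresponding builder fields are assumptions for illustration, not confirmed APIs of this codebase or a description of the merged fix.
```java
// Hedged sketch only -- assumed helpers and fields are marked below.
@Override
public MasterHeartBeat getHeartBeat() {
    return MasterHeartBeat.builder()
            .startupTime(ServerLifeCycleManager.getServerStartupTime())
            .reportTime(System.currentTimeMillis())
            .cpuUsage(OSUtils.cpuUsage())
            .loadAverage(OSUtils.loadAverage())
            .availablePhysicalMemorySize(OSUtils.availablePhysicalMemorySize())
            .memoryUsage(OSUtils.memoryUsage())       // assumed helper + assumed field
            .diskAvailable(OSUtils.diskAvailable())   // assumed helper + assumed field
            .maxCpuloadAvg(masterConfig.getMaxCpuLoadAvg())
            .reservedMemory(masterConfig.getReservedMemory())
            .processId(processId)
            .build();
}
```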
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,869 |
[Bug] [Monitor] MemoryUsage and DiskAvailable not show on monitor page (Both Worker and Master)
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Disk available and Memory usage are not correct

### What you expected to happen
Disk available and Memory usage should show the real values
### How to reproduce
* Start standalone server and ui
* login
* click monitor tab (Worker and Master)
### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11869
|
https://github.com/apache/dolphinscheduler/pull/11870
|
c3a8dd5ca7cd2fa9bf97d745ad176a6f683c6ce3
|
3ca9680b20a4e5b3c0aff65abbcf4faf3379cd3f
| 2022-09-09T02:25:11Z |
java
| 2022-09-11T11:11:03Z |
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/task/WorkerHeartBeatTask.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.lifecycle.ServerLifeCycleManager;
import org.apache.dolphinscheduler.common.model.BaseHeartBeatTask;
import org.apache.dolphinscheduler.common.model.WorkerHeartBeat;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.server.worker.config.WorkerConfig;
import org.apache.dolphinscheduler.service.registry.RegistryClient;
import java.util.function.Supplier;
@Slf4j
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,869 |
[Bug] [Monitor] MemoryUsage and DiskAvailable not show on monitor page (Both Worker and Master)
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Disk available and Memory usage are not correct

### What you expected to happen
Disk available and Memory usage should show the real values
### How to reproduce
* Start standalone server and ui
* login
* click monitor tab (Worker and Master)
### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11869
|
https://github.com/apache/dolphinscheduler/pull/11870
|
c3a8dd5ca7cd2fa9bf97d745ad176a6f683c6ce3
|
3ca9680b20a4e5b3c0aff65abbcf4faf3379cd3f
| 2022-09-09T02:25:11Z |
java
| 2022-09-11T11:11:03Z |
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/task/WorkerHeartBeatTask.java
|
public class WorkerHeartBeatTask extends BaseHeartBeatTask<WorkerHeartBeat> {
private final WorkerConfig workerConfig;
private final RegistryClient registryClient;
private final Supplier<Integer> workerWaitingTaskCount;
private final int processId;
public WorkerHeartBeatTask(@NonNull WorkerConfig workerConfig,
@NonNull RegistryClient registryClient,
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,869 |
[Bug] [Monitor] MemoryUsage and DiskAvailable not show on monitor page (Both Worker and Master)
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Disk available and Memory usage are not correct

### What you expected to happen
Disk available and Memory usage should show the real values
### How to reproduce
* Start standalone server and ui
* login
* click monitor tab (Worker and Master)
### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11869
|
https://github.com/apache/dolphinscheduler/pull/11870
|
c3a8dd5ca7cd2fa9bf97d745ad176a6f683c6ce3
|
3ca9680b20a4e5b3c0aff65abbcf4faf3379cd3f
| 2022-09-09T02:25:11Z |
java
| 2022-09-11T11:11:03Z |
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/task/WorkerHeartBeatTask.java
|
@NonNull Supplier<Integer> workerWaitingTaskCount) {
super("WorkerHeartBeatTask", workerConfig.getHeartbeatInterval().toMillis());
this.workerConfig = workerConfig;
this.registryClient = registryClient;
this.workerWaitingTaskCount = workerWaitingTaskCount;
this.processId = OSUtils.getProcessID();
}
@Override
public WorkerHeartBeat getHeartBeat() {
double loadAverage = OSUtils.loadAverage();
double cpuUsage = OSUtils.cpuUsage();
int maxCpuLoadAvg = workerConfig.getMaxCpuLoadAvg();
double reservedMemory = workerConfig.getReservedMemory();
double availablePhysicalMemorySize = OSUtils.availablePhysicalMemorySize();
int execThreads = workerConfig.getExecThreads();
int workerWaitingTaskCount = this.workerWaitingTaskCount.get();
int serverStatus = getServerStatus(loadAverage, maxCpuLoadAvg, availablePhysicalMemorySize, reservedMemory, execThreads, workerWaitingTaskCount);
return WorkerHeartBeat.builder()
.startupTime(ServerLifeCycleManager.getServerStartupTime())
.reportTime(System.currentTimeMillis())
.cpuUsage(cpuUsage)
.loadAverage(loadAverage)
.availablePhysicalMemorySize(availablePhysicalMemorySize)
.maxCpuloadAvg(maxCpuLoadAvg)
.reservedMemory(reservedMemory)
.processId(processId)
.workerHostWeight(workerConfig.getHostWeight())
.workerWaitingTaskCount(this.workerWaitingTaskCount.get())
.workerExecThreadCount(workerConfig.getExecThreads())
.serverStatus(serverStatus)
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,869 |
[Bug] [Monitor] MemoryUsage and DiskAvailable not show on monitor page (Both Worker and Master)
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Disk available and Memory usage are not correct

### What you expected to happen
Disk available and Memory usage should show the real values
### How to reproduce
* Start standalone server and ui
* login
* click monitor tab (Worker and Master)
### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11869
|
https://github.com/apache/dolphinscheduler/pull/11870
|
c3a8dd5ca7cd2fa9bf97d745ad176a6f683c6ce3
|
3ca9680b20a4e5b3c0aff65abbcf4faf3379cd3f
| 2022-09-09T02:25:11Z |
java
| 2022-09-11T11:11:03Z |
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/task/WorkerHeartBeatTask.java
|
.build();
}
@Override
public void writeHeartBeat(WorkerHeartBeat workerHeartBeat) {
String workerHeartBeatJson = JSONUtils.toJsonString(workerHeartBeat);
for (String workerGroupRegistryPath : workerConfig.getWorkerGroupRegistryPaths()) {
registryClient.persistEphemeral(workerGroupRegistryPath, workerHeartBeatJson);
}
log.info("Success write worker group heartBeatInfo into registry, workGroupPath: {} workerHeartBeatInfo: {}",
workerConfig.getWorkerGroupRegistryPaths(), workerHeartBeatJson);
}
public int getServerStatus(double loadAverage,
double maxCpuloadAvg,
double availablePhysicalMemorySize,
double reservedMemory,
int workerExecThreadCount,
int workerWaitingTaskCount) {
if (loadAverage > maxCpuloadAvg || availablePhysicalMemorySize < reservedMemory) {
log.warn("current cpu load average {} is too high or available memory {}G is too low, under max.cpuload.avg={} and reserved.memory={}G",
loadAverage, availablePhysicalMemorySize, maxCpuloadAvg, reservedMemory);
return Constants.ABNORMAL_NODE_STATUS;
} else if (workerWaitingTaskCount > workerExecThreadCount) {
log.warn("current waiting task count {} is large than worker thread count {}, worker is busy", workerWaitingTaskCount, workerExecThreadCount);
return Constants.BUSY_NODE_STATUE;
} else {
return Constants.NORMAL_NODE_STATUS;
}
}
}
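A quick, hedged illustration of the thresholds in `getServerStatus(...)` above. The `workerConfig` and `registryClient` arguments are placeholders needed only to obtain an instance; the numeric arguments are made-up sample values.
```java
// Placeholder dependencies assumed to be available in the calling context.
WorkerHeartBeatTask task = new WorkerHeartBeatTask(workerConfig, registryClient, () -> 0);

// loadAverage (9.0) above maxCpuloadAvg (4.0) -> Constants.ABNORMAL_NODE_STATUS
int abnormal = task.getServerStatus(9.0, 4.0, 8.0, 0.3, 100, 10);

// waiting tasks (200) exceed exec threads (100) -> Constants.BUSY_NODE_STATUE
int busy = task.getServerStatus(1.0, 4.0, 8.0, 0.3, 100, 200);

// otherwise -> Constants.NORMAL_NODE_STATUS
int normal = task.getServerStatus(1.0, 4.0, 8.0, 0.3, 100, 10);
```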
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,854 |
[Bug] [dolphinscheduler-api] Data Analysis Service API responds with jackson deserialization exception
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Remotely call the DataAnalysisService API:
1. `/dolphinscheduler/projects/analysis/task-state-count`
2. `/dolphinscheduler/projects/analysis/process-state-count`
then deserialize the response to `org.apache.dolphinscheduler.api.dto.TaskCountDto`:
```java
import org.apache.dolphinscheduler.common.utils.JSONUtils;
@Data
public class QueryTaskStatesResponse<T> {
protected Map<String, String> headers;
protected Result<T> body;
}
...
final Map<String, ?> res = callApi(xxx);
JSONUtils.parseObject(JSONUtils.toJsonString(res), QueryTaskStatesResponse.class);
...
```
The following exception is raised:
```java
com.fasterxml.jackson.databind.exc.InvalidDefinitionException: Cannot construct instance of `org.apache.dolphinscheduler.api.dto.TaskStateCount` (no Creators, like default constructor, exist): cannot deserialize from Object value (no delegate- or property-based Creator)
at [Source: (String)"{"headers":{"date":"Thu, 08 Sep 2022 06:46:35 GMT","transfer-encoding":"chunked","vary":"Origin;Access-Control-Request-Method;Access-Control-Request-Headers;Accept-Encoding, User-Agent","content-type":"application/json"},"body":{"code":0,"msg":"success","data":{"totalCount":2,"taskCountDtos":[{"count":0,"taskStateType":"SUBMITTED_SUCCESS"},{"count":2,"taskStateType":"RUNNING_EXECUTION"},{"count":0,"taskStateType":"READY_PAUSE"},{"count":0,"taskStateType":"PAUSE"},{"count":0,"taskStateType":"READ"[truncated 392 chars]; line: 1, column: 296] (through reference chain: com.alibaba.teasdk.model.analysis.QueryProcessInstanceStatesResponse["body"]->org.apache.dolphinscheduler.api.utils.Result["data"]->org.apache.dolphinscheduler.api.dto.TaskCountDto["taskCountDtos"]->java.util.ArrayList[0])
at com.fasterxml.jackson.databind.exc.InvalidDefinitionException.from(InvalidDefinitionException.java:67) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.DeserializationContext.reportBadDefinition(DeserializationContext.java:1904) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.DatabindContext.reportBadDefinition(DatabindContext.java:400) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.DeserializationContext.handleMissingInstantiator(DeserializationContext.java:1349) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.BeanDeserializerBase.deserializeFromObjectUsingNonDefault(BeanDeserializerBase.java:1415) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.BeanDeserializer.deserializeFromObject(BeanDeserializer.java:351) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.BeanDeserializer.deserialize(BeanDeserializer.java:184) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.std.CollectionDeserializer._deserializeFromArray(CollectionDeserializer.java:355) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.std.CollectionDeserializer.deserialize(CollectionDeserializer.java:244) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.std.CollectionDeserializer.deserialize(CollectionDeserializer.java:28) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.impl.MethodProperty.deserializeAndSet(MethodProperty.java:129) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.BeanDeserializer.vanillaDeserialize(BeanDeserializer.java:313) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.BeanDeserializer.deserialize(BeanDeserializer.java:176) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.impl.MethodProperty.deserializeAndSet(MethodProperty.java:129) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.BeanDeserializer.vanillaDeserialize(BeanDeserializer.java:313) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.BeanDeserializer.deserialize(BeanDeserializer.java:176) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.impl.MethodProperty.deserializeAndSet(MethodProperty.java:129) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.BeanDeserializer.vanillaDeserialize(BeanDeserializer.java:313) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.BeanDeserializer.deserialize(BeanDeserializer.java:176) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.DefaultDeserializationContext.readRootValue(DefaultDeserializationContext.java:323) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.ObjectMapper._readMapAndClose(ObjectMapper.java:4674) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.ObjectMapper.readValue(ObjectMapper.java:3629) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.ObjectMapper.readValue(ObjectMapper.java:3597) ~[jackson-databind-2.13.3.jar:2.13.3]
at org.apache.dolphinscheduler.common.utils.JSONUtils.parseObject(JSONUtils.java:127) ~[dolphinscheduler-common-2.0.6-SNAPSHOT.jar:2.0.6-SNAPSHOT]
at com.alibaba.teasdk.Client.toResponse(Client.java:227) [classes/:na]
at com.alibaba.teasdk.Client.doRequest(Client.java:78) [classes/:na]
at com.alibaba.teasdk.Client.call(Client.java:39) [classes/:na]
at com.alibaba.teasdk.ApplicationTests.testAnalysis(ApplicationTests.java:325) [test-classes/:na]
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[na:1.8.0_321]
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[na:1.8.0_321]
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[na:1.8.0_321]
at java.lang.reflect.Method.invoke(Method.java:498) ~[na:1.8.0_321]
at org.junit.platform.commons.util.ReflectionUtils.invokeMethod(ReflectionUtils.java:725) [junit-platform-commons-1.8.2.jar:1.8.2]
at org.junit.jupiter.engine.execution.MethodInvocation.proceed(MethodInvocation.java:60) [junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.execution.InvocationInterceptorChain$ValidatingInvocation.proceed(InvocationInterceptorChain.java:131) [junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.extension.TimeoutExtension.intercept(TimeoutExtension.java:149) [junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.extension.TimeoutExtension.interceptTestableMethod(TimeoutExtension.java:140) [junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.extension.TimeoutExtension.interceptTestMethod(TimeoutExtension.java:84) [junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.execution.ExecutableInvoker$ReflectiveInterceptorCall.lambda$ofVoidMethod$0(ExecutableInvoker.java:115) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.execution.ExecutableInvoker.lambda$invoke$0(ExecutableInvoker.java:105) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.execution.InvocationInterceptorChain$InterceptedInvocation.proceed(InvocationInterceptorChain.java:106) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.execution.InvocationInterceptorChain.proceed(InvocationInterceptorChain.java:64) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.execution.InvocationInterceptorChain.chainAndInvoke(InvocationInterceptorChain.java:45) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.execution.InvocationInterceptorChain.invoke(InvocationInterceptorChain.java:37) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.execution.ExecutableInvoker.invoke(ExecutableInvoker.java:104) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.execution.ExecutableInvoker.invoke(ExecutableInvoker.java:98) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.descriptor.TestMethodTestDescriptor.lambda$invokeTestMethod$7(TestMethodTestDescriptor.java:214) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.jupiter.engine.descriptor.TestMethodTestDescriptor.invokeTestMethod(TestMethodTestDescriptor.java:210) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.descriptor.TestMethodTestDescriptor.execute(TestMethodTestDescriptor.java:135) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.descriptor.TestMethodTestDescriptor.execute(TestMethodTestDescriptor.java:66) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$6(NodeTestTask.java:151) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$8(NodeTestTask.java:141) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.Node.around(Node.java:137) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$9(NodeTestTask.java:139) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.executeRecursively(NodeTestTask.java:138) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.execute(NodeTestTask.java:95) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at java.util.ArrayList.forEach(ArrayList.java:1259) ~[na:1.8.0_321]
at org.junit.platform.engine.support.hierarchical.SameThreadHierarchicalTestExecutorService.invokeAll(SameThreadHierarchicalTestExecutorService.java:41) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$6(NodeTestTask.java:155) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$8(NodeTestTask.java:141) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.Node.around(Node.java:137) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$9(NodeTestTask.java:139) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.executeRecursively(NodeTestTask.java:138) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.execute(NodeTestTask.java:95) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at java.util.ArrayList.forEach(ArrayList.java:1259) ~[na:1.8.0_321]
at org.junit.platform.engine.support.hierarchical.SameThreadHierarchicalTestExecutorService.invokeAll(SameThreadHierarchicalTestExecutorService.java:41) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$6(NodeTestTask.java:155) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$8(NodeTestTask.java:141) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.Node.around(Node.java:137) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$9(NodeTestTask.java:139) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.executeRecursively(NodeTestTask.java:138) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.execute(NodeTestTask.java:95) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.SameThreadHierarchicalTestExecutorService.submit(SameThreadHierarchicalTestExecutorService.java:35) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.HierarchicalTestExecutor.execute(HierarchicalTestExecutor.java:57) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.HierarchicalTestEngine.execute(HierarchicalTestEngine.java:54) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.launcher.core.EngineExecutionOrchestrator.execute(EngineExecutionOrchestrator.java:107) ~[junit-platform-launcher-1.8.2.jar:1.8.2]
at org.junit.platform.launcher.core.EngineExecutionOrchestrator.execute(EngineExecutionOrchestrator.java:88) ~[junit-platform-launcher-1.8.2.jar:1.8.2]
at org.junit.platform.launcher.core.EngineExecutionOrchestrator.lambda$execute$0(EngineExecutionOrchestrator.java:54) ~[junit-platform-launcher-1.8.2.jar:1.8.2]
at org.junit.platform.launcher.core.EngineExecutionOrchestrator.withInterceptedStreams(EngineExecutionOrchestrator.java:67) ~[junit-platform-launcher-1.8.2.jar:1.8.2]
at org.junit.platform.launcher.core.EngineExecutionOrchestrator.execute(EngineExecutionOrchestrator.java:52) ~[junit-platform-launcher-1.8.2.jar:1.8.2]
at org.junit.platform.launcher.core.DefaultLauncher.execute(DefaultLauncher.java:114) ~[junit-platform-launcher-1.8.2.jar:1.8.2]
at org.junit.platform.launcher.core.DefaultLauncher.execute(DefaultLauncher.java:86) ~[junit-platform-launcher-1.8.2.jar:1.8.2]
at org.junit.platform.launcher.core.DefaultLauncherSession$DelegatingLauncher.execute(DefaultLauncherSession.java:86) ~[junit-platform-launcher-1.8.2.jar:1.8.2]
at org.junit.platform.launcher.core.SessionPerRequestLauncher.execute(SessionPerRequestLauncher.java:53) ~[junit-platform-launcher-1.8.2.jar:1.8.2]
at com.intellij.junit5.JUnit5IdeaTestRunner.startRunnerWithArgs(JUnit5IdeaTestRunner.java:71) ~[junit5-rt.jar:na]
at com.intellij.rt.junit.IdeaTestRunner$Repeater$1.execute(IdeaTestRunner.java:38) ~[junit-rt.jar:na]
at com.intellij.rt.execution.junit.TestsRepeater.repeat(TestsRepeater.java:11) ~[idea_rt.jar:na]
at com.intellij.rt.junit.IdeaTestRunner$Repeater.startRunnerWithArgs(IdeaTestRunner.java:35) ~[junit-rt.jar:na]
at com.intellij.rt.junit.JUnitStarter.prepareStreamsAndStart(JUnitStarter.java:235) ~[junit-rt.jar:na]
at com.intellij.rt.junit.JUnitStarter.main(JUnitStarter.java:54) ~[junit-rt.jar:na]
```
### What you expected to happen
The response deserializes correctly
### How to reproduce
`org.apache.dolphinscheduler.api.dto.TaskCountDto` and `org.apache.dolphinscheduler.api.dto.TaskStateCount` lack a default constructor; adding one is the suggested fix.
### Anything else
_No response_
### Version
2.0.6
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11854
|
https://github.com/apache/dolphinscheduler/pull/11858
|
3ca9680b20a4e5b3c0aff65abbcf4faf3379cd3f
|
db46e4786aa0c5f5f16fac4e977a0570c8702fe1
| 2022-09-08T07:11:10Z |
java
| 2022-09-11T11:12:03Z |
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/TaskCountDto.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto;
import lombok.Data;
import org.apache.dolphinscheduler.dao.entity.ExecuteStatusCount;
import org.apache.dolphinscheduler.plugin.task.api.enums.TaskExecutionStatus;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
@Data
public class TaskCountDto {
/**
* total count
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,854 |
[Bug] [dolphinscheduler-api] Data Analysis Service API responds with jackson deserialization exception
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
Remotely call the DataAnalysisService API:
1. `/dolphinscheduler/projects/analysis/task-state-count`
2. `/dolphinscheduler/projects/analysis/process-state-count`
then deserialize the response to `org.apache.dolphinscheduler.api.dto.TaskCountDto`:
```java
import org.apache.dolphinscheduler.common.utils.JSONUtils;
@Data
public class QueryTaskStatesResponse<T> {
protected Map<String, String> headers;
protected Result<T> body;
}
...
final Map<String, ?> res = callApi(xxx);
JSONUtils.parseObject(JSONUtils.toJsonString(res), QueryTaskStatesResponse.class);
...
```
The following exception is raised:
```java
com.fasterxml.jackson.databind.exc.InvalidDefinitionException: Cannot construct instance of `org.apache.dolphinscheduler.api.dto.TaskStateCount` (no Creators, like default constructor, exist): cannot deserialize from Object value (no delegate- or property-based Creator)
at [Source: (String)"{"headers":{"date":"Thu, 08 Sep 2022 06:46:35 GMT","transfer-encoding":"chunked","vary":"Origin;Access-Control-Request-Method;Access-Control-Request-Headers;Accept-Encoding, User-Agent","content-type":"application/json"},"body":{"code":0,"msg":"success","data":{"totalCount":2,"taskCountDtos":[{"count":0,"taskStateType":"SUBMITTED_SUCCESS"},{"count":2,"taskStateType":"RUNNING_EXECUTION"},{"count":0,"taskStateType":"READY_PAUSE"},{"count":0,"taskStateType":"PAUSE"},{"count":0,"taskStateType":"READ"[truncated 392 chars]; line: 1, column: 296] (through reference chain: com.alibaba.teasdk.model.analysis.QueryProcessInstanceStatesResponse["body"]->org.apache.dolphinscheduler.api.utils.Result["data"]->org.apache.dolphinscheduler.api.dto.TaskCountDto["taskCountDtos"]->java.util.ArrayList[0])
at com.fasterxml.jackson.databind.exc.InvalidDefinitionException.from(InvalidDefinitionException.java:67) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.DeserializationContext.reportBadDefinition(DeserializationContext.java:1904) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.DatabindContext.reportBadDefinition(DatabindContext.java:400) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.DeserializationContext.handleMissingInstantiator(DeserializationContext.java:1349) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.BeanDeserializerBase.deserializeFromObjectUsingNonDefault(BeanDeserializerBase.java:1415) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.BeanDeserializer.deserializeFromObject(BeanDeserializer.java:351) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.BeanDeserializer.deserialize(BeanDeserializer.java:184) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.std.CollectionDeserializer._deserializeFromArray(CollectionDeserializer.java:355) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.std.CollectionDeserializer.deserialize(CollectionDeserializer.java:244) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.std.CollectionDeserializer.deserialize(CollectionDeserializer.java:28) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.impl.MethodProperty.deserializeAndSet(MethodProperty.java:129) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.BeanDeserializer.vanillaDeserialize(BeanDeserializer.java:313) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.BeanDeserializer.deserialize(BeanDeserializer.java:176) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.impl.MethodProperty.deserializeAndSet(MethodProperty.java:129) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.BeanDeserializer.vanillaDeserialize(BeanDeserializer.java:313) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.BeanDeserializer.deserialize(BeanDeserializer.java:176) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.impl.MethodProperty.deserializeAndSet(MethodProperty.java:129) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.BeanDeserializer.vanillaDeserialize(BeanDeserializer.java:313) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.BeanDeserializer.deserialize(BeanDeserializer.java:176) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.deser.DefaultDeserializationContext.readRootValue(DefaultDeserializationContext.java:323) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.ObjectMapper._readMapAndClose(ObjectMapper.java:4674) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.ObjectMapper.readValue(ObjectMapper.java:3629) ~[jackson-databind-2.13.3.jar:2.13.3]
at com.fasterxml.jackson.databind.ObjectMapper.readValue(ObjectMapper.java:3597) ~[jackson-databind-2.13.3.jar:2.13.3]
at org.apache.dolphinscheduler.common.utils.JSONUtils.parseObject(JSONUtils.java:127) ~[dolphinscheduler-common-2.0.6-SNAPSHOT.jar:2.0.6-SNAPSHOT]
at com.alibaba.teasdk.Client.toResponse(Client.java:227) [classes/:na]
at com.alibaba.teasdk.Client.doRequest(Client.java:78) [classes/:na]
at com.alibaba.teasdk.Client.call(Client.java:39) [classes/:na]
at com.alibaba.teasdk.ApplicationTests.testAnalysis(ApplicationTests.java:325) [test-classes/:na]
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[na:1.8.0_321]
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[na:1.8.0_321]
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[na:1.8.0_321]
at java.lang.reflect.Method.invoke(Method.java:498) ~[na:1.8.0_321]
at org.junit.platform.commons.util.ReflectionUtils.invokeMethod(ReflectionUtils.java:725) [junit-platform-commons-1.8.2.jar:1.8.2]
at org.junit.jupiter.engine.execution.MethodInvocation.proceed(MethodInvocation.java:60) [junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.execution.InvocationInterceptorChain$ValidatingInvocation.proceed(InvocationInterceptorChain.java:131) [junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.extension.TimeoutExtension.intercept(TimeoutExtension.java:149) [junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.extension.TimeoutExtension.interceptTestableMethod(TimeoutExtension.java:140) [junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.extension.TimeoutExtension.interceptTestMethod(TimeoutExtension.java:84) [junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.execution.ExecutableInvoker$ReflectiveInterceptorCall.lambda$ofVoidMethod$0(ExecutableInvoker.java:115) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.execution.ExecutableInvoker.lambda$invoke$0(ExecutableInvoker.java:105) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.execution.InvocationInterceptorChain$InterceptedInvocation.proceed(InvocationInterceptorChain.java:106) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.execution.InvocationInterceptorChain.proceed(InvocationInterceptorChain.java:64) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.execution.InvocationInterceptorChain.chainAndInvoke(InvocationInterceptorChain.java:45) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.execution.InvocationInterceptorChain.invoke(InvocationInterceptorChain.java:37) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.execution.ExecutableInvoker.invoke(ExecutableInvoker.java:104) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.execution.ExecutableInvoker.invoke(ExecutableInvoker.java:98) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.descriptor.TestMethodTestDescriptor.lambda$invokeTestMethod$7(TestMethodTestDescriptor.java:214) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.jupiter.engine.descriptor.TestMethodTestDescriptor.invokeTestMethod(TestMethodTestDescriptor.java:210) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.descriptor.TestMethodTestDescriptor.execute(TestMethodTestDescriptor.java:135) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.jupiter.engine.descriptor.TestMethodTestDescriptor.execute(TestMethodTestDescriptor.java:66) ~[junit-jupiter-engine-5.8.2.jar:5.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$6(NodeTestTask.java:151) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$8(NodeTestTask.java:141) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.Node.around(Node.java:137) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$9(NodeTestTask.java:139) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.executeRecursively(NodeTestTask.java:138) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.execute(NodeTestTask.java:95) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at java.util.ArrayList.forEach(ArrayList.java:1259) ~[na:1.8.0_321]
at org.junit.platform.engine.support.hierarchical.SameThreadHierarchicalTestExecutorService.invokeAll(SameThreadHierarchicalTestExecutorService.java:41) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$6(NodeTestTask.java:155) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$8(NodeTestTask.java:141) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.Node.around(Node.java:137) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$9(NodeTestTask.java:139) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.executeRecursively(NodeTestTask.java:138) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.execute(NodeTestTask.java:95) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at java.util.ArrayList.forEach(ArrayList.java:1259) ~[na:1.8.0_321]
at org.junit.platform.engine.support.hierarchical.SameThreadHierarchicalTestExecutorService.invokeAll(SameThreadHierarchicalTestExecutorService.java:41) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$6(NodeTestTask.java:155) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$8(NodeTestTask.java:141) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.Node.around(Node.java:137) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$9(NodeTestTask.java:139) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.executeRecursively(NodeTestTask.java:138) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.NodeTestTask.execute(NodeTestTask.java:95) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.SameThreadHierarchicalTestExecutorService.submit(SameThreadHierarchicalTestExecutorService.java:35) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.HierarchicalTestExecutor.execute(HierarchicalTestExecutor.java:57) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.engine.support.hierarchical.HierarchicalTestEngine.execute(HierarchicalTestEngine.java:54) ~[junit-platform-engine-1.8.2.jar:1.8.2]
at org.junit.platform.launcher.core.EngineExecutionOrchestrator.execute(EngineExecutionOrchestrator.java:107) ~[junit-platform-launcher-1.8.2.jar:1.8.2]
at org.junit.platform.launcher.core.EngineExecutionOrchestrator.execute(EngineExecutionOrchestrator.java:88) ~[junit-platform-launcher-1.8.2.jar:1.8.2]
at org.junit.platform.launcher.core.EngineExecutionOrchestrator.lambda$execute$0(EngineExecutionOrchestrator.java:54) ~[junit-platform-launcher-1.8.2.jar:1.8.2]
at org.junit.platform.launcher.core.EngineExecutionOrchestrator.withInterceptedStreams(EngineExecutionOrchestrator.java:67) ~[junit-platform-launcher-1.8.2.jar:1.8.2]
at org.junit.platform.launcher.core.EngineExecutionOrchestrator.execute(EngineExecutionOrchestrator.java:52) ~[junit-platform-launcher-1.8.2.jar:1.8.2]
at org.junit.platform.launcher.core.DefaultLauncher.execute(DefaultLauncher.java:114) ~[junit-platform-launcher-1.8.2.jar:1.8.2]
at org.junit.platform.launcher.core.DefaultLauncher.execute(DefaultLauncher.java:86) ~[junit-platform-launcher-1.8.2.jar:1.8.2]
at org.junit.platform.launcher.core.DefaultLauncherSession$DelegatingLauncher.execute(DefaultLauncherSession.java:86) ~[junit-platform-launcher-1.8.2.jar:1.8.2]
at org.junit.platform.launcher.core.SessionPerRequestLauncher.execute(SessionPerRequestLauncher.java:53) ~[junit-platform-launcher-1.8.2.jar:1.8.2]
at com.intellij.junit5.JUnit5IdeaTestRunner.startRunnerWithArgs(JUnit5IdeaTestRunner.java:71) ~[junit5-rt.jar:na]
at com.intellij.rt.junit.IdeaTestRunner$Repeater$1.execute(IdeaTestRunner.java:38) ~[junit-rt.jar:na]
at com.intellij.rt.execution.junit.TestsRepeater.repeat(TestsRepeater.java:11) ~[idea_rt.jar:na]
at com.intellij.rt.junit.IdeaTestRunner$Repeater.startRunnerWithArgs(IdeaTestRunner.java:35) ~[junit-rt.jar:na]
at com.intellij.rt.junit.JUnitStarter.prepareStreamsAndStart(JUnitStarter.java:235) ~[junit-rt.jar:na]
at com.intellij.rt.junit.JUnitStarter.main(JUnitStarter.java:54) ~[junit-rt.jar:na]
```
### What you expected to happen
The response deserializes correctly
### How to reproduce
`org.apache.dolphinscheduler.api.dto.TaskCountDto` and `org.apache.dolphinscheduler.api.dto.TaskStateCount` lack a default constructor; adding one is the suggested fix.
### Anything else
_No response_
### Version
2.0.6
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11854
|
https://github.com/apache/dolphinscheduler/pull/11858
|
3ca9680b20a4e5b3c0aff65abbcf4faf3379cd3f
|
db46e4786aa0c5f5f16fac4e977a0570c8702fe1
| 2022-09-08T07:11:10Z |
java
| 2022-09-11T11:12:03Z |
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/TaskCountDto.java
|
*/
private int totalCount;
/**
* task state count list
*/
private List<TaskStateCount> taskCountDtos;
public TaskCountDto(List<ExecuteStatusCount> taskInstanceStateCounts) {
countTaskDtos(taskInstanceStateCounts);
}
private void countTaskDtos(List<ExecuteStatusCount> taskInstanceStateCounts) {
Map<TaskExecutionStatus, Integer> statusCountMap = taskInstanceStateCounts.stream()
.collect(Collectors.toMap(ExecuteStatusCount::getState, ExecuteStatusCount::getCount, Integer::sum));
taskCountDtos = Arrays.stream(TaskExecutionStatus.values())
.map(status -> new TaskStateCount(status, statusCountMap.getOrDefault(status, 0)))
.collect(Collectors.toList());
totalCount = taskCountDtos.stream()
.mapToInt(TaskStateCount::getCount)
.sum();
}
public void removeStateFromCountList(TaskExecutionStatus status) {
for (TaskStateCount count : this.taskCountDtos) {
if (count.getTaskStateType().equals(status)) {
this.taskCountDtos.remove(count);
break;
}
}
}
}
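Hedged sketch of the shape the issue's suggested fix implies: a no-args constructor (here via Lombok, one of several options) lets Jackson instantiate `TaskStateCount` during deserialization, while the two-arg constructor used by `countTaskDtos` above keeps working. Field names follow the JSON in the stack trace; this is not a claim about how the merged PR is written.
```java
import org.apache.dolphinscheduler.plugin.task.api.enums.TaskExecutionStatus;

import lombok.Data;
import lombok.NoArgsConstructor;

@Data
@NoArgsConstructor
public class TaskStateCount {

    // Matches the serialized shape {"count":0,"taskStateType":"SUBMITTED_SUCCESS"}.
    private int count;
    private TaskExecutionStatus taskStateType;

    public TaskStateCount(TaskExecutionStatus taskStateType, int count) {
        this.taskStateType = taskStateType;
        this.count = count;
    }
}
```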
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,720 |
[Bug] [spark-sql] In spark-sql, select both SPARK1 and SPARK2 versions and execute ${SPARK_HOME2}/bin/spark-sql
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
In spark-sql, whether the SPARK1 or the SPARK2 version is selected, ${SPARK_HOME2}/bin/spark-sql is executed
### What you expected to happen
In spark-sql, selecting SPARK1 should execute ${SPARK_HOME1}/bin/spark-sql and selecting SPARK2 should execute ${SPARK_HOME2}/bin/spark-sql
### How to reproduce

[INFO] 2022-08-31 09:57:14.923 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[69] - spark task params {"localParams":[],"rawScript":"SELECT * from ods.ods_pigeon_day LIMIT 1","resourceList":[],"programType":"SQL","mainClass":"","deployMode":"client","appName":"SparkSQLDemo","sparkVersion":"SPARK1","driverCores":1,"driverMemory":"512M","numExecutors":2,"executorMemory":"2G","executorCores":2}
[INFO] 2022-08-31 09:57:14.933 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[240] - raw script : SELECT * from ods.ods_pigeon_day LIMIT 1
[INFO] 2022-08-31 09:57:14.934 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[241] - task execute path : /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[130] - spark task command: ${SPARK_HOME2}/bin/spark-sql --master yarn --deploy-mode client --driver-cores 1 --driver-memory 512M --num-executors 2 --executor-cores 2 --executor-memory 2G --name SparkSQLDemo --queue haodf -f /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44/40_44_node.sql
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class
### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11720
|
https://github.com/apache/dolphinscheduler/pull/11721
|
147e0ae8d74c39ed82ca36f068b96c941fcdcf50
|
25b78a80037c4a7edb551dd04328276ebdc7efc1
| 2022-08-31T11:27:50Z |
java
| 2022-09-12T02:35:27Z |
dolphinscheduler-task-plugin/dolphinscheduler-task-spark/src/main/java/org/apache/dolphinscheduler/plugin/task/spark/SparkTask.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.plugin.task.spark;
import static org.apache.dolphinscheduler.plugin.task.api.TaskConstants.RWXR_XR_X;
import org.apache.dolphinscheduler.plugin.task.api.AbstractYarnTask;
import org.apache.dolphinscheduler.plugin.task.api.TaskExecutionContext;
import org.apache.dolphinscheduler.plugin.task.api.model.Property;
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,720 |
[Bug] [spark-sql] In spark-sql, select both SPARK1 and SPARK2 versions and execute ${SPARK_HOME2}/bin/spark-sql
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
In spark-sql, whether the SPARK1 or the SPARK2 version is selected, ${SPARK_HOME2}/bin/spark-sql is executed
### What you expected to happen
In spark-sql, selecting SPARK1 should execute ${SPARK_HOME1}/bin/spark-sql and selecting SPARK2 should execute ${SPARK_HOME2}/bin/spark-sql
### How to reproduce

[INFO] 2022-08-31 09:57:14.923 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[69] - spark task params {"localParams":[],"rawScript":"SELECT * from ods.ods_pigeon_day LIMIT 1","resourceList":[],"programType":"SQL","mainClass":"","deployMode":"client","appName":"SparkSQLDemo","sparkVersion":"SPARK1","driverCores":1,"driverMemory":"512M","numExecutors":2,"executorMemory":"2G","executorCores":2}
[INFO] 2022-08-31 09:57:14.933 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[240] - raw script : SELECT * from ods.ods_pigeon_day LIMIT 1
[INFO] 2022-08-31 09:57:14.934 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[241] - task execute path : /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[130] - spark task command: ${SPARK_HOME2}/bin/spark-sql --master yarn --deploy-mode client --driver-cores 1 --driver-memory 512M --num-executors 2 --executor-cores 2 --executor-memory 2G --name SparkSQLDemo --queue haodf -f /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44/40_44_node.sql
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class
### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11720
|
https://github.com/apache/dolphinscheduler/pull/11721
|
147e0ae8d74c39ed82ca36f068b96c941fcdcf50
|
25b78a80037c4a7edb551dd04328276ebdc7efc1
| 2022-08-31T11:27:50Z |
java
| 2022-09-12T02:35:27Z |
dolphinscheduler-task-plugin/dolphinscheduler-task-spark/src/main/java/org/apache/dolphinscheduler/plugin/task/spark/SparkTask.java
|
import org.apache.dolphinscheduler.plugin.task.api.model.ResourceInfo;
import org.apache.dolphinscheduler.plugin.task.api.parameters.AbstractParameters;
import org.apache.dolphinscheduler.plugin.task.api.parser.ParamUtils;
import org.apache.dolphinscheduler.plugin.task.api.parser.ParameterUtils;
import org.apache.dolphinscheduler.plugin.task.api.utils.ArgsUtils;
import org.apache.dolphinscheduler.plugin.task.api.utils.MapUtils;
import org.apache.dolphinscheduler.spi.utils.JSONUtils;
import org.apache.dolphinscheduler.spi.utils.StringUtils;
import org.apache.commons.lang3.SystemUtils;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
public class SparkTask extends AbstractYarnTask {
/**
* spark parameters
*/
private SparkParameters sparkParameters;
/**
* taskExecutionContext
*/
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,720 |
[Bug] [spark-sql] In spark-sql, select both SPARK1 and SPARK2 versions and execute ${SPARK_HOME2}/bin/spark-sql
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
In spark-sql, whether the SPARK1 or the SPARK2 version is selected, ${SPARK_HOME2}/bin/spark-sql is executed
### What you expected to happen
In spark-sql, selecting SPARK1 should execute ${SPARK_HOME1}/bin/spark-sql and selecting SPARK2 should execute ${SPARK_HOME2}/bin/spark-sql
### How to reproduce

[INFO] 2022-08-31 09:57:14.923 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[69] - spark task params {"localParams":[],"rawScript":"SELECT * from ods.ods_pigeon_day LIMIT 1","resourceList":[],"programType":"SQL","mainClass":"","deployMode":"client","appName":"SparkSQLDemo","sparkVersion":"SPARK1","driverCores":1,"driverMemory":"512M","numExecutors":2,"executorMemory":"2G","executorCores":2}
[INFO] 2022-08-31 09:57:14.933 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[240] - raw script : SELECT * from ods.ods_pigeon_day LIMIT 1
[INFO] 2022-08-31 09:57:14.934 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[241] - task execute path : /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[130] - spark task command: ${SPARK_HOME2}/bin/spark-sql --master yarn --deploy-mode client --driver-cores 1 --driver-memory 512M --num-executors 2 --executor-cores 2 --executor-memory 2G --name SparkSQLDemo --queue haodf -f /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44/40_44_node.sql
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class
### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11720
|
https://github.com/apache/dolphinscheduler/pull/11721
|
147e0ae8d74c39ed82ca36f068b96c941fcdcf50
|
25b78a80037c4a7edb551dd04328276ebdc7efc1
| 2022-08-31T11:27:50Z |
java
| 2022-09-12T02:35:27Z |
dolphinscheduler-task-plugin/dolphinscheduler-task-spark/src/main/java/org/apache/dolphinscheduler/plugin/task/spark/SparkTask.java
|
private TaskExecutionContext taskExecutionContext;
public SparkTask(TaskExecutionContext taskExecutionContext) {
super(taskExecutionContext);
this.taskExecutionContext = taskExecutionContext;
}
@Override
public void init() {
logger.info("spark task params {}", taskExecutionContext.getTaskParams());
sparkParameters = JSONUtils.parseObject(taskExecutionContext.getTaskParams(), SparkParameters.class);
if (null == sparkParameters) {
logger.error("Spark params is null");
return;
}
if (!sparkParameters.checkParameters()) {
throw new RuntimeException("spark task params is not valid");
}
sparkParameters.setQueue(taskExecutionContext.getQueue());
if (sparkParameters.getProgramType() != ProgramType.SQL) {
setMainJarName();
}
}
/**
* create command
*
* @return command
*/
@Override
protected String buildCommand() {
/**
* (1) spark-submit [options] <app jar | python file> [app arguments]
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,720 |
[Bug] [spark-sql] In spark-sql, selecting either the SPARK1 or the SPARK2 version executes ${SPARK_HOME2}/bin/spark-sql
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
In spark-sql, selecting either the SPARK1 or the SPARK2 version executes ${SPARK_HOME2}/bin/spark-sql.
### What you expected to happen
In spark-sql, selecting SPARK1 should execute ${SPARK_HOME1}/bin/spark-sql and selecting SPARK2 should execute ${SPARK_HOME2}/bin/spark-sql.
### How to reproduce

[INFO] 2022-08-31 09:57:14.923 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[69] - spark task params {"localParams":[],"rawScript":"SELECT * from ods.ods_pigeon_day LIMIT 1","resourceList":[],"programType":"SQL","mainClass":"","deployMode":"client","appName":"SparkSQLDemo","sparkVersion":"SPARK1","driverCores":1,"driverMemory":"512M","numExecutors":2,"executorMemory":"2G","executorCores":2}
[INFO] 2022-08-31 09:57:14.933 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[240] - raw script : SELECT * from ods.ods_pigeon_day LIMIT 1
[INFO] 2022-08-31 09:57:14.934 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[241] - task execute path : /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[130] - spark task command: ${SPARK_HOME2}/bin/spark-sql --master yarn --deploy-mode client --driver-cores 1 --driver-memory 512M --num-executors 2 --executor-cores 2 --executor-memory 2G --name SparkSQLDemo --queue haodf -f /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44/40_44_node.sql
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class
### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11720
|
https://github.com/apache/dolphinscheduler/pull/11721
|
147e0ae8d74c39ed82ca36f068b96c941fcdcf50
|
25b78a80037c4a7edb551dd04328276ebdc7efc1
| 2022-08-31T11:27:50Z |
java
| 2022-09-12T02:35:27Z |
dolphinscheduler-task-plugin/dolphinscheduler-task-spark/src/main/java/org/apache/dolphinscheduler/plugin/task/spark/SparkTask.java
|
* (2) spark-sql [options] -f <filename>
*/
List<String> args = new ArrayList<>();
String sparkCommand = SparkVersion.SPARK2.getCommand();
if (SparkVersion.SPARK1.name().equals(sparkParameters.getSparkVersion())) {
sparkCommand = SparkVersion.SPARK1.getCommand();
}
if (sparkParameters.getProgramType() == ProgramType.SQL) {
sparkCommand = SparkVersion.SPARKSQL.getCommand();
}
args.add(sparkCommand);
args.addAll(populateSparkOptions());
Map<String, Property> paramsMap = taskExecutionContext.getPrepareParamsMap();
String command = ParameterUtils.convertParameterPlaceholders(String.join(" ", args), ParamUtils.convert(paramsMap));
logger.info("spark task command: {}", command);
return command;
}
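    // Illustrative sketch (not part of the original file and not necessarily the merged fix):
    // resolve the spark-sql binary from the version the user selected instead of the SPARKSQL
    // entry that is pinned to ${SPARK_HOME2}. The replace("spark-submit", "spark-sql") trick is
    // an assumption made for brevity here.
    private String chooseSparkCommand() {
        String sparkCommand = SparkVersion.SPARK2.getCommand();
        if (SparkVersion.SPARK1.name().equals(sparkParameters.getSparkVersion())) {
            sparkCommand = SparkVersion.SPARK1.getCommand();
        }
        if (sparkParameters.getProgramType() == ProgramType.SQL) {
            // keep the chosen SPARK_HOME but switch the binary from spark-submit to spark-sql
            sparkCommand = sparkCommand.replace("spark-submit", "spark-sql");
        }
        return sparkCommand;
    }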
/**
* build spark options
*
* @return argument list
*/
private List<String> populateSparkOptions() {
List<String> args = new ArrayList<>();
args.add(SparkConstants.MASTER);
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,720 |
[Bug] [spark-sql] In spark-sql, selecting either the SPARK1 or the SPARK2 version executes ${SPARK_HOME2}/bin/spark-sql
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
In spark-sql, selecting either the SPARK1 or the SPARK2 version executes ${SPARK_HOME2}/bin/spark-sql.
### What you expected to happen
In spark-sql, selecting SPARK1 should execute ${SPARK_HOME1}/bin/spark-sql and selecting SPARK2 should execute ${SPARK_HOME2}/bin/spark-sql.
### How to reproduce

[INFO] 2022-08-31 09:57:14.923 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[69] - spark task params {"localParams":[],"rawScript":"SELECT * from ods.ods_pigeon_day LIMIT 1","resourceList":[],"programType":"SQL","mainClass":"","deployMode":"client","appName":"SparkSQLDemo","sparkVersion":"SPARK1","driverCores":1,"driverMemory":"512M","numExecutors":2,"executorMemory":"2G","executorCores":2}
[INFO] 2022-08-31 09:57:14.933 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[240] - raw script : SELECT * from ods.ods_pigeon_day LIMIT 1
[INFO] 2022-08-31 09:57:14.934 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[241] - task execute path : /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[130] - spark task command: ${SPARK_HOME2}/bin/spark-sql --master yarn --deploy-mode client --driver-cores 1 --driver-memory 512M --num-executors 2 --executor-cores 2 --executor-memory 2G --name SparkSQLDemo --queue haodf -f /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44/40_44_node.sql
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class
### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11720
|
https://github.com/apache/dolphinscheduler/pull/11721
|
147e0ae8d74c39ed82ca36f068b96c941fcdcf50
|
25b78a80037c4a7edb551dd04328276ebdc7efc1
| 2022-08-31T11:27:50Z |
java
| 2022-09-12T02:35:27Z |
dolphinscheduler-task-plugin/dolphinscheduler-task-spark/src/main/java/org/apache/dolphinscheduler/plugin/task/spark/SparkTask.java
|
String deployMode = StringUtils.isNotEmpty(sparkParameters.getDeployMode()) ? sparkParameters.getDeployMode() : SparkConstants.DEPLOY_MODE_LOCAL;
if (!SparkConstants.DEPLOY_MODE_LOCAL.equals(deployMode)) {
args.add(SparkConstants.SPARK_ON_YARN);
args.add(SparkConstants.DEPLOY_MODE);
}
args.add(deployMode);
ProgramType programType = sparkParameters.getProgramType();
String mainClass = sparkParameters.getMainClass();
if (programType != ProgramType.PYTHON && programType != ProgramType.SQL && StringUtils.isNotEmpty(mainClass)) {
args.add(SparkConstants.MAIN_CLASS);
args.add(mainClass);
}
populateSparkResourceDefinitions(args);
String appName = sparkParameters.getAppName();
if (StringUtils.isNotEmpty(appName)) {
args.add(SparkConstants.SPARK_NAME);
args.add(ArgsUtils.escape(appName));
}
String others = sparkParameters.getOthers();
if (!SparkConstants.DEPLOY_MODE_LOCAL.equals(deployMode) && (StringUtils.isEmpty(others) || !others.contains(SparkConstants.SPARK_QUEUE))) {
String queue = sparkParameters.getQueue();
if (StringUtils.isNotEmpty(queue)) {
args.add(SparkConstants.SPARK_QUEUE);
args.add(queue);
}
}
if (StringUtils.isNotEmpty(others)) {
args.add(others);
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,720 |
[Bug] [spark-sql] In spark-sql, selecting either the SPARK1 or the SPARK2 version executes ${SPARK_HOME2}/bin/spark-sql
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
In spark-sql, selecting either the SPARK1 or the SPARK2 version executes ${SPARK_HOME2}/bin/spark-sql.
### What you expected to happen
In spark-sql, selecting SPARK1 should execute ${SPARK_HOME1}/bin/spark-sql and selecting SPARK2 should execute ${SPARK_HOME2}/bin/spark-sql.
### How to reproduce

[INFO] 2022-08-31 09:57:14.923 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[69] - spark task params {"localParams":[],"rawScript":"SELECT * from ods.ods_pigeon_day LIMIT 1","resourceList":[],"programType":"SQL","mainClass":"","deployMode":"client","appName":"SparkSQLDemo","sparkVersion":"SPARK1","driverCores":1,"driverMemory":"512M","numExecutors":2,"executorMemory":"2G","executorCores":2}
[INFO] 2022-08-31 09:57:14.933 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[240] - raw script : SELECT * from ods.ods_pigeon_day LIMIT 1
[INFO] 2022-08-31 09:57:14.934 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[241] - task execute path : /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[130] - spark task command: ${SPARK_HOME2}/bin/spark-sql --master yarn --deploy-mode client --driver-cores 1 --driver-memory 512M --num-executors 2 --executor-cores 2 --executor-memory 2G --name SparkSQLDemo --queue haodf -f /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44/40_44_node.sql
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class
### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11720
|
https://github.com/apache/dolphinscheduler/pull/11721
|
147e0ae8d74c39ed82ca36f068b96c941fcdcf50
|
25b78a80037c4a7edb551dd04328276ebdc7efc1
| 2022-08-31T11:27:50Z |
java
| 2022-09-12T02:35:27Z |
dolphinscheduler-task-plugin/dolphinscheduler-task-spark/src/main/java/org/apache/dolphinscheduler/plugin/task/spark/SparkTask.java
|
ResourceInfo mainJar = sparkParameters.getMainJar();
if (programType != ProgramType.SQL) {
args.add(mainJar.getRes());
}
String mainArgs = sparkParameters.getMainArgs();
if (programType != ProgramType.SQL && StringUtils.isNotEmpty(mainArgs)) {
args.add(mainArgs);
}
if (ProgramType.SQL == programType) {
args.add(SparkConstants.SQL_FROM_FILE);
args.add(generateScriptFile());
}
return args;
}
private void populateSparkResourceDefinitions(List<String> args) {
int driverCores = sparkParameters.getDriverCores();
if (driverCores > 0) {
args.add(SparkConstants.DRIVER_CORES);
args.add(String.format("%d", driverCores));
}
String driverMemory = sparkParameters.getDriverMemory();
if (StringUtils.isNotEmpty(driverMemory)) {
args.add(SparkConstants.DRIVER_MEMORY);
args.add(driverMemory);
}
int numExecutors = sparkParameters.getNumExecutors();
if (numExecutors > 0) {
args.add(SparkConstants.NUM_EXECUTORS);
args.add(String.format("%d", numExecutors));
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,720 |
[Bug] [spark-sql] In spark-sql, selecting either the SPARK1 or the SPARK2 version executes ${SPARK_HOME2}/bin/spark-sql
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
In spark-sql, selecting either the SPARK1 or the SPARK2 version executes ${SPARK_HOME2}/bin/spark-sql.
### What you expected to happen
In spark-sql, selecting SPARK1 should execute ${SPARK_HOME1}/bin/spark-sql and selecting SPARK2 should execute ${SPARK_HOME2}/bin/spark-sql.
### How to reproduce

[INFO] 2022-08-31 09:57:14.923 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[69] - spark task params {"localParams":[],"rawScript":"SELECT * from ods.ods_pigeon_day LIMIT 1","resourceList":[],"programType":"SQL","mainClass":"","deployMode":"client","appName":"SparkSQLDemo","sparkVersion":"SPARK1","driverCores":1,"driverMemory":"512M","numExecutors":2,"executorMemory":"2G","executorCores":2}
[INFO] 2022-08-31 09:57:14.933 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[240] - raw script : SELECT * from ods.ods_pigeon_day LIMIT 1
[INFO] 2022-08-31 09:57:14.934 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[241] - task execute path : /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[130] - spark task command: ${SPARK_HOME2}/bin/spark-sql --master yarn --deploy-mode client --driver-cores 1 --driver-memory 512M --num-executors 2 --executor-cores 2 --executor-memory 2G --name SparkSQLDemo --queue haodf -f /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44/40_44_node.sql
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class
### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11720
|
https://github.com/apache/dolphinscheduler/pull/11721
|
147e0ae8d74c39ed82ca36f068b96c941fcdcf50
|
25b78a80037c4a7edb551dd04328276ebdc7efc1
| 2022-08-31T11:27:50Z |
java
| 2022-09-12T02:35:27Z |
dolphinscheduler-task-plugin/dolphinscheduler-task-spark/src/main/java/org/apache/dolphinscheduler/plugin/task/spark/SparkTask.java
|
}
int executorCores = sparkParameters.getExecutorCores();
if (executorCores > 0) {
args.add(SparkConstants.EXECUTOR_CORES);
args.add(String.format("%d", executorCores));
}
String executorMemory = sparkParameters.getExecutorMemory();
if (StringUtils.isNotEmpty(executorMemory)) {
args.add(SparkConstants.EXECUTOR_MEMORY);
args.add(executorMemory);
}
}
private String generateScriptFile() {
String scriptFileName = String.format("%s/%s_node.sql", taskExecutionContext.getExecutePath(), taskExecutionContext.getTaskAppId());
File file = new File(scriptFileName);
Path path = file.toPath();
if (!Files.exists(path)) {
String script = replaceParam(sparkParameters.getRawScript());
sparkParameters.setRawScript(script);
logger.info("raw script : {}", sparkParameters.getRawScript());
logger.info("task execute path : {}", taskExecutionContext.getExecutePath());
Set<PosixFilePermission> perms = PosixFilePermissions.fromString(RWXR_XR_X);
FileAttribute<Set<PosixFilePermission>> attr = PosixFilePermissions.asFileAttribute(perms);
try {
if (SystemUtils.IS_OS_WINDOWS) {
Files.createFile(path);
} else {
if (!file.getParentFile().exists()) {
file.getParentFile().mkdirs();
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,720 |
[Bug] [spark-sql] In spark-sql, selecting either the SPARK1 or the SPARK2 version executes ${SPARK_HOME2}/bin/spark-sql
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
In spark-sql, selecting either the SPARK1 or the SPARK2 version executes ${SPARK_HOME2}/bin/spark-sql.
### What you expected to happen
In spark-sql, selecting SPARK1 should execute ${SPARK_HOME1}/bin/spark-sql and selecting SPARK2 should execute ${SPARK_HOME2}/bin/spark-sql.
### How to reproduce

[INFO] 2022-08-31 09:57:14.923 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[69] - spark task params {"localParams":[],"rawScript":"SELECT * from ods.ods_pigeon_day LIMIT 1","resourceList":[],"programType":"SQL","mainClass":"","deployMode":"client","appName":"SparkSQLDemo","sparkVersion":"SPARK1","driverCores":1,"driverMemory":"512M","numExecutors":2,"executorMemory":"2G","executorCores":2}
[INFO] 2022-08-31 09:57:14.933 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[240] - raw script : SELECT * from ods.ods_pigeon_day LIMIT 1
[INFO] 2022-08-31 09:57:14.934 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[241] - task execute path : /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[130] - spark task command: ${SPARK_HOME2}/bin/spark-sql --master yarn --deploy-mode client --driver-cores 1 --driver-memory 512M --num-executors 2 --executor-cores 2 --executor-memory 2G --name SparkSQLDemo --queue haodf -f /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44/40_44_node.sql
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class
### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11720
|
https://github.com/apache/dolphinscheduler/pull/11721
|
147e0ae8d74c39ed82ca36f068b96c941fcdcf50
|
25b78a80037c4a7edb551dd04328276ebdc7efc1
| 2022-08-31T11:27:50Z |
java
| 2022-09-12T02:35:27Z |
dolphinscheduler-task-plugin/dolphinscheduler-task-spark/src/main/java/org/apache/dolphinscheduler/plugin/task/spark/SparkTask.java
|
Files.createFile(path, attr);
}
Files.write(path, sparkParameters.getRawScript().getBytes(), StandardOpenOption.APPEND);
} catch (IOException e) {
throw new RuntimeException("generate spark sql script error", e);
}
}
return scriptFileName;
}
private String replaceParam(String script) {
script = script.replaceAll("\\r\\n", "\n");
Map<String, Property> paramsMap = taskExecutionContext.getPrepareParamsMap();
script = ParameterUtils.convertParameterPlaceholders(script, ParamUtils.convert(paramsMap));
return script;
}
@Override
protected void setMainJarName() {
ResourceInfo mainJar = sparkParameters.getMainJar();
String resourceName = getResourceNameOfMainJar(mainJar);
mainJar.setRes(resourceName);
sparkParameters.setMainJar(mainJar);
}
@Override
public AbstractParameters getParameters() {
return sparkParameters;
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,720 |
[Bug] [spark-sql] In spark-sql, selecting either the SPARK1 or the SPARK2 version executes ${SPARK_HOME2}/bin/spark-sql
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
In spark-sql, selecting either the SPARK1 or the SPARK2 version executes ${SPARK_HOME2}/bin/spark-sql.
### What you expected to happen
In spark-sql, selecting SPARK1 should execute ${SPARK_HOME1}/bin/spark-sql and selecting SPARK2 should execute ${SPARK_HOME2}/bin/spark-sql.
### How to reproduce

[INFO] 2022-08-31 09:57:14.923 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[69] - spark task params {"localParams":[],"rawScript":"SELECT * from ods.ods_pigeon_day LIMIT 1","resourceList":[],"programType":"SQL","mainClass":"","deployMode":"client","appName":"SparkSQLDemo","sparkVersion":"SPARK1","driverCores":1,"driverMemory":"512M","numExecutors":2,"executorMemory":"2G","executorCores":2}
[INFO] 2022-08-31 09:57:14.933 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[240] - raw script : SELECT * from ods.ods_pigeon_day LIMIT 1
[INFO] 2022-08-31 09:57:14.934 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[241] - task execute path : /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[130] - spark task command: ${SPARK_HOME2}/bin/spark-sql --master yarn --deploy-mode client --driver-cores 1 --driver-memory 512M --num-executors 2 --executor-cores 2 --executor-memory 2G --name SparkSQLDemo --queue haodf -f /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44/40_44_node.sql
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class
### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11720
|
https://github.com/apache/dolphinscheduler/pull/11721
|
147e0ae8d74c39ed82ca36f068b96c941fcdcf50
|
25b78a80037c4a7edb551dd04328276ebdc7efc1
| 2022-08-31T11:27:50Z |
java
| 2022-09-12T02:35:27Z |
dolphinscheduler-task-plugin/dolphinscheduler-task-spark/src/main/java/org/apache/dolphinscheduler/plugin/task/spark/SparkVersion.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.plugin.task.spark;
public enum SparkVersion {
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,720 |
[Bug] [spark-sql] In spark-sql, selecting either the SPARK1 or the SPARK2 version executes ${SPARK_HOME2}/bin/spark-sql
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
In spark-sql, selecting either the SPARK1 or the SPARK2 version executes ${SPARK_HOME2}/bin/spark-sql.
### What you expected to happen
In spark-sql, selecting SPARK1 should execute ${SPARK_HOME1}/bin/spark-sql and selecting SPARK2 should execute ${SPARK_HOME2}/bin/spark-sql.
### How to reproduce

[INFO] 2022-08-31 09:57:14.923 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[69] - spark task params {"localParams":[],"rawScript":"SELECT * from ods.ods_pigeon_day LIMIT 1","resourceList":[],"programType":"SQL","mainClass":"","deployMode":"client","appName":"SparkSQLDemo","sparkVersion":"SPARK1","driverCores":1,"driverMemory":"512M","numExecutors":2,"executorMemory":"2G","executorCores":2}
[INFO] 2022-08-31 09:57:14.933 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[240] - raw script : SELECT * from ods.ods_pigeon_day LIMIT 1
[INFO] 2022-08-31 09:57:14.934 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[241] - task execute path : /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[130] - spark task command: ${SPARK_HOME2}/bin/spark-sql --master yarn --deploy-mode client --driver-cores 1 --driver-memory 512M --num-executors 2 --executor-cores 2 --executor-memory 2G --name SparkSQLDemo --queue haodf -f /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44/40_44_node.sql
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class
### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11720
|
https://github.com/apache/dolphinscheduler/pull/11721
|
147e0ae8d74c39ed82ca36f068b96c941fcdcf50
|
25b78a80037c4a7edb551dd04328276ebdc7efc1
| 2022-08-31T11:27:50Z |
java
| 2022-09-12T02:35:27Z |
dolphinscheduler-task-plugin/dolphinscheduler-task-spark/src/main/java/org/apache/dolphinscheduler/plugin/task/spark/SparkVersion.java
|
/**
* 0 SPARK1
* 1 SPARK2
* 2 SPARKSQL
*/
SPARK1(0, "SPARK1", "${SPARK_HOME1}/bin/spark-submit"),
SPARK2(1, "SPARK2", "${SPARK_HOME2}/bin/spark-submit"),
SPARKSQL(2, "SPARKSQL", "${SPARK_HOME2}/bin/spark-sql");
private final int code;
private final String descp;
/**
* usage: spark-submit [options] <app jar | python file> [app arguments]
*/
private final String command;
SparkVersion(int code, String descp, String command) {
this.code = code;
this.descp = descp;
this.command = command;
}
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
public String getCommand() {
return command;
}
}
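// Hypothetical alternative layout (an illustration only, not the merged change): carry both a
// spark-submit and a spark-sql command per Spark version, so SQL tasks are no longer pinned
// to ${SPARK_HOME2} through the single SPARKSQL entry above.
enum SparkCommand {

    SPARK1_SUBMIT("${SPARK_HOME1}/bin/spark-submit"),
    SPARK2_SUBMIT("${SPARK_HOME2}/bin/spark-submit"),
    SPARK1_SQL("${SPARK_HOME1}/bin/spark-sql"),
    SPARK2_SQL("${SPARK_HOME2}/bin/spark-sql");

    private final String command;

    SparkCommand(String command) {
        this.command = command;
    }

    public String getCommand() {
        return command;
    }
}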
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,720 |
[Bug] [spark-sql] In spark-sql, selecting either the SPARK1 or the SPARK2 version executes ${SPARK_HOME2}/bin/spark-sql
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
In spark-sql, selecting either the SPARK1 or the SPARK2 version executes ${SPARK_HOME2}/bin/spark-sql.
### What you expected to happen
In spark-sql, selecting SPARK1 should execute ${SPARK_HOME1}/bin/spark-sql and selecting SPARK2 should execute ${SPARK_HOME2}/bin/spark-sql.
### How to reproduce

[INFO] 2022-08-31 09:57:14.923 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[69] - spark task params {"localParams":[],"rawScript":"SELECT * from ods.ods_pigeon_day LIMIT 1","resourceList":[],"programType":"SQL","mainClass":"","deployMode":"client","appName":"SparkSQLDemo","sparkVersion":"SPARK1","driverCores":1,"driverMemory":"512M","numExecutors":2,"executorMemory":"2G","executorCores":2}
[INFO] 2022-08-31 09:57:14.933 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[240] - raw script : SELECT * from ods.ods_pigeon_day LIMIT 1
[INFO] 2022-08-31 09:57:14.934 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[241] - task execute path : /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[130] - spark task command: ${SPARK_HOME2}/bin/spark-sql --master yarn --deploy-mode client --driver-cores 1 --driver-memory 512M --num-executors 2 --executor-cores 2 --executor-memory 2G --name SparkSQLDemo --queue haodf -f /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44/40_44_node.sql
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class
### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11720
|
https://github.com/apache/dolphinscheduler/pull/11721
|
147e0ae8d74c39ed82ca36f068b96c941fcdcf50
|
25b78a80037c4a7edb551dd04328276ebdc7efc1
| 2022-08-31T11:27:50Z |
java
| 2022-09-12T02:35:27Z |
dolphinscheduler-task-plugin/dolphinscheduler-task-spark/src/test/java/org/apache/dolphinscheduler/plugin/task/spark/SparkTaskTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.plugin.task.spark;
import java.util.Collections;
import org.apache.dolphinscheduler.plugin.task.api.TaskExecutionContext;
import org.apache.dolphinscheduler.spi.utils.JSONUtils;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import static org.powermock.api.mockito.PowerMockito.spy;
import static org.powermock.api.mockito.PowerMockito.when;
@RunWith(PowerMockRunner.class)
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,720 |
[Bug] [spark-sql] In spark-sql, selecting either the SPARK1 or the SPARK2 version executes ${SPARK_HOME2}/bin/spark-sql
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
In spark-sql, selecting either the SPARK1 or the SPARK2 version executes ${SPARK_HOME2}/bin/spark-sql.
### What you expected to happen
In spark-sql, selecting SPARK1 should execute ${SPARK_HOME1}/bin/spark-sql and selecting SPARK2 should execute ${SPARK_HOME2}/bin/spark-sql.
### How to reproduce

[INFO] 2022-08-31 09:57:14.923 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[69] - spark task params {"localParams":[],"rawScript":"SELECT * from ods.ods_pigeon_day LIMIT 1","resourceList":[],"programType":"SQL","mainClass":"","deployMode":"client","appName":"SparkSQLDemo","sparkVersion":"SPARK1","driverCores":1,"driverMemory":"512M","numExecutors":2,"executorMemory":"2G","executorCores":2}
[INFO] 2022-08-31 09:57:14.933 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[240] - raw script : SELECT * from ods.ods_pigeon_day LIMIT 1
[INFO] 2022-08-31 09:57:14.934 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[241] - task execute path : /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[130] - spark task command: ${SPARK_HOME2}/bin/spark-sql --master yarn --deploy-mode client --driver-cores 1 --driver-memory 512M --num-executors 2 --executor-cores 2 --executor-memory 2G --name SparkSQLDemo --queue haodf -f /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44/40_44_node.sql
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class
### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11720
|
https://github.com/apache/dolphinscheduler/pull/11721
|
147e0ae8d74c39ed82ca36f068b96c941fcdcf50
|
25b78a80037c4a7edb551dd04328276ebdc7efc1
| 2022-08-31T11:27:50Z |
java
| 2022-09-12T02:35:27Z |
dolphinscheduler-task-plugin/dolphinscheduler-task-spark/src/test/java/org/apache/dolphinscheduler/plugin/task/spark/SparkTaskTest.java
|
@PrepareForTest({
JSONUtils.class
})
@PowerMockIgnore({"javax.*"})
public class SparkTaskTest {
@Test
public void testBuildCommandWithSparkSql() throws Exception {
String parameters = buildSparkParametersWithSparkSql();
TaskExecutionContext taskExecutionContext = PowerMockito.mock(TaskExecutionContext.class);
when(taskExecutionContext.getTaskParams()).thenReturn(parameters);
when(taskExecutionContext.getExecutePath()).thenReturn("/tmp");
when(taskExecutionContext.getTaskAppId()).thenReturn("5536");
SparkTask sparkTask = spy(new SparkTask(taskExecutionContext));
sparkTask.init();
Assert.assertEquals(sparkTask.buildCommand(),
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,720 |
[Bug] [spark-sql] In spark-sql, selecting either the SPARK1 or the SPARK2 version executes ${SPARK_HOME2}/bin/spark-sql
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
In spark-sql, selecting either the SPARK1 or the SPARK2 version executes ${SPARK_HOME2}/bin/spark-sql.
### What you expected to happen
In spark-sql, selecting SPARK1 should execute ${SPARK_HOME1}/bin/spark-sql and selecting SPARK2 should execute ${SPARK_HOME2}/bin/spark-sql.
### How to reproduce

[INFO] 2022-08-31 09:57:14.923 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[69] - spark task params {"localParams":[],"rawScript":"SELECT * from ods.ods_pigeon_day LIMIT 1","resourceList":[],"programType":"SQL","mainClass":"","deployMode":"client","appName":"SparkSQLDemo","sparkVersion":"SPARK1","driverCores":1,"driverMemory":"512M","numExecutors":2,"executorMemory":"2G","executorCores":2}
[INFO] 2022-08-31 09:57:14.933 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[240] - raw script : SELECT * from ods.ods_pigeon_day LIMIT 1
[INFO] 2022-08-31 09:57:14.934 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[241] - task execute path : /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class org.apache.dolphinscheduler.plugin.task.spark.SparkTask:[130] - spark task command: ${SPARK_HOME2}/bin/spark-sql --master yarn --deploy-mode client --driver-cores 1 --driver-memory 512M --num-executors 2 --executor-cores 2 --executor-memory 2G --name SparkSQLDemo --queue haodf -f /tmp/dolphinscheduler/exec/process/6710440832960/6710611439552_3/40/44/40_44_node.sql
[INFO] 2022-08-31 09:57:14.937 +0000 [taskAppId=TASK-20220831-6710611439552_3-40-44] TaskLogLogger-class
### Anything else
_No response_
### Version
3.0.0
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11720
|
https://github.com/apache/dolphinscheduler/pull/11721
|
147e0ae8d74c39ed82ca36f068b96c941fcdcf50
|
25b78a80037c4a7edb551dd04328276ebdc7efc1
| 2022-08-31T11:27:50Z |
java
| 2022-09-12T02:35:27Z |
dolphinscheduler-task-plugin/dolphinscheduler-task-spark/src/test/java/org/apache/dolphinscheduler/plugin/task/spark/SparkTaskTest.java
|
"${SPARK_HOME2}/bin/spark-sql " +
"--master yarn " +
"--deploy-mode client " +
"--driver-cores 1 " +
"--driver-memory 512M " +
"--num-executors 2 " +
"--executor-cores 2 " +
"--executor-memory 1G " +
"--name sparksql " +
"-f /tmp/5536_node.sql");
}
private String buildSparkParametersWithSparkSql() {
SparkParameters sparkParameters = new SparkParameters();
sparkParameters.setLocalParams(Collections.emptyList());
sparkParameters.setRawScript("select 11111;");
sparkParameters.setProgramType(ProgramType.SQL);
sparkParameters.setMainClass("");
sparkParameters.setDeployMode("client");
sparkParameters.setAppName("sparksql");
sparkParameters.setOthers("");
sparkParameters.setSparkVersion("SPARK2");
sparkParameters.setDriverCores(1);
sparkParameters.setDriverMemory("512M");
sparkParameters.setNumExecutors(2);
sparkParameters.setExecutorMemory("1G");
sparkParameters.setExecutorCores(2);
return JSONUtils.toJsonString(sparkParameters);
}
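    // Hypothetical follow-up test (not in the original file): it assumes the fixed behavior in
    // which a SQL task with sparkVersion "SPARK1" resolves to ${SPARK_HOME1}/bin/spark-sql, and
    // it reuses the mocking style and helper of the test above.
    @Test
    public void testBuildCommandWithSparkSqlOnSpark1() throws Exception {
        SparkParameters sparkParameters =
                JSONUtils.parseObject(buildSparkParametersWithSparkSql(), SparkParameters.class);
        sparkParameters.setSparkVersion("SPARK1");
        TaskExecutionContext taskExecutionContext = PowerMockito.mock(TaskExecutionContext.class);
        when(taskExecutionContext.getTaskParams()).thenReturn(JSONUtils.toJsonString(sparkParameters));
        when(taskExecutionContext.getExecutePath()).thenReturn("/tmp");
        when(taskExecutionContext.getTaskAppId()).thenReturn("5536");
        SparkTask sparkTask = spy(new SparkTask(taskExecutionContext));
        sparkTask.init();
        Assert.assertTrue(sparkTask.buildCommand().startsWith("${SPARK_HOME1}/bin/spark-sql"));
    }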
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it submits a `TASK_STATE_CHANGE` TaskEvent; `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` queries a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` does not remove this TaskEvent if the taskEventHandler throws an exception, so it keeps looping without making any progress - an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can find the master log printing the NPE and `The workflow has been executed by another thread` at the same time, continuously.
This bug may cause #11796
But I think this needs to be discussed separately.
### What you expected to happen
The process instance should stop due to TIMEOUT instead of entering an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
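To make the "never removed" behavior concrete, below is a simplified sketch of an event-draining loop of this kind. The names `stateEvents`, `stateEventHandler`, and `logger` are illustrative assumptions rather than the actual `WorkflowExecuteRunnable` fields; the point is that an event left at the head of the queue after a failed handler call is retried forever, while removing it (or re-queueing it with a backoff) on failure breaks the loop.

```java
// Simplified illustration (assumed names, not the real implementation):
// if handleStateEvent throws and the event stays at the head of the queue,
// the same event is handled again and again - the infinite loop described above.
while (!stateEvents.isEmpty()) {
    StateEvent stateEvent = stateEvents.peek();
    try {
        if (stateEventHandler.handleStateEvent(this, stateEvent)) {
            stateEvents.remove(stateEvent);
        }
    } catch (StateEventHandleError handleError) {
        logger.error("State event handle error, drop the event: {}", stateEvent, handleError);
        stateEvents.remove(stateEvent);
    }
}
```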
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/event/TaskTimeoutStateEventHandler.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.event;
import com.google.auto.service.AutoService;
import org.apache.dolphinscheduler.common.enums.StateEventType;
import org.apache.dolphinscheduler.common.enums.TimeoutFlag;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.plugin.task.api.enums.TaskTimeoutStrategy;
import org.apache.dolphinscheduler.server.master.metrics.TaskMetrics;
import org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteRunnable;
import org.apache.dolphinscheduler.server.master.runner.task.ITaskProcessor;
import org.apache.dolphinscheduler.server.master.runner.task.TaskAction;
import java.util.Map;
@AutoService(StateEventHandler.class)
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it submits a `TASK_STATE_CHANGE` TaskEvent; `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` queries a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` does not remove this TaskEvent if the taskEventHandler throws an exception, so it keeps looping without making any progress - an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can find the master log printing the NPE and `The workflow has been executed by another thread` at the same time, continuously.
This bug may cause #11796
But I think this needs to be discussed separately.
### What you expected to happen
The process instance should stop due to TIMEOUT instead of entering an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/event/TaskTimeoutStateEventHandler.java
|
public class TaskTimeoutStateEventHandler implements StateEventHandler {
@Override
public boolean handleStateEvent(WorkflowExecuteRunnable workflowExecuteRunnable,
StateEvent stateEvent) throws StateEventHandleError {
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it submits a `TASK_STATE_CHANGE` TaskEvent; `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` queries a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` does not remove this TaskEvent if the taskEventHandler throws an exception, so it keeps looping without making any progress - an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can find the master log printing the NPE and `The workflow has been executed by another thread` at the same time, continuously.
This bug may cause #11796
But I think this needs to be discussed separately.
### What you expected to happen
The process instance should stop due to TIMEOUT instead of entering an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/event/TaskTimeoutStateEventHandler.java
|
TaskStateEvent taskStateEvent = (TaskStateEvent) stateEvent;
TaskMetrics.incTaskInstanceByState("timeout");
workflowExecuteRunnable.checkTaskInstanceByStateEvent(taskStateEvent);
TaskInstance taskInstance =
workflowExecuteRunnable.getTaskInstance(taskStateEvent.getTaskInstanceId()).orElseThrow(
() -> new StateEventHandleError(String.format(
"Cannot find the task instance from workflow execute runnable, taskInstanceId: %s",
taskStateEvent.getTaskInstanceId())));
if (TimeoutFlag.CLOSE == taskInstance.getTaskDefine().getTimeoutFlag()) {
return true;
}
TaskTimeoutStrategy taskTimeoutStrategy = taskInstance.getTaskDefine().getTimeoutNotifyStrategy();
Map<Long, ITaskProcessor> activeTaskProcessMap = workflowExecuteRunnable.getActiveTaskProcessMap();
if (TaskTimeoutStrategy.FAILED == taskTimeoutStrategy
|| TaskTimeoutStrategy.WARNFAILED == taskTimeoutStrategy) {
ITaskProcessor taskProcessor = activeTaskProcessMap.get(taskInstance.getTaskCode());
taskProcessor.action(TaskAction.TIMEOUT);
}
if (TaskTimeoutStrategy.WARN == taskTimeoutStrategy || TaskTimeoutStrategy.WARNFAILED == taskTimeoutStrategy) {
workflowExecuteRunnable.processTimeout();
workflowExecuteRunnable.taskTimeout(taskInstance);
}
return true;
}
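    // The NPE reported in this issue comes from activeTaskProcessMap.get(...) returning null once
    // WorkflowExecuteRunnable.taskFinished has removed the processor. The helper below is an
    // illustrative sketch of one defensive way to handle that race; it is an assumption added for
    // clarity, not part of the original file and not necessarily the change merged in the PR.
    private void timeoutTaskIfStillActive(Map<Long, ITaskProcessor> activeTaskProcessMap,
                                          TaskInstance taskInstance) {
        ITaskProcessor taskProcessor = activeTaskProcessMap.get(taskInstance.getTaskCode());
        if (taskProcessor == null) {
            // taskFinished() already removed the processor, so there is nothing left to time out.
            return;
        }
        taskProcessor.action(TaskAction.TIMEOUT);
    }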
@Override
public StateEventType getEventType() {
return StateEventType.TASK_TIMEOUT;
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it submits a `TASK_STATE_CHANGE` TaskEvent; `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` queries a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` does not remove this TaskEvent if the taskEventHandler throws an exception, so it keeps looping without making any progress - an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can find the master log printing the NPE and `The workflow has been executed by another thread` at the same time, continuously.
This bug may cause #11796
But I think this needs to be discussed separately.
### What you expected to happen
The process instance should stop due to TIMEOUT instead of entering an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
*    http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it submits a `TASK_STATE_CHANGE` TaskEvent; `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` queries a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` does not remove this TaskEvent if the taskEventHandler throws an exception, so it keeps looping without making any progress - an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can find the master log printing the NPE and `The workflow has been executed by another thread` at the same time, continuously.
This bug may cause #11796
But I think this needs to be discussed separately.
### What you expected to happen
The process instance should stop due to TIMEOUT instead of entering an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.runner;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_SCHEDULE_DATE_LIST;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVERY_START_NODE_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODES;
import static org.apache.dolphinscheduler.common.Constants.COMMA;
import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP;
import static org.apache.dolphinscheduler.common.Constants.YYYY_MM_DD_HH_MM_SS;
import static org.apache.dolphinscheduler.plugin.task.api.TaskConstants.TASK_TYPE_BLOCKING;
import static org.apache.dolphinscheduler.plugin.task.api.enums.DataType.VARCHAR;
import static org.apache.dolphinscheduler.plugin.task.api.enums.Direct.IN;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.StateEventType;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.TaskGroupQueueStatus;
import org.apache.dolphinscheduler.common.enums.WorkflowExecutionStatus;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the event handler throws an exception, so it keeps looping over the same event without making any progress, i.e. an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can see the master log flooded with the NPE and `The workflow has been executed by another thread` messages at the same time.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of staying in an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.Environment;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation;
import org.apache.dolphinscheduler.dao.entity.ProjectUser;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.TaskGroupQueue;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.repository.ProcessInstanceDao;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.plugin.task.api.enums.DependResult;
import org.apache.dolphinscheduler.plugin.task.api.enums.Direct;
import org.apache.dolphinscheduler.plugin.task.api.enums.TaskExecutionStatus;
import org.apache.dolphinscheduler.plugin.task.api.model.Property;
import org.apache.dolphinscheduler.remote.command.HostUpdateCommand;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager;
import org.apache.dolphinscheduler.server.master.event.StateEvent;
import org.apache.dolphinscheduler.server.master.event.StateEventHandleError;
import org.apache.dolphinscheduler.server.master.event.StateEventHandleException;
import org.apache.dolphinscheduler.server.master.event.StateEventHandler;
import org.apache.dolphinscheduler.server.master.event.StateEventHandlerManager;
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the event handler throws an exception, so it keeps looping over the same event without making any progress, i.e. an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can see the master log flooded with the NPE and `The workflow has been executed by another thread` messages at the same time.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of staying in an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
import org.apache.dolphinscheduler.server.master.event.TaskStateEvent;
import org.apache.dolphinscheduler.server.master.event.WorkflowStateEvent;
import org.apache.dolphinscheduler.server.master.metrics.TaskMetrics;
import org.apache.dolphinscheduler.server.master.runner.task.ITaskProcessor;
import org.apache.dolphinscheduler.server.master.runner.task.TaskAction;
import org.apache.dolphinscheduler.server.master.runner.task.TaskProcessorFactory;
import org.apache.dolphinscheduler.service.alert.ProcessAlertManager;
import org.apache.dolphinscheduler.service.cron.CronUtils;
import org.apache.dolphinscheduler.service.exceptions.CronParseException;
import org.apache.dolphinscheduler.service.expand.CuringParamsService;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.queue.PeerTaskInstancePriorityQueue;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.math.NumberUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the event handler throws an exception, so it keeps looping over the same event without making any progress, i.e. an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can see the master log flooded with the NPE and `The workflow has been executed by another thread` messages at the same time.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of staying in an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
import lombok.NonNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.BeanUtils;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
/**
 * Workflow execute task, used to execute a workflow instance.
 */
public class WorkflowExecuteRunnable implements Callable<WorkflowSubmitStatue> {
private static final Logger logger = LoggerFactory.getLogger(WorkflowExecuteRunnable.class);
private final ProcessService processService;
private ProcessInstanceDao processInstanceDao;
private final ProcessAlertManager processAlertManager;
private final NettyExecutorManager nettyExecutorManager;
private final ProcessInstance processInstance;
private ProcessDefinition processDefinition;
private DAG<String, TaskNode, TaskNodeRelation> dag;
/**
 * unique key of workflow
 */
private String key;
private WorkflowRunnableStatus workflowRunnableStatus = WorkflowRunnableStatus.CREATED;
/**
 * submit failure nodes
 */
private boolean taskFailedSubmit = false;
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the event handler throws an exception, so it keeps looping over the same event without making any progress, i.e. an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can see the master log flooded with the NPE and `The workflow has been executed by another thread` messages at the same time.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of staying in an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
/**
 * task instance hash map, taskId as key
 */
private final Map<Integer, TaskInstance> taskInstanceMap = new ConcurrentHashMap<>();
/**
 * running taskProcessor, taskCode as key, taskProcessor as value
 * only on taskProcessor per taskCode
 */
private final Map<Long, ITaskProcessor> activeTaskProcessorMaps = new ConcurrentHashMap<>();
/**
 * valid task map, taskCode as key, taskId as value
 * in a DAG, only one taskInstance per taskCode is valid
 */
private final Map<Long, Integer> validTaskMap = new ConcurrentHashMap<>();
/**
 * error task map, taskCode as key, taskInstanceId as value
 * in a DAG, only one taskInstance per taskCode is valid
 */
private final Map<Long, Integer> errorTaskMap = new ConcurrentHashMap<>();
/**
 * complete task map, taskCode as key, taskInstanceId as value
 * in a DAG, only one taskInstance per taskCode is valid
 */
private final Map<Long, Integer> completeTaskMap = new ConcurrentHashMap<>();
/**
 * depend failed task set
 */
private final Set<Long> dependFailedTaskSet = Sets.newConcurrentHashSet();
/**
 * forbidden task map, code as key
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the event handler throws an exception, so it keeps looping over the same event without making any progress, i.e. an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can see the master log flooded with the NPE and `The workflow has been executed by another thread` messages at the same time.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of staying in an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
 */
private final Map<Long, TaskNode> forbiddenTaskMap = new ConcurrentHashMap<>();
/**
 * skip task map, code as key
 */
private final Map<String, TaskNode> skipTaskNodeMap = new ConcurrentHashMap<>();
/**
 * complement date list
 */
private List<Date> complementListDate = Lists.newLinkedList();
/**
 * state event queue
 */
private final ConcurrentLinkedQueue<StateEvent> stateEvents = new ConcurrentLinkedQueue<>();
/**
 * The StandBy task list, will be executed, need to know, the taskInstance in this queue may doesn't have id.
 */
private final PeerTaskInstancePriorityQueue readyToSubmitTaskQueue = new PeerTaskInstancePriorityQueue();
/**
 * wait to retry taskInstance map, taskCode as key, taskInstance as value
 * before retry, the taskInstance id is 0
 */
private final Map<Long, TaskInstance> waitToRetryTaskInstanceMap = new ConcurrentHashMap<>();
private final StateWheelExecuteThread stateWheelExecuteThread;
private final CuringParamsService curingParamsService;
private final String masterAddress;
/**
 * @param processInstance processInstance
 * @param processService processService
 * @param processInstanceDao processInstanceDao
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the event handler throws an exception, so it keeps looping over the same event without making any progress, i.e. an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can see the master log flooded with the NPE and `The workflow has been executed by another thread` messages at the same time.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of staying in an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
 * @param nettyExecutorManager nettyExecutorManager
 * @param processAlertManager processAlertManager
 * @param masterConfig masterConfig
 * @param stateWheelExecuteThread stateWheelExecuteThread
 */
public WorkflowExecuteRunnable(
@NonNull ProcessInstance processInstance,
@NonNull ProcessService processService,
@NonNull ProcessInstanceDao processInstanceDao,
@NonNull NettyExecutorManager nettyExecutorManager,
@NonNull ProcessAlertManager processAlertManager,
@NonNull MasterConfig masterConfig,
@NonNull StateWheelExecuteThread stateWheelExecuteThread,
@NonNull CuringParamsService curingParamsService) {
this.processService = processService;
this.processInstanceDao = processInstanceDao;
this.processInstance = processInstance;
this.nettyExecutorManager = nettyExecutorManager;
this.processAlertManager = processAlertManager;
this.stateWheelExecuteThread = stateWheelExecuteThread;
this.curingParamsService = curingParamsService;
this.masterAddress = NetUtils.getAddr(masterConfig.getListenPort());
TaskMetrics.registerTaskPrepared(readyToSubmitTaskQueue::size);
}
/**
 * the process start nodes are submitted completely.
 */
public boolean isStart() {
return WorkflowRunnableStatus.STARTED == workflowRunnableStatus;
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the event handler throws an exception, so it keeps looping over the same event without making any progress, i.e. an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can see the master log flooded with the NPE and `The workflow has been executed by another thread` messages at the same time.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of staying in an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
/**
 * handle event
 */
public void handleEvents() {
if (!isStart()) {
logger.info(
"The workflow instance is not started, will not handle its state event, current state event size: {}",
stateEvents);
return;
}
StateEvent stateEvent = null;
while (!this.stateEvents.isEmpty()) {
try {
stateEvent = this.stateEvents.peek();
LoggerUtils.setWorkflowAndTaskInstanceIDMDC(stateEvent.getProcessInstanceId(),
stateEvent.getTaskInstanceId());
checkProcessInstance(stateEvent);
StateEventHandler stateEventHandler =
StateEventHandlerManager.getStateEventHandler(stateEvent.getType())
.orElseThrow(() -> new StateEventHandleError(
"Cannot find handler for the given state event"));
logger.info("Begin to handle state event, {}", stateEvent);
if (stateEventHandler.handleStateEvent(this, stateEvent)) {
this.stateEvents.remove(stateEvent);
}
} catch (StateEventHandleError stateEventHandleError) {
logger.error("State event handle error, will remove this event: {}", stateEvent, stateEventHandleError);
this.stateEvents.remove(stateEvent);
|
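
For readers following the chunk above: one defensive pattern is to classify a missing task processor as a `StateEventHandleError`, because `handleEvents` removes the event for that exception type instead of retrying it forever. The sketch below only illustrates that idea with simplified stand-in types; it is not necessarily the fix adopted in the linked PR.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Sketch of guarding the processor lookup before handling a timeout event.
// TaskProcessor and HandleError are illustrative stand-ins for ITaskProcessor
// and StateEventHandleError; the real handling lives in TaskTimeoutStateEventHandler.
public class TimeoutGuardSketch {

    interface TaskProcessor {
        void timeout();
    }

    static class HandleError extends Exception {
        HandleError(String message) {
            super(message);
        }
    }

    private final Map<Long, TaskProcessor> activeTaskProcessorMaps = new ConcurrentHashMap<>();

    public void handleTimeout(long taskCode) throws HandleError {
        TaskProcessor processor = activeTaskProcessorMaps.get(taskCode);
        if (processor == null) {
            // Classify the missing processor as a non-retryable handling error so
            // the event loop removes the event instead of retrying it forever.
            throw new HandleError("Cannot find task processor for task code " + taskCode);
        }
        processor.timeout();
    }
}
```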
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the event handler throws an exception, so it keeps looping over the same event without making any progress, i.e. an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can see the master log flooded with the NPE and `The workflow has been executed by another thread` messages at the same time.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of staying in an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
ThreadUtils.sleep(Constants.SLEEP_TIME_MILLIS);
} catch (StateEventHandleException stateEventHandleException) {
logger.error("State event handle error, will retry this event: {}",
stateEvent,
stateEventHandleException);
ThreadUtils.sleep(Constants.SLEEP_TIME_MILLIS);
} catch (Exception e) {
logger.error("State event handle error, get a unknown exception, will retry this event: {}",
stateEvent,
e);
ThreadUtils.sleep(Constants.SLEEP_TIME_MILLIS);
} finally {
LoggerUtils.removeWorkflowAndTaskInstanceIdMDC();
}
}
}
public String getKey() {
if (StringUtils.isNotEmpty(key) || this.processDefinition == null) {
return key;
}
key = String.format("%d_%d_%d",
this.processDefinition.getCode(),
this.processDefinition.getVersion(),
this.processInstance.getId());
return key;
}
public boolean addStateEvent(StateEvent stateEvent) {
if (processInstance.getId() != stateEvent.getProcessInstanceId()) {
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the event handler throws an exception, so it keeps looping over the same event without making any progress, i.e. an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can see the master log flooded with the NPE and `The workflow has been executed by another thread` messages at the same time.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of staying in an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
logger.info("state event would be abounded :{}", stateEvent);
return false;
}
this.stateEvents.add(stateEvent);
return true;
}
public int eventSize() {
return this.stateEvents.size();
}
public ProcessInstance getProcessInstance() {
return this.processInstance;
}
public boolean checkForceStartAndWakeUp(StateEvent stateEvent) {
TaskGroupQueue taskGroupQueue = this.processService.loadTaskGroupQueue(stateEvent.getTaskInstanceId());
if (taskGroupQueue.getForceStart() == Flag.YES.getCode()) {
TaskInstance taskInstance = this.processService.findTaskInstanceById(stateEvent.getTaskInstanceId());
ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(taskInstance.getTaskCode());
taskProcessor.action(TaskAction.DISPATCH);
this.processService.updateTaskGroupQueueStatus(taskGroupQueue.getTaskId(),
TaskGroupQueueStatus.ACQUIRE_SUCCESS.getCode());
return true;
}
if (taskGroupQueue.getInQueue() == Flag.YES.getCode()) {
boolean acquireTaskGroup = processService.robTaskGroupResource(taskGroupQueue);
if (acquireTaskGroup) {
TaskInstance taskInstance = this.processService.findTaskInstanceById(stateEvent.getTaskInstanceId());
ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(taskInstance.getTaskCode());
taskProcessor.action(TaskAction.DISPATCH);
return true;
}
|
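
`checkForceStartAndWakeUp` above dereferences the result of `activeTaskProcessorMaps.get(...)` directly. A hypothetical `Optional`-returning lookup helper (illustrative only, not part of the real class) makes the "processor already removed" case explicit at the call site:

```java
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative helper, not part of WorkflowExecuteRunnable: returning Optional
// from the processor lookup forces callers such as the wake-up path above to
// decide what to do when the processor has already been removed, instead of
// risking a NullPointerException.
public class ProcessorLookupSketch<P> {

    private final Map<Long, P> activeTaskProcessorMaps = new ConcurrentHashMap<>();

    public Optional<P> findProcessor(long taskCode) {
        return Optional.ofNullable(activeTaskProcessorMaps.get(taskCode));
    }
}
```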
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the event handler throws an exception, so it keeps looping over the same event without making any progress, i.e. an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can see the master log flooded with the NPE and `The workflow has been executed by another thread` messages at the same time.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of staying in an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
}
return false;
}
public void processTimeout() {
ProjectUser projectUser = processService.queryProjectWithUserByProcessInstanceId(processInstance.getId());
this.processAlertManager.sendProcessTimeoutAlert(this.processInstance, projectUser);
}
public void taskTimeout(TaskInstance taskInstance) {
ProjectUser projectUser = processService.queryProjectWithUserByProcessInstanceId(processInstance.getId());
processAlertManager.sendTaskTimeoutAlert(processInstance, taskInstance, projectUser);
}
public void taskFinished(TaskInstance taskInstance) throws StateEventHandleException {
logger.info("TaskInstance finished task code:{} state:{}", taskInstance.getTaskCode(), taskInstance.getState());
try {
activeTaskProcessorMaps.remove(taskInstance.getTaskCode());
stateWheelExecuteThread.removeTask4TimeoutCheck(processInstance, taskInstance);
stateWheelExecuteThread.removeTask4RetryCheck(processInstance, taskInstance);
stateWheelExecuteThread.removeTask4StateCheck(processInstance, taskInstance);
if (taskInstance.getState().isSuccess()) {
completeTaskMap.put(taskInstance.getTaskCode(), taskInstance.getId());
processInstance.setVarPool(taskInstance.getVarPool());
processInstanceDao.upsertProcessInstance(processInstance);
if (!processInstance.isBlocked()) {
submitPostNode(Long.toString(taskInstance.getTaskCode()));
}
} else if (taskInstance.taskCanRetry() && !processInstance.getState().isReadyStop()) {
logger.info("Retry taskInstance taskInstance state: {}", taskInstance.getState());
retryTaskInstance(taskInstance);
|
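
The ordering problem the issue describes can be shown with a few lines of plain Java (simplified stand-in types): once `taskFinished` has removed the entry, any timeout or retry event handled afterwards that looks the processor up again gets `null`.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Tiny demonstration of the ordering issue: remove first, look up later -> null.
public class RemoveThenLookupDemo {

    public static void main(String[] args) {
        Map<Long, String> activeTaskProcessorMaps = new ConcurrentHashMap<>();
        long taskCode = 123L;
        activeTaskProcessorMaps.put(taskCode, "shell-task-processor");

        // taskFinished(...) removes the processor first ...
        activeTaskProcessorMaps.remove(taskCode);

        // ... and the TASK_TIMEOUT event handled afterwards sees null,
        // which is exactly where the NPE in the report comes from.
        System.out.println(activeTaskProcessorMaps.get(taskCode)); // prints: null
    }
}
```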
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the event handler throws an exception, so it keeps looping over the same event without making any progress, i.e. an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can see the master log flooded with the NPE and `The workflow has been executed by another thread` messages at the same time.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of staying in an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
} else if (taskInstance.getState().isFailure()) {
completeTaskMap.put(taskInstance.getTaskCode(), taskInstance.getId());
if (processInstance.getFailureStrategy() == FailureStrategy.CONTINUE && DagHelper.haveAllNodeAfterNode(
Long.toString(taskInstance.getTaskCode()),
dag)) {
submitPostNode(Long.toString(taskInstance.getTaskCode()));
} else {
errorTaskMap.put(taskInstance.getTaskCode(), taskInstance.getId());
if (processInstance.getFailureStrategy() == FailureStrategy.END) {
killAllTasks();
}
}
} else if (taskInstance.getState().isFinished()) {
completeTaskMap.put(taskInstance.getTaskCode(), taskInstance.getId());
}
logger.info("TaskInstance finished will try to update the workflow instance state, task code:{} state:{}",
taskInstance.getTaskCode(),
taskInstance.getState());
this.updateProcessInstanceState();
} catch (Exception ex) {
logger.error("Task finish failed, get a exception, will remove this taskInstance from completeTaskMap", ex);
completeTaskMap.remove(taskInstance.getTaskCode());
throw ex;
}
}
/**
 * release task group
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the event handler throws an exception, so it keeps looping over the same event without making any progress, i.e. an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can see the master log flooded with the NPE and `The workflow has been executed by another thread` messages at the same time.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of staying in an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
 *
 * @param taskInstance
 */
public void releaseTaskGroup(TaskInstance taskInstance) {
logger.info("Release task group");
if (taskInstance.getTaskGroupId() > 0) {
TaskInstance nextTaskInstance = this.processService.releaseTaskGroup(taskInstance);
if (nextTaskInstance != null) {
if (nextTaskInstance.getProcessInstanceId() == taskInstance.getProcessInstanceId()) {
TaskStateEvent nextEvent = TaskStateEvent.builder()
.processInstanceId(processInstance.getId())
.taskInstanceId(nextTaskInstance.getId())
.type(StateEventType.WAIT_TASK_GROUP)
.build();
this.stateEvents.add(nextEvent);
} else {
ProcessInstance processInstance =
this.processService.findProcessInstanceById(nextTaskInstance.getProcessInstanceId());
this.processService.sendStartTask2Master(processInstance, nextTaskInstance.getId(),
org.apache.dolphinscheduler.remote.command.CommandType.TASK_WAKEUP_EVENT_REQUEST);
}
}
}
}
/**
 * crate new task instance to retry, different objects from the original
 *
 * @param taskInstance
 */
private void retryTaskInstance(TaskInstance taskInstance) throws StateEventHandleException {
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the event handler throws an exception, so it keeps looping over the same event without making any progress, i.e. an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can see the master log flooded with the NPE and `The workflow has been executed by another thread` messages at the same time.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of staying in an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
if (!taskInstance.taskCanRetry()) {
return;
}
TaskInstance newTaskInstance = cloneRetryTaskInstance(taskInstance);
if (newTaskInstance == null) {
logger.error("retry fail, new taskInstance is null, task code:{}, task id:{}",
taskInstance.getTaskCode(),
taskInstance.getId());
return;
}
waitToRetryTaskInstanceMap.put(newTaskInstance.getTaskCode(), newTaskInstance);
if (!taskInstance.retryTaskIntervalOverTime()) {
logger.info(
"failure task will be submitted: process id: {}, task instance code: {} state:{} retry times:{} / {}, interval:{}",
processInstance.getId(), newTaskInstance.getTaskCode(),
newTaskInstance.getState(), newTaskInstance.getRetryTimes(), newTaskInstance.getMaxRetryTimes(),
newTaskInstance.getRetryInterval());
stateWheelExecuteThread.addTask4TimeoutCheck(processInstance, newTaskInstance);
stateWheelExecuteThread.addTask4RetryCheck(processInstance, newTaskInstance);
} else {
addTaskToStandByList(newTaskInstance);
submitStandByTask();
waitToRetryTaskInstanceMap.remove(newTaskInstance.getTaskCode());
}
}
/**
 * update process instance
 */
public void refreshProcessInstance(int processInstanceId) {
logger.info("process instance update: {}", processInstanceId);
|
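
A rough sketch of the kind of check `retryTaskIntervalOverTime()` performs, i.e. whether the configured retry interval has elapsed since the failed attempt ended; the field names and the minute-based unit here are assumptions for illustration, not the real `TaskInstance` API.

```java
import java.time.Duration;
import java.time.Instant;

// Illustrative stand-in for the retry-interval check; field names and the
// minute unit are assumptions, not the actual DolphinScheduler implementation.
public class RetryIntervalSketch {

    private final Instant lastAttemptEndTime;
    private final int retryIntervalMinutes;

    public RetryIntervalSketch(Instant lastAttemptEndTime, int retryIntervalMinutes) {
        this.lastAttemptEndTime = lastAttemptEndTime;
        this.retryIntervalMinutes = retryIntervalMinutes;
    }

    public boolean retryIntervalOverTime(Instant now) {
        // true once the configured interval has elapsed since the last attempt ended
        Duration elapsed = Duration.between(lastAttemptEndTime, now);
        return elapsed.compareTo(Duration.ofMinutes(retryIntervalMinutes)) >= 0;
    }
}
```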
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the event handler throws an exception, so it keeps looping over the same event without making any progress, i.e. an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can see the master log flooded with the NPE and `The workflow has been executed by another thread` messages at the same time.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of staying in an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
ProcessInstance newProcessInstance = processService.findProcessInstanceById(processInstanceId);
BeanUtils.copyProperties(newProcessInstance, processInstance);
processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(),
processInstance.getProcessDefinitionVersion());
processInstance.setProcessDefinition(processDefinition);
}
/**
 * update task instance
 */
public void refreshTaskInstance(int taskInstanceId) {
logger.info("task instance update: {} ", taskInstanceId);
TaskInstance taskInstance = processService.findTaskInstanceById(taskInstanceId);
if (taskInstance == null) {
logger.error("can not find task instance, id:{}", taskInstanceId);
return;
}
processService.packageTaskInstance(taskInstance, processInstance);
taskInstanceMap.put(taskInstance.getId(), taskInstance);
validTaskMap.remove(taskInstance.getTaskCode());
if (Flag.YES == taskInstance.getFlag()) {
validTaskMap.put(taskInstance.getTaskCode(), taskInstance.getId());
}
}
/**
 * check process instance by state event
 */
public void checkProcessInstance(StateEvent stateEvent) throws StateEventHandleError {
if (this.processInstance.getId() != stateEvent.getProcessInstanceId()) {
throw new StateEventHandleError("The event doesn't contains process instance id");
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the event handler throws an exception, so it keeps looping over the same event without making any progress, i.e. an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can see the master log flooded with the NPE and `The workflow has been executed by another thread` messages at the same time.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of staying in an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
}
}
/**
 * check if task instance exist by state event
 */
public void checkTaskInstanceByStateEvent(TaskStateEvent stateEvent) throws StateEventHandleError {
if (stateEvent.getTaskInstanceId() == 0) {
throw new StateEventHandleError("The taskInstanceId is 0");
}
if (!taskInstanceMap.containsKey(stateEvent.getTaskInstanceId())) {
throw new StateEventHandleError("Cannot find the taskInstance from taskInstanceMap");
}
}
/**
 * check if task instance exist by id
 */
public boolean checkTaskInstanceById(int taskInstanceId) {
if (taskInstanceMap.isEmpty()) {
return false;
}
return taskInstanceMap.containsKey(taskInstanceId);
}
/**
 * get task instance from memory
 */
public Optional<TaskInstance> getTaskInstance(int taskInstanceId) {
if (taskInstanceMap.containsKey(taskInstanceId)) {
return Optional.ofNullable(taskInstanceMap.get(taskInstanceId));
}
return Optional.empty();
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the event handler throws an exception, so it keeps looping over the same event without making any progress, i.e. an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can see the master log flooded with the NPE and `The workflow has been executed by another thread` messages at the same time.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of staying in an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
}
public Optional<TaskInstance> getTaskInstance(long taskCode) {
if (taskInstanceMap.isEmpty()) {
return Optional.empty();
}
for (TaskInstance taskInstance : taskInstanceMap.values()) {
if (taskInstance.getTaskCode() == taskCode) {
return Optional.of(taskInstance);
}
}
return Optional.empty();
}
public Optional<TaskInstance> getActiveTaskInstanceByTaskCode(long taskCode) {
Integer taskInstanceId = validTaskMap.get(taskCode);
if (taskInstanceId != null) {
return Optional.ofNullable(taskInstanceMap.get(taskInstanceId));
}
return Optional.empty();
}
public Optional<TaskInstance> getRetryTaskInstanceByTaskCode(long taskCode) {
if (waitToRetryTaskInstanceMap.containsKey(taskCode)) {
return Optional.ofNullable(waitToRetryTaskInstanceMap.get(taskCode));
}
return Optional.empty();
}
public void processBlock() {
ProjectUser projectUser = processService.queryProjectWithUserByProcessInstanceId(processInstance.getId());
processAlertManager.sendProcessBlockingAlert(processInstance, projectUser);
logger.info("processInstance {} block alert send successful!", processInstance.getId());
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face an infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the event handler throws an exception, so it keeps looping over the same event without making any progress, i.e. an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` keeps printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0.
So you can see the master log flooded with the NPE and `The workflow has been executed by another thread` messages at the same time.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of staying in an infinite loop while still showing as running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
public boolean processComplementData() {
if (!needComplementProcess()) {
return false;
}
if (processInstance.getState().isReadyStop() || !processInstance.getState().isFinished()) {
return false;
}
Date scheduleDate = processInstance.getScheduleTime();
if (scheduleDate == null) {
scheduleDate = complementListDate.get(0);
} else if (processInstance.getState().isFinished()) {
endProcess();
if (complementListDate.isEmpty()) {
logger.info("process complement end. process id:{}", processInstance.getId());
return true;
}
int index = complementListDate.indexOf(scheduleDate);
if (index >= complementListDate.size() - 1 || !processInstance.getState().isSuccess()) {
logger.info("process complement end. process id:{}", processInstance.getId());
return true;
}
logger.info("process complement continue. process id:{}, schedule time:{} complementListDate:{}",
processInstance.getId(), processInstance.getScheduleTime(), complementListDate);
scheduleDate = complementListDate.get(index + 1);
}
int create = this.createComplementDataCommand(scheduleDate);
|
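A standalone sketch of the date-advancement rule that `processComplementData` above applies: finish when the current schedule date is the last entry of the complement list, otherwise move to the next one (plain `java.time.LocalDate` is used here instead of the project's `Date` fields, purely for illustration):

```java
import java.time.LocalDate;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;

public class ComplementDateSketch {

    /** Returns the next complement date, or empty when the complement run should end. */
    static Optional<LocalDate> nextComplementDate(List<LocalDate> complementListDate, LocalDate current) {
        int index = complementListDate.indexOf(current);
        if (index < 0 || index >= complementListDate.size() - 1) {
            return Optional.empty(); // unknown or last date: complement ends
        }
        return Optional.of(complementListDate.get(index + 1));
    }

    public static void main(String[] args) {
        List<LocalDate> dates = Arrays.asList(
                LocalDate.of(2022, 9, 1), LocalDate.of(2022, 9, 2), LocalDate.of(2022, 9, 3));
        System.out.println(nextComplementDate(dates, LocalDate.of(2022, 9, 2))); // Optional[2022-09-03]
        System.out.println(nextComplementDate(dates, LocalDate.of(2022, 9, 3))); // Optional.empty
    }
}
```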
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
when we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task. When the shell task fails, it submits a `TASK_STATE_CHANGE` TaskEvent, and `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if its taskEventHandler throws an Exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0
So you can see the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop due to the TIMEOUT, not stay in an infinite loop while appearing to keep running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
if (create > 0) {
logger.info("create complement data command successfully.");
}
return true;
}
private int createComplementDataCommand(Date scheduleDate) {
Command command = new Command();
command.setScheduleTime(scheduleDate);
command.setCommandType(CommandType.COMPLEMENT_DATA);
command.setProcessDefinitionCode(processInstance.getProcessDefinitionCode());
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) {
cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
}
if (cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_SCHEDULE_DATE_LIST)) {
cmdParam.replace(CMDPARAM_COMPLEMENT_DATA_SCHEDULE_DATE_LIST,
cmdParam.get(CMDPARAM_COMPLEMENT_DATA_SCHEDULE_DATE_LIST)
.substring(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_SCHEDULE_DATE_LIST).indexOf(COMMA) + 1));
}
if (cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) {
cmdParam.replace(CMDPARAM_COMPLEMENT_DATA_START_DATE,
DateUtils.format(scheduleDate, YYYY_MM_DD_HH_MM_SS, null));
}
command.setCommandParam(JSONUtils.toJsonString(cmdParam));
command.setTaskDependType(processInstance.getTaskDependType());
command.setFailureStrategy(processInstance.getFailureStrategy());
command.setWarningType(processInstance.getWarningType());
command.setWarningGroupId(processInstance.getWarningGroupId());
command.setStartTime(new Date());
command.setExecutorId(processInstance.getExecutorId());
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
when we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task. When the shell task fails, it submits a `TASK_STATE_CHANGE` TaskEvent, and `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if its taskEventHandler throws an Exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0
So you can see the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop due to the TIMEOUT, not stay in an infinite loop while appearing to keep running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
command.setUpdateTime(new Date());
command.setProcessInstancePriority(processInstance.getProcessInstancePriority());
command.setWorkerGroup(processInstance.getWorkerGroup());
command.setEnvironmentCode(processInstance.getEnvironmentCode());
command.setDryRun(processInstance.getDryRun());
command.setProcessInstanceId(0);
command.setProcessDefinitionVersion(processInstance.getProcessDefinitionVersion());
return processService.createCommand(command);
}
private boolean needComplementProcess() {
if (processInstance.isComplementData() && Flag.NO == processInstance.getIsSubProcess()) {
return true;
}
return false;
}
/**
* ProcessInstance start entrypoint.
*/
@Override
public WorkflowSubmitStatue call() {
if (isStart()) {
logger.warn("[WorkflowInstance-{}] The workflow has already been started", processInstance.getId());
return WorkflowSubmitStatue.DUPLICATED_SUBMITTED;
}
try {
LoggerUtils.setWorkflowInstanceIdMDC(processInstance.getId());
if (workflowRunnableStatus == WorkflowRunnableStatus.CREATED) {
buildFlowDag();
workflowRunnableStatus = WorkflowRunnableStatus.INITIALIZE_DAG;
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
when we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task. When the shell task fails, it submits a `TASK_STATE_CHANGE` TaskEvent, and `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if its taskEventHandler throws an Exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0
So you can see the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop due to the TIMEOUT, not stay in an infinite loop while appearing to keep running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
logger.nfo("workflowStatue changed to :{}", workflowRunnableStatus);
}
f (workflowRunnableStatus == WorkflowRunnableStatus.INITIALIZE_DAG) {
ntTaskQueue();
workflowRunnableStatus = WorkflowRunnableStatus.INITIALIZE_QUEUE;
logger.nfo("workflowStatue changed to :{}", workflowRunnableStatus);
}
f (workflowRunnableStatus == WorkflowRunnableStatus.INITIALIZE_QUEUE) {
submtPostNode(null);
workflowRunnableStatus = WorkflowRunnableStatus.STARTED;
logger.nfo("workflowStatue changed to :{}", workflowRunnableStatus);
}
return WorkflowSubmtStatue.SUCCESS;
} catch (Excepton e) {
logger.error("Start workflow error", e);
return WorkflowSubmtStatue.FAILED;
} fnally {
LoggerUtls.removeWorkflowInstanceIdMDC();
}
}
/**
* process end handle
*/
publc vod endProcess() {
ths.stateEvents.clear();
f (processDefnton.getExecutonType().typeIsSeralWat() || processDefnton.getExecutonType()
.typeIsSeralProrty()) {
checkSeralProcess(processDefnton);
}
ProjectUser projectUser = processServce.queryProjectWthUserByProcessInstanceId(processInstance.getId());
|
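The `call()` method above walks a one-way ladder of states so that a re-entered start resumes where it left off. A compact, purely illustrative sketch of that pattern (method bodies are placeholders, not the project's real logic):

```java
public class WorkflowStartSketch {

    enum WorkflowRunnableStatus { CREATED, INITIALIZE_DAG, INITIALIZE_QUEUE, STARTED }

    private WorkflowRunnableStatus workflowRunnableStatus = WorkflowRunnableStatus.CREATED;

    /** Each stage runs at most once; a later call resumes from the last stage reached. */
    public void start() {
        if (workflowRunnableStatus == WorkflowRunnableStatus.CREATED) {
            buildFlowDag();
            workflowRunnableStatus = WorkflowRunnableStatus.INITIALIZE_DAG;
        }
        if (workflowRunnableStatus == WorkflowRunnableStatus.INITIALIZE_DAG) {
            initTaskQueue();
            workflowRunnableStatus = WorkflowRunnableStatus.INITIALIZE_QUEUE;
        }
        if (workflowRunnableStatus == WorkflowRunnableStatus.INITIALIZE_QUEUE) {
            submitPostNode();
            workflowRunnableStatus = WorkflowRunnableStatus.STARTED;
        }
    }

    private void buildFlowDag() { /* build the DAG from task relations */ }

    private void initTaskQueue() { /* recover or initialize task instances */ }

    private void submitPostNode() { /* submit the first batch of tasks */ }
}
```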
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
when we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task. When the shell task fails, it submits a `TASK_STATE_CHANGE` TaskEvent, and `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if its taskEventHandler throws an Exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0
So you can see the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop due to the TIMEOUT, not stay in an infinite loop while appearing to keep running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
if (processAlertManager.isNeedToSendWarning(processInstance)) {
processAlertManager.sendAlertProcessInstance(processInstance, getValidTaskList(), projectUser);
}
if (processInstance.getState().isSuccess()) {
processAlertManager.closeAlert(processInstance);
}
if (checkTaskQueue()) {
processService.releaseAllTaskGroup(processInstance.getId());
}
}
public void checkSerialProcess(ProcessDefinition processDefinition) {
int nextInstanceId = processInstance.getNextProcessInstanceId();
if (nextInstanceId == 0) {
ProcessInstance nextProcessInstance =
this.processService.loadNextProcess4Serial(processInstance.getProcessDefinition().getCode(),
WorkflowExecutionStatus.SERIAL_WAIT.getCode(), processInstance.getId());
if (nextProcessInstance == null) {
return;
}
ProcessInstance nextReadyStopProcessInstance =
this.processService.loadNextProcess4Serial(processInstance.getProcessDefinition().getCode(),
WorkflowExecutionStatus.READY_STOP.getCode(), processInstance.getId());
if (processDefinition.getExecutionType().typeIsSerialPriority() && nextReadyStopProcessInstance != null) {
return;
}
nextInstanceId = nextProcessInstance.getId();
}
ProcessInstance nextProcessInstance = this.processService.findProcessInstanceById(nextInstanceId);
if (nextProcessInstance.getState().isFinished() || nextProcessInstance.getState().isRunning()) {
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
when we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task. When the shell task fails, it submits a `TASK_STATE_CHANGE` TaskEvent, and `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if its taskEventHandler throws an Exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0
So you can see the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop due to the TIMEOUT, not stay in an infinite loop while appearing to keep running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
return;
}
Map<String, Object> cmdParam = new HashMap<>();
cmdParam.put(CMD_PARAM_RECOVER_PROCESS_ID_STRING, nextInstanceId);
Command command = new Command();
command.setCommandType(CommandType.RECOVER_SERIAL_WAIT);
command.setProcessInstanceId(nextProcessInstance.getId());
command.setProcessDefinitionCode(processDefinition.getCode());
command.setProcessDefinitionVersion(processDefinition.getVersion());
command.setCommandParam(JSONUtils.toJsonString(cmdParam));
processService.createCommand(command);
}
/**
* Generate process dag
*
* @throws Exception exception
*/
private void buildFlowDag() throws Exception {
processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(),
processInstance.getProcessDefinitionVersion());
processInstance.setProcessDefinition(processDefinition);
List<TaskInstance> recoverNodeList = getRecoverTaskInstanceList(processInstance.getCommandParam());
List<ProcessTaskRelation> processTaskRelations =
processService.findRelationByCode(processDefinition.getCode(), processDefinition.getVersion());
List<TaskDefinitionLog> taskDefinitionLogs =
processService.getTaskDefineLogListByRelation(processTaskRelations);
List<TaskNode> taskNodeList = processService.transformTask(processTaskRelations, taskDefinitionLogs);
forbiddenTaskMap.clear();
taskNodeList.forEach(taskNode -> {
if (taskNode.isForbidden()) {
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
when we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task. When the shell task fails, it submits a `TASK_STATE_CHANGE` TaskEvent, and `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if its taskEventHandler throws an Exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0
So you can see the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop due to the TIMEOUT, not stay in an infinite loop while appearing to keep running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
forbiddenTaskMap.put(taskNode.getCode(), taskNode);
}
});
List<String> recoveryNodeCodeList = getRecoveryNodeCodeList(recoverNodeList);
List<String> startNodeNameList = parseStartNodeName(processInstance.getCommandParam());
ProcessDag processDag = generateFlowDag(taskNodeList, startNodeNameList, recoveryNodeCodeList,
processInstance.getTaskDependType());
if (processDag == null) {
logger.error("processDag is null");
return;
}
dag = DagHelper.buildDagGraph(processDag);
logger.info("Build dag success, dag: {}", dag);
}
/**
* init task queue
*/
private void initTaskQueue() throws StateEventHandleException, CronParseException {
taskFailedSubmit = false;
activeTaskProcessorMaps.clear();
dependFailedTaskSet.clear();
completeTaskMap.clear();
errorTaskMap.clear();
if (!isNewProcessInstance()) {
logger.info("The workflowInstance is not a newly running instance, runtimes: {}, recover flag: {}",
processInstance.getRunTimes(),
processInstance.getRecovery());
List<TaskInstance> validTaskInstanceList =
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
when we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task. When the shell task fails, it submits a `TASK_STATE_CHANGE` TaskEvent, and `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if its taskEventHandler throws an Exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0
So you can see the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop due to the TIMEOUT, not stay in an infinite loop while appearing to keep running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
processService.findValidTaskListByProcessId(processInstance.getId());
for (TaskInstance task : validTaskInstanceList) {
try {
LoggerUtils.setWorkflowAndTaskInstanceIDMDC(task.getProcessInstanceId(), task.getId());
logger.info(
"Check the taskInstance from a exist workflowInstance, existTaskInstanceCode: {}, taskInstanceStatus: {}",
task.getTaskCode(),
task.getState());
if (validTaskMap.containsKey(task.getTaskCode())) {
int oldTaskInstanceId = validTaskMap.get(task.getTaskCode());
TaskInstance oldTaskInstance = taskInstanceMap.get(oldTaskInstanceId);
if (!oldTaskInstance.getState().isFinished() && task.getState().isFinished()) {
task.setFlag(Flag.NO);
processService.updateTaskInstance(task);
continue;
}
logger.warn("have same taskCode taskInstance when init task queue, taskCode:{}",
task.getTaskCode());
}
validTaskMap.put(task.getTaskCode(), task.getId());
taskInstanceMap.put(task.getId(), task);
if (task.isTaskComplete()) {
completeTaskMap.put(task.getTaskCode(), task.getId());
continue;
}
if (task.isConditionsTask() || DagHelper.haveConditionsAfterNode(Long.toString(task.getTaskCode()),
dag)) {
continue;
}
if (task.taskCanRetry()) {
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
when we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task. When the shell task fails, it submits a `TASK_STATE_CHANGE` TaskEvent, and `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if its taskEventHandler throws an Exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0
So you can see the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop due to the TIMEOUT, not stay in an infinite loop while appearing to keep running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
if (task.getState().isNeedFaultTolerance()) {
task.setFlag(Flag.NO);
processService.updateTaskInstance(task);
TaskInstance tolerantTaskInstance = cloneTolerantTaskInstance(task);
addTaskToStandByList(tolerantTaskInstance);
} else {
retryTaskInstance(task);
}
continue;
}
if (task.getState().isFailure()) {
errorTaskMap.put(task.getTaskCode(), task.getId());
}
} finally {
LoggerUtils.removeWorkflowAndTaskInstanceIdMDC();
}
}
} else {
logger.info("The current workflowInstance is a newly running workflowInstance");
}
if (processInstance.isComplementData() && complementListDate.isEmpty()) {
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
if (cmdParam != null) {
setGlobalParamIfCommanded(processDefinition, cmdParam);
Date start = null;
Date end = null;
if (cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)
&& cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_END_DATE)) {
|
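The complement branch above reads a start and an end date out of the command parameter map before building `complementListDate`. A simplified sketch of that parsing step (the key strings and the date pattern here are illustrative stand-ins for the project's `CMDPARAM_COMPLEMENT_DATA_*` constants):

```java
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

public class ComplementParamSketch {

    // Illustrative key names; the real code uses CMDPARAM_COMPLEMENT_DATA_START_DATE / _END_DATE constants.
    static final String START_KEY = "complementStartDate";
    static final String END_KEY = "complementEndDate";
    static final DateTimeFormatter FORMAT = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");

    /** Returns the parsed [start, end] pair only when both keys are present. */
    static Optional<LocalDateTime[]> parseComplementRange(Map<String, String> cmdParam) {
        if (!cmdParam.containsKey(START_KEY) || !cmdParam.containsKey(END_KEY)) {
            return Optional.empty();
        }
        LocalDateTime start = LocalDateTime.parse(cmdParam.get(START_KEY), FORMAT);
        LocalDateTime end = LocalDateTime.parse(cmdParam.get(END_KEY), FORMAT);
        return Optional.of(new LocalDateTime[]{start, end});
    }

    public static void main(String[] args) {
        Map<String, String> cmdParam = new HashMap<>();
        cmdParam.put(START_KEY, "2022-09-01 00:00:00");
        cmdParam.put(END_KEY, "2022-09-03 00:00:00");
        System.out.println(parseComplementRange(cmdParam).isPresent()); // true
    }
}
```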
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
when we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task. When the shell task fails, it submits a `TASK_STATE_CHANGE` TaskEvent, and `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if its taskEventHandler throws an Exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0
So you can see the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop due to the TIMEOUT, not stay in an infinite loop while appearing to keep running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
start = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
end = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
}
if (complementListDate.isEmpty() && needComplementProcess()) {
if (start != null && end != null) {
List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionCode(
processInstance.getProcessDefinitionCode());
complementListDate = CronUtils.getSelfFireDateList(start, end, schedules);
}
if (cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_SCHEDULE_DATE_LIST)) {
complementListDate = CronUtils.getSelfScheduleDateList(cmdParam);
}
logger.info(" process definition code:{} complement data: {}",
processInstance.getProcessDefinitionCode(), complementListDate);
if (!complementListDate.isEmpty() && Flag.NO == processInstance.getIsSubProcess()) {
processInstance.setScheduleTime(complementListDate.get(0));
String globalParams = curingParamsService.curingGlobalParams(processInstance.getId(),
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
CommandType.COMPLEMENT_DATA,
processInstance.getScheduleTime(),
cmdParam.get(Constants.SCHEDULE_TIMEZONE));
processInstance.setGlobalParams(globalParams);
processInstanceDao.updateProcessInstance(processInstance);
}
}
}
}
logger.info("Initialize task queue, dependFailedTaskSet: {}, completeTaskMap: {}, errorTaskMap: {}",
dependFailedTaskSet,
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
when we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task. When the shell task fails, it submits a `TASK_STATE_CHANGE` TaskEvent, and `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if its taskEventHandler throws an Exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0
So you can see the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop due to the TIMEOUT, not stay in an infinite loop while appearing to keep running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
completeTaskMap,
errorTaskMap);
}
/**
* submit task to execute
*
* @param taskInstance task instance
* @return TaskInstance
*/
private Optional<TaskInstance> submitTaskExec(TaskInstance taskInstance) {
try {
processService.packageTaskInstance(taskInstance, processInstance);
ITaskProcessor taskProcessor = TaskProcessorFactory.getTaskProcessor(taskInstance.getTaskType());
taskProcessor.init(taskInstance, processInstance);
if (taskInstance.getState().isRunning()
&& taskProcessor.getType().equalsIgnoreCase(Constants.COMMON_TASK_TYPE)) {
notifyProcessHostUpdate(taskInstance);
}
boolean submit = taskProcessor.action(TaskAction.SUBMIT);
if (!submit) {
logger.error("process id:{} name:{} submit standby task id:{} name:{} failed!",
processInstance.getId(),
processInstance.getName(),
taskInstance.getId(),
taskInstance.getName());
return Optional.empty();
}
if (validTaskMap.containsKey(taskInstance.getTaskCode())) {
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
when we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task. When the shell task fails, it submits a `TASK_STATE_CHANGE` TaskEvent, and `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if its taskEventHandler throws an Exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0
So you can see the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop due to the TIMEOUT, not stay in an infinite loop while appearing to keep running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
int oldTaskInstanceId = validTaskMap.get(taskInstance.getTaskCode());
if (taskInstance.getId() != oldTaskInstanceId) {
TaskInstance oldTaskInstance = taskInstanceMap.get(oldTaskInstanceId);
oldTaskInstance.setFlag(Flag.NO);
processService.updateTaskInstance(oldTaskInstance);
validTaskMap.remove(taskInstance.getTaskCode());
activeTaskProcessorMaps.remove(taskInstance.getTaskCode());
}
}
validTaskMap.put(taskInstance.getTaskCode(), taskInstance.getId());
taskInstanceMap.put(taskInstance.getId(), taskInstance);
activeTaskProcessorMaps.put(taskInstance.getTaskCode(), taskProcessor);
int taskGroupId = taskInstance.getTaskGroupId();
if (taskGroupId > 0) {
boolean acquireTaskGroup = processService.acquireTaskGroup(taskInstance.getId(),
taskInstance.getName(),
taskGroupId,
taskInstance.getProcessInstanceId(),
taskInstance.getTaskGroupPriority());
if (!acquireTaskGroup) {
logger.info("submit task name :{}, but the first time to try to acquire task group failed",
taskInstance.getName());
return Optional.of(taskInstance);
}
}
boolean dispatchSuccess = taskProcessor.action(TaskAction.DISPATCH);
if (!dispatchSuccess) {
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
when we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task. When the shell task fails, it submits a `TASK_STATE_CHANGE` TaskEvent, and `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if its taskEventHandler throws an Exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0
So you can see the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop due to the TIMEOUT, not stay in an infinite loop while appearing to keep running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
logger.error("process d:{} name:{} dspatch standby task d:{} name:{} faled!",
processInstance.getId(),
processInstance.getName(),
taskInstance.getId(),
taskInstance.getName());
return Optonal.empty();
}
taskProcessor.acton(TaskActon.RUN);
stateWheelExecuteThread.addTask4TmeoutCheck(processInstance, taskInstance);
stateWheelExecuteThread.addTask4StateCheck(processInstance, taskInstance);
f (taskProcessor.taskInstance().getState().sFnshed()) {
f (processInstance.sBlocked()) {
TaskStateEvent processBlockEvent = TaskStateEvent.bulder()
.processInstanceId(processInstance.getId())
.taskInstanceId(taskInstance.getId())
.status(taskProcessor.taskInstance().getState())
.type(StateEventType.PROCESS_BLOCKED)
.buld();
ths.stateEvents.add(processBlockEvent);
}
TaskStateEvent taskStateChangeEvent = TaskStateEvent.bulder()
.processInstanceId(processInstance.getId())
.taskInstanceId(taskInstance.getId())
.status(taskProcessor.taskInstance().getState())
.type(StateEventType.TASK_STATE_CHANGE)
.buld();
ths.stateEvents.add(taskStateChangeEvent);
}
return Optonal.of(taskInstance);
} catch (Excepton e) {
|
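The chunk above queues `TaskStateEvent`s through a builder. A minimal stand-alone builder in the same shape (the field names are copied from the chunk; the types and everything else are illustrative):

```java
public class TaskStateEventSketch {

    private final int processInstanceId;
    private final int taskInstanceId;
    private final String status;
    private final String type;

    private TaskStateEventSketch(Builder builder) {
        this.processInstanceId = builder.processInstanceId;
        this.taskInstanceId = builder.taskInstanceId;
        this.status = builder.status;
        this.type = builder.type;
    }

    public static Builder builder() {
        return new Builder();
    }

    @Override
    public String toString() {
        return "TaskStateEventSketch{" + processInstanceId + "," + taskInstanceId + "," + status + "," + type + "}";
    }

    public static class Builder {
        private int processInstanceId;
        private int taskInstanceId;
        private String status;
        private String type;

        public Builder processInstanceId(int id) { this.processInstanceId = id; return this; }
        public Builder taskInstanceId(int id) { this.taskInstanceId = id; return this; }
        public Builder status(String status) { this.status = status; return this; }
        public Builder type(String type) { this.type = type; return this; }
        public TaskStateEventSketch build() { return new TaskStateEventSketch(this); }
    }
}
```

Usage would look like `TaskStateEventSketch.builder().processInstanceId(1).taskInstanceId(2).status("SUCCESS").type("TASK_STATE_CHANGE").build()`, mirroring the calls in the chunk.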
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
when we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task. When the shell task fails, it submits a `TASK_STATE_CHANGE` TaskEvent, and `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if its taskEventHandler throws an Exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0
So you can see the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop due to the TIMEOUT, not stay in an infinite loop while appearing to keep running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
logger.error("submt standby task error, taskCode: {}, taskInstanceId: {}",
taskInstance.getTaskCode(),
taskInstance.getId(),
e);
return Optonal.empty();
}
}
prvate vod notfyProcessHostUpdate(TaskInstance taskInstance) {
f (StrngUtls.sEmpty(taskInstance.getHost())) {
return;
}
try {
HostUpdateCommand hostUpdateCommand = new HostUpdateCommand();
hostUpdateCommand.setProcessHost(masterAddress);
hostUpdateCommand.setTaskInstanceId(taskInstance.getId());
Host host = new Host(taskInstance.getHost());
nettyExecutorManager.doExecute(host, hostUpdateCommand.convert2Command());
} catch (Excepton e) {
logger.error("notfy process host update", e);
}
}
/**
* fnd task nstance n db.
* n case submt more than one same name task n the same tme.
*
* @param taskCode task code
* @param taskVerson task verson
* @return TaskInstance
*/
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
when we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task. When the shell task fails, it submits a `TASK_STATE_CHANGE` TaskEvent, and `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if its taskEventHandler throws an Exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0
So you can see the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop due to the TIMEOUT, not stay in an infinite loop while appearing to keep running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
private TaskInstance findTaskIfExists(Long taskCode, int taskVersion) {
List<TaskInstance> validTaskInstanceList = getValidTaskList();
for (TaskInstance taskInstance : validTaskInstanceList) {
if (taskInstance.getTaskCode() == taskCode && taskInstance.getTaskDefinitionVersion() == taskVersion) {
return taskInstance;
}
}
return null;
}
/**
* encapsulation task, this method will only create a new task instance, the return task instance will not contain id.
*
* @param processInstance process instance
* @param taskNode taskNode
* @return TaskInstance
*/
private TaskInstance createTaskInstance(ProcessInstance processInstance, TaskNode taskNode) {
TaskInstance taskInstance = findTaskIfExists(taskNode.getCode(), taskNode.getVersion());
if (taskInstance != null) {
return taskInstance;
}
return newTaskInstance(processInstance, taskNode);
}
/**
* clone a new taskInstance for retry and reset some logic fields
*
* @return
*/
public TaskInstance cloneRetryTaskInstance(TaskInstance taskInstance) {
TaskNode taskNode = dag.getNode(Long.toString(taskInstance.getTaskCode()));
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
when we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task. When the shell task fails, it submits a `TASK_STATE_CHANGE` TaskEvent, and `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if its taskEventHandler throws an Exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0
So you can see the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop due to the TIMEOUT, not stay in an infinite loop while appearing to keep running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
if (taskNode == null) {
logger.error("taskNode is null, code:{}", taskInstance.getTaskCode());
return null;
}
TaskInstance newTaskInstance = newTaskInstance(processInstance, taskNode);
newTaskInstance.setTaskDefine(taskInstance.getTaskDefine());
newTaskInstance.setProcessDefine(taskInstance.getProcessDefine());
newTaskInstance.setProcessInstance(processInstance);
newTaskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
newTaskInstance.setState(taskInstance.getState());
newTaskInstance.setEndTime(taskInstance.getEndTime());
if (taskInstance.getState() == TaskExecutionStatus.NEED_FAULT_TOLERANCE) {
newTaskInstance.setAppLink(taskInstance.getAppLink());
}
return newTaskInstance;
}
/**
* clone a new taskInstance for tolerant and reset some logic fields
*
* @return
*/
public TaskInstance cloneTolerantTaskInstance(TaskInstance taskInstance) {
TaskNode taskNode = dag.getNode(Long.toString(taskInstance.getTaskCode()));
if (taskNode == null) {
logger.error("taskNode is null, code:{}", taskInstance.getTaskCode());
return null;
}
TaskInstance newTaskInstance = newTaskInstance(processInstance, taskNode);
newTaskInstance.setTaskDefine(taskInstance.getTaskDefine());
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
when we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task. When the shell task fails, it submits a `TASK_STATE_CHANGE` TaskEvent, and `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if its taskEventHandler throws an Exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0
So you can see the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop due to the TIMEOUT, not stay in an infinite loop while appearing to keep running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
newTaskInstance.setProcessDefine(taskInstance.getProcessDefine());
newTaskInstance.setProcessInstance(processInstance);
newTaskInstance.setRetryTimes(taskInstance.getRetryTimes());
newTaskInstance.setState(taskInstance.getState());
newTaskInstance.setAppLink(taskInstance.getAppLink());
return newTaskInstance;
}
/**
* new a taskInstance
*
* @param processInstance
* @param taskNode
* @return
*/
public TaskInstance newTaskInstance(ProcessInstance processInstance, TaskNode taskNode) {
TaskInstance taskInstance = new TaskInstance();
taskInstance.setTaskCode(taskNode.getCode());
taskInstance.setTaskDefinitionVersion(taskNode.getVersion());
taskInstance.setName(taskNode.getName());
taskInstance.setState(TaskExecutionStatus.SUBMITTED_SUCCESS);
taskInstance.setProcessInstanceId(processInstance.getId());
taskInstance.setTaskType(taskNode.getType().toUpperCase());
taskInstance.setAlertFlag(Flag.NO);
taskInstance.setStartTime(null);
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
when we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task. When the shell task fails, it submits a `TASK_STATE_CHANGE` TaskEvent, and `TaskStateEventHandler` then calls `WorkflowExecuteRunnable.taskFinished` and removes the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if its taskEventHandler throws an Exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100ms, because the event count is > 0
So you can see the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop due to the TIMEOUT, not stay in an infinite loop while appearing to keep running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
    taskInstance.setFlag(Flag.YES);
    taskInstance.setDryRun(processInstance.getDryRun());
    taskInstance.setRetryTimes(0);
    taskInstance.setMaxRetryTimes(taskNode.getMaxRetryTimes());
    taskInstance.setRetryInterval(taskNode.getRetryInterval());
    taskInstance.setTaskParams(taskNode.getTaskParams());
    taskInstance.setTaskGroupId(taskNode.getTaskGroupId());
    taskInstance.setTaskGroupPriority(taskNode.getTaskGroupPriority());
    taskInstance.setCpuQuota(taskNode.getCpuQuota());
    taskInstance.setMemoryMax(taskNode.getMemoryMax());
    if (taskNode.getTaskInstancePriority() == null) {
        taskInstance.setTaskInstancePriority(Priority.MEDIUM);
    } else {
        taskInstance.setTaskInstancePriority(taskNode.getTaskInstancePriority());
    }
    String processWorkerGroup = processInstance.getWorkerGroup();
    processWorkerGroup = StringUtils.isBlank(processWorkerGroup) ? DEFAULT_WORKER_GROUP : processWorkerGroup;
    String taskWorkerGroup =
            StringUtils.isBlank(taskNode.getWorkerGroup()) ? processWorkerGroup : taskNode.getWorkerGroup();
    Long processEnvironmentCode =
            Objects.isNull(processInstance.getEnvironmentCode()) ? -1 : processInstance.getEnvironmentCode();
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the taskEventHandler throws an exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100 ms, because the event count is greater than 0.
So you can find the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
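For illustration, the snippet below sketches the kind of guard a timeout handler can apply before dereferencing the processor it looked up. The names here (`activeTaskProcessorMaps`, the `Runnable` stand-in for a task processor) are assumptions for this sketch only, and it is not taken from the linked PR:
```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class TimeoutGuardSketch {

    // Stand-in for the per-workflow map of running task processors.
    private final Map<Long, Runnable> activeTaskProcessorMaps = new ConcurrentHashMap<>();

    /**
     * Handle a TASK_TIMEOUT style event. Returns true when the event can be
     * dropped from the queue, so a missing processor does not wedge the loop.
     */
    public boolean handleTaskTimeout(long taskCode) {
        Runnable taskProcessor = activeTaskProcessorMaps.get(taskCode);
        if (taskProcessor == null) {
            // The task already finished (or is waiting to retry); there is
            // nothing left to time out, so treat the event as handled.
            System.out.println("skip timeout, no active processor for task " + taskCode);
            return true;
        }
        taskProcessor.run(); // stand-in for something like taskProcessor.action(TIMEOUT)
        return true;
    }

    public static void main(String[] args) {
        TimeoutGuardSketch sketch = new TimeoutGuardSketch();
        // No processor registered for task 123, yet no NPE and the event is consumed.
        System.out.println("handled: " + sketch.handleTaskTimeout(123L));
    }
}
```
The point of the guard is that the event is always consumed, even when the processor is already gone, so the queue cannot get stuck on it.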
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of looping forever while appearing to be still running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
    Long taskEnvironmentCode =
            Objects.isNull(taskNode.getEnvironmentCode()) ? processEnvironmentCode : taskNode.getEnvironmentCode();
    if (!processWorkerGroup.equals(DEFAULT_WORKER_GROUP) && taskWorkerGroup.equals(DEFAULT_WORKER_GROUP)) {
        taskInstance.setWorkerGroup(processWorkerGroup);
        taskInstance.setEnvironmentCode(processEnvironmentCode);
    } else {
        taskInstance.setWorkerGroup(taskWorkerGroup);
        taskInstance.setEnvironmentCode(taskEnvironmentCode);
    }
    if (!taskInstance.getEnvironmentCode().equals(-1L)) {
        Environment environment = processService.findEnvironmentByCode(taskInstance.getEnvironmentCode());
        if (Objects.nonNull(environment) && StringUtils.isNotEmpty(environment.getConfig())) {
            taskInstance.setEnvironmentConfig(environment.getConfig());
        }
    }
    taskInstance.setDelayTime(taskNode.getDelayTime());
    taskInstance.setTaskExecuteType(taskNode.getTaskExecuteType());
    return taskInstance;
}
public void getPreVarPool(TaskInstance taskInstance, Set<String> preTask) {
    Map<String, Property> allProperty = new HashMap<>();
    Map<String, TaskInstance> allTaskInstance = new HashMap<>();
    if (CollectionUtils.isNotEmpty(preTask)) {
        for (String preTaskCode : preTask) {
            Integer taskId = completeTaskMap.get(Long.parseLong(preTaskCode));
            if (taskId == null) {
                continue;
            }
            TaskInstance preTaskInstance = taskInstanceMap.get(taskId);
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the taskEventHandler throws an exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100 ms, because the event count is greater than 0.
So you can find the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
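The other half of the problem is the periodic checker that keeps raising TIMEOUT for a task that has already finished and been deregistered. The following is a deliberately simplified, hypothetical time-wheel style checker (none of these names come from the DolphinScheduler source); it only illustrates why a finished task has to be removed from the check list, otherwise stale TIMEOUT events keep getting submitted:
```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Consumer;

public class TimeoutCheckerSketch {

    private final Map<Long, Long> timeoutCheckList = new ConcurrentHashMap<>(); // taskCode -> deadline millis
    private final Consumer<Long> timeoutEventSubmitter;

    public TimeoutCheckerSketch(Consumer<Long> timeoutEventSubmitter) {
        this.timeoutEventSubmitter = timeoutEventSubmitter;
    }

    public void addTask(long taskCode, long timeoutMillis) {
        timeoutCheckList.put(taskCode, System.currentTimeMillis() + timeoutMillis);
    }

    /** Must be called when the task finishes, otherwise stale TIMEOUT events keep being submitted. */
    public void removeTask(long taskCode) {
        timeoutCheckList.remove(taskCode);
    }

    /** One tick of the periodic check. */
    public void check() {
        long now = System.currentTimeMillis();
        timeoutCheckList.forEach((taskCode, deadline) -> {
            if (now >= deadline) {
                timeoutEventSubmitter.accept(taskCode); // would enqueue a TASK_TIMEOUT event
            }
        });
    }

    public static void main(String[] args) throws InterruptedException {
        TimeoutCheckerSketch checker = new TimeoutCheckerSketch(
                taskCode -> System.out.println("submit TASK_TIMEOUT for task " + taskCode));
        checker.addTask(1L, 10);      // 10 ms timeout
        Thread.sleep(20);
        checker.check();              // fires, because the task was never removed
        checker.removeTask(1L);       // what should happen once the task is finished
        checker.check();              // no stale event this time
    }
}
```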
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of looping forever while appearing to be still running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
            if (preTaskInstance == null) {
                continue;
            }
            String preVarPool = preTaskInstance.getVarPool();
            if (StringUtils.isNotEmpty(preVarPool)) {
                List<Property> properties = JSONUtils.toList(preVarPool, Property.class);
                for (Property info : properties) {
                    setVarPoolValue(allProperty, allTaskInstance, preTaskInstance, info);
                }
            }
        }
        if (allProperty.size() > 0) {
            taskInstance.setVarPool(JSONUtils.toJsonString(allProperty.values()));
        }
    } else {
        if (StringUtils.isNotEmpty(processInstance.getVarPool())) {
            taskInstance.setVarPool(processInstance.getVarPool());
        }
    }
}
public Collection<TaskInstance> getAllTaskInstances() {
    return taskInstanceMap.values();
}
private void setVarPoolValue(Map<String, Property> allProperty, Map<String, TaskInstance> allTaskInstance,
                             TaskInstance preTaskInstance, Property thisProperty) {
    thisProperty.setDirect(Direct.IN);
    String proName = thisProperty.getProp();
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the taskEventHandler throws an exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100 ms, because the event count is greater than 0.
So you can find the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of looping forever while appearing to be still running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
    if (allProperty.containsKey(proName)) {
        Property otherPro = allProperty.get(proName);
        if (StringUtils.isEmpty(thisProperty.getValue())) {
            allProperty.put(proName, otherPro);
        } else if (StringUtils.isNotEmpty(otherPro.getValue())) {
            TaskInstance otherTask = allTaskInstance.get(proName);
            if (otherTask.getEndTime().getTime() > preTaskInstance.getEndTime().getTime()) {
                allProperty.put(proName, thisProperty);
                allTaskInstance.put(proName, preTaskInstance);
            } else {
                allProperty.put(proName, otherPro);
            }
        } else {
            allProperty.put(proName, thisProperty);
            allTaskInstance.put(proName, preTaskInstance);
        }
    } else {
        allProperty.put(proName, thisProperty);
        allTaskInstance.put(proName, preTaskInstance);
    }
}
/**
 * get complete task instance map, taskCode as key
 */
private Map<String, TaskInstance> getCompleteTaskInstanceMap() {
    Map<String, TaskInstance> completeTaskInstanceMap = new HashMap<>();
    for (Map.Entry<Long, Integer> entry : completeTaskMap.entrySet()) {
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the taskEventHandler throws an exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100 ms, because the event count is greater than 0.
So you can find the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of looping forever while appearing to be still running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
        Long taskCode = entry.getKey();
        Integer taskInstanceId = entry.getValue();
        TaskInstance taskInstance = taskInstanceMap.get(taskInstanceId);
        if (taskInstance == null) {
            logger.warn("Cannot find the taskInstance from taskInstanceMap, taskInstanceId: {}, taskCode: {}",
                    taskInstanceId,
                    taskCode);
            continue;
        }
        completeTaskInstanceMap.put(Long.toString(taskInstance.getTaskCode()), taskInstance);
    }
    return completeTaskInstanceMap;
}
/**
 * get valid task list
 */
private List<TaskInstance> getValidTaskList() {
    List<TaskInstance> validTaskInstanceList = new ArrayList<>();
    for (Integer taskInstanceId : validTaskMap.values()) {
        validTaskInstanceList.add(taskInstanceMap.get(taskInstanceId));
    }
    return validTaskInstanceList;
}
private void submitPostNode(String parentNodeCode) throws StateEventHandleException {
    Set<String> submitTaskNodeList =
            DagHelper.parsePostNodes(parentNodeCode, skipTaskNodeMap, dag, getCompleteTaskInstanceMap());
    List<TaskInstance> taskInstances = new ArrayList<>();
    for (String taskNode : submitTaskNodeList) {
        TaskNode taskNodeObject = dag.getNode(taskNode);
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the taskEventHandler throws an exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100 ms, because the event count is greater than 0.
So you can find the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of looping forever while appearing to be still running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
        Optional<TaskInstance> existTaskInstanceOptional = getTaskInstance(taskNodeObject.getCode());
        if (existTaskInstanceOptional.isPresent()) {
            taskInstances.add(existTaskInstanceOptional.get());
            continue;
        }
        TaskInstance task = createTaskInstance(processInstance, taskNodeObject);
        taskInstances.add(task);
    }
    if (StringUtils.isNotEmpty(parentNodeCode) && dag.getEndNode().contains(parentNodeCode)) {
        TaskInstance endTaskInstance = taskInstanceMap.get(completeTaskMap.get(NumberUtils.toLong(parentNodeCode)));
        String taskInstanceVarPool = endTaskInstance.getVarPool();
        if (StringUtils.isNotEmpty(taskInstanceVarPool)) {
            Set<Property> taskProperties = new HashSet<>(JSONUtils.toList(taskInstanceVarPool, Property.class));
            String processInstanceVarPool = processInstance.getVarPool();
            if (StringUtils.isNotEmpty(processInstanceVarPool)) {
                Set<Property> properties = new HashSet<>(JSONUtils.toList(processInstanceVarPool, Property.class));
                properties.addAll(taskProperties);
                processInstance.setVarPool(JSONUtils.toJsonString(properties));
            } else {
                processInstance.setVarPool(JSONUtils.toJsonString(taskProperties));
            }
        }
    }
    for (TaskInstance task : taskInstances) {
        if (readyToSubmitTaskQueue.contains(task)) {
            logger.warn("Task is already at submit queue, taskInstanceId: {}", task.getId());
            continue;
        }
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the taskEventHandler throws an exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100 ms, because the event count is greater than 0.
So you can find the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of looping forever while appearing to be still running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
        if (task.getId() != null && completeTaskMap.containsKey(task.getTaskCode())) {
            logger.info("task {} has already run success", task.getName());
            continue;
        }
        if (task.getState().isKill()) {
            logger.info("task {} stopped, the state is {}", task.getName(), task.getState());
            continue;
        }
        addTaskToStandByList(task);
    }
    submitStandByTask();
    updateProcessInstanceState();
}
/**
 * determine whether the dependencies of the task node are complete
 *
 * @return DependResult
 */
private DependResult isTaskDepsComplete(String taskCode) {
    Collection<String> startNodes = dag.getBeginNode();
    if (startNodes.contains(taskCode)) {
        return DependResult.SUCCESS;
    }
    TaskNode taskNode = dag.getNode(taskCode);
    List<String> indirectDepCodeList = new ArrayList<>();
    setIndirectDepList(taskCode, indirectDepCodeList);
    for (String depsNode : indirectDepCodeList) {
        if (dag.containsNode(depsNode) && !skipTaskNodeMap.containsKey(depsNode)) {
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the taskEventHandler throws an exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100 ms, because the event count is greater than 0.
So you can find the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of looping forever while appearing to be still running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
            Long depsNodeTaskCode = Long.parseLong(depsNode);
            if (!completeTaskMap.containsKey(depsNodeTaskCode)) {
                return DependResult.WAITING;
            }
            Integer depsTaskId = completeTaskMap.get(depsNodeTaskCode);
            TaskExecutionStatus depTaskState = taskInstanceMap.get(depsTaskId).getState();
            if (depTaskState.isKill()) {
                return DependResult.NON_EXEC;
            }
            if (taskNode.isBlockingTask()) {
                continue;
            }
            if (taskNode.isConditionsTask()) {
                continue;
            }
            if (!dependTaskSuccess(depsNode, taskCode)) {
                return DependResult.FAILED;
            }
        }
    }
    logger.info("taskCode: {} completeDependTaskList: {}", taskCode,
            Arrays.toString(completeTaskMap.keySet().toArray()));
    return DependResult.SUCCESS;
}
/**
 * This function is specially used to handle the dependency situation where the parent node is a prohibited node.
 * When the parent node is a forbidden node, the dependency relationship should continue to be traced
 *
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the taskEventHandler throws an exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100 ms, because the event count is greater than 0.
So you can find the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of looping forever while appearing to be still running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
 * @param taskCode taskCode
 * @param indirectDepCodeList All indirectly dependent nodes
 */
private void setIndirectDepList(String taskCode, List<String> indirectDepCodeList) {
    TaskNode taskNode = dag.getNode(taskCode);
    List<String> depCodeList = taskNode.getDepList();
    for (String depsNode : depCodeList) {
        if (forbiddenTaskMap.containsKey(Long.parseLong(depsNode))) {
            setIndirectDepList(depsNode, indirectDepCodeList);
        } else {
            indirectDepCodeList.add(depsNode);
        }
    }
}
/**
 * depend node is completed, but here need check the condition task branch is the next node
 */
private boolean dependTaskSuccess(String dependNodeName, String nextNodeName) {
    if (dag.getNode(dependNodeName).isConditionsTask()) {
        List<String> nextTaskList =
                DagHelper.parseConditionTask(dependNodeName, skipTaskNodeMap, dag, getCompleteTaskInstanceMap());
        if (!nextTaskList.contains(nextNodeName)) {
            return false;
        }
    } else {
        long taskCode = Long.parseLong(dependNodeName);
        Integer taskInstanceId = completeTaskMap.get(taskCode);
        TaskExecutionStatus depTaskState = taskInstanceMap.get(taskInstanceId).getState();
        if (depTaskState.isFailure()) {
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the taskEventHandler throws an exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100 ms, because the event count is greater than 0.
So you can find the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of looping forever while appearing to be still running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
            return false;
        }
    }
    return true;
}
/**
 * query task instance by complete state
 *
 * @param state state
 * @return task instance list
 */
private List<TaskInstance> getCompleteTaskByState(TaskExecutionStatus state) {
    List<TaskInstance> resultList = new ArrayList<>();
    for (Integer taskInstanceId : completeTaskMap.values()) {
        TaskInstance taskInstance = taskInstanceMap.get(taskInstanceId);
        if (taskInstance != null && taskInstance.getState() == state) {
            resultList.add(taskInstance);
        }
    }
    return resultList;
}
/**
 * where there are ongoing tasks
 *
 * @param state state
 * @return ExecutionStatus
 */
private WorkflowExecutionStatus runningState(WorkflowExecutionStatus state) {
    if (state == WorkflowExecutionStatus.READY_STOP || state == WorkflowExecutionStatus.READY_PAUSE
            || state == WorkflowExecutionStatus.READY_BLOCK ||
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the taskEventHandler throws an exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100 ms, because the event count is greater than 0.
So you can find the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of looping forever while appearing to be still running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
            state == WorkflowExecutionStatus.DELAY_EXECUTION) {
        return state;
    } else {
        return WorkflowExecutionStatus.RUNNING_EXECUTION;
    }
}
/**
 * exists failure task, contains submit failure, dependency failure, execute failure (retry after)
 *
 * @return Boolean whether has failed task
 */
private boolean hasFailedTask() {
    if (this.taskFailedSubmit) {
        return true;
    }
    if (this.errorTaskMap.size() > 0) {
        return true;
    }
    return this.dependFailedTaskSet.size() > 0;
}
/**
 * process instance failure
 *
 * @return Boolean whether process instance failed
 */
private boolean processFailed() {
    if (hasFailedTask()) {
        logger.info("The current process has failed task, the current process failed");
        if (processInstance.getFailureStrategy() == FailureStrategy.END) {
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the taskEventHandler throws an exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100 ms, because the event count is greater than 0.
So you can find the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of looping forever while appearing to be still running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
            return true;
        }
        if (processInstance.getFailureStrategy() == FailureStrategy.CONTINUE) {
            return readyToSubmitTaskQueue.size() == 0 && activeTaskProcessorMaps.size() == 0
                    && waitToRetryTaskInstanceMap.size() == 0;
        }
    }
    return false;
}
/**
 * prepare for pause
 * 1, failed retry task in the preparation queue, returns to failure directly
 * 2, exists pause task, complement not completed, pending submission of tasks, return to suspension
 * 3, success
 *
 * @return ExecutionStatus
 */
private WorkflowExecutionStatus processReadyPause() {
    if (hasRetryTaskInStandBy()) {
        return WorkflowExecutionStatus.FAILURE;
    }
    List<TaskInstance> pauseList = getCompleteTaskByState(TaskExecutionStatus.PAUSE);
    if (CollectionUtils.isNotEmpty(pauseList) || processInstance.isBlocked() || !isComplementEnd()
            || readyToSubmitTaskQueue.size() > 0) {
        return WorkflowExecutionStatus.PAUSE;
    } else {
        return WorkflowExecutionStatus.SUCCESS;
    }
}
/**
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 11,838 |
[Bug] [Master] WorkflowExecuteRunnable will face a infinite loop
|
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
### What happened
When we configure the `Timeout period` and the `Failed retry interval` at the same time in a Shell task and the task fails, it will submit a `TASK_STATE_CHANGE` TaskEvent, and then `TaskStateEventHandler` will call `WorkflowExecuteRunnable.taskFinished` and remove the _<taskInstanceCode,TaskProcessor>_ entry from `activeTaskProcessorMaps`.

Then `StateWheelExecuteThread` will submit a `TASK_TIMEOUT` and a `TASK_RETRY` TaskEvent.
But `TaskTimeoutStateEventHandler` will query a TaskProcessor from `activeTaskProcessorMaps` to process the TIMEOUT, which causes an NPE:

And `WorkflowExecuteRunnable` will not remove this TaskEvent if the taskEventHandler throws an exception, so it keeps looping without making any progress, just an infinite loop.

Besides, `WorkflowExecuteThreadPool.executeEvent` will keep printing `The workflow has been executed by another thread` every 100 ms, because the event count is greater than 0.
So you can find the master log printing the NPE and `The workflow has been executed by another thread` at the same time, over and over.
This bug may cause #11796
But I think this needs to be discussed separately
### What you expected to happen
The process instance should stop because of the TIMEOUT instead of looping forever while appearing to be still running.
### How to reproduce
just create a shell task like the following screenshot:

### Anything else
_No response_
### Version
dev
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
|
https://github.com/apache/dolphinscheduler/issues/11838
|
https://github.com/apache/dolphinscheduler/pull/11864
|
dec6197a6788cef027dd9f58a7a4958622534065
|
e938fdbe968ac88f184175f3ef4d0b2a7836c3ea
| 2022-09-07T13:57:28Z |
java
| 2022-09-15T01:46:30Z |
dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteRunnable.java
|
 * prepare for block
 * if process has tasks still running, pause them
 * if readyToSubmitTaskQueue is not empty, kill them
 * else return block status directly
 *
 * @return ExecutionStatus
 */
private WorkflowExecutionStatus processReadyBlock() {
    if (activeTaskProcessorMaps.size() > 0) {
        for (ITaskProcessor taskProcessor : activeTaskProcessorMaps.values()) {
            if (!TASK_TYPE_BLOCKING.equals(taskProcessor.getType())) {
                taskProcessor.action(TaskAction.PAUSE);
            }
        }
    }
    if (readyToSubmitTaskQueue.size() > 0) {
        for (Iterator<TaskInstance> iter = readyToSubmitTaskQueue.iterator(); iter.hasNext();) {
            iter.next().setState(TaskExecutionStatus.PAUSE);
        }
    }
    return WorkflowExecutionStatus.BLOCK;
}
/**
 * generate the latest process instance status by the tasks state
 *
 * @return process instance execution status
 */
private WorkflowExecutionStatus getProcessInstanceState(ProcessInstance instance) {
    WorkflowExecutionStatus state = instance.getState();
    if (activeTaskProcessorMaps.size() > 0 || hasRetryTaskInStandBy()) {
|