column            type            values / length range
status            stringclasses   1 value
repo_name         stringclasses   31 values
repo_url          stringclasses   31 values
issue_id          int64           1 to 104k
title             stringlengths   4 to 233
body              stringlengths   0 to 186k
issue_url         stringlengths   38 to 56
pull_url          stringlengths   37 to 54
before_fix_sha    stringlengths   40 to 40
after_fix_sha     stringlengths   40 to 40
report_datetime   unknown
language          stringclasses   5 values
commit_datetime   unknown
updated_file      stringlengths   7 to 188
chunk_content     stringlengths   1 to 1.03M
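Each row below follows this schema: one GitHub issue, the pull request that fixed it, and one chunk of one file updated by the fix. As a rough orientation aid (the type name and Java types are my own assumptions, not part of the dataset), a row could be modeled like this:

```java
// Illustrative only: a plain Java model of one dataset row.
// Field names mirror the columns above; types are assumptions
// (issue_id as long, datetimes kept as ISO-8601 strings).
public record IssueFixChunkRow(
        String status,            // e.g. "closed"
        String repoName,          // e.g. "apache/dolphinscheduler"
        String repoUrl,
        long issueId,
        String title,
        String body,              // full GitHub issue body (markdown)
        String issueUrl,
        String pullUrl,
        String beforeFixSha,      // 40-character commit SHA before the fix
        String afterFixSha,       // 40-character commit SHA after the fix
        String reportDatetime,    // e.g. "2021-05-18T14:03:18Z"
        String language,          // e.g. "java"
        String commitDatetime,
        String updatedFile,       // path of the file changed by the fix
        String chunkContent       // one chunk of the updated file's content
) { }
```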
status: closed
repo_name: apache/dolphinscheduler
repo_url: https://github.com/apache/dolphinscheduler
issue_id: 5519
title: [Feature][JsonSplit-api]executors interface
body: from #5498 Change the request parameter processDefinitionId to processDefinitionCode, including the front end and controller interface
issue_url: https://github.com/apache/dolphinscheduler/issues/5519
pull_url: https://github.com/apache/dolphinscheduler/pull/5863
before_fix_sha: 8f0c400ee094e9f93fd74e9d09f6258903f56d91
after_fix_sha: c5bc4fc48e67d3e8e1b40157403c8c4017ffae57
report_datetime: 2021-05-18T14:03:18Z
language: java
commit_datetime: 2021-07-20T09:22:10Z
updated_file: dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ExecutorService2Test.java
chunk_content:

    public void testParallelWithSchedule() {
        Mockito.when(processService.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId)).thenReturn(oneSchedulerList());
        Map<String, Object> result = executorService.execProcessInstance(loginUser, projectName, processDefinitionId, cronTime, CommandType.COMPLEMENT_DATA,
                null, null, null, null, 0,
                RunMode.RUN_MODE_PARALLEL,
                Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 110, null);
        Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
        verify(processService, times(15)).createCommand(any(Command.class));
    }

    @Test
    public void testNoMsterServers() {
        Mockito.when(monitorService.getServerListFromRegistry(true)).thenReturn(new ArrayList<>());
        Map<String, Object> result = executorService.execProcessInstance(loginUser, projectName, processDefinitionId, cronTime, CommandType.COMPLEMENT_DATA,
                null, null, null, null, 0,
                RunMode.RUN_MODE_PARALLEL,
                Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 110, null);
        Assert.assertEquals(result.get(Constants.STATUS), Status.MASTER_NOT_EXISTS);
    }

    @Test
    public void testExecuteRepeatRunning() {
        Mockito.when(processService.verifyIsNeedCreateCommand(any(Command.class))).thenReturn(true);
        Map<String, Object> result = executorService.execute(loginUser, projectName, processInstanceId, ExecuteType.REPEAT_RUNNING);
        Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
    }

    private List<Server> getMasterServersList() {
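The issue body above asks for the executor endpoints to accept a processDefinitionCode instead of a processDefinitionId, and the test chunk still calls execProcessInstance with the old id parameter. A minimal hypothetical sketch of the kind of signature migration being requested (names are illustrative; this is not the code from PR #5863):

```java
import java.util.Map;

// Hypothetical sketch only — not taken from PR #5863. It shows the shape of the
// requested change: the executor entry point keys the workflow by its stable
// 64-bit "code" (which survives the JSON split) instead of the database id.
public interface ExecutorApi {

    /** Before: the workflow was addressed by its auto-increment id. */
    @Deprecated
    Map<String, Object> execProcessInstance(int processDefinitionId, String cronTime);

    /** After: the workflow is addressed by its definition code. */
    Map<String, Object> execProcessInstance(long processDefinitionCode, String cronTime);
}
```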
status: closed
issue_id: 5519  (same repository, title, body, URLs, SHAs, and datetimes as the row above)
updated_file: dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ExecutorService2Test.java
chunk_content:
        List<Server> masterServerList = new ArrayList<>();
        Server masterServer1 = new Server();
        masterServer1.setId(1);
        masterServer1.setHost("192.168.220.188");
        masterServer1.setPort(1121);
        masterServerList.add(masterServer1);

        Server masterServer2 = new Server();
        masterServer2.setId(2);
        masterServer2.setHost("192.168.220.189");
        masterServer2.setPort(1122);
        masterServerList.add(masterServer2);

        return masterServerList;
    }

    private List zeroSchedulerList() {
        return Collections.EMPTY_LIST;
    }

    private List<Schedule> oneSchedulerList() {
        List<Schedule> schedulerList = new LinkedList<>();
        Schedule schedule = new Schedule();
        schedule.setCrontab("0 0 0 1/2 * ?");
        schedulerList.add(schedule);
        return schedulerList;
    }

    private Map<String, Object> checkProjectAndAuth() {
        Map<String, Object> result = new HashMap<>();
        result.put(Constants.STATUS, Status.SUCCESS);
        return result;
    }
}
status: closed
issue_id: 5519  (same repository, title, body, URLs, SHAs, and datetimes as the rows above)
updated_file: dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ExecutorServiceTest.java
chunk_content:
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.service;

import org.apache.dolphinscheduler.api.controller.AbstractControllerTest;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.impl.ExecutorServiceImpl;
import org.apache.dolphinscheduler.common.Constants;

import java.text.MessageFormat;
import java.util.HashMap;
import java.util.Map;

import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Test;
import org.slf4j.Logger;
status: closed
issue_id: 5519  (same repository, title, body, URLs, SHAs, and datetimes as the rows above)
updated_file: dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ExecutorServiceTest.java
chunk_content:
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;

/**
 * executor service test
 */
public class ExecutorServiceTest extends AbstractControllerTest {

    private static final Logger logger = LoggerFactory.getLogger(ExecutorServiceTest.class);

    @Autowired
    private ExecutorServiceImpl executorService;

    @Ignore
    @Test
    public void startCheckByProcessDefinedId() {
        Map<String, Object> map = executorService.startCheckByProcessDefinedId(1234);
        Assert.assertNull(map);
    }

    @Test
    public void putMsgWithParamsTest() {
        Map<String, Object> map = new HashMap<>();
        putMsgWithParams(map, Status.PROJECT_ALREADY_EXISTS);
        logger.info(map.toString());
    }

    void putMsgWithParams(Map<String, Object> result, Status status, Object... statusParams) {
        result.put(Constants.STATUS, status);
        if (statusParams != null && statusParams.length > 0) {
            result.put(Constants.MSG, MessageFormat.format(status.getMsg(), statusParams));
        } else {
            result.put(Constants.MSG, status.getMsg());
        }
    }
}
status: closed
repo_name: apache/dolphinscheduler
repo_url: https://github.com/apache/dolphinscheduler
issue_id: 5808
title: [Bug][Server] When we try to transfer data using datax between different types of data sources, the worker will exit with ClassCastException
body:
**Describe the bug**
When we try to transfer data using datax between different types of data sources, the worker will exit with ClassCastException
![image](https://user-images.githubusercontent.com/52202080/125315605-e0f29e80-e369-11eb-8b9c-a94d8faa9eda.png)

```java
[INFO] 2021-07-12 23:36:28.682 - [taskAppId=TASK-545173233664_3-3-3]:[464] - try to execute sql analysis query column name
[ERROR] 2021-07-12 23:36:28.688 org.apache.dolphinscheduler.server.worker.runner.TaskExecuteThread:[171] - task scheduler failure
java.lang.RuntimeException: java.lang.ClassCastException: org.apache.dolphinscheduler.common.datasource.clickhouse.ClickhouseConnectionParam cannot be cast to org.apache.dolphinscheduler.common.datasource.mysql.MysqlConnectionParam
    at org.apache.dolphinscheduler.common.datasource.DatasourceUtil.getConnection(DatasourceUtil.java:84)
    at org.apache.dolphinscheduler.server.worker.task.datax.DataxTask.tryExecuteSqlResolveColumnNames(DataxTask.java:557)
    at org.apache.dolphinscheduler.server.worker.task.datax.DataxTask.parsingSqlColumnNames(DataxTask.java:465)
    at org.apache.dolphinscheduler.server.worker.task.datax.DataxTask.buildDataxJobContentJson(DataxTask.java:286)
    at org.apache.dolphinscheduler.server.worker.task.datax.DataxTask.buildDataxJsonFile(DataxTask.java:215)
    at org.apache.dolphinscheduler.server.worker.task.datax.DataxTask.handle(DataxTask.java:166)
    at org.apache.dolphinscheduler.server.worker.runner.TaskExecuteThread.run(TaskExecuteThread.java:159)
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
    at java.util.concurrent.FutureTask.run$$$capture(FutureTask.java:266)
    at java.util.concurrent.FutureTask.run(FutureTask.java)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ClassCastException: org.apache.dolphinscheduler.common.datasource.clickhouse.ClickhouseConnectionParam cannot be cast to org.apache.dolphinscheduler.common.datasource.mysql.MysqlConnectionParam
    at org.apache.dolphinscheduler.common.datasource.mysql.MysqlDatasourceProcessor.getConnection(MysqlDatasourceProcessor.java:113)
    at org.apache.dolphinscheduler.common.datasource.DatasourceUtil.getConnection(DatasourceUtil.java:82)
    ... 12 common frames omitted
```

**Which version of Dolphin Scheduler:**
latest dev

**Additional context**
Add any other context about the problem here.

**Requirement or improvement**
- Please describe about your requirements or improvement suggestions.

issue_url: https://github.com/apache/dolphinscheduler/issues/5808
pull_url: https://github.com/apache/dolphinscheduler/pull/5809
before_fix_sha: 4c0993cdf490eb0898fba42908270d10dff32001
after_fix_sha: bca92157a088c78d245ff60dfee2504ea8716c6a
report_datetime: 2021-07-12T15:48:53Z
language: java
commit_datetime: 2021-07-27T07:46:15Z
updated_file: dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java
chunk_content:

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
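The stack trace in the issue body shows MysqlDatasourceProcessor.getConnection receiving a ClickhouseConnectionParam: the processor was looked up with one DbType while the connection params were built for another, so the downcast fails. The following self-contained sketch reproduces that failure mode and a type-consistent dispatch that avoids it; the names are assumptions for illustration, not the actual classes or the patch from PR #5809:

```java
import java.util.Map;

// Minimal sketch of the failure mode described above. Each "processor" expects
// its own connection-param type; if the caller picks the processor by one DbType
// but hands it params built for another, the downcast throws the same kind of
// ClassCastException shown in the stack trace.
public class CastMismatchSketch {

    enum DbType { MYSQL, CLICKHOUSE }

    interface ConnectionParam { DbType type(); }

    record MysqlConnectionParam(String jdbcUrl) implements ConnectionParam {
        public DbType type() { return DbType.MYSQL; }
    }

    record ClickhouseConnectionParam(String jdbcUrl) implements ConnectionParam {
        public DbType type() { return DbType.CLICKHOUSE; }
    }

    interface Processor { String connect(ConnectionParam param); }

    static final Map<DbType, Processor> PROCESSORS = Map.of(
            // the MySQL processor downcasts, like MysqlDatasourceProcessor.getConnection
            DbType.MYSQL, param -> "mysql:" + ((MysqlConnectionParam) param).jdbcUrl(),
            DbType.CLICKHOUSE, param -> "clickhouse:" + ((ClickhouseConnectionParam) param).jdbcUrl());

    /** Buggy shape: dbType is chosen independently of the params, so the cast can fail. */
    static String connect(DbType dbType, ConnectionParam param) {
        return PROCESSORS.get(dbType).connect(param);
    }

    /** Safer shape: always dispatch on the type the params actually carry. */
    static String connect(ConnectionParam param) {
        return PROCESSORS.get(param.type()).connect(param);
    }

    public static void main(String[] args) {
        ConnectionParam clickhouse = new ClickhouseConnectionParam("jdbc:clickhouse://host:8123/db");
        System.out.println(connect(clickhouse));               // ok: dispatched by the params' own type
        try {
            connect(DbType.MYSQL, clickhouse);                 // wrong pairing of DbType and params
        } catch (ClassCastException e) {
            System.out.println("mismatched DbType/params: " + e.getMessage());
        }
    }
}
```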
status: closed
issue_id: 5808  (same repository, title, body, URLs, SHAs, and datetimes as the first #5808 row above)
updated_file: dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java
chunk_content:
 */

package org.apache.dolphinscheduler.server.worker.task.datax;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.datasource.BaseConnectionParam;
import org.apache.dolphinscheduler.common.datasource.DatasourceUtil;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.datax.DataxParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.server.entity.DataxTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.DataxUtils;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.worker.task.AbstractTask;
import org.apache.dolphinscheduler.server.worker.task.CommandExecuteResult;
import org.apache.dolphinscheduler.server.worker.task.ShellCommandExecutor;

import org.apache.commons.io.FileUtils;

import java.io.File;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
status: closed
issue_id: 5808  (same repository, title, body, URLs, SHAs, and datetimes as the first #5808 row above)
updated_file: dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java
chunk_content:
import java.nio.file.attribute.FileAttribute;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.slf4j.Logger;

import com.alibaba.druid.sql.ast.SQLStatement;
import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr;
import com.alibaba.druid.sql.ast.expr.SQLPropertyExpr;
import com.alibaba.druid.sql.ast.statement.SQLSelect;
import com.alibaba.druid.sql.ast.statement.SQLSelectItem;
import com.alibaba.druid.sql.ast.statement.SQLSelectQueryBlock;
import com.alibaba.druid.sql.ast.statement.SQLSelectStatement;
import com.alibaba.druid.sql.ast.statement.SQLUnionQuery;
import com.alibaba.druid.sql.parser.SQLStatementParser;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;

/**
 * DataX task
 */
public class DataxTask extends AbstractTask {
status: closed
issue_id: 5808  (same repository, title, body, URLs, SHAs, and datetimes as the first #5808 row above)
updated_file: dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java
chunk_content:
    /**
     * jvm parameters
     */
    public static final String JVM_PARAM = " --jvm=\"-Xms%sG -Xmx%sG\" ";

    /**
     * python process(datax only supports version 2.7 by default)
     */
    private static final String DATAX_PYTHON = "python2.7";

    private static final Pattern PYTHON_PATH_PATTERN = Pattern.compile("/bin/python[\\d.]*$");

    /**
     * datax path
     */
    private static final String DATAX_PATH = "${DATAX_HOME}/bin/datax.py";

    /**
     * datax channel count
     */
status: closed
issue_id: 5808  (same repository, title, body, URLs, SHAs, and datetimes as the first #5808 row above)
updated_file: dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java
chunk_content:
    private static final int DATAX_CHANNEL_COUNT = 1;

    /**
     * datax parameters
     */
    private DataxParameters dataXParameters;

    /**
     * shell command executor
     */
    private ShellCommandExecutor shellCommandExecutor;

    /**
     * taskExecutionContext
     */
    private TaskExecutionContext taskExecutionContext;

    /**
     * constructor
     *
     * @param taskExecutionContext taskExecutionContext
     * @param logger logger
     */
    public DataxTask(TaskExecutionContext taskExecutionContext, Logger logger) {
        super(taskExecutionContext, logger);
        this.taskExecutionContext = taskExecutionContext;
        this.shellCommandExecutor = new ShellCommandExecutor(this::logHandle, taskExecutionContext, logger);
    }

    /**
     * init DataX config
     */
    @Override
    public void init() {
status: closed
issue_id: 5808  (same repository, title, body, URLs, SHAs, and datetimes as the first #5808 row above)
updated_file: dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java
chunk_content:
logger.info("datax task params {}", taskExecutionContext.getTaskParams()); dataXParameters = JSONUtils.parseObject(taskExecutionContext.getTaskParams(), DataxParameters.class); if (!dataXParameters.checkParameters()) { throw new RuntimeException("datax task params is not valid"); } } /** * run DataX process * * @throws Exception if error throws Exception */ @Override public void handle() throws Exception { try { String threadLoggerInfoName = String.format("TaskLogInfo-%s", taskExecutionContext.getTaskAppId()); Thread.currentThread().setName(threadLoggerInfoName); Map<String, Property> paramsMap = ParamUtils.convert(taskExecutionContext,getParameters()); String jsonFilePath = buildDataxJsonFile(paramsMap); String shellCommandFilePath = buildShellCommandFile(jsonFilePath, paramsMap); CommandExecuteResult commandExecuteResult = shellCommandExecutor.run(shellCommandFilePath); setExitStatusCode(commandExecuteResult.getExitStatusCode()); setAppIds(commandExecuteResult.getAppIds()); setProcessId(commandExecuteResult.getProcessId()); } catch (Exception e) { setExitStatusCode(Constants.EXIT_CODE_FAILURE); throw e; }
status: closed
issue_id: 5808  (same repository, title, body, URLs, SHAs, and datetimes as the first #5808 row above)
updated_file: dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java
chunk_content:
    }

    /**
     * cancel DataX process
     *
     * @param cancelApplication cancelApplication
     * @throws Exception if error throws Exception
     */
    @Override
    public void cancelApplication(boolean cancelApplication) throws Exception {
        shellCommandExecutor.cancelApplication();
    }

    /**
     * build datax configuration file
     *
     * @return datax json file name
     * @throws Exception if error throws Exception
     */
    private String buildDataxJsonFile(Map<String, Property> paramsMap) throws Exception {
        String fileName = String.format("%s/%s_job.json",
                taskExecutionContext.getExecutePath(),
                taskExecutionContext.getTaskAppId());
        String json;

        Path path = new File(fileName).toPath();
        if (Files.exists(path)) {
            return fileName;
        }
status: closed
issue_id: 5808  (same repository, title, body, URLs, SHAs, and datetimes as the first #5808 row above)
updated_file: dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java
chunk_content:
        if (dataXParameters.getCustomConfig() == Flag.YES.ordinal()) {
            json = dataXParameters.getJson().replaceAll("\\r\\n", "\n");
        } else {
            ObjectNode job = JSONUtils.createObjectNode();
            job.putArray("content").addAll(buildDataxJobContentJson());
            job.set("setting", buildDataxJobSettingJson());

            ObjectNode root = JSONUtils.createObjectNode();
            root.set("job", job);
            root.set("core", buildDataxCoreJson());
            json = root.toString();
        }

        json = ParameterUtils.convertParameterPlaceholders(json, ParamUtils.convert(paramsMap));

        logger.debug("datax job json : {}", json);

        FileUtils.writeStringToFile(new File(fileName), json, StandardCharsets.UTF_8);
        return fileName;
    }

    /**
     * build datax job config
     *
     * @return collection of datax job config JSONObject
     * @throws SQLException if error throws SQLException
     */
    private List<ObjectNode> buildDataxJobContentJson() {
        DataxTaskExecutionContext dataxTaskExecutionContext = taskExecutionContext.getDataxTaskExecutionContext();

        BaseConnectionParam dataSourceCfg = (BaseConnectionParam) DatasourceUtil.buildConnectionParams(
                DbType.of(dataxTaskExecutionContext.getSourcetype()),
                dataxTaskExecutionContext.getSourceConnectionParams());

        BaseConnectionParam dataTargetCfg = (BaseConnectionParam) DatasourceUtil.buildConnectionParams(
status: closed
issue_id: 5808  (same repository, title, body, URLs, SHAs, and datetimes as the first #5808 row above)
updated_file: dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java
chunk_content:
                DbType.of(dataxTaskExecutionContext.getTargetType()),
                dataxTaskExecutionContext.getTargetConnectionParams());

        List<ObjectNode> readerConnArr = new ArrayList<>();
        ObjectNode readerConn = JSONUtils.createObjectNode();

        ArrayNode sqlArr = readerConn.putArray("querySql");
        for (String sql : new String[]{dataXParameters.getSql()}) {
            sqlArr.add(sql);
        }

        ArrayNode urlArr = readerConn.putArray("jdbcUrl");
        urlArr.add(DatasourceUtil.getJdbcUrl(DbType.valueOf(dataXParameters.getDtType()), dataSourceCfg));

        readerConnArr.add(readerConn);

        ObjectNode readerParam = JSONUtils.createObjectNode();
        readerParam.put("username", dataSourceCfg.getUser());
        readerParam.put("password", CommonUtils.decodePassword(dataSourceCfg.getPassword()));
        readerParam.putArray("connection").addAll(readerConnArr);

        ObjectNode reader = JSONUtils.createObjectNode();
        reader.put("name", DataxUtils.getReaderPluginName(DbType.of(dataxTaskExecutionContext.getSourcetype())));
        reader.set("parameter", readerParam);

        List<ObjectNode> writerConnArr = new ArrayList<>();
        ObjectNode writerConn = JSONUtils.createObjectNode();
        ArrayNode tableArr = writerConn.putArray("table");
        tableArr.add(dataXParameters.getTargetTable());

        writerConn.put("jdbcUrl", DatasourceUtil.getJdbcUrl(DbType.valueOf(dataXParameters.getDsType()), dataTargetCfg));
        writerConnArr.add(writerConn);

        ObjectNode writerParam = JSONUtils.createObjectNode();
        writerParam.put("username", dataTargetCfg.getUser());
        writerParam.put("password", CommonUtils.decodePassword(dataTargetCfg.getPassword()));

        String[] columns = parsingSqlColumnNames(DbType.of(dataxTaskExecutionContext.getSourcetype()),
                DbType.of(dataxTaskExecutionContext.getTargetType()),
                dataSourceCfg, dataXParameters.getSql());
status: closed
issue_id: 5808  (same repository, title, body, URLs, SHAs, and datetimes as the first #5808 row above)
updated_file: dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java
chunk_content:
        ArrayNode columnArr = writerParam.putArray("column");
        for (String column : columns) {
            columnArr.add(column);
        }
        writerParam.putArray("connection").addAll(writerConnArr);

        if (CollectionUtils.isNotEmpty(dataXParameters.getPreStatements())) {
            ArrayNode preSqlArr = writerParam.putArray("preSql");
            for (String preSql : dataXParameters.getPreStatements()) {
                preSqlArr.add(preSql);
            }
        }

        if (CollectionUtils.isNotEmpty(dataXParameters.getPostStatements())) {
            ArrayNode postSqlArr = writerParam.putArray("postSql");
            for (String postSql : dataXParameters.getPostStatements()) {
                postSqlArr.add(postSql);
            }
        }

        ObjectNode writer = JSONUtils.createObjectNode();
        writer.put("name", DataxUtils.getWriterPluginName(DbType.of(dataxTaskExecutionContext.getTargetType())));
        writer.set("parameter", writerParam);

        List<ObjectNode> contentList = new ArrayList<>();
        ObjectNode content = JSONUtils.createObjectNode();
        content.set("reader", reader);
        content.set("writer", writer);
        contentList.add(content);

        return contentList;
    }

    /**
     * build datax setting config
     *
status: closed
issue_id: 5808  (same repository, title, body, URLs, SHAs, and datetimes as the first #5808 row above)
updated_file: dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java
chunk_content:
     * @return datax setting config JSONObject
     */
    private ObjectNode buildDataxJobSettingJson() {
        ObjectNode speed = JSONUtils.createObjectNode();
        speed.put("channel", DATAX_CHANNEL_COUNT);

        if (dataXParameters.getJobSpeedByte() > 0) {
            speed.put("byte", dataXParameters.getJobSpeedByte());
        }

        if (dataXParameters.getJobSpeedRecord() > 0) {
            speed.put("record", dataXParameters.getJobSpeedRecord());
        }

        ObjectNode errorLimit = JSONUtils.createObjectNode();
        errorLimit.put("record", 0);
        errorLimit.put("percentage", 0);

        ObjectNode setting = JSONUtils.createObjectNode();
        setting.set("speed", speed);
        setting.set("errorLimit", errorLimit);

        return setting;
    }

    private ObjectNode buildDataxCoreJson() {
        ObjectNode speed = JSONUtils.createObjectNode();
        speed.put("channel", DATAX_CHANNEL_COUNT);

        if (dataXParameters.getJobSpeedByte() > 0) {
            speed.put("byte", dataXParameters.getJobSpeedByte());
        }

        if (dataXParameters.getJobSpeedRecord() > 0) {
            speed.put("record", dataXParameters.getJobSpeedRecord());
        }

        ObjectNode channel = JSONUtils.createObjectNode();
        channel.set("speed", speed);
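For orientation, here is a standalone Jackson sketch of the `setting` object that buildDataxJobSettingJson assembles when no byte or record limits are configured. Plain ObjectMapper is assumed here in place of DolphinScheduler's JSONUtils, which, per the imports in the earlier chunk, works with the same Jackson ObjectNode type:

```java
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

// Illustration only: reproduces the shape of the "setting" JSON built above
// when jobSpeedByte and jobSpeedRecord are unset.
public class DataxSettingShape {
    public static void main(String[] args) {
        ObjectMapper mapper = new ObjectMapper();

        ObjectNode speed = mapper.createObjectNode();
        speed.put("channel", 1);          // DATAX_CHANNEL_COUNT

        ObjectNode errorLimit = mapper.createObjectNode();
        errorLimit.put("record", 0);
        errorLimit.put("percentage", 0);

        ObjectNode setting = mapper.createObjectNode();
        setting.set("speed", speed);
        setting.set("errorLimit", errorLimit);

        // prints {"speed":{"channel":1},"errorLimit":{"record":0,"percentage":0}}
        System.out.println(setting);
    }
}
```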
status: closed
issue_id: 5808  (same repository, title, body, URLs, SHAs, and datetimes as the first #5808 row above)
updated_file: dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java
chunk_content:
        ObjectNode transport = JSONUtils.createObjectNode();
        transport.set("channel", channel);

        ObjectNode core = JSONUtils.createObjectNode();
        core.set("transport", transport);

        return core;
    }

    /**
     * create command
     *
     * @return shell command file name
     * @throws Exception if error throws Exception
     */
    private String buildShellCommandFile(String jobConfigFilePath, Map<String, Property> paramsMap)
            throws Exception {
        String fileName = String.format("%s/%s_node.%s",
                taskExecutionContext.getExecutePath(),
                taskExecutionContext.getTaskAppId(),
                OSUtils.isWindows() ? "bat" : "sh");

        Path path = new File(fileName).toPath();

        if (Files.exists(path)) {
            return fileName;
        }

        StringBuilder sbr = new StringBuilder();
        sbr.append(getPythonCommand());
        sbr.append(" ");
        sbr.append(DATAX_PATH);
        sbr.append(" ");
        sbr.append(loadJvmEnv(dataXParameters));
        sbr.append(jobConfigFilePath);

        String dataxCommand = ParameterUtils.convertParameterPlaceholders(sbr.toString(), ParamUtils.convert(paramsMap));
        logger.debug("raw script : {}", dataxCommand);

        Set<PosixFilePermission> perms = PosixFilePermissions.fromString(Constants.RWXR_XR_X);
        FileAttribute<Set<PosixFilePermission>> attr = PosixFilePermissions.asFileAttribute(perms);

        if (OSUtils.isWindows()) {
            Files.createFile(path);
        } else {
            Files.createFile(path, attr);
        }

        Files.write(path, dataxCommand.getBytes(), StandardOpenOption.APPEND);

        return fileName;
    }

    public String getPythonCommand() {
        String pythonHome = System.getenv("PYTHON_HOME");
        return getPythonCommand(pythonHome);
    }

    public String getPythonCommand(String pythonHome) {
        if (StringUtils.isEmpty(pythonHome)) {
            return DATAX_PYTHON;
        }
        String pythonBinPath = "/bin/" + DATAX_PYTHON;
        Matcher matcher = PYTHON_PATH_PATTERN.matcher(pythonHome);
        if (matcher.find()) {
            return matcher.replaceAll(pythonBinPath);
        }
        return Paths.get(pythonHome, pythonBinPath).toString();
    }
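A rough illustration of the command line that buildShellCommandFile composes before ${...} placeholders are substituted by ParameterUtils.convertParameterPlaceholders and the script is written to disk. The literal constant values (DATAX_PYTHON, DATAX_PATH, the JVM_PARAM template) are not visible in this excerpt, so the values below are assumptions chosen only to show the overall shape.

```java
public class DataxCommandSketch {
    public static void main(String[] args) {
        // Assumed values; the real ones live in Constants/DataxTask and may differ.
        String pythonCommand = "python2.7";                        // getPythonCommand() when PYTHON_HOME is unset
        String dataxPath = "${DATAX_HOME}/bin/datax.py";           // DATAX_PATH
        String jvmParam = "--jvm=\"-Xms1G -Xmx1G\" ";              // JVM_PARAM filled by loadJvmEnv(...)
        String jobConfigFilePath = "/tmp/dolphinscheduler/exec/task_123_job.json";

        // mirrors the StringBuilder appends in buildShellCommandFile
        String dataxCommand = pythonCommand + " " + dataxPath + " " + jvmParam + jobConfigFilePath;
        System.out.println(dataxCommand);
        // python2.7 ${DATAX_HOME}/bin/datax.py --jvm="-Xms1G -Xmx1G" /tmp/dolphinscheduler/exec/task_123_job.json
    }
}
```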
    public String loadJvmEnv(DataxParameters dataXParameters) {
        int xms = dataXParameters.getXms() < 1 ? 1 : dataXParameters.getXms();
        int xmx = dataXParameters.getXmx() < 1 ? 1 : dataXParameters.getXmx();
        return String.format(JVM_PARAM, xms, xmx);
    }

    /**
     * parsing synchronized column names in SQL statements
     *
     * @param dsType the database type of the data source
     * @param dtType the database type of the data target
     * @param dataSourceCfg the database connection parameters of the data source
     * @param sql sql for data synchronization
     * @return Keyword converted column names
     */
    private String[] parsingSqlColumnNames(DbType dsType, DbType dtType, BaseConnectionParam dataSourceCfg, String sql) {
        String[] columnNames = tryGrammaticalAnalysisSqlColumnNames(dsType, sql);

        if (columnNames == null || columnNames.length == 0) {
            logger.info("try to execute sql analysis query column name");
            columnNames = tryExecuteSqlResolveColumnNames(dataSourceCfg, sql);
        }

        notNull(columnNames, String.format("parsing sql columns failed : %s", sql));

        return DataxUtils.convertKeywordsColumns(dtType, columnNames);
    }

    /**
     * try grammatical parsing column
     *
     * @param dbType database type
     * @param sql sql for data synchronization
     * @return column name array
     * @throws RuntimeException if error throws RuntimeException
     */
    private String[] tryGrammaticalAnalysisSqlColumnNames(DbType dbType, String sql) {
        String[] columnNames;

        try {
            SQLStatementParser parser = DataxUtils.getSqlStatementParser(dbType, sql);
            if (parser == null) {
                logger.warn("database driver [{}] is not support grammatical analysis sql", dbType);
                return new String[0];
            }

            SQLStatement sqlStatement = parser.parseStatement();
            SQLSelectStatement sqlSelectStatement = (SQLSelectStatement) sqlStatement;
            SQLSelect sqlSelect = sqlSelectStatement.getSelect();

            List<SQLSelectItem> selectItemList = null;
            if (sqlSelect.getQuery() instanceof SQLSelectQueryBlock) {
                SQLSelectQueryBlock block = (SQLSelectQueryBlock) sqlSelect.getQuery();
                selectItemList = block.getSelectList();
            } else if (sqlSelect.getQuery() instanceof SQLUnionQuery) {
                SQLUnionQuery unionQuery = (SQLUnionQuery) sqlSelect.getQuery();
                SQLSelectQueryBlock block = (SQLSelectQueryBlock) unionQuery.getRight();
                selectItemList = block.getSelectList();
            }

            notNull(selectItemList,
                    String.format("select query type [%s] is not support", sqlSelect.getQuery().toString()));

            columnNames = new String[selectItemList.size()];
            for (int i = 0; i < selectItemList.size(); i++) {
                SQLSelectItem item = selectItemList.get(i);

                String columnName = null;

                if (item.getAlias() != null) {
                    columnName = item.getAlias();
                } else if (item.getExpr() != null) {
                    if (item.getExpr() instanceof SQLPropertyExpr) {
                        SQLPropertyExpr expr = (SQLPropertyExpr) item.getExpr();
                        columnName = expr.getName();
                    } else if (item.getExpr() instanceof SQLIdentifierExpr) {
                        SQLIdentifierExpr expr = (SQLIdentifierExpr) item.getExpr();
                        columnName = expr.getName();
                    }
                } else {
                    throw new RuntimeException(
                            String.format("grammatical analysis sql column [ %s ] failed", item.toString()));
                }

                if (columnName == null) {
                    throw new RuntimeException(
                            String.format("grammatical analysis sql column [ %s ] failed", item.toString()));
                }

                columnNames[i] = columnName;
            }
        } catch (Exception e) {
            logger.warn(e.getMessage(), e);
            return new String[0];
        }

        return columnNames;
    }

    /**
     * try to execute sql to resolve column names
     *
     * @param baseDataSource the database connection parameters
     * @param sql sql for data synchronization
     * @return column name array
     */
    public String[] tryExecuteSqlResolveColumnNames(BaseConnectionParam baseDataSource, String sql) {
        String[] columnNames;
        sql = String.format("SELECT t.* FROM ( %s ) t WHERE 0 = 1", sql);
        sql = sql.replace(";", "");

        try (
                Connection connection = DatasourceUtil.getConnection(DbType.valueOf(dataXParameters.getDtType()), baseDataSource);
                PreparedStatement stmt = connection.prepareStatement(sql);
                ResultSet resultSet = stmt.executeQuery()) {

            ResultSetMetaData md = resultSet.getMetaData();
            int num = md.getColumnCount();
            columnNames = new String[num];
            for (int i = 1; i <= num; i++) {
                columnNames[i - 1] = md.getColumnName(i);
            }
        } catch (SQLException e) {
            logger.warn(e.getMessage(), e);
            return null;
        }

        return columnNames;
    }

    @Override
    public AbstractParameters getParameters() {
        return dataXParameters;
    }

    private void notNull(Object obj, String message) {
        if (obj == null) {
            throw new RuntimeException(message);
        }
    }
}
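This last method is where the ClassCastException in the issue originates: the try-with-resources opens the connection with DbType.valueOf(dataXParameters.getDtType()), i.e. the *target* database type, while baseDataSource here carries the *source* connection parameters. In a ClickHouse-to-MySQL transfer that hands a ClickhouseConnectionParam to the MySQL datasource processor, which then fails to cast it. The sketch below illustrates the kind of change that removes the mismatch by letting the caller state which DbType the parameters belong to; it is an illustration of the idea, not necessarily the exact patch from the linked PR.

```java
// Sketch only: take the source DbType explicitly instead of deriving it from the DataX target type.
public String[] tryExecuteSqlResolveColumnNames(DbType sourceType, BaseConnectionParam baseDataSource, String sql) {
    String[] columnNames;
    sql = String.format("SELECT t.* FROM ( %s ) t WHERE 0 = 1", sql);
    sql = sql.replace(";", "");

    try (
            Connection connection = DatasourceUtil.getConnection(sourceType, baseDataSource);
            PreparedStatement stmt = connection.prepareStatement(sql);
            ResultSet resultSet = stmt.executeQuery()) {
        ResultSetMetaData md = resultSet.getMetaData();
        int num = md.getColumnCount();
        columnNames = new String[num];
        for (int i = 1; i <= num; i++) {
            columnNames[i - 1] = md.getColumnName(i);
        }
    } catch (SQLException e) {
        logger.warn(e.getMessage(), e);
        return null;
    }
    return columnNames;
}

// The caller in parsingSqlColumnNames would then pass the reader-side type:
// columnNames = tryExecuteSqlResolveColumnNames(dsType, dataSourceCfg, sql);
```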
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ExecutorController.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.controller;

import static org.apache.dolphinscheduler.api.enums.Status.CHECK_PROCESS_DEFINITION_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.EXECUTE_PROCESS_INSTANCE_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.START_PROCESS_INSTANCE_ERROR;

import org.apache.dolphinscheduler.api.aspect.AccessLogAnnotation;
import org.apache.dolphinscheduler.api.enums.ExecuteType;
import org.apache.dolphinscheduler.api.exceptions.ApiException;
import org.apache.dolphinscheduler.api.service.ExecutorService;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.RunMode;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.entity.User;

import java.util.Map;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestAttribute;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseStatus;
import org.springframework.web.bind.annotation.RestController;

import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
import io.swagger.annotations.ApiParam;
import springfox.documentation.annotations.ApiIgnore;

/**
 * executor controller
 */
@Api(tags = "EXECUTOR_TAG")
@RestController
@RequestMapping("projects/{projectName}/executors")
public class ExecutorController extends BaseController {

    @Autowired
    private ExecutorService execService;

    /**
     * execute process instance
     *
     * @param loginUser login user
     * @param projectName project name
     * @param processDefinitionId process definition id
     * @param scheduleTime schedule time
     * @param failureStrategy failure strategy
     * @param startNodeList start nodes list
     * @param taskDependType task depend type
     * @param execType execute type
     * @param warningType warning type
     * @param warningGroupId warning group id
     * @param runMode run mode
     * @param processInstancePriority process instance priority
     * @param workerGroup worker group
     * @param timeout timeout
     * @return start process result code
     */
    @ApiOperation(value = "startProcessInstance", notes = "RUN_PROCESS_INSTANCE_NOTES")
    @ApiImplicitParams({
            @ApiImplicitParam(name = "processDefinitionId", value = "PROCESS_DEFINITION_ID", required = true, dataType = "Int", example = "100"),
            @ApiImplicitParam(name = "scheduleTime", value = "SCHEDULE_TIME", required = true, dataType = "String"),
            @ApiImplicitParam(name = "failureStrategy", value = "FAILURE_STRATEGY", required = true, dataType = "FailureStrategy"),
            @ApiImplicitParam(name = "startNodeList", value = "START_NODE_LIST", dataType = "String"),
            @ApiImplicitParam(name = "taskDependType", value = "TASK_DEPEND_TYPE", dataType = "TaskDependType"),
            @ApiImplicitParam(name = "execType", value = "COMMAND_TYPE", dataType = "CommandType"),
            @ApiImplicitParam(name = "warningType", value = "WARNING_TYPE", required = true, dataType = "WarningType"),
            @ApiImplicitParam(name = "warningGroupId", value = "WARNING_GROUP_ID", required = true, dataType = "Int", example = "100"),
            @ApiImplicitParam(name = "runMode", value = "RUN_MODE", dataType = "RunMode"),
            @ApiImplicitParam(name = "processInstancePriority", value = "PROCESS_INSTANCE_PRIORITY", required = true, dataType = "Priority"),
            @ApiImplicitParam(name = "workerGroup", value = "WORKER_GROUP", dataType = "String", example = "default"),
            @ApiImplicitParam(name = "timeout", value = "TIMEOUT", dataType = "Int", example = "100"),
    })
    @PostMapping(value = "start-process-instance")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(START_PROCESS_INSTANCE_ERROR)
    @AccessLogAnnotation(ignoreRequestArgs = "loginUser")
    public Result startProcessInstance(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                       @ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName,
                                       @RequestParam(value = "processDefinitionId") int processDefinitionId,
                                       @RequestParam(value = "scheduleTime", required = false) String scheduleTime,
                                       @RequestParam(value = "failureStrategy", required = true) FailureStrategy failureStrategy,
                                       @RequestParam(value = "startNodeList", required = false) String startNodeList,
                                       @RequestParam(value = "taskDependType", required = false) TaskDependType taskDependType,
                                       @RequestParam(value = "execType", required = false) CommandType execType,
                                       @RequestParam(value = "warningType", required = true) WarningType warningType,
                                       @RequestParam(value = "warningGroupId", required = false) int warningGroupId,
                                       @RequestParam(value = "runMode", required = false) RunMode runMode,
                                       @RequestParam(value = "processInstancePriority", required = false) Priority processInstancePriority,
                                       @RequestParam(value = "workerGroup", required = false, defaultValue = "default") String workerGroup,
                                       @RequestParam(value = "timeout", required = false) Integer timeout,
                                       @RequestParam(value = "startParams", required = false) String startParams) {
        if (timeout == null) {
            timeout = Constants.MAX_TASK_TIMEOUT;
        }
        Map<String, String> startParamMap = null;
        if (startParams != null) {
            startParamMap = JSONUtils.toMap(startParams);
        }
        Map<String, Object> result = execService.execProcessInstance(loginUser, projectName, processDefinitionId, scheduleTime, execType, failureStrategy,
                startNodeList, taskDependType, warningType, warningGroupId, runMode, processInstancePriority, workerGroup, timeout, startParamMap);
        return returnDataList(result);
    }

    /**
     * do action to process instance: pause, stop, repeat, recover from pause, recover from stop
     *
     * @param loginUser login user
     * @param projectName project name
     * @param processInstanceId process instance id
     * @param executeType execute type
     * @return execute result code
     */
    @ApiOperation(value = "execute", notes = "EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES")
    @ApiImplicitParams({
            @ApiImplicitParam(name = "processInstanceId", value = "PROCESS_INSTANCE_ID", required = true, dataType = "Int", example = "100"),
            @ApiImplicitParam(name = "executeType", value = "EXECUTE_TYPE", required = true, dataType = "ExecuteType")
    })
    @PostMapping(value = "/execute")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(EXECUTE_PROCESS_INSTANCE_ERROR)
    @AccessLogAnnotation(ignoreRequestArgs = "loginUser")
    public Result execute(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                          @ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName,
                          @RequestParam("processInstanceId") Integer processInstanceId,
                          @RequestParam("executeType") ExecuteType executeType
    ) {
        Map<String, Object> result = execService.execute(loginUser, projectName, processInstanceId, executeType);
        return returnDataList(result);
    }

    /**
     * check whether the process definition and all of its sub process definitions are online.
     *
     * @param loginUser login user
     * @param processDefinitionId process definition id
     * @return check result code
     */
    @ApiOperation(value = "startCheckProcessDefinition", notes = "START_CHECK_PROCESS_DEFINITION_NOTES")
    @ApiImplicitParams({
            @ApiImplicitParam(name = "processDefinitionId", value = "PROCESS_DEFINITION_ID", required = true, dataType = "Int", example = "100")
    })
    @PostMapping(value = "/start-check")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(CHECK_PROCESS_DEFINITION_ERROR)
    @AccessLogAnnotation(ignoreRequestArgs = "loginUser")
    public Result startCheckProcessDefinition(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                              @RequestParam(value = "processDefinitionId") int processDefinitionId) {
        Map<String, Object> result = execService.startCheckByProcessDefinedId(processDefinitionId);
        return returnDataList(result);
    }
}
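To make the request contract above concrete, here is a hedged MockMvc-style sketch of calling start-process-instance with form parameters. The parameter names follow the @RequestParam declarations in the controller; the surrounding test scaffolding (mockMvc, sessionId, the usual controller-test base class) and the "sessionId" header name are assumptions that may differ from the project's actual test setup.

```java
// POST /projects/{projectName}/executors/start-process-instance
MvcResult mvcResult = mockMvc.perform(post("/projects/{projectName}/executors/start-process-instance", "test-project")
        .header("sessionId", sessionId)
        .param("processDefinitionId", "100")
        .param("scheduleTime", "")
        .param("failureStrategy", String.valueOf(FailureStrategy.END))
        .param("startNodeList", "")
        .param("taskDependType", String.valueOf(TaskDependType.TASK_POST))
        .param("execType", "")
        .param("warningType", String.valueOf(WarningType.NONE))
        .param("warningGroupId", "1")
        .param("runMode", String.valueOf(RunMode.RUN_MODE_SERIAL))
        .param("processInstancePriority", String.valueOf(Priority.MEDIUM))
        .param("workerGroup", "default")
        .param("timeout", "120")
        .param("startParams", "{\"myParam\":\"v1\"}"))
        .andExpect(status().isOk())
        .andReturn();
```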
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ExecutorService.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.service;

import org.apache.dolphinscheduler.api.enums.ExecuteType;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.RunMode;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.User;

import java.util.Map;

/**
 * executor service
 */
public interface ExecutorService {

    /**
     * execute process instance
     *
     * @param loginUser login user
     * @param projectName project name
     * @param processDefinitionId process definition id
     * @param cronTime cron time
     * @param commandType command type
     * @param failureStrategy failure strategy
     * @param startNodeList start node list
     * @param taskDependType node dependency type
     * @param warningType warning type
     * @param warningGroupId notify group id
     * @param processInstancePriority process instance priority
     * @param workerGroup worker group name
     * @param runMode run mode
     * @param timeout timeout
     * @param startParams the global param values which are passed to the new process instance
     * @return execute process instance code
     */
    Map<String, Object> execProcessInstance(User loginUser, String projectName,
                                            int processDefinitionId, String cronTime, CommandType commandType,
                                            FailureStrategy failureStrategy, String startNodeList,
                                            TaskDependType taskDependType, WarningType warningType, int warningGroupId,
                                            RunMode runMode,
                                            Priority processInstancePriority, String workerGroup, Integer timeout,
                                            Map<String, String> startParams);

    /**
     * check whether the process definition can be executed
     *
     * @param processDefinition process definition
     * @param processDefineCode process definition code
     * @return check result code
     */
    Map<String, Object> checkProcessDefinitionValid(ProcessDefinition processDefinition, long processDefineCode);

    /**
     * do action to process instance: pause, stop, repeat, recover from pause, recover from stop
     *
     * @param loginUser login user
     * @param projectName project name
     * @param processInstanceId process instance id
     * @param executeType execute type
     * @return execute result code
     */
    Map<String, Object> execute(User loginUser, String projectName, Integer processInstanceId, ExecuteType executeType);

    /**
     * check if sub processes are offline before starting process definition
     *
     * @param processDefineId process definition id
     * @return check result code
     */
    Map<String, Object> startCheckByProcessDefinedId(int processDefineId);
}
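Issue 4385 asks for a cap on how many complement (backfill) runs are generated in parallel. One way the implementation shown next could accommodate that is an extra expectedParallelismNumber argument that bounds the number of COMPLEMENT_DATA commands created for a date range. The sketch below is only an illustration of that idea: the parameter name, the splitting strategy, and the helper signature are assumptions, not necessarily what the merged change does; it relies on the CMDPARAM_COMPLEMENT_DATA_* keys, DateUtils, JSONUtils and processService that the implementation already imports.

```java
// Illustrative only: bound the number of complement commands created for a list of run dates
// (e.g. the trigger dates computed from the schedule via CronUtils).
private int createComplementCommands(Command baseCommand, List<Date> runDates, Integer expectedParallelismNumber) {
    int batches = runDates.size();
    if (expectedParallelismNumber != null && expectedParallelismNumber > 0) {
        batches = Math.min(batches, expectedParallelismNumber);
    }

    int created = 0;
    int chunkSize = (runDates.size() + batches - 1) / batches;   // ceiling division
    for (int start = 0; start < runDates.size(); start += chunkSize) {
        int end = Math.min(start + chunkSize, runDates.size()) - 1;

        Map<String, String> cmdParam = new HashMap<>();
        cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(runDates.get(start)));
        cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(runDates.get(end)));

        Command command = new Command();
        // copy the relevant fields from baseCommand here (definition id, warning settings, priority, ...)
        command.setCommandType(CommandType.COMPLEMENT_DATA);
        command.setCommandParam(JSONUtils.toJsonString(cmdParam));
        processService.createCommand(command);
        created++;
    }
    return created;
}
```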
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.service.impl;

import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODE_NAMES;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_PARAMS;
import static org.apache.dolphinscheduler.common.Constants.MAX_TASK_TIMEOUT;

import org.apache.dolphinscheduler.api.enums.ExecuteType;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.ExecutorService;
import org.apache.dolphinscheduler.api.service.MonitorService;
import org.apache.dolphinscheduler.api.service.ProjectService;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.RunMode;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;

import org.apache.commons.collections.MapUtils;

import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

/**
 * executor service impl
 */
@Service
public class ExecutorServiceImpl extends BaseServiceImpl implements ExecutorService {

    private static final Logger logger = LoggerFactory.getLogger(ExecutorServiceImpl.class);

    @Autowired
    private ProjectMapper projectMapper;

    @Autowired
    private ProjectService projectService;

    @Autowired
    private ProcessDefinitionMapper processDefinitionMapper;

    @Autowired
    private MonitorService monitorService;

    @Autowired
    private ProcessInstanceMapper processInstanceMapper;

    @Autowired
    private ProcessService processService;

    /**
     * execute process instance
     *
     * @param loginUser login user
     * @param projectName project name
     * @param processDefinitionId process definition id
     * @param cronTime cron time
     * @param commandType command type
     * @param failureStrategy failure strategy
     * @param startNodeList start node list
     * @param taskDependType node dependency type
     * @param warningType warning type
     * @param warningGroupId notify group id
     * @param processInstancePriority process instance priority
     * @param workerGroup worker group name
     * @param runMode run mode
     * @param timeout timeout
     * @param startParams the global param values which are passed to the new process instance
     * @return execute process instance code
     */
    @Override
    public Map<String, Object> execProcessInstance(User loginUser, String projectName,
                                                   int processDefinitionId, String cronTime, CommandType commandType,
                                                   FailureStrategy failureStrategy, String startNodeList,
                                                   TaskDependType taskDependType, WarningType warningType, int warningGroupId,
                                                   RunMode runMode,
                                                   Priority processInstancePriority, String workerGroup, Integer timeout,
                                                   Map<String, String> startParams) {
        Map<String, Object> result = new HashMap<>();

        if (timeout <= 0 || timeout > MAX_TASK_TIMEOUT) {
            putMsg(result, Status.TASK_TIMEOUT_PARAMS_ERROR);
            return result;
        }

        Project project = projectMapper.queryByName(projectName);
        Map<String, Object> checkResultAndAuth = checkResultAndAuth(loginUser, projectName, project);
        if (checkResultAndAuth != null) {
            return checkResultAndAuth;
        }

        ProcessDefinition processDefinition = processDefinitionMapper.selectById(processDefinitionId);
        result = checkProcessDefinitionValid(processDefinition, processDefinitionId);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } if (!checkTenantSuitable(processDefinition)) { logger.error("there is not any valid tenant for the process definition: id:{},name:{}, ", processDefinition.getId(), processDefinition.getName()); putMsg(result, Status.TENANT_NOT_SUITABLE); return result; } if (!checkMasterExists(result)) { return result; } /** * create command */ int create = this.createCommand(commandType, processDefinitionId, taskDependType, failureStrategy, startNodeList, cronTime, warningType, loginUser.getId(), warningGroupId, runMode, processInstancePriority, workerGroup, startParams); if (create > 0) { processDefinition.setWarningGroupId(warningGroupId); processDefinitionMapper.updateById(processDefinition); putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.START_PROCESS_INSTANCE_ERROR); } return result; } /** * check whether master exists
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
* * @param result result * @return master exists return true , otherwise return false */ private boolean checkMasterExists(Map<String, Object> result) { List<Server> masterServers = monitorService.getServerListFromRegistry(true); if (masterServers.isEmpty()) { putMsg(result, Status.MASTER_NOT_EXISTS); return false; } return true; } /** * check whether the process definition can be executed * * @param processDefinition process definition * @param processDefineCode process definition code * @return check result code */ @Override public Map<String, Object> checkProcessDefinitionValid(ProcessDefinition processDefinition, long processDefineCode) { Map<String, Object> result = new HashMap<>(); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processDefineCode); } else if (processDefinition.getReleaseState() != ReleaseState.ONLINE) { putMsg(result, Status.PROCESS_DEFINE_NOT_RELEASE, processDefineCode);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
} else { result.put(Constants.STATUS, Status.SUCCESS); } return result; } /** * do action to process instance:pause, stop, repeat, recover from pause, recover from stop * * @param loginUser login user * @param projectName project name * @param processInstanceId process instance id * @param executeType execute type * @return execute result code */ @Override public Map<String, Object> execute(User loginUser, String projectName, Integer processInstanceId, ExecuteType executeType) { Map<String, Object> result = new HashMap<>(); Project project = projectMapper.queryByName(projectName); Map<String, Object> checkResult = checkResultAndAuth(loginUser, projectName, project); if (checkResult != null) { return checkResult; } if (!checkMasterExists(result)) { return result; } ProcessInstance processInstance = processService.findProcessInstanceDetailById(processInstanceId); if (processInstance == null) { putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceId); return result;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
}
ProcessDefinition processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(),
        processInstance.getProcessDefinitionVersion());
if (executeType != ExecuteType.STOP && executeType != ExecuteType.PAUSE) {
    result = checkProcessDefinitionValid(processDefinition, processInstance.getProcessDefinitionCode());
    if (result.get(Constants.STATUS) != Status.SUCCESS) {
        return result;
    }
}
checkResult = checkExecuteType(processInstance, executeType);
Status status = (Status) checkResult.get(Constants.STATUS);
if (status != Status.SUCCESS) {
    return checkResult;
}
if (!checkTenantSuitable(processDefinition)) {
    logger.error("there is not any valid tenant for the process definition: id:{},name:{}, ",
            processDefinition.getId(), processDefinition.getName());
    putMsg(result, Status.TENANT_NOT_SUITABLE);
}
// pick up the start params supplied at the first run, so a repeat run can reuse them
Map<String, Object> commandMap = JSONUtils.toMap(processInstance.getCommandParam(), String.class, Object.class);
String startParams = null;
if (MapUtils.isNotEmpty(commandMap) && executeType == ExecuteType.REPEAT_RUNNING) {
    Object startParamsJson = commandMap.get(Constants.CMD_PARAM_START_PARAMS);
    if (startParamsJson != null) {
        startParams = startParamsJson.toString();
    }
}
switch (executeType) {
    case REPEAT_RUNNING:
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
result = insertCommand(loginUser, processInstanceId, processDefinition.getId(), CommandType.REPEAT_RUNNING, startParams); break; case RECOVER_SUSPENDED_PROCESS: result = insertCommand(loginUser, processInstanceId, processDefinition.getId(), CommandType.RECOVER_SUSPENDED_PROCESS, startParams); break; case START_FAILURE_TASK_PROCESS: result = insertCommand(loginUser, processInstanceId, processDefinition.getId(), CommandType.START_FAILURE_TASK_PROCESS, startParams); break; case STOP: if (processInstance.getState() == ExecutionStatus.READY_STOP) { putMsg(result, Status.PROCESS_INSTANCE_ALREADY_CHANGED, processInstance.getName(), processInstance.getState()); } else { result = updateProcessInstancePrepare(processInstance, CommandType.STOP, ExecutionStatus.READY_STOP); } break; case PAUSE: if (processInstance.getState() == ExecutionStatus.READY_PAUSE) { putMsg(result, Status.PROCESS_INSTANCE_ALREADY_CHANGED, processInstance.getName(), processInstance.getState()); } else { result = updateProcessInstancePrepare(processInstance, CommandType.PAUSE, ExecutionStatus.READY_PAUSE); } break; default: logger.error("unknown execute type : {}", executeType); putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "unknown execute type"); break; } return result; } /**
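The execute() method assembled in the chunks above routes each ExecuteType either to a fresh command (repeat run, recover suspended, start failure tasks) or to an in-place state change (stop, pause). Below is a minimal caller-side sketch; the project name and instance id are made-up illustrative values, and the way the service is injected is an assumption, not something taken from these chunks.

import java.util.Map;

import org.apache.dolphinscheduler.api.enums.ExecuteType;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.ExecutorService;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.dao.entity.User;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

@Component
public class StopInstanceSketch {

    @Autowired
    private ExecutorService executorService;

    // stops a running instance; "demo-project" and id 42 are hypothetical values
    public boolean stop(User operator) {
        Map<String, Object> result = executorService.execute(operator, "demo-project", 42, ExecuteType.STOP);
        // anything but SUCCESS means the instance was not in a stoppable state (or no master is alive)
        return result.get(Constants.STATUS) == Status.SUCCESS;
    }
}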
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
* check tenant suitable * * @param processDefinition process definition * @return true if tenant suitable, otherwise return false */ private boolean checkTenantSuitable(ProcessDefinition processDefinition) { Tenant tenant = processService.getTenantForProcess(processDefinition.getTenantId(), processDefinition.getUserId()); return tenant != null; } /** * Check the state of process instance and the type of operation match * * @param processInstance process instance * @param executeType execute type * @return check result code */ private Map<String, Object> checkExecuteType(ProcessInstance processInstance, ExecuteType executeType) { Map<String, Object> result = new HashMap<>(); ExecutionStatus executionStatus = processInstance.getState(); boolean checkResult = false; switch (executeType) { case PAUSE: case STOP: if (executionStatus.typeIsRunning()) { checkResult = true; } break; case REPEAT_RUNNING: if (executionStatus.typeIsFinished()) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
checkResult = true; } break; case START_FAILURE_TASK_PROCESS: if (executionStatus.typeIsFailure()) { checkResult = true; } break; case RECOVER_SUSPENDED_PROCESS: if (executionStatus.typeIsPause() || executionStatus.typeIsCancel()) { checkResult = true; } break; default: break; } if (!checkResult) { putMsg(result, Status.PROCESS_INSTANCE_STATE_OPERATION_ERROR, processInstance.getName(), executionStatus.toString(), executeType.toString()); } else { putMsg(result, Status.SUCCESS); } return result; } /** * prepare to update process instance command type and status * * @param processInstance process instance * @param commandType command type * @param executionStatus execute status * @return update result
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
 */
private Map<String, Object> updateProcessInstancePrepare(ProcessInstance processInstance, CommandType commandType, ExecutionStatus executionStatus) {
    Map<String, Object> result = new HashMap<>();
    processInstance.setCommandType(commandType);
    processInstance.addHistoryCmd(commandType);
    processInstance.setState(executionStatus);
    int update = processService.updateProcessInstance(processInstance);
    // determine whether the instance state was actually persisted
    if (update > 0) {
        putMsg(result, Status.SUCCESS);
    } else {
        putMsg(result, Status.EXECUTE_PROCESS_INSTANCE_ERROR);
    }
    return result;
}

/**
 * insert command, used in the implementation of the page, re run, recovery (pause / failure) execution
 *
 * @param loginUser login user
 * @param instanceId instance id
 * @param processDefinitionId process definition id
 * @param commandType command type
 * @param startParams global start params to reuse, may be null
 * @return insert result code
 */
private Map<String, Object> insertCommand(User loginUser, Integer instanceId, Integer processDefinitionId, CommandType commandType, String startParams) {
    Map<String, Object> result = new HashMap<>();
    // build the command params: always record the instance to recover, plus the optional start params
    Map<String, Object> cmdParam = new HashMap<>();
    cmdParam.put(CMD_PARAM_RECOVER_PROCESS_ID_STRING, instanceId);
    if (StringUtils.isNotEmpty(startParams)) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
cmdParam.put(CMD_PARAM_START_PARAMS, startParams); } Command command = new Command(); command.setCommandType(commandType); command.setProcessDefinitionId(processDefinitionId); command.setCommandParam(JSONUtils.toJsonString(cmdParam)); command.setExecutorId(loginUser.getId()); if (!processService.verifyIsNeedCreateCommand(command)) { putMsg(result, Status.PROCESS_INSTANCE_EXECUTING_COMMAND, processDefinitionId); return result; } int create = processService.createCommand(command); if (create > 0) { putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.EXECUTE_PROCESS_INSTANCE_ERROR); } return result; } /** * check if sub processes are offline before starting process definition * * @param processDefineId process definition id * @return check result code */ @Override public Map<String, Object> startCheckByProcessDefinedId(int processDefineId) { Map<String, Object> result = new HashMap<>(); if (processDefineId == 0) { logger.error("process definition id is null");
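insertCommand() above stores its parameters as a small JSON document on the Command row. The sketch below reproduces just that serialization step so the stored shape is easy to see in isolation; the instance id and the start-params string are invented for illustration, and the exact key strings come from the Constants class rather than being spelled out here.

import java.util.HashMap;
import java.util.Map;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.JSONUtils;

public class CommandParamSketch {
    public static void main(String[] args) {
        Map<String, Object> cmdParam = new HashMap<>();
        // the same two keys insertCommand() writes; 1001 and the params JSON are made-up values
        cmdParam.put(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING, 1001);
        cmdParam.put(Constants.CMD_PARAM_START_PARAMS, "{\"bizdate\":\"2020-01-01\"}");
        // this JSON string is what ends up in Command.commandParam
        System.out.println(JSONUtils.toJsonString(cmdParam));
    }
}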
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "process definition id"); } List<Integer> ids = new ArrayList<>(); processService.recurseFindSubProcessId(processDefineId, ids); Integer[] idArray = ids.toArray(new Integer[ids.size()]); if (!ids.isEmpty()) { List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryDefinitionListByIdList(idArray); if (processDefinitionList != null) { for (ProcessDefinition processDefinition : processDefinitionList) { /** * if there is no online process, exit directly */ if (processDefinition.getReleaseState() != ReleaseState.ONLINE) { putMsg(result, Status.PROCESS_DEFINE_NOT_RELEASE, processDefinition.getName()); logger.info("not release process definition id: {} , name : {}", processDefinition.getId(), processDefinition.getName()); return result; } } } } putMsg(result, Status.SUCCESS); return result; } /** * create command * * @param commandType commandType * @param processDefineId processDefineId * @param nodeDep nodeDep
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
* @param failureStrategy failureStrategy * @param startNodeList startNodeList * @param schedule schedule * @param warningType warningType * @param executorId executorId * @param warningGroupId warningGroupId * @param runMode runMode * @param processInstancePriority processInstancePriority * @param workerGroup workerGroup * @return command id */ private int createCommand(CommandType commandType, int processDefineId, TaskDependType nodeDep, FailureStrategy failureStrategy, String startNodeList, String schedule, WarningType warningType, int executorId, int warningGroupId, RunMode runMode, Priority processInstancePriority, String workerGroup, Map<String, String> startParams) { /** * instantiate command schedule instance */ Command command = new Command(); Map<String, String> cmdParam = new HashMap<>(); if (commandType == null) { command.setCommandType(CommandType.START_PROCESS); } else { command.setCommandType(commandType); } command.setProcessDefinitionId(processDefineId); if (nodeDep != null) { command.setTaskDependType(nodeDep);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
}
if (failureStrategy != null) {
    command.setFailureStrategy(failureStrategy);
}
if (StringUtils.isNotEmpty(startNodeList)) {
    cmdParam.put(CMD_PARAM_START_NODE_NAMES, startNodeList);
}
if (warningType != null) {
    command.setWarningType(warningType);
}
if (startParams != null && startParams.size() > 0) {
    cmdParam.put(CMD_PARAM_START_PARAMS, JSONUtils.toJsonString(startParams));
}
command.setCommandParam(JSONUtils.toJsonString(cmdParam));
command.setExecutorId(executorId);
command.setWarningGroupId(warningGroupId);
command.setProcessInstancePriority(processInstancePriority);
command.setWorkerGroup(workerGroup);
Date start = null;
Date end = null;
if (StringUtils.isNotEmpty(schedule)) {
    String[] interval = schedule.split(",");
    if (interval.length == 2) {
        start = DateUtils.getScheduleDate(interval[0]);
        end = DateUtils.getScheduleDate(interval[1]);
    }
}
// complement data needs one command per schedule slice; every other command type is created as-is
if (commandType == CommandType.COMPLEMENT_DATA) {
    runMode = (runMode == null) ? RunMode.RUN_MODE_SERIAL : runMode;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
    if (null != start && null != end && !start.after(end)) {
        if (runMode == RunMode.RUN_MODE_SERIAL) {
            cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(start));
            cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(end));
            command.setCommandParam(JSONUtils.toJsonString(cmdParam));
            return processService.createCommand(command);
        } else if (runMode == RunMode.RUN_MODE_PARALLEL) {
            List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionId(processDefineId);
            List<Date> listDate = new LinkedList<>();
            if (!CollectionUtils.isEmpty(schedules)) {
                for (Schedule item : schedules) {
                    listDate.addAll(CronUtils.getSelfFireDateList(start, end, item.getCrontab()));
                }
            }
            if (!CollectionUtils.isEmpty(listDate)) {
                // one complement command per fire date of the online schedules
                for (Date date : listDate) {
                    cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(date));
                    cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(date));
                    command.setCommandParam(JSONUtils.toJsonString(cmdParam));
                    processService.createCommand(command);
                }
                return listDate.size();
            } else {
                // no online schedule: fall back to one complement command per day in the window
                int runCount = 0;
                while (!start.after(end)) {
                    runCount += 1;
                    cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(start));
                    cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(start));
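When an online schedule exists, the parallel complement branch above derives one command per cron fire date via CronUtils.getSelfFireDateList. Here is a standalone sketch of that call, reusing the same cron expression that the test class further down sets up; the window dates are arbitrary, and the CronUtils/DateUtils package locations are assumed to match the codebase at this commit.

import java.util.Date;
import java.util.List;

import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;

public class FireDateSketch {
    public static void main(String[] args) throws Exception {
        Date start = DateUtils.getScheduleDate("2020-01-01 00:00:00");
        Date end = DateUtils.getScheduleDate("2020-01-31 23:00:00");
        // "0 0 0 1/2 * ?" fires at midnight on every other day of the month;
        // each returned date becomes one complement command in RUN_MODE_PARALLEL
        List<Date> fireDates = CronUtils.getSelfFireDateList(start, end, "0 0 0 1/2 * ?");
        System.out.println(fireDates.size() + " complement commands would be created");
    }
}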
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
                    command.setCommandParam(JSONUtils.toJsonString(cmdParam));
                    processService.createCommand(command);
                    start = DateUtils.getSomeDay(start, 1);
                }
                return runCount;
            }
        }
    } else {
        logger.error("there is no valid schedule date for the process definition: id:{}", processDefineId);
    }
} else {
    command.setCommandParam(JSONUtils.toJsonString(cmdParam));
    return processService.createCommand(command);
}
return 0;
}

/**
 * check result and auth
 */
private Map<String, Object> checkResultAndAuth(User loginUser, String projectName, Project project) {
    // check whether the login user has permission to operate this project
    Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
    Status status = (Status) checkResult.get(Constants.STATUS);
    if (status != Status.SUCCESS) {
        return checkResult;
    }
    return null;
}
}
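The issue these chunks are paired with (#4385) asks for a manual cap on how many complement commands are spawned at once; the parallel branch above simply creates one command per date. Below is a rough sketch, not the change actually merged in the linked PR, of how the date list could be grouped into a bounded number of batches. The method name and the expectedParallelism parameter are hypothetical names introduced for illustration, and the sketch assumes it sits inside ExecutorServiceImpl next to createCommand, reusing the same fields and static imports.

// Sketch: split the complement dates into at most `expectedParallelism` contiguous batches,
// creating one command per batch so each batch is complemented serially inside its own range.
private int createBatchedComplementCommands(List<Date> listDate, Map<String, String> cmdParam,
                                            Command command, int expectedParallelism) {
    if (CollectionUtils.isEmpty(listDate) || expectedParallelism <= 0) {
        return 0;
    }
    int batches = Math.min(expectedParallelism, listDate.size());
    int batchSize = (listDate.size() + batches - 1) / batches; // ceiling division
    int created = 0;
    for (int i = 0; i < listDate.size(); i += batchSize) {
        Date batchStart = listDate.get(i);
        Date batchEnd = listDate.get(Math.min(i + batchSize, listDate.size()) - 1);
        cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(batchStart));
        cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(batchEnd));
        command.setCommandParam(JSONUtils.toJsonString(cmdParam));
        processService.createCommand(command);
        created++;
    }
    return created;
}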
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ExecutorService2Test.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import org.apache.dolphinscheduler.api.enums.ExecuteType; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.service.impl.ExecutorServiceImpl; import org.apache.dolphinscheduler.api.service.impl.ProjectServiceImpl; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.Priority; import org.apache.dolphinscheduler.common.enums.ReleaseState;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ExecutorService2Test.java
import org.apache.dolphinscheduler.common.enums.RunMode; import org.apache.dolphinscheduler.common.model.Server; import org.apache.dolphinscheduler.dao.entity.Command; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.service.process.ProcessService; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; /** * executor service 2 test */ @RunWith(MockitoJUnitRunner.Silent.class)
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ExecutorService2Test.java
public class ExecutorService2Test { @InjectMocks private ExecutorServiceImpl executorService; @Mock private ProcessService processService; @Mock private ProcessDefinitionMapper processDefinitionMapper; @Mock private ProjectMapper projectMapper; @Mock private ProjectServiceImpl projectService; @Mock private MonitorService monitorService; private int processDefinitionId = 1;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ExecutorService2Test.java
private int processInstanceId = 1; private int tenantId = 1; private int userId = 1; private ProcessDefinition processDefinition = new ProcessDefinition(); private ProcessInstance processInstance = new ProcessInstance(); private User loginUser = new User(); private String projectName = "projectName"; private Project project = new Project(); private String cronTime; @Before public void init() { loginUser.setId(userId); processDefinition.setId(processDefinitionId); processDefinition.setReleaseState(ReleaseState.ONLINE); processDefinition.setTenantId(tenantId); processDefinition.setUserId(userId); processDefinition.setVersion(1); processDefinition.setCode(1L); processInstance.setId(processInstanceId); processInstance.setState(ExecutionStatus.FAILURE); processInstance.setExecutorId(userId); processInstance.setTenantId(tenantId); processInstance.setProcessDefinitionVersion(1); processInstance.setProcessDefinitionCode(1L); project.setName(projectName);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ExecutorService2Test.java
cronTime = "2020-01-01 00:00:00,2020-01-31 23:00:00"; Mockito.when(projectMapper.queryByName(projectName)).thenReturn(project); Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectName)).thenReturn(checkProjectAndAuth()); Mockito.when(processDefinitionMapper.selectById(processDefinitionId)).thenReturn(processDefinition); Mockito.when(processService.getTenantForProcess(tenantId, userId)).thenReturn(new Tenant()); Mockito.when(processService.createCommand(any(Command.class))).thenReturn(1); Mockito.when(monitorService.getServerListFromRegistry(true)).thenReturn(getMasterServersList()); Mockito.when(processService.findProcessInstanceDetailById(processInstanceId)).thenReturn(processInstance); Mockito.when(processService.findProcessDefinition(1L, 1)).thenReturn(processDefinition); } /** * not complement */ @Test public void testNoComplement() { Mockito.when(processService.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId)).thenReturn(zeroSchedulerList()); Map<String, Object> result = executorService.execProcessInstance(loginUser, projectName, processDefinitionId, cronTime, CommandType.START_PROCESS, null, null, null, null, 0, RunMode.RUN_MODE_SERIAL, Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 110, null); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); verify(processService, times(1)).createCommand(any(Command.class)); } /** * not complement */ @Test
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ExecutorService2Test.java
public void testComplementWithStartNodeList() { Mockito.when(processService.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId)).thenReturn(zeroSchedulerList()); Map<String, Object> result = executorService.execProcessInstance(loginUser, projectName, processDefinitionId, cronTime, CommandType.START_PROCESS, null, "n1,n2", null, null, 0, RunMode.RUN_MODE_SERIAL, Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 110, null); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); verify(processService, times(1)).createCommand(any(Command.class)); } /** * date error */ @Test public void testDateError() { Mockito.when(processService.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId)).thenReturn(zeroSchedulerList()); Map<String, Object> result = executorService.execProcessInstance(loginUser, projectName, processDefinitionId, "2020-01-31 23:00:00,2020-01-01 00:00:00", CommandType.COMPLEMENT_DATA, null, null, null, null, 0, RunMode.RUN_MODE_SERIAL, Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 110, null); Assert.assertEquals(Status.START_PROCESS_INSTANCE_ERROR, result.get(Constants.STATUS)); verify(processService, times(0)).createCommand(any(Command.class)); } /** * serial */ @Test
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ExecutorService2Test.java
public void testSerial() { Mockito.when(processService.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId)).thenReturn(zeroSchedulerList()); Map<String, Object> result = executorService.execProcessInstance(loginUser, projectName, processDefinitionId, cronTime, CommandType.COMPLEMENT_DATA, null, null, null, null, 0, RunMode.RUN_MODE_SERIAL, Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 110, null); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); verify(processService, times(1)).createCommand(any(Command.class)); } /** * without schedule */ @Test public void testParallelWithOutSchedule() { Mockito.when(processService.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId)).thenReturn(zeroSchedulerList()); Map<String, Object> result = executorService.execProcessInstance(loginUser, projectName, processDefinitionId, cronTime, CommandType.COMPLEMENT_DATA, null, null, null, null, 0, RunMode.RUN_MODE_PARALLEL, Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 110, null); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); verify(processService, times(31)).createCommand(any(Command.class)); } /** * with schedule */ @Test
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ExecutorService2Test.java
public void testParallelWithSchedule() {
    Mockito.when(processService.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId)).thenReturn(oneSchedulerList());
    Map<String, Object> result = executorService.execProcessInstance(loginUser, projectName, processDefinitionId,
            cronTime, CommandType.COMPLEMENT_DATA,
            null, null, null, null, 0,
            RunMode.RUN_MODE_PARALLEL,
            Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 110, null);
    Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
    verify(processService, times(15)).createCommand(any(Command.class));
}

@Test
public void testNoMasterServers() {
    Mockito.when(monitorService.getServerListFromRegistry(true)).thenReturn(new ArrayList<>());
    Map<String, Object> result = executorService.execProcessInstance(loginUser, projectName, processDefinitionId,
            cronTime, CommandType.COMPLEMENT_DATA,
            null, null, null, null, 0,
            RunMode.RUN_MODE_PARALLEL,
            Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 110, null);
    // expected value first, actual value second
    Assert.assertEquals(Status.MASTER_NOT_EXISTS, result.get(Constants.STATUS));
}

@Test
public void testExecuteRepeatRunning() {
    Mockito.when(processService.verifyIsNeedCreateCommand(any(Command.class))).thenReturn(true);
    Map<String, Object> result = executorService.execute(loginUser, projectName, processInstanceId, ExecuteType.REPEAT_RUNNING);
    Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
}

private List<Server> getMasterServersList() {
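The command counts asserted by the two parallel-mode tests above follow directly from the cron window configured in init(). A short worked check; whether getSelfFireDateList excludes the exact start instant is inferred from the expected value, so treat that detail as an assumption.

// cronTime = "2020-01-01 00:00:00,2020-01-31 23:00:00"
//
// testParallelWithOutSchedule: no online schedule, so the day-by-day fallback runs
//   while (!start.after(end)) over 2020-01-01 .. 2020-01-31  ->  31 commands (times(31))
//
// testParallelWithSchedule: cron "0 0 0 1/2 * ?" fires at midnight on days 1, 3, ..., 31 (16 hits);
//   the fire date equal to the window start appears to be excluded  ->  15 commands (times(15))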
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary when recording data
*i met a situation that the memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks, if there is a feature that we can configure the parallelism of supplementary recording data manually * **Describe the question** The memory fulls and the dolphinscheduler server down when i start the recording data(补数) tasks. **What are the current deficiencies and the benefits of improvement** We can configure the parallelism of supplementary recording data manually so that the machine resource will not be occupied too much. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the model to "Serial"(串行) but this model will take too much time.
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ExecutorService2Test.java
List<Server> masterServerList = new ArrayList<>();
Server masterServer1 = new Server();
masterServer1.setId(1);
masterServer1.setHost("192.168.220.188");
masterServer1.setPort(1121);
masterServerList.add(masterServer1);

Server masterServer2 = new Server();
masterServer2.setId(2);
masterServer2.setHost("192.168.220.189");
masterServer2.setPort(1122);
masterServerList.add(masterServer2);

return masterServerList;
}

private List<Schedule> zeroSchedulerList() {
    // typed empty list instead of the raw Collections.EMPTY_LIST constant
    return Collections.emptyList();
}

private List<Schedule> oneSchedulerList() {
    List<Schedule> schedulerList = new LinkedList<>();
    Schedule schedule = new Schedule();
    schedule.setCrontab("0 0 0 1/2 * ?");
    schedulerList.add(schedule);
    return schedulerList;
}

private Map<String, Object> checkProjectAndAuth() {
    Map<String, Object> result = new HashMap<>();
    result.put(Constants.STATUS, Status.SUCCESS);
    return result;
}
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,304
[Improvement][dao]When I search for the keyword Modify User, the web UI shows empty
**Describe the question** The search function in the project module is currently only fuzzy matching by workflow name **What are the current deficiencies and the benefits of improvement** - Keyword query is too single, we can fuzzy query by each column name, similar to yarn web UI interface **Which version of DolphinScheduler:** -[1.3.x] -[dev] **Describe alternatives you've considered** We modify the source code to support fuzzy search for each column. If the amount of data is huge, we can consider introducing Elasticsearch components as a search engine <img width="814" alt="1618562781(1)" src="https://user-images.githubusercontent.com/22792154/115001364-25d77800-9ed6-11eb-8306-791d95d79d7b.png">
https://github.com/apache/dolphinscheduler/issues/5304
https://github.com/apache/dolphinscheduler/pull/5952
1887bde1ebf8249de153890e78d449582e3eaf4a
04423260a16b03221e4db23f3d99e5d21212fa29
"2021-04-16T09:11:27Z"
java
"2021-08-17T16:53:12Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,304
[Improvement][dao]When I search for the keyword Modify User, the web UI shows empty
**Describe the question** The search function in the project module is currently only fuzzy matching by workflow name **What are the current deficiencies and the benefits of improvement** - Keyword query is too single, we can fuzzy query by each column name, similar to yarn web UI interface **Which version of DolphinScheduler:** -[1.3.x] -[dev] **Describe alternatives you've considered** We modify the source code to support fuzzy search for each column. If the amount of data is huge, we can consider introducing Elasticsearch components as a search engine <img width="814" alt="1618562781(1)" src="https://user-images.githubusercontent.com/22792154/115001364-25d77800-9ed6-11eb-8306-791d95d79d7b.png">
https://github.com/apache/dolphinscheduler/issues/5304
https://github.com/apache/dolphinscheduler/pull/5952
1887bde1ebf8249de153890e78d449582e3eaf4a
04423260a16b03221e4db23f3d99e5d21212fa29
"2021-04-16T09:11:27Z"
java
"2021-08-17T16:53:12Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
* contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.worker.task.sql; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.datasource.BaseConnectionParam; import org.apache.dolphinscheduler.common.datasource.DatasourceUtil; import org.apache.dolphinscheduler.common.enums.DbType; import org.apache.dolphinscheduler.common.enums.Direct; import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.task.AbstractParameters; import org.apache.dolphinscheduler.common.task.sql.SqlBinds; import org.apache.dolphinscheduler.common.task.sql.SqlParameters; import org.apache.dolphinscheduler.common.task.sql.SqlType; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.common.utils.StringUtils;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,304
[Improvement][dao]When I search for the keyword Modify User, the web UI shows empty
**Describe the question** The search function in the project module is currently only fuzzy matching by workflow name **What are the current deficiencies and the benefits of improvement** - Keyword query is too single, we can fuzzy query by each column name, similar to yarn web UI interface **Which version of DolphinScheduler:** -[1.3.x] -[dev] **Describe alternatives you've considered** We modify the source code to support fuzzy search for each column. If the amount of data is huge, we can consider introducing Elasticsearch components as a search engine <img width="814" alt="1618562781(1)" src="https://user-images.githubusercontent.com/22792154/115001364-25d77800-9ed6-11eb-8306-791d95d79d7b.png">
https://github.com/apache/dolphinscheduler/issues/5304
https://github.com/apache/dolphinscheduler/pull/5952
1887bde1ebf8249de153890e78d449582e3eaf4a
04423260a16b03221e4db23f3d99e5d21212fa29
"2021-04-16T09:11:27Z"
java
"2021-08-17T16:53:12Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
import org.apache.dolphinscheduler.dao.AlertDao; import org.apache.dolphinscheduler.remote.command.alert.AlertSendResponseCommand; import org.apache.dolphinscheduler.server.entity.SQLTaskExecutionContext; import org.apache.dolphinscheduler.server.entity.TaskExecutionContext; import org.apache.dolphinscheduler.server.utils.ParamUtils; import org.apache.dolphinscheduler.server.utils.UDFUtils; import org.apache.dolphinscheduler.server.worker.task.AbstractTask; import org.apache.dolphinscheduler.service.alert.AlertClientService; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; import org.slf4j.Logger; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; /** * sql task */ public class SqlTask extends AbstractTask {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,304
[Improvement][dao]When I search for the keyword Modify User, the web UI shows empty
**Describe the question** The search function in the project module is currently only fuzzy matching by workflow name **What are the current deficiencies and the benefits of improvement** - Keyword query is too single, we can fuzzy query by each column name, similar to yarn web UI interface **Which version of DolphinScheduler:** -[1.3.x] -[dev] **Describe alternatives you've considered** We modify the source code to support fuzzy search for each column. If the amount of data is huge, we can consider introducing Elasticsearch components as a search engine <img width="814" alt="1618562781(1)" src="https://user-images.githubusercontent.com/22792154/115001364-25d77800-9ed6-11eb-8306-791d95d79d7b.png">
https://github.com/apache/dolphinscheduler/issues/5304
https://github.com/apache/dolphinscheduler/pull/5952
1887bde1ebf8249de153890e78d449582e3eaf4a
04423260a16b03221e4db23f3d99e5d21212fa29
"2021-04-16T09:11:27Z"
java
"2021-08-17T16:53:12Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
/** * sql parameters */ private SqlParameters sqlParameters; /** * alert dao */ private AlertDao alertDao; /** * base datasource */ private BaseConnectionParam baseConnectionParam; /** * taskExecutionContext */ private TaskExecutionContext taskExecutionContext; private AlertClientService alertClientService; public SqlTask(TaskExecutionContext taskExecutionContext, Logger logger, AlertClientService alertClientService) { super(taskExecutionContext, logger); this.taskExecutionContext = taskExecutionContext; logger.info("sql task params {}", taskExecutionContext.getTaskParams()); this.sqlParameters = JSONUtils.parseObject(taskExecutionContext.getTaskParams(), SqlParameters.class); if (!sqlParameters.checkParameters()) { throw new RuntimeException("sql task params is not valid"); } this.alertClientService = alertClientService; this.alertDao = SpringApplicationContext.getBean(AlertDao.class); } @Override
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,304
[Improvement][dao]When I search for the keyword Modify User, the web UI shows empty
**Describe the question** The search function in the project module is currently only fuzzy matching by workflow name **What are the current deficiencies and the benefits of improvement** - Keyword query is too single, we can fuzzy query by each column name, similar to yarn web UI interface **Which version of DolphinScheduler:** -[1.3.x] -[dev] **Describe alternatives you've considered** We modify the source code to support fuzzy search for each column. If the amount of data is huge, we can consider introducing Elasticsearch components as a search engine <img width="814" alt="1618562781(1)" src="https://user-images.githubusercontent.com/22792154/115001364-25d77800-9ed6-11eb-8306-791d95d79d7b.png">
https://github.com/apache/dolphinscheduler/issues/5304
https://github.com/apache/dolphinscheduler/pull/5952
1887bde1ebf8249de153890e78d449582e3eaf4a
04423260a16b03221e4db23f3d99e5d21212fa29
"2021-04-16T09:11:27Z"
java
"2021-08-17T16:53:12Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
public void handle() throws Exception { String threadLoggerInfoName = String.format(Constants.TASK_LOG_INFO_FORMAT, taskExecutionContext.getTaskAppId()); Thread.currentThread().setName(threadLoggerInfoName); logger.info("Full sql parameters: {}", sqlParameters); logger.info("sql type : {}, datasource : {}, sql : {} , localParams : {},udfs : {},showType : {},connParams : {},varPool : {} ,query max result limit {}", sqlParameters.getType(), sqlParameters.getDatasource(), sqlParameters.getSql(), sqlParameters.getLocalParams(), sqlParameters.getUdfs(), sqlParameters.getShowType(), sqlParameters.getConnParams(), sqlParameters.getVarPool(), sqlParameters.getLimit()); try { SQLTaskExecutionContext sqlTaskExecutionContext = taskExecutionContext.getSqlTaskExecutionContext(); baseConnectionParam = (BaseConnectionParam) DatasourceUtil.buildConnectionParams( DbType.valueOf(sqlParameters.getType()), sqlTaskExecutionContext.getConnectionParams()); SqlBinds mainSqlBinds = getSqlAndSqlParamsMap(sqlParameters.getSql()); List<SqlBinds> preStatementSqlBinds = Optional.ofNullable(sqlParameters.getPreStatements()) .orElse(new ArrayList<>()) .stream() .map(this::getSqlAndSqlParamsMap) .collect(Collectors.toList()); List<SqlBinds> postStatementSqlBinds = Optional.ofNullable(sqlParameters.getPostStatements()) .orElse(new ArrayList<>())
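The handle() method above parses the task parameter JSON into SqlParameters and logs its main fields. Purely as a hedged illustration of what such a payload could contain (field names are inferred from the getters used in the chunk; the exact schema expected by SqlParameters is an assumption), a minimal params object might be built like this:

```java
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

// Builds an illustrative sql-task params payload; field names are inferred from the
// SqlParameters getters used above (type, datasource, sql, sqlType, ...) and are not
// guaranteed to match the exact schema the class expects.
public class SqlTaskParamsExample {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        ObjectNode params = mapper.createObjectNode();
        params.put("type", "MYSQL");      // read via sqlParameters.getType()
        params.put("datasource", 1);      // datasource id
        params.put("sql", "select id, name from t_ds_user where name like ${keyword}");
        params.put("sqlType", 0);         // compared against SqlType ordinals in handle()
        params.put("sendEmail", false);
        params.put("displayRows", 10);
        System.out.println(mapper.writerWithDefaultPrettyPrinter().writeValueAsString(params));
    }
}
```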
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,304
[Improvement][dao]When I search for the keyword Modify User, the web UI shows empty
**Describe the question** The search function in the project module is currently only fuzzy matching by workflow name **What are the current deficiencies and the benefits of improvement** - Keyword query is too single, we can fuzzy query by each column name, similar to yarn web UI interface **Which version of DolphinScheduler:** -[1.3.x] -[dev] **Describe alternatives you've considered** We modify the source code to support fuzzy search for each column. If the amount of data is huge, we can consider introducing Elasticsearch components as a search engine <img width="814" alt="1618562781(1)" src="https://user-images.githubusercontent.com/22792154/115001364-25d77800-9ed6-11eb-8306-791d95d79d7b.png">
https://github.com/apache/dolphinscheduler/issues/5304
https://github.com/apache/dolphinscheduler/pull/5952
1887bde1ebf8249de153890e78d449582e3eaf4a
04423260a16b03221e4db23f3d99e5d21212fa29
"2021-04-16T09:11:27Z"
java
"2021-08-17T16:53:12Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
.stream() .map(this::getSqlAndSqlParamsMap) .collect(Collectors.toList()); List<String> createFuncs = UDFUtils.createFuncs(sqlTaskExecutionContext.getUdfFuncTenantCodeMap(), logger); executeFuncAndSql(mainSqlBinds, preStatementSqlBinds, postStatementSqlBinds, createFuncs); setExitStatusCode(Constants.EXIT_CODE_SUCCESS); } catch (Exception e) { setExitStatusCode(Constants.EXIT_CODE_FAILURE); logger.error("sql task error: {}", e.toString()); throw e; } } /** * ready to execute SQL and parameter entity Map * * @return SqlBinds */ private SqlBinds getSqlAndSqlParamsMap(String sql) { Map<Integer, Property> sqlParamsMap = new HashMap<>(); StringBuilder sqlBuilder = new StringBuilder(); Map<String, Property> paramsMap = ParamUtils.convert(taskExecutionContext,getParameters()); if (paramsMap == null) { sqlBuilder.append(sql); return new SqlBinds(sqlBuilder.toString(), sqlParamsMap); } if (StringUtils.isNotEmpty(sqlParameters.getTitle())) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,304
[Improvement][dao]When I search for the keyword Modify User, the web UI shows empty
**Describe the question** The search function in the project module is currently only fuzzy matching by workflow name **What are the current deficiencies and the benefits of improvement** - Keyword query is too single, we can fuzzy query by each column name, similar to yarn web UI interface **Which version of DolphinScheduler:** -[1.3.x] -[dev] **Describe alternatives you've considered** We modify the source code to support fuzzy search for each column. If the amount of data is huge, we can consider introducing Elasticsearch components as a search engine <img width="814" alt="1618562781(1)" src="https://user-images.githubusercontent.com/22792154/115001364-25d77800-9ed6-11eb-8306-791d95d79d7b.png">
https://github.com/apache/dolphinscheduler/issues/5304
https://github.com/apache/dolphinscheduler/pull/5952
1887bde1ebf8249de153890e78d449582e3eaf4a
04423260a16b03221e4db23f3d99e5d21212fa29
"2021-04-16T09:11:27Z"
java
"2021-08-17T16:53:12Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
String title = ParameterUtils.convertParameterPlaceholders(sqlParameters.getTitle(), ParamUtils.convert(paramsMap)); logger.info("SQL title : {}", title); sqlParameters.setTitle(title); } sql = ParameterUtils.replaceScheduleTime(sql, taskExecutionContext.getScheduleTime()); String rgex = "['\"]*\\$\\{(.*?)\\}['\"]*"; setSqlParamsMap(sql, rgex, sqlParamsMap, paramsMap); String rgexo = "['\"]*\\!\\{(.*?)\\}['\"]*"; sql = replaceOriginalValue(sql, rgexo, paramsMap); // replace the ${} placeholders of the SQL statement with '?' String formatSql = sql.replaceAll(rgex, "?"); sqlBuilder.append(formatSql); // print the replaced sql printReplacedSql(sql, formatSql, rgex, sqlParamsMap); return new SqlBinds(sqlBuilder.toString(), sqlParamsMap); } public String replaceOriginalValue(String content, String rgex, Map<String, Property> sqlParamsMap) { Pattern pattern = Pattern.compile(rgex); while (true) { Matcher m = pattern.matcher(content); if (!m.find()) { break; } String paramName = m.group(1); String paramValue = sqlParamsMap.get(paramName).getValue();
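The chunk above handles two placeholder styles: ${...} tokens are collected into sqlParamsMap and rewritten to JDBC '?' placeholders, while !{...} tokens are substituted into the SQL text directly. A standalone sketch of that replacement behaviour (the SQL text and values here are made up):

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Standalone sketch of the two placeholder styles handled above (not the task's code path):
// ${name} becomes a JDBC '?' to be bound later, while !{name} is inlined as literal text.
public class PlaceholderDemo {
    public static void main(String[] args) {
        String sql = "select * from t_user where name = ${userName} and dt = !{bizDate}";
        String bindRegex = "['\"]*\\$\\{(.*?)\\}['\"]*";
        String inlineRegex = "['\"]*\\!\\{(.*?)\\}['\"]*";

        // inline !{...} first, mirroring replaceOriginalValue(...)
        Matcher m = Pattern.compile(inlineRegex).matcher(sql);
        String inlined = m.replaceFirst("'2021-04-16'"); // assumed value for bizDate

        // then turn every ${...} into a positional '?', mirroring sql.replaceAll(rgex, "?")
        String prepared = inlined.replaceAll(bindRegex, "?");

        System.out.println(prepared); // select * from t_user where name = ? and dt = '2021-04-16'
    }
}
```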
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,304
[Improvement][dao]When I search for the keyword Modify User, the web UI shows empty
**Describe the question** The search function in the project module is currently only fuzzy matching by workflow name **What are the current deficiencies and the benefits of improvement** - Keyword query is too single, we can fuzzy query by each column name, similar to yarn web UI interface **Which version of DolphinScheduler:** -[1.3.x] -[dev] **Describe alternatives you've considered** We modify the source code to support fuzzy search for each column. If the amount of data is huge, we can consider introducing Elasticsearch components as a search engine <img width="814" alt="1618562781(1)" src="https://user-images.githubusercontent.com/22792154/115001364-25d77800-9ed6-11eb-8306-791d95d79d7b.png">
https://github.com/apache/dolphinscheduler/issues/5304
https://github.com/apache/dolphinscheduler/pull/5952
1887bde1ebf8249de153890e78d449582e3eaf4a
04423260a16b03221e4db23f3d99e5d21212fa29
"2021-04-16T09:11:27Z"
java
"2021-08-17T16:53:12Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
content = m.replaceFirst(paramValue); } return content; } @Override public AbstractParameters getParameters() { return this.sqlParameters; } /** * execute function and sql * * @param mainSqlBinds main sql binds * @param preStatementsBinds pre statements binds * @param postStatementsBinds post statements binds * @param createFuncs create functions */ public void executeFuncAndSql(SqlBinds mainSqlBinds, List<SqlBinds> preStatementsBinds, List<SqlBinds> postStatementsBinds, List<String> createFuncs) throws Exception { Connection connection = null; PreparedStatement stmt = null; ResultSet resultSet = null; try { // create connection connection = DatasourceUtil.getConnection(DbType.valueOf(sqlParameters.getType()), baseConnectionParam); // create temp function if (CollectionUtils.isNotEmpty(createFuncs)) { createTempFunction(connection, createFuncs); }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,304
[Improvement][dao]When I search for the keyword Modify User, the web UI shows empty
**Describe the question** The search function in the project module is currently only fuzzy matching by workflow name **What are the current deficiencies and the benefits of improvement** - Keyword query is too single, we can fuzzy query by each column name, similar to yarn web UI interface **Which version of DolphinScheduler:** -[1.3.x] -[dev] **Describe alternatives you've considered** We modify the source code to support fuzzy search for each column. If the amount of data is huge, we can consider introducing Elasticsearch components as a search engine <img width="814" alt="1618562781(1)" src="https://user-images.githubusercontent.com/22792154/115001364-25d77800-9ed6-11eb-8306-791d95d79d7b.png">
https://github.com/apache/dolphinscheduler/issues/5304
https://github.com/apache/dolphinscheduler/pull/5952
1887bde1ebf8249de153890e78d449582e3eaf4a
04423260a16b03221e4db23f3d99e5d21212fa29
"2021-04-16T09:11:27Z"
java
"2021-08-17T16:53:12Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
// pre sql preSql(connection, preStatementsBinds); stmt = prepareStatementAndBind(connection, mainSqlBinds); String result = null; // decide how to execute based on sql type if (sqlParameters.getSqlType() == SqlType.QUERY.ordinal()) { // query statement: read the result set resultSet = stmt.executeQuery(); result = resultProcess(resultSet); } else if (sqlParameters.getSqlType() == SqlType.NON_QUERY.ordinal()) { // non query statement: execute update String updateResult = String.valueOf(stmt.executeUpdate()); result = setNonQuerySqlReturn(updateResult, sqlParameters.getLocalParams()); } // deal out params sqlParameters.dealOutParam(result); postSql(connection, postStatementsBinds); } catch (Exception e) { logger.error("execute sql error: {}", e.getMessage()); throw e; } finally { close(resultSet, stmt, connection); } } public String setNonQuerySqlReturn(String updateResult, List<Property> properties) { String result = null; for (Property info :properties) { if (Direct.OUT == info.getDirect()) { List<Map<String,String>> updateRL = new ArrayList<>(); Map<String,String> updateRM = new HashMap<>();
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,304
[Improvement][dao]When I search for the keyword Modify User, the web UI shows empty
**Describe the question** The search function in the project module is currently only fuzzy matching by workflow name **What are the current deficiencies and the benefits of improvement** - Keyword query is too single, we can fuzzy query by each column name, similar to yarn web UI interface **Which version of DolphinScheduler:** -[1.3.x] -[dev] **Describe alternatives you've considered** We modify the source code to support fuzzy search for each column. If the amount of data is huge, we can consider introducing Elasticsearch components as a search engine <img width="814" alt="1618562781(1)" src="https://user-images.githubusercontent.com/22792154/115001364-25d77800-9ed6-11eb-8306-791d95d79d7b.png">
https://github.com/apache/dolphinscheduler/issues/5304
https://github.com/apache/dolphinscheduler/pull/5952
1887bde1ebf8249de153890e78d449582e3eaf4a
04423260a16b03221e4db23f3d99e5d21212fa29
"2021-04-16T09:11:27Z"
java
"2021-08-17T16:53:12Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
updateRM.put(info.getProp(),updateResult); updateRL.add(updateRM); result = JSONUtils.toJsonString(updateRL); break; } } return result; } /** * result process * * @param resultSet resultSet * @throws Exception Exception */ private String resultProcess(ResultSet resultSet) throws Exception { ArrayNode resultJSONArray = JSONUtils.createArrayNode(); if (resultSet != null) { ResultSetMetaData md = resultSet.getMetaData(); int num = md.getColumnCount(); int rowCount = 0; while (rowCount < sqlParameters.getLimit() && resultSet.next()) { ObjectNode mapOfColValues = JSONUtils.createObjectNode(); for (int i = 1; i <= num; i++) { mapOfColValues.set(md.getColumnLabel(i), JSONUtils.toJsonNode(resultSet.getObject(i))); } resultJSONArray.add(mapOfColValues); rowCount++; } int displayRows = sqlParameters.getDisplayRows() > 0 ? sqlParameters.getDisplayRows() : Constants.DEFAULT_DISPLAY_ROWS; displayRows = Math.min(displayRows, resultJSONArray.size());
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,304
[Improvement][dao]When I search for the keyword Modify User, the web UI shows empty
**Describe the question** The search function in the project module is currently only fuzzy matching by workflow name **What are the current deficiencies and the benefits of improvement** - Keyword query is too single, we can fuzzy query by each column name, similar to yarn web UI interface **Which version of DolphinScheduler:** -[1.3.x] -[dev] **Describe alternatives you've considered** We modify the source code to support fuzzy search for each column. If the amount of data is huge, we can consider introducing Elasticsearch components as a search engine <img width="814" alt="1618562781(1)" src="https://user-images.githubusercontent.com/22792154/115001364-25d77800-9ed6-11eb-8306-791d95d79d7b.png">
https://github.com/apache/dolphinscheduler/issues/5304
https://github.com/apache/dolphinscheduler/pull/5952
1887bde1ebf8249de153890e78d449582e3eaf4a
04423260a16b03221e4db23f3d99e5d21212fa29
"2021-04-16T09:11:27Z"
java
"2021-08-17T16:53:12Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
logger.info("display sql result {} rows as follows:", displayRows); for (int i = 0; i < displayRows; i++) { String row = JSONUtils.toJsonString(resultJSONArray.get(i)); logger.info("row {} : {}", i + 1, row); } } String result = JSONUtils.toJsonString(resultJSONArray); if (sqlParameters.getSendEmail() == null || sqlParameters.getSendEmail()) { sendAttachment(sqlParameters.getGroupId(), StringUtils.isNotEmpty(sqlParameters.getTitle()) ? sqlParameters.getTitle() : taskExecutionContext.getTaskName() + " query result sets", result); } logger.debug("execute sql result : {}", result); return result; } /** * p * * @param connection connection * @param preStatementsBinds preStatementsBinds */ private void preSql(Connection connection, List<SqlBinds> preStatementsBinds) throws Exception { for (SqlBinds sqlBind : preStatementsBinds) { try (PreparedStatement pstmt = prepareStatementAndBind(connection, sqlBind)) { int result = pstmt.executeUpdate(); logger.info("pre statement execute result: {}, for sql: {}", result, sqlBind.getSql()); } } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,304
[Improvement][dao]When I search for the keyword Modify User, the web UI shows empty
**Describe the question** The search function in the project module is currently only fuzzy matching by workflow name **What are the current deficiencies and the benefits of improvement** - Keyword query is too single, we can fuzzy query by each column name, similar to yarn web UI interface **Which version of DolphinScheduler:** -[1.3.x] -[dev] **Describe alternatives you've considered** We modify the source code to support fuzzy search for each column. If the amount of data is huge, we can consider introducing Elasticsearch components as a search engine <img width="814" alt="1618562781(1)" src="https://user-images.githubusercontent.com/22792154/115001364-25d77800-9ed6-11eb-8306-791d95d79d7b.png">
https://github.com/apache/dolphinscheduler/issues/5304
https://github.com/apache/dolphinscheduler/pull/5952
1887bde1ebf8249de153890e78d449582e3eaf4a
04423260a16b03221e4db23f3d99e5d21212fa29
"2021-04-16T09:11:27Z"
java
"2021-08-17T16:53:12Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
/** * post sql * * @param connection connection * @param postStatementsBinds postStatementsBinds */ private void postSql(Connection connection, List<SqlBinds> postStatementsBinds) throws Exception { for (SqlBinds sqlBind : postStatementsBinds) { try (PreparedStatement pstmt = prepareStatementAndBind(connection, sqlBind)) { int result = pstmt.executeUpdate(); logger.info("post statement execute result: {},for sql: {}", result, sqlBind.getSql()); } } } /** * create temp function * * @param connection connection * @param createFuncs createFuncs */ private void createTempFunction(Connection connection, List<String> createFuncs) throws Exception { try (Statement funcStmt = connection.createStatement()) { for (String createFunc : createFuncs) { logger.info("hive create function sql: {}", createFunc); funcStmt.execute(createFunc); } } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,304
[Improvement][dao]When I search for the keyword Modify User, the web UI shows empty
**Describe the question** The search function in the project module is currently only fuzzy matching by workflow name **What are the current deficiencies and the benefits of improvement** - Keyword query is too single, we can fuzzy query by each column name, similar to yarn web UI interface **Which version of DolphinScheduler:** -[1.3.x] -[dev] **Describe alternatives you've considered** We modify the source code to support fuzzy search for each column. If the amount of data is huge, we can consider introducing Elasticsearch components as a search engine <img width="814" alt="1618562781(1)" src="https://user-images.githubusercontent.com/22792154/115001364-25d77800-9ed6-11eb-8306-791d95d79d7b.png">
https://github.com/apache/dolphinscheduler/issues/5304
https://github.com/apache/dolphinscheduler/pull/5952
1887bde1ebf8249de153890e78d449582e3eaf4a
04423260a16b03221e4db23f3d99e5d21212fa29
"2021-04-16T09:11:27Z"
java
"2021-08-17T16:53:12Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
/** * close jdbc resource * * @param resultSet resultSet * @param pstmt pstmt * @param connection connection */ private void close(ResultSet resultSet, PreparedStatement pstmt, Connection connection) { if (resultSet != null) { try { resultSet.close(); } catch (SQLException e) { logger.error("close result set error : {}", e.getMessage(), e); } } if (pstmt != null) { try { pstmt.close(); } catch (SQLException e) { logger.error("close prepared statement error : {}", e.getMessage(), e); } } if (connection != null) { try { connection.close(); } catch (SQLException e) { logger.error("close connection error : {}", e.getMessage(), e); }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,304
[Improvement][dao]When I search for the keyword Modify User, the web UI shows empty
**Describe the question** The search function in the project module is currently only fuzzy matching by workflow name **What are the current deficiencies and the benefits of improvement** - Keyword query is too single, we can fuzzy query by each column name, similar to yarn web UI interface **Which version of DolphinScheduler:** -[1.3.x] -[dev] **Describe alternatives you've considered** We modify the source code to support fuzzy search for each column. If the amount of data is huge, we can consider introducing Elasticsearch components as a search engine <img width="814" alt="1618562781(1)" src="https://user-images.githubusercontent.com/22792154/115001364-25d77800-9ed6-11eb-8306-791d95d79d7b.png">
https://github.com/apache/dolphinscheduler/issues/5304
https://github.com/apache/dolphinscheduler/pull/5952
1887bde1ebf8249de153890e78d449582e3eaf4a
04423260a16b03221e4db23f3d99e5d21212fa29
"2021-04-16T09:11:27Z"
java
"2021-08-17T16:53:12Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
} } /** * preparedStatement bind * * @param connection connection * @param sqlBinds sqlBinds * @return PreparedStatement * @throws Exception Exception */ private PreparedStatement prepareStatementAndBind(Connection connection, SqlBinds sqlBinds) throws Exception { // whether to set a query timeout, based on the task timeout strategy boolean timeoutFlag = taskExecutionContext.getTaskTimeoutStrategy() == TaskTimeoutStrategy.FAILED || taskExecutionContext.getTaskTimeoutStrategy() == TaskTimeoutStrategy.WARNFAILED; PreparedStatement stmt = connection.prepareStatement(sqlBinds.getSql()); if (timeoutFlag) { stmt.setQueryTimeout(taskExecutionContext.getTaskTimeout()); } Map<Integer, Property> params = sqlBinds.getParamsMap(); if (params != null) { for (Map.Entry<Integer, Property> entry : params.entrySet()) { Property prop = entry.getValue(); ParameterUtils.setInParameter(entry.getKey(), stmt, prop.getType(), prop.getValue()); } } logger.info("prepare statement replace sql : {} ", stmt); return stmt; } /** * send mail as an attachment
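prepareStatementAndBind above sets a query timeout when the task timeout strategy is FAILED or WARNFAILED, then binds each parameter by its 1-based index. A minimal standalone equivalent (the in-memory H2 database, query, and timeout value are assumptions):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.util.LinkedHashMap;
import java.util.Map;

// Minimal stand-in for prepareStatementAndBind: optional query timeout plus positional binding.
// Uses an in-memory H2 database (driver assumed on the classpath) instead of the task datasource.
public class BindSketch {
    public static void main(String[] args) throws Exception {
        Map<Integer, String> params = new LinkedHashMap<>();
        params.put(1, "%USER%");
        boolean timeoutFlag = true; // stands in for the FAILED / WARNFAILED strategy check
        String sql = "select table_name from information_schema.tables where table_name like ?";
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo");
             PreparedStatement stmt = conn.prepareStatement(sql)) {
            if (timeoutFlag) {
                stmt.setQueryTimeout(60); // seconds, like taskExecutionContext.getTaskTimeout()
            }
            for (Map.Entry<Integer, String> entry : params.entrySet()) {
                stmt.setString(entry.getKey(), entry.getValue()); // bind by 1-based index
            }
            try (ResultSet rs = stmt.executeQuery()) {
                while (rs.next()) {
                    System.out.println(rs.getString(1));
                }
            }
        }
    }
}
```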
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,304
[Improvement][dao]When I search for the keyword Modify User, the web UI shows empty
**Describe the question** The search function in the project module is currently only fuzzy matching by workflow name **What are the current deficiencies and the benefits of improvement** - Keyword query is too single, we can fuzzy query by each column name, similar to yarn web UI interface **Which version of DolphinScheduler:** -[1.3.x] -[dev] **Describe alternatives you've considered** We modify the source code to support fuzzy search for each column. If the amount of data is huge, we can consider introducing Elasticsearch components as a search engine <img width="814" alt="1618562781(1)" src="https://user-images.githubusercontent.com/22792154/115001364-25d77800-9ed6-11eb-8306-791d95d79d7b.png">
https://github.com/apache/dolphinscheduler/issues/5304
https://github.com/apache/dolphinscheduler/pull/5952
1887bde1ebf8249de153890e78d449582e3eaf4a
04423260a16b03221e4db23f3d99e5d21212fa29
"2021-04-16T09:11:27Z"
java
"2021-08-17T16:53:12Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
* * @param title title * @param content content */ public void sendAttachment(int groupId, String title, String content) { AlertSendResponseCommand alertSendResponseCommand = alertClientService.sendAlert(groupId, title, content); if (!alertSendResponseCommand.getResStatus()) { throw new RuntimeException("send mail failed!"); } } /** * regular expressions match the contents between two specified strings * * @param content content * @param rgex rgex * @param sqlParamsMap sql params map * @param paramsPropsMap params props map */ public void setSqlParamsMap(String content, String rgex, Map<Integer, Property> sqlParamsMap, Map<String, Property> paramsPropsMap) { Pattern pattern = Pattern.compile(rgex); Matcher m = pattern.matcher(content); int index = 1; while (m.find()) { String paramName = m.group(1); Property prop = paramsPropsMap.get(paramName); if (prop == null) { logger.error("setSqlParamsMap: No Property with paramName: {} is found in paramsPropsMap of task instance" + " with id: {}. So couldn't put Property in sqlParamsMap.", paramName, taskExecutionContext.getTaskInstanceId()); } else {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,304
[Improvement][dao]When I search for the keyword Modify User, the web UI shows empty
**Describe the question** The search function in the project module is currently only fuzzy matching by workflow name **What are the current deficiencies and the benefits of improvement** - Keyword query is too single, we can fuzzy query by each column name, similar to yarn web UI interface **Which version of DolphinScheduler:** -[1.3.x] -[dev] **Describe alternatives you've considered** We modify the source code to support fuzzy search for each column. If the amount of data is huge, we can consider introducing Elasticsearch components as a search engine <img width="814" alt="1618562781(1)" src="https://user-images.githubusercontent.com/22792154/115001364-25d77800-9ed6-11eb-8306-791d95d79d7b.png">
https://github.com/apache/dolphinscheduler/issues/5304
https://github.com/apache/dolphinscheduler/pull/5952
1887bde1ebf8249de153890e78d449582e3eaf4a
04423260a16b03221e4db23f3d99e5d21212fa29
"2021-04-16T09:11:27Z"
java
"2021-08-17T16:53:12Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
sqlParamsMap.put(index, prop); index++; logger.info("setSqlParamsMap: Property with paramName: {} put in sqlParamsMap of content {} successfully.", paramName, content); } } } /** * print replace sql * * @param content content * @param formatSql format sql * @param rgex rgex * @param sqlParamsMap sql params map */ public void printReplacedSql(String content, String formatSql, String rgex, Map<Integer, Property> sqlParamsMap) { // print the replaced sql and each bound parameter as value(type) logger.info("after replace sql , preparing : {}", formatSql); StringBuilder logPrint = new StringBuilder("replaced sql , parameters:"); if (sqlParamsMap == null) { logger.info("printReplacedSql: sqlParamsMap is null."); } else { for (int i = 1; i <= sqlParamsMap.size(); i++) { logPrint.append(sqlParamsMap.get(i).getValue() + "(" + sqlParamsMap.get(i).getType() + ")"); } } logger.info("Sql Params are {}", logPrint); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing an compensation task, the execution thread would have a NPE
In the latest dev branch, if the task executes the compensation task, after the task is executed successfully, we can found that the Master throws an exception which would cause the process shows failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.master.runner; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVERY_START_NODE_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODE_NAMES; import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP; import static org.apache.dolphinscheduler.common.Constants.SEC_2_MINUTES_TIME_UNIT; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.CommandType;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing an compensation task, the execution thread would have a NPE
In the latest dev branch, if the task executes the compensation task, after the task is executed successfully, we can found that the Master throws an exception which would cause the process shows failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
import org.apache.dolphinscheduler.common.enums.DependResult; import org.apache.dolphinscheduler.common.enums.Direct; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.FailureStrategy; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.Priority; import org.apache.dolphinscheduler.common.enums.TaskDependType; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.process.ProcessDag; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.common.thread.ThreadUtils; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.OSUtils; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.common.utils.StringUtils; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProjectUser; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.utils.DagHelper; import org.apache.dolphinscheduler.remote.NettyRemotingClient; import org.apache.dolphinscheduler.server.master.config.MasterConfig; import org.apache.dolphinscheduler.service.alert.ProcessAlertManager; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing an compensation task, the execution thread would have a NPE
In the latest dev branch, if the task executes the compensation task, after the task is executed successfully, we can found that the Master throws an exception which would cause the process shows failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
import org.apache.dolphinscheduler.service.queue.PeerTaskInstancePriorityQueue; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.collect.Lists; /** * master exec thread,split dag */ public class MasterExecThread implements Runnable { /** * logger of MasterExecThread */ private static final Logger logger = LoggerFactory.getLogger(MasterExecThread.class); /** * running TaskNode */ private final Map<MasterBaseTaskExecThread, Future<Boolean>> activeTaskNode = new ConcurrentHashMap<>(); /** * task exec service
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing an compensation task, the execution thread would have a NPE
In the latest dev branch, if the task executes the compensation task, after the task is executed successfully, we can found that the Master throws an exception which would cause the process shows failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
*/ private final ExecutorService taskExecService; /** * process instance */ private ProcessInstance processInstance; /** * submit failure nodes */ private boolean taskFailedSubmit = false; /** * recover node id list */ private List<TaskInstance> recoverNodeIdList = new ArrayList<>(); /** * error task list */ private Map<String, TaskInstance> errorTaskList = new ConcurrentHashMap<>(); /** * complete task list */ private Map<String, TaskInstance> completeTaskList = new ConcurrentHashMap<>(); /** * ready to submit task queue */ private PeerTaskInstancePriorityQueue readyToSubmitTaskQueue = new PeerTaskInstancePriorityQueue(); /** * depend failed task map */ private Map<String, TaskInstance> dependFailedTask = new ConcurrentHashMap<>();
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing an compensation task, the execution thread would have a NPE
In the latest dev branch, if the task executes the compensation task, after the task is executed successfully, we can found that the Master throws an exception which would cause the process shows failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
/** * forbidden task map */ private Map<String, TaskNode> forbiddenTaskList = new ConcurrentHashMap<>(); /** * skip task map */ private Map<String, TaskNode> skipTaskNodeList = new ConcurrentHashMap<>(); /** * recover tolerance fault task list */ private List<TaskInstance> recoverToleranceFaultTaskList = new ArrayList<>(); /** * alert manager */ private ProcessAlertManager processAlertManager; /** * the object of DAG */ private DAG<String, TaskNode, TaskNodeRelation> dag; /** * process service */ private ProcessService processService; /** * master config */ private MasterConfig masterConfig; /** *
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing an compensation task, the execution thread would have a NPE
In the latest dev branch, if the task executes the compensation task, after the task is executed successfully, we can found that the Master throws an exception which would cause the process shows failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
*/ private NettyRemotingClient nettyRemotingClient; /** * submit post node * * @param parentNodeName parent node name */ private Map<String, Object> propToValue = new ConcurrentHashMap<>(); /** * constructor of MasterExecThread * * @param processInstance processInstance * @param processService processService * @param nettyRemotingClient nettyRemotingClient */ public MasterExecThread(ProcessInstance processInstance , ProcessService processService , NettyRemotingClient nettyRemotingClient , ProcessAlertManager processAlertManager , MasterConfig masterConfig) { this.processService = processService; this.processInstance = processInstance; this.masterConfig = masterConfig; int masterTaskExecNum = masterConfig.getMasterExecTaskNum(); this.taskExecService = ThreadUtils.newDaemonFixedThreadExecutor("Master-Task-Exec-Thread", masterTaskExecNum); this.nettyRemotingClient = nettyRemotingClient; this.processAlertManager = processAlertManager; } @Override
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing an compensation task, the execution thread would have a NPE
In the latest dev branch, if the task executes the compensation task, after the task is executed successfully, we can found that the Master throws an exception which would cause the process shows failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
public void run() { if (processInstance == null) { logger.info("process instance is not exists"); return; } if (processInstance.getState().typeIsFinished()) { logger.info("process instance is done : {}", processInstance.getId()); return; } try { if (processInstance.isComplementData() && Flag.NO == processInstance.getIsSubProcess()) { executeComplementProcess(); } else { executeProcess(); } } catch (Exception e) { logger.error("master exec thread exception", e); logger.error("process execute failed, process id:{}", processInstance.getId()); processInstance.setState(ExecutionStatus.FAILURE); processInstance.setEndTime(new Date()); processService.updateProcessInstance(processInstance); } finally { taskExecService.shutdown(); } } /**
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing an compensation task, the execution thread would have a NPE
In the latest dev branch, if the task executes the compensation task, after the task is executed successfully, we can found that the Master throws an exception which would cause the process shows failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
* execute process * * @throws Exception exception */ private void executeProcess() throws Exception { prepareProcess(); runProcess(); endProcess(); } /** * execute complement process * * @throws Exception exception */ private void executeComplementProcess() throws Exception { Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); Date startDate = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE)); Date endDate = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE)); processService.saveProcessInstance(processInstance); int processDefinitionId = processInstance.getProcessDefinition().getId(); List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId); List<Date> listDate = Lists.newLinkedList(); if (!CollectionUtils.isEmpty(schedules)) { for (Schedule schedule : schedules) { listDate.addAll(CronUtils.getSelfFireDateList(startDate, endDate, schedule.getCrontab())); } } Iterator<Date> iterator = null;
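The stack trace in issue 5,908 above points into executeComplementProcess, and the chunk just shown dereferences processInstance.getProcessDefinition() right after saveProcessInstance. Purely as a hedged illustration of one way to guard that spot inside MasterExecThread (this fragment is not necessarily what PR 5909 actually changed), a defensive check could look like:

```java
// Illustrative fragment meant to sit inside MasterExecThread; it assumes the NPE stems from an
// unset process definition on the complement instance. Not the confirmed content of PR 5909.
private boolean complementPreconditionsOk() {
    if (processInstance.getProcessDefinition() == null) {
        logger.error("process instance {} has no process definition, cannot complement data",
                processInstance.getId());
        processInstance.setState(ExecutionStatus.FAILURE);
        processInstance.setEndTime(new Date());
        processService.updateProcessInstance(processInstance);
        return false;
    }
    return true;
}
```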
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing an compensation task, the execution thread would have a NPE
In the latest dev branch, if the task executes the compensation task, after the task is executed successfully, we can found that the Master throws an exception which would cause the process shows failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
Date scheduleDate; if (!CollectionUtils.isEmpty(listDate)) { iterator = listDate.iterator(); scheduleDate = iterator.next(); processInstance.setScheduleTime(scheduleDate); processService.updateProcessInstance(processInstance); } else { scheduleDate = processInstance.getScheduleTime(); if (scheduleDate == null) { scheduleDate = startDate; } } while (Stopper.isRunning()) { logger.info("process {} start to complement {} data", processInstance.getId(), DateUtils.dateToString(scheduleDate)); prepareProcess(); if (dag == null) { logger.error("process {} dag is null, please check out parameters", processInstance.getId()); processInstance.setState(ExecutionStatus.SUCCESS); processService.updateProcessInstance(processInstance); return; } runProcess(); endProcess(); if (!processInstance.getState().typeIsSuccess()) { logger.info("process {} state {}, complement not completely!", processInstance.getId(), processInstance.getState()); break;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing an compensation task, the execution thread would have a NPE
In the latest dev branch, if the task executes the compensation task, after the task is executed successfully, we can found that the Master throws an exception which would cause the process shows failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
} // no fire-date list from the schedules if (null == iterator) { // advance the schedule date day by day scheduleDate = DateUtils.getSomeDay(scheduleDate, 1); if (scheduleDate.after(endDate)) { // complement data ends logger.info("process {} complement completely!", processInstance.getId()); break; } } else { // walk the fire-date list produced by the schedules if (!iterator.hasNext()) { // complement data ends logger.info("process {} complement completely!", processInstance.getId()); break; } scheduleDate = iterator.next(); } // flow end // prepare the next complement round processInstance.setScheduleTime(scheduleDate); if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) { cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); } processInstance.setState(ExecutionStatus.RUNNING_EXECUTION); processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processInstance.getProcessDefinition().getGlobalParamMap(), processInstance.getProcessDefinition().getGlobalParamList(),
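The loop above advances the complement date in one of two ways: if the schedules produced a concrete fire-date list it walks that iterator, otherwise it steps forward one day at a time until endDate is passed. A standalone illustration of that advance logic with made-up dates:

```java
import java.time.LocalDate;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

// Standalone illustration of the complement-date advance logic above: with a fire-date list we
// walk its iterator, otherwise we step day by day until endDate. LocalDate stands in for the
// java.util.Date values used by the thread; the concrete dates are made up.
public class ComplementDateDemo {
    public static void main(String[] args) {
        LocalDate startDate = LocalDate.of(2021, 7, 1);
        LocalDate endDate = LocalDate.of(2021, 7, 4);
        List<LocalDate> fireDates = new ArrayList<>(); // empty -> fall back to day-by-day mode

        Iterator<LocalDate> iterator = fireDates.isEmpty() ? null : fireDates.iterator();
        LocalDate scheduleDate = iterator != null ? iterator.next() : startDate;
        while (true) {
            System.out.println("complement " + scheduleDate);
            if (iterator == null) {
                scheduleDate = scheduleDate.plusDays(1); // like DateUtils.getSomeDay(scheduleDate, 1)
                if (scheduleDate.isAfter(endDate)) {
                    break; // complement completely
                }
            } else {
                if (!iterator.hasNext()) {
                    break; // complement completely
                }
                scheduleDate = iterator.next();
            }
        }
    }
}
```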
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing an compensation task, the execution thread would have a NPE
In the latest dev branch, if the task executes the compensation task, after the task is executed successfully, we can found that the Master throws an exception which would cause the process shows failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime())); processInstance.setId(0); processInstance.setStartTime(new Date()); processInstance.setEndTime(null); processService.saveProcessInstance(processInstance); } } /** * prepare process parameter * * @throws Exception exception */ private void prepareProcess() throws Exception { // gen process dag buildFlowDag(); // init task queue initTaskQueue(); logger.info("prepare process :{} end", processInstance.getId()); } /** * process end handle */ private void endProcess() { processInstance.setEndTime(new Date()); processService.updateProcessInstance(processInstance); if (processInstance.getState().typeIsWaitingThread()) { processService.createRecoveryWaitingThreadCommand(null, processInstance); } List<TaskInstance> taskInstances = processService.findValidTaskListByProcessId(processInstance.getId()); ProjectUser projectUser = processService.queryProjectWithUserByProcessInstanceId(processInstance.getId());
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing an compensation task, the execution thread would have a NPE
In the latest dev branch, if the task executes the compensation task, after the task is executed successfully, we can found that the Master throws an exception which would cause the process shows failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
processAlertManager.sendAlertProcessInstance(processInstance, taskInstances, projectUser); } /** * generate the process dag * * @throws Exception exception */ private void buildFlowDag() throws Exception { recoverNodeIdList = getStartTaskInstanceList(processInstance.getCommandParam()); List<TaskNode> taskNodeList = processService.genTaskNodeList(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion(), new HashMap<>()); forbiddenTaskList.clear(); taskNodeList.forEach(taskNode -> { if (taskNode.isForbidden()) { forbiddenTaskList.put(taskNode.getName(), taskNode); } }); // generate process to get DAG info List<String> recoveryNameList = getRecoveryNodeNameList(); List<String> startNodeNameList = parseStartNodeName(processInstance.getCommandParam()); ProcessDag processDag = generateFlowDag(taskNodeList, startNodeNameList, recoveryNameList, processInstance.getTaskDependType()); if (processDag == null) { logger.error("processDag is null"); return; } // generate process dag dag = DagHelper.buildDagGraph(processDag); } /**
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing a compensation task, the execution thread would have an NPE
In the latest dev branch, when a compensation (complement data) task is executed, after the task finishes successfully we can see that the Master throws an exception, which causes the process to be shown as failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
     * init task queue
     */
    private void initTaskQueue() {
        taskFailedSubmit = false;
        activeTaskNode.clear();
        dependFailedTask.clear();
        completeTaskList.clear();
        errorTaskList.clear();
        List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processInstance.getId());
        for (TaskInstance task : taskInstanceList) {
            if (task.isTaskComplete()) {
                completeTaskList.put(task.getName(), task);
            }
            if (task.isConditionsTask() || DagHelper.haveConditionsAfterNode(task.getName(), dag)) {
                continue;
            }
            if (task.getState().typeIsFailure() && !task.taskCanRetry()) {
                errorTaskList.put(task.getName(), task);
            }
        }
    }

    /**
     * submit task to execute
     *
     * @param taskInstance task instance
     * @return TaskInstance
     */
    private TaskInstance submitTaskExec(TaskInstance taskInstance) {
        MasterBaseTaskExecThread abstractExecThread = null;
        if (taskInstance.isSubProcess()) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing a compensation task, the execution thread would have an NPE
In the latest dev branch, when a compensation (complement data) task is executed, after the task finishes successfully we can see that the Master throws an exception, which causes the process to be shown as failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
            abstractExecThread = new SubProcessTaskExecThread(taskInstance);
        } else if (taskInstance.isDependTask()) {
            abstractExecThread = new DependentTaskExecThread(taskInstance);
        } else if (taskInstance.isConditionsTask()) {
            abstractExecThread = new ConditionsTaskExecThread(taskInstance);
        } else if (taskInstance.isSwitchTask()) {
            abstractExecThread = new SwitchTaskExecThread(taskInstance);
        } else {
            abstractExecThread = new MasterTaskExecThread(taskInstance);
        }
        Future<Boolean> future = taskExecService.submit(abstractExecThread);
        activeTaskNode.putIfAbsent(abstractExecThread, future);
        return abstractExecThread.getTaskInstance();
    }

    /**
     * find task instance in db.
     * in case submit more than one same name task in the same time.
     *
     * @param taskCode task code
     * @param taskVersion task version
     * @return TaskInstance
     */
    private TaskInstance findTaskIfExists(Long taskCode, int taskVersion) {
        List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(this.processInstance.getId());
        for (TaskInstance taskInstance : taskInstanceList) {
            if (taskInstance.getTaskCode() == taskCode && taskInstance.getTaskDefinitionVersion() == taskVersion) {
                return taskInstance;
            }
        }
        return null;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing a compensation task, the execution thread would have an NPE
In the latest dev branch, when a compensation (complement data) task is executed, after the task finishes successfully we can see that the Master throws an exception, which causes the process to be shown as failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
    }

    /**
     * encapsulation task
     *
     * @param processInstance process instance
     * @param taskNode taskNode
     * @return TaskInstance
     */
    private TaskInstance createTaskInstance(ProcessInstance processInstance, TaskNode taskNode) {
        TaskInstance taskInstance = findTaskIfExists(taskNode.getCode(), taskNode.getVersion());
        if (taskInstance == null) {
            taskInstance = new TaskInstance();
            taskInstance.setTaskCode(taskNode.getCode());
            taskInstance.setTaskDefinitionVersion(taskNode.getVersion());
            // task name
            taskInstance.setName(taskNode.getName());
            // task instance state
            taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
            // process instance id
            taskInstance.setProcessInstanceId(processInstance.getId());
            // task instance type
            taskInstance.setTaskType(taskNode.getType().toUpperCase());
            // task instance whether alert
            taskInstance.setAlertFlag(Flag.NO);
            // task instance start time
            taskInstance.setStartTime(null);
            // task instance flag
            taskInstance.setFlag(Flag.YES);
            // task instance retry times
            taskInstance.setRetryTimes(0);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing a compensation task, the execution thread would have an NPE
In the latest dev branch, when a compensation (complement data) task is executed, after the task finishes successfully we can see that the Master throws an exception, which causes the process to be shown as failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
            // max task instance retry times
            taskInstance.setMaxRetryTimes(taskNode.getMaxRetryTimes());
            // retry task instance interval
            taskInstance.setRetryInterval(taskNode.getRetryInterval());
            // task instance params
            taskInstance.setTaskParams(taskNode.getTaskParams());
            // task instance priority
            if (taskNode.getTaskInstancePriority() == null) {
                taskInstance.setTaskInstancePriority(Priority.MEDIUM);
            } else {
                taskInstance.setTaskInstancePriority(taskNode.getTaskInstancePriority());
            }
            String processWorkerGroup = processInstance.getWorkerGroup();
            processWorkerGroup = StringUtils.isBlank(processWorkerGroup) ? DEFAULT_WORKER_GROUP : processWorkerGroup;
            String taskWorkerGroup = StringUtils.isBlank(taskNode.getWorkerGroup()) ? processWorkerGroup : taskNode.getWorkerGroup();
            if (!processWorkerGroup.equals(DEFAULT_WORKER_GROUP) && taskWorkerGroup.equals(DEFAULT_WORKER_GROUP)) {
                taskInstance.setWorkerGroup(processWorkerGroup);
            } else {
                taskInstance.setWorkerGroup(taskWorkerGroup);
            }
            // delay execution time
            taskInstance.setDelayTime(taskNode.getDelayTime());
        }
        return taskInstance;
    }

    public void getPreVarPool(TaskInstance taskInstance, Set<String> preTask) {
        Map<String, Property> allProperty = new HashMap<>();
        Map<String, TaskInstance> allTaskInstance = new HashMap<>();
        if (CollectionUtils.isNotEmpty(preTask)) {
            for (String preTaskName : preTask) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing a compensation task, the execution thread would have an NPE
In the latest dev branch, when a compensation (complement data) task is executed, after the task finishes successfully we can see that the Master throws an exception, which causes the process to be shown as failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
                TaskInstance preTaskInstance = completeTaskList.get(preTaskName);
                if (preTaskInstance == null) {
                    continue;
                }
                String preVarPool = preTaskInstance.getVarPool();
                if (StringUtils.isNotEmpty(preVarPool)) {
                    List<Property> properties = JSONUtils.toList(preVarPool, Property.class);
                    for (Property info : properties) {
                        setVarPoolValue(allProperty, allTaskInstance, preTaskInstance, info);
                    }
                }
            }
            if (allProperty.size() > 0) {
                taskInstance.setVarPool(JSONUtils.toJsonString(allProperty.values()));
            }
        }
    }

    private void setVarPoolValue(Map<String, Property> allProperty, Map<String, TaskInstance> allTaskInstance, TaskInstance preTaskInstance, Property thisProperty) {
        // for this taskInstance all the params in this part are IN
        thisProperty.setDirect(Direct.IN);
        // get the pre taskInstance Property's name
        String proName = thisProperty.getProp();
        // if the Property name already exists, check whether the value is empty
        if (allProperty.containsKey(proName)) {
            Property otherPro = allProperty.get(proName);
            // if this property's value is empty, use the other one, whether its value is empty or not
            if (StringUtils.isEmpty(thisProperty.getValue())) {
                allProperty.put(proName, otherPro);
                // if this property's value is not empty, decide by the end time of the producing task
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing a compensation task, the execution thread would have an NPE
In the latest dev branch, when a compensation (complement data) task is executed, after the task finishes successfully we can see that the Master throws an exception, which causes the process to be shown as failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
            } else if (StringUtils.isNotEmpty(otherPro.getValue())) {
                TaskInstance otherTask = allTaskInstance.get(proName);
                if (otherTask.getEndTime().getTime() > preTaskInstance.getEndTime().getTime()) {
                    allProperty.put(proName, thisProperty);
                    allTaskInstance.put(proName, preTaskInstance);
                } else {
                    allProperty.put(proName, otherPro);
                }
            } else {
                allProperty.put(proName, thisProperty);
                allTaskInstance.put(proName, preTaskInstance);
            }
        } else {
            allProperty.put(proName, thisProperty);
            allTaskInstance.put(proName, preTaskInstance);
        }
    }

    private void submitPostNode(String parentNodeName) {
        Set<String> submitTaskNodeList = DagHelper.parsePostNodes(parentNodeName, skipTaskNodeList, dag, completeTaskList);
        List<TaskInstance> taskInstances = new ArrayList<>();
        for (String taskNode : submitTaskNodeList) {
            TaskNode taskNodeObject = dag.getNode(taskNode);
            taskInstances.add(createTaskInstance(processInstance, taskNodeObject));
        }
        // if previous node success, post node submit
        for (TaskInstance task : taskInstances) {
            if (readyToSubmitTaskQueue.contains(task)) {
                continue;
            }
            if (completeTaskList.containsKey(task.getName())) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing a compensation task, the execution thread would have an NPE
In the latest dev branch, when a compensation (complement data) task is executed, after the task finishes successfully we can see that the Master throws an exception, which causes the process to be shown as failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
loggr.info("ask {} has alrady run succss", ask.gNam()); coninu; } (ask.gSa().ypIsPaus() || ask.gSa().ypIsCancl()) { loggr.info("ask {} soppd, h sa is {}", ask.gNam(), ask.gSa()); } ls { addTaskToSandByLis(ask); } } } /** * drmin whhr h dpndncis of h ask nod ar compl * * @rurn DpndRsul */ priva DpndRsul isTaskDpsCompl(Sring askNam) { Collcion<Sring> sarNods = dag.gBginNod(); // (sarNods.conains(askNam)) { rurn DpndRsul.SUCCESS; } TaskNod askNod = dag.gNod(askNam); Lis<Sring> dpNamLis = askNod.gDpLis(); for (Sring dpsNod : dpNamLis) { (!dag.conainsNod(dpsNod) || forbiddnTaskLis.conainsKy(dpsNod) || skipTaskNodLis.conainsKy(dpsNod)) { coninu; } //
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing a compensation task, the execution thread would have an NPE
In the latest dev branch, when a compensation (complement data) task is executed, after the task finishes successfully we can see that the Master throws an exception, which causes the process to be shown as failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
            if (!completeTaskList.containsKey(depsNode)) {
                return DependResult.WAITING;
            }
            ExecutionStatus depTaskState = completeTaskList.get(depsNode).getState();
            if (depTaskState.typeIsPause() || depTaskState.typeIsCancel()) {
                return DependResult.NON_EXEC;
            }
            // ignore the dependency state if the current task is a condition task
            if (taskNode.isConditionsTask()) {
                continue;
            }
            if (!dependTaskSuccess(depsNode, taskName)) {
                return DependResult.FAILED;
            }
        }
        logger.info("taskName: {} completeDependTaskList: {}", taskName, Arrays.toString(completeTaskList.keySet().toArray()));
        return DependResult.SUCCESS;
    }

    /**
     * depend node is completed, but here need check the condition task branch is the next node
     */
    private boolean dependTaskSuccess(String dependNodeName, String nextNodeName) {
        if (dag.getNode(dependNodeName).isConditionsTask()) {
            // condition task needs to check which branch to run
            List<String> nextTaskList = DagHelper.parseConditionTask(dependNodeName, skipTaskNodeList, dag, completeTaskList);
            if (!nextTaskList.contains(nextNodeName)) {
                return false;
            }
        } else {
            ExecutionStatus depTaskState = completeTaskList.get(dependNodeName).getState();
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing a compensation task, the execution thread would have an NPE
In the latest dev branch, when a compensation (complement data) task is executed, after the task finishes successfully we can see that the Master throws an exception, which causes the process to be shown as failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
            if (depTaskState.typeIsFailure()) {
                return false;
            }
        }
        return true;
    }

    /**
     * query task instance by complete state
     *
     * @param state state
     * @return task instance list
     */
    private List<TaskInstance> getCompleteTaskByState(ExecutionStatus state) {
        List<TaskInstance> resultList = new ArrayList<>();
        for (Map.Entry<String, TaskInstance> entry : completeTaskList.entrySet()) {
            if (entry.getValue().getState() == state) {
                resultList.add(entry.getValue());
            }
        }
        return resultList;
    }

    /**
     * where there are ongoing tasks
     *
     * @param state state
     * @return ExecutionStatus
     */
    private ExecutionStatus runningState(ExecutionStatus state) {
        if (state == ExecutionStatus.READY_STOP
                || state == ExecutionStatus.READY_PAUSE
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing a compensation task, the execution thread would have an NPE
In the latest dev branch, when a compensation (complement data) task is executed, after the task finishes successfully we can see that the Master throws an exception, which causes the process to be shown as failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
                || state == ExecutionStatus.WAITING_THREAD
                || state == ExecutionStatus.DELAY_EXECUTION) {
            // keep the current state
            return state;
        } else {
            return ExecutionStatus.RUNNING_EXECUTION;
        }
    }

    /**
     * exists failure task, contains submit failure、dependency failure, execute failure(retry after)
     *
     * @return Boolean whether has failed task
     */
    private boolean hasFailedTask() {
        if (this.taskFailedSubmit) {
            return true;
        }
        if (this.errorTaskList.size() > 0) {
            return true;
        }
        return this.dependFailedTask.size() > 0;
    }

    /**
     * process instance failure
     *
     * @return Boolean whether process instance failed
     */
    private boolean processFailed() {
        if (hasFailedTask()) {
            if (processInstance.getFailureStrategy() == FailureStrategy.END) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing a compensation task, the execution thread would have an NPE
In the latest dev branch, when a compensation (complement data) task is executed, after the task finishes successfully we can see that the Master throws an exception, which causes the process to be shown as failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
                return true;
            }
            if (processInstance.getFailureStrategy() == FailureStrategy.CONTINUE) {
                return readyToSubmitTaskQueue.size() == 0 || activeTaskNode.size() == 0;
            }
        }
        return false;
    }

    /**
     * whether task for waiting thread
     *
     * @return Boolean whether has waiting thread task
     */
    private boolean hasWaitingThreadTask() {
        List<TaskInstance> waitingList = getCompleteTaskByState(ExecutionStatus.WAITING_THREAD);
        return CollectionUtils.isNotEmpty(waitingList);
    }

    /**
     * prepare for pause
     * 1, failed retry task in the preparation queue, returns to failure directly
     * 2, exists pause task, complement not completed, pending submission of tasks, return to suspension
     * 3, success
     *
     * @return ExecutionStatus
     */
    private ExecutionStatus processReadyPause() {
        if (hasRetryTaskInStandBy()) {
            return ExecutionStatus.FAILURE;
        }
        List<TaskInstance> pauseList = getCompleteTaskByState(ExecutionStatus.PAUSE);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing a compensation task, the execution thread would have an NPE
In the latest dev branch, when a compensation (complement data) task is executed, after the task finishes successfully we can see that the Master throws an exception, which causes the process to be shown as failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
        if (CollectionUtils.isNotEmpty(pauseList) || !isComplementEnd() || readyToSubmitTaskQueue.size() > 0) {
            return ExecutionStatus.PAUSE;
        } else {
            return ExecutionStatus.SUCCESS;
        }
    }

    /**
     * generate the latest process instance status by the tasks state
     *
     * @return process instance execution status
     */
    private ExecutionStatus getProcessInstanceState() {
        ProcessInstance instance = processService.findProcessInstanceById(processInstance.getId());
        ExecutionStatus state = instance.getState();
        if (activeTaskNode.size() > 0 || hasRetryTaskInStandBy()) {
            // active task and retry task exists
            return runningState(state);
        }
        // process failure
        if (processFailed()) {
            return ExecutionStatus.FAILURE;
        }
        // waiting thread
        if (hasWaitingThreadTask()) {
            return ExecutionStatus.WAITING_THREAD;
        }
        // pause
        if (state == ExecutionStatus.READY_PAUSE) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing a compensation task, the execution thread would have an NPE
In the latest dev branch, when a compensation (complement data) task is executed, after the task finishes successfully we can see that the Master throws an exception, which causes the process to be shown as failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
            return processReadyPause();
        }
        // stop
        if (state == ExecutionStatus.READY_STOP) {
            List<TaskInstance> stopList = getCompleteTaskByState(ExecutionStatus.STOP);
            List<TaskInstance> killList = getCompleteTaskByState(ExecutionStatus.KILL);
            if (CollectionUtils.isNotEmpty(stopList) || CollectionUtils.isNotEmpty(killList) || !isComplementEnd()) {
                return ExecutionStatus.STOP;
            } else {
                return ExecutionStatus.SUCCESS;
            }
        }
        // success
        if (state == ExecutionStatus.RUNNING_EXECUTION) {
            List<TaskInstance> killTasks = getCompleteTaskByState(ExecutionStatus.KILL);
            if (readyToSubmitTaskQueue.size() > 0) {
                // tasks currently pending submission, indicating that a dependency is still waiting to complete
                return ExecutionStatus.RUNNING_EXECUTION;
            } else if (CollectionUtils.isNotEmpty(killTasks)) {
                // tasks may have been killed manually
                return ExecutionStatus.FAILURE;
            } else {
                // the waiting queue is empty and no task was killed, so the process succeeds
                return ExecutionStatus.SUCCESS;
            }
        }
        return state;
    }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing a compensation task, the execution thread would have an NPE
In the latest dev branch, when a compensation (complement data) task is executed, after the task finishes successfully we can see that the Master throws an exception, which causes the process to be shown as failed. ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
    /**
     * whether standby task list have retry tasks
     */
    private boolean retryTaskExists() {
        boolean result = false;
        for (Iterator<TaskInstance> iter = readyToSubmitTaskQueue.iterator(); iter.hasNext(); ) {
            TaskInstance task = iter.next();
            if (task.getState().typeIsFailure()) {
                result = true;
                break;
            }
        }
        return result;
    }

    /**
     * whether complement end
     *
     * @return Boolean whether is complement end
     */
    private boolean isComplementEnd() {
        if (!processInstance.isComplementData()) {
            return true;
        }
        try {
            Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
            Date endTime = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
            return processInstance.getScheduleTime().equals(endTime);
        } catch (Exception e) {
            logger.error("complement end failed ", e);
            return false