Dataset schema: status (stringclasses, 1 value); repo_name (stringclasses, 31 values); repo_url (stringclasses, 31 values); issue_id (int64, 1-104k); title (stringlengths, 4-233); body (stringlengths, 0-186k, nullable ⌀); issue_url (stringlengths, 38-56); pull_url (stringlengths, 37-54); before_fix_sha (stringlengths, 40); after_fix_sha (stringlengths, 40); report_datetime (timestamp[us, tz=UTC]); language (stringclasses, 5 values); commit_datetime (timestamp[us, tz=UTC]); updated_file (stringlengths, 7-188); chunk_content (stringlengths, 1-1.03M)
status | repo_name | repo_url | issue_id | title | body | issue_url | pull_url | before_fix_sha | after_fix_sha | report_datetime | language | commit_datetime | updated_file | chunk_content |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,773 | [Improvement][server] need to support two parameters related to task | **Describe the question**
When I'm using the shell task, I need the instance id of the task and the absolute path of the task.
**What are the current deficiencies and the benefits of improvement**
**Which version of DolphinScheduler:**
-[dev]
**Describe alternatives you've considered**
| https://github.com/apache/dolphinscheduler/issues/5773 | https://github.com/apache/dolphinscheduler/pull/5774 | ab527a5e5abd04243305a50f184d8009b9edf21a | 9fd5145b66646f3df847ea3c81bb272621ee86ca | 2021-07-08T10:01:12Z | java | 2021-07-09T09:00:32Z | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/shell/ShellCommandExecutorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.shell;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.reflect.Method;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,773 | [Improvement][server] need to support two parameters related to task | **Describe the question**
When I'm using the shell task, I need the instance id of the task and the absolute path of the task.
**What are the current deficiencies and the benefits of improvement**
**Which version of DolphinScheduler:**
-[dev]
**Describe alternatives you've considered**
| https://github.com/apache/dolphinscheduler/issues/5773 | https://github.com/apache/dolphinscheduler/pull/5774 | ab527a5e5abd04243305a50f184d8009b9edf21a | 9fd5145b66646f3df847ea3c81bb272621ee86ca | 2021-07-08T10:01:12Z | java | 2021-07-09T09:00:32Z | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/shell/ShellCommandExecutorTest.java | import org.apache.dolphinscheduler.server.worker.task.AbstractCommandExecutor;
import org.apache.dolphinscheduler.server.worker.task.AbstractTask;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.ApplicationContext;
import java.util.Date;
import java.util.List;
/**
* python shell command executor test
*/
@RunWith(PowerMockRunner.class)
@PrepareForTest(OSUtils.class)
@PowerMockIgnore({"javax.management.*"})
public class ShellCommandExecutorTest {
private static final Logger logger = LoggerFactory.getLogger(ShellCommandExecutorTest.class);
private ProcessService processService = null;
private ApplicationContext applicationContext;
@Before |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,773 | [Improvement][server] need to support two parameters related to task | **Describe the question**
When I'm using the shell task, I need the instance id of the task and the absolute path of the task.
**What are the current deficiencies and the benefits of improvement**
**Which version of DolphinScheduler:**
-[dev]
**Describe alternatives you've considered**
| https://github.com/apache/dolphinscheduler/issues/5773 | https://github.com/apache/dolphinscheduler/pull/5774 | ab527a5e5abd04243305a50f184d8009b9edf21a | 9fd5145b66646f3df847ea3c81bb272621ee86ca | 2021-07-08T10:01:12Z | java | 2021-07-09T09:00:32Z | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/shell/ShellCommandExecutorTest.java | public void before() {
applicationContext = PowerMockito.mock(ApplicationContext.class);
processService = PowerMockito.mock(ProcessService.class);
SpringApplicationContext springApplicationContext = new SpringApplicationContext();
springApplicationContext.setApplicationContext(applicationContext);
PowerMockito.when(applicationContext.getBean(ProcessService.class)).thenReturn(processService);
}
@Ignore
@Test
public void test() throws Exception {
TaskProps taskProps = new TaskProps();
taskProps.setExecutePath("/opt/soft/program/tmp/dolphinscheduler/exec/flow/5/36/2864/7657");
taskProps.setTaskAppId("36_2864_7657");
taskProps.setTenantCode("hdfs");
taskProps.setTaskStartTime(new Date());
taskProps.setTaskTimeout(360000);
taskProps.setTaskInstanceId(7657);
TaskInstance taskInstance = processService.findTaskInstanceById(7657);
AbstractTask task = null;
logger.info("task info : {}", task);
task.init();
task.handle();
ExecutionStatus status = ExecutionStatus.SUCCESS;
if (task.getExitStatusCode() == Constants.EXIT_CODE_SUCCESS) {
status = ExecutionStatus.SUCCESS; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,773 | [Improvement][server] need to support two parameters related to task | **Describe the question**
When I'm using the shell task, I need the instance id of the task and the absolute path of the task.
**What are the current deficiencies and the benefits of improvement**
**Which version of DolphinScheduler:**
-[dev]
**Describe alternatives you've considered**
| https://github.com/apache/dolphinscheduler/issues/5773 | https://github.com/apache/dolphinscheduler/pull/5774 | ab527a5e5abd04243305a50f184d8009b9edf21a | 9fd5145b66646f3df847ea3c81bb272621ee86ca | 2021-07-08T10:01:12Z | java | 2021-07-09T09:00:32Z | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/shell/ShellCommandExecutorTest.java | } else if (task.getExitStatusCode() == Constants.EXIT_CODE_KILL) {
status = ExecutionStatus.KILL;
} else {
status = ExecutionStatus.FAILURE;
}
logger.info(status.toString());
}
@Test
public void testParseProcessOutput() {
Class<AbstractCommandExecutor> shellCommandExecutorClass = AbstractCommandExecutor.class;
try {
Method method = shellCommandExecutorClass.getDeclaredMethod("parseProcessOutput", Process.class);
method.setAccessible(true);
Object[] arg1s = {new Process() {
@Override
public OutputStream getOutputStream() {
return new OutputStream() {
@Override
public void write(int b) throws IOException {
logger.info("unit test");
}
};
}
@Override
public InputStream getInputStream() {
return new InputStream() {
@Override
public int read() throws IOException {
return 0;
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,773 | [Improvement][server] need to support two parameters related to task | **Describe the question**
When I'm using the shell task, I need the instance id of the task and the absolute path of the task.
**What are the current deficiencies and the benefits of improvement**
**Which version of DolphinScheduler:**
-[dev]
**Describe alternatives you've considered**
| https://github.com/apache/dolphinscheduler/issues/5773 | https://github.com/apache/dolphinscheduler/pull/5774 | ab527a5e5abd04243305a50f184d8009b9edf21a | 9fd5145b66646f3df847ea3c81bb272621ee86ca | 2021-07-08T10:01:12Z | java | 2021-07-09T09:00:32Z | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/shell/ShellCommandExecutorTest.java | };
}
@Override
public InputStream getErrorStream() {
return null;
}
@Override
public int waitFor() throws InterruptedException {
return 0;
}
@Override
public int exitValue() {
return 0;
}
@Override
public void destroy() {
logger.info("unit test");
}
} };
method.invoke(new AbstractCommandExecutor(null, new TaskExecutionContext(), logger) {
@Override
protected String buildCommandFilePath() {
return null;
}
@Override
protected String commandInterpreter() {
return null;
}
@Override
protected void createCommandFileIfNotExists(String execCommand, String commandFile) throws IOException { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,773 | [Improvement][server] need to support two parameters related to task | **Describe the question**
When I'm using the shell task, I need the instance id of the task and the absolute path of the task.
**What are the current deficiencies and the benefits of improvement**
**Which version of DolphinScheduler:**
-[dev]
**Describe alternatives you've considered**
| https://github.com/apache/dolphinscheduler/issues/5773 | https://github.com/apache/dolphinscheduler/pull/5774 | ab527a5e5abd04243305a50f184d8009b9edf21a | 9fd5145b66646f3df847ea3c81bb272621ee86ca | 2021-07-08T10:01:12Z | java | 2021-07-09T09:00:32Z | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/shell/ShellCommandExecutorTest.java | logger.info("unit test");
}
}, arg1s);
} catch (Exception e) {
logger.error(e.getMessage());
}
}
@Test
public void testFindAppId() {
Class<AbstractCommandExecutor> shellCommandExecutorClass = AbstractCommandExecutor.class;
try {
Method method = shellCommandExecutorClass.getDeclaredMethod("findAppId", new Class[]{String.class});
method.setAccessible(true);
Object[] arg1s = {"11111"};
String result = (String) method.invoke(new AbstractCommandExecutor(null, null, null) {
@Override
protected String buildCommandFilePath() {
return null;
}
@Override
protected String commandInterpreter() {
return null;
}
@Override
protected void createCommandFileIfNotExists(String execCommand, String commandFile) throws IOException {
logger.info("unit test");
}
}, arg1s);
} catch (Exception e) {
logger.error(e.getMessage()); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,773 | [Improvement][server] need to support two parameters related to task | **Describe the question**
When I'm using the shell task, I need the instance id of the task and the absolute path of the task.
**What are the current deficiencies and the benefits of improvement**
**Which version of DolphinScheduler:**
-[dev]
**Describe alternatives you've considered**
| https://github.com/apache/dolphinscheduler/issues/5773 | https://github.com/apache/dolphinscheduler/pull/5774 | ab527a5e5abd04243305a50f184d8009b9edf21a | 9fd5145b66646f3df847ea3c81bb272621ee86ca | 2021-07-08T10:01:12Z | java | 2021-07-09T09:00:32Z | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/shell/ShellCommandExecutorTest.java | }
}
@Test
public void testConvertFile2List() {
Class<AbstractCommandExecutor> shellCommandExecutorClass = AbstractCommandExecutor.class;
try {
Method method = shellCommandExecutorClass.getDeclaredMethod("convertFile2List", String.class);
method.setAccessible(true);
Object[] arg1s = {"/opt/1.txt"};
List<String> result = (List<String>) method.invoke(new AbstractCommandExecutor(null, null, null) {
@Override
protected String buildCommandFilePath() {
return null;
}
@Override
protected String commandInterpreter() {
return null;
}
@Override
protected void createCommandFileIfNotExists(String execCommand, String commandFile) throws IOException {
logger.info("unit test");
}
}, arg1s);
Assert.assertTrue(true);
} catch (Exception e) {
logger.error(e.getMessage());
}
}
} |
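The feature request above (issue 5773) asks for two task-related values, the task instance id and the task's absolute execution path, to be available inside a shell task. The sketch below is a hypothetical illustration of how such values could be handed to a shell script through environment variables; the variable names `DS_TASK_INSTANCE_ID` and `DS_TASK_EXECUTE_PATH` and the helper class are assumptions for illustration only, not the parameters actually introduced by PR 5774. The sample values mirror those used in `ShellCommandExecutorTest` above.
```java
import java.util.HashMap;
import java.util.Map;

// Hypothetical illustration only: pass the task instance id and execute path
// to a shell task via environment variables set on the ProcessBuilder.
public class TaskEnvSketch {

    static Map<String, String> buildTaskEnv(int taskInstanceId, String executePath) {
        Map<String, String> env = new HashMap<>();
        env.put("DS_TASK_INSTANCE_ID", String.valueOf(taskInstanceId)); // instance id of the task
        env.put("DS_TASK_EXECUTE_PATH", executePath);                   // absolute path the task executes in
        return env;
    }

    public static void main(String[] args) throws Exception {
        Map<String, String> env =
                buildTaskEnv(7657, "/opt/soft/program/tmp/dolphinscheduler/exec/flow/5/36/2864/7657");
        ProcessBuilder builder =
                new ProcessBuilder("sh", "-c", "echo $DS_TASK_INSTANCE_ID; echo $DS_TASK_EXECUTE_PATH");
        builder.environment().putAll(env); // the shell script can now read both values
        builder.inheritIO().start().waitFor();
    }
}
```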
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,786 | [Improvement][Server] When the Worker turns down, the MasterServer cannot handle the Remove event correctly and throws NPE | **Describe the question**
When the Worker goes down, the MasterServer cannot handle the Remove event correctly and throws an NPE:
```java
[ERROR] 2021-07-10 00:27:52.736 org.apache.curator.framework.recipes.cache.TreeCache:[779] -
java.lang.NullPointerException: null
at org.apache.dolphinscheduler.server.master.registry.MasterRegistryDataListener.handleWorkerEvent(MasterRegistryDataListener.java:83)
at org.apache.dolphinscheduler.server.master.registry.MasterRegistryDataListener.notify(MasterRegistryDataListener.java:47)
at org.apache.dolphinscheduler.spi.register.ListenerManager.dataChange(ListenerManager.java:63)
at org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperRegistry.lambda$subscribe$0(ZookeeperRegistry.java:166)
at org.apache.curator.framework.recipes.cache.TreeCache$2.apply(TreeCache.java:760)
at org.apache.curator.framework.recipes.cache.TreeCache$2.apply(TreeCache.java:754)
at org.apache.curator.framework.listen.ListenerContainer$1.run(ListenerContainer.java:100)
at org.apache.curator.shaded.com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)
at org.apache.curator.framework.listen.ListenerContainer.forEach(ListenerContainer.java:92)
at org.apache.curator.framework.recipes.cache.TreeCache.callListeners(TreeCache.java:753)
at org.apache.curator.framework.recipes.cache.TreeCache.access$1900(TreeCache.java:75)
at org.apache.curator.framework.recipes.cache.TreeCache$4.run(TreeCache.java:865)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run$$$capture(FutureTask.java:266)
at java.util.concurrent.FutureTask.run(FutureTask.java)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
```
**What are the current deficiencies and the benefits of improvement**
```java
public class MasterRegistryDataListener implements SubscribeListener {
private static final Logger logger = LoggerFactory.getLogger(MasterRegistryDataListener.class);
@Resource
private MasterRegistryClient masterRegistryClient;
```
The `MasterRegistryDataListener` class will be used by `MasterRegistryClient`, as shown below:
```java
// line 123
registryClient.subscribe(REGISTRY_DOLPHINSCHEDULER_NODE, new MasterRegistryDataListener());
```
**The @Resource annotation will not take effect in this case, so the field is left null**
**Which version of DolphinScheduler:**
latest dev branch
| https://github.com/apache/dolphinscheduler/issues/5786 | https://github.com/apache/dolphinscheduler/pull/5787 | 9fd5145b66646f3df847ea3c81bb272621ee86ca | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 2021-07-09T16:44:12Z | java | 2021-07-09T17:08:16Z | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/MasterRegistryDataListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.registry;
import static org.apache.dolphinscheduler.common.Constants.REGISTRY_DOLPHINSCHEDULER_MASTERS;
import static org.apache.dolphinscheduler.common.Constants.REGISTRY_DOLPHINSCHEDULER_WORKERS;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.NodeType;
import org.apache.dolphinscheduler.spi.register.DataChangeEvent;
import org.apache.dolphinscheduler.spi.register.SubscribeListener;
import javax.annotation.Resource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class MasterRegistryDataListener implements SubscribeListener { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,786 | [Improvement][Server] When the Worker turns down, the MasterServer cannot handle the Remove event correctly and throws NPE | **Describe the question**
When the Worker goes down, the MasterServer cannot handle the Remove event correctly and throws an NPE:
```java
[ERROR] 2021-07-10 00:27:52.736 org.apache.curator.framework.recipes.cache.TreeCache:[779] -
java.lang.NullPointerException: null
at org.apache.dolphinscheduler.server.master.registry.MasterRegistryDataListener.handleWorkerEvent(MasterRegistryDataListener.java:83)
at org.apache.dolphinscheduler.server.master.registry.MasterRegistryDataListener.notify(MasterRegistryDataListener.java:47)
at org.apache.dolphinscheduler.spi.register.ListenerManager.dataChange(ListenerManager.java:63)
at org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperRegistry.lambda$subscribe$0(ZookeeperRegistry.java:166)
at org.apache.curator.framework.recipes.cache.TreeCache$2.apply(TreeCache.java:760)
at org.apache.curator.framework.recipes.cache.TreeCache$2.apply(TreeCache.java:754)
at org.apache.curator.framework.listen.ListenerContainer$1.run(ListenerContainer.java:100)
at org.apache.curator.shaded.com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)
at org.apache.curator.framework.listen.ListenerContainer.forEach(ListenerContainer.java:92)
at org.apache.curator.framework.recipes.cache.TreeCache.callListeners(TreeCache.java:753)
at org.apache.curator.framework.recipes.cache.TreeCache.access$1900(TreeCache.java:75)
at org.apache.curator.framework.recipes.cache.TreeCache$4.run(TreeCache.java:865)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run$$$capture(FutureTask.java:266)
at java.util.concurrent.FutureTask.run(FutureTask.java)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
```
**What are the current deficiencies and the benefits of improvement**
```java
public class MasterRegistryDataListener implements SubscribeListener {
private static final Logger logger = LoggerFactory.getLogger(MasterRegistryDataListener.class);
@Resource
private MasterRegistryClient masterRegistryClient;
```
The `MasterRegistryDataListener` class will be used by `MasterRegistryClient`, as shown below:
```java
// line 123
registryClient.subscribe(REGISTRY_DOLPHINSCHEDULER_NODE, new MasterRegistryDataListener());
```
**The @Resource annotation will not take effect in this case, so the field is left null**
**Which version of DolphinScheduler:**
latest dev branch
| https://github.com/apache/dolphinscheduler/issues/5786 | https://github.com/apache/dolphinscheduler/pull/5787 | 9fd5145b66646f3df847ea3c81bb272621ee86ca | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 2021-07-09T16:44:12Z | java | 2021-07-09T17:08:16Z | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/MasterRegistryDataListener.java | private static final Logger logger = LoggerFactory.getLogger(MasterRegistryDataListener.class);
@Resource
MasterRegistryClient masterRegistryClient;
@Override
public void notify(String path, DataChangeEvent event) {
if (path.startsWith(REGISTRY_DOLPHINSCHEDULER_MASTERS + Constants.SINGLE_SLASH)) {
handleMasterEvent(event, path);
} else if (path.startsWith(REGISTRY_DOLPHINSCHEDULER_WORKERS + Constants.SINGLE_SLASH)) {
handleWorkerEvent(event, path);
}
}
/**
* monitor master
*
* @param event event
* @param path path
*/
public void handleMasterEvent(DataChangeEvent event, String path) {
switch (event) {
case ADD: |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,786 | [Improvement][Server] When the Worker turns down, the MasterServer cannot handle the Remove event correctly and throws NPE | **Describe the question**
When the Worker goes down, the MasterServer cannot handle the Remove event correctly and throws an NPE:
```java
[ERROR] 2021-07-10 00:27:52.736 org.apache.curator.framework.recipes.cache.TreeCache:[779] -
java.lang.NullPointerException: null
at org.apache.dolphinscheduler.server.master.registry.MasterRegistryDataListener.handleWorkerEvent(MasterRegistryDataListener.java:83)
at org.apache.dolphinscheduler.server.master.registry.MasterRegistryDataListener.notify(MasterRegistryDataListener.java:47)
at org.apache.dolphinscheduler.spi.register.ListenerManager.dataChange(ListenerManager.java:63)
at org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperRegistry.lambda$subscribe$0(ZookeeperRegistry.java:166)
at org.apache.curator.framework.recipes.cache.TreeCache$2.apply(TreeCache.java:760)
at org.apache.curator.framework.recipes.cache.TreeCache$2.apply(TreeCache.java:754)
at org.apache.curator.framework.listen.ListenerContainer$1.run(ListenerContainer.java:100)
at org.apache.curator.shaded.com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)
at org.apache.curator.framework.listen.ListenerContainer.forEach(ListenerContainer.java:92)
at org.apache.curator.framework.recipes.cache.TreeCache.callListeners(TreeCache.java:753)
at org.apache.curator.framework.recipes.cache.TreeCache.access$1900(TreeCache.java:75)
at org.apache.curator.framework.recipes.cache.TreeCache$4.run(TreeCache.java:865)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run$$$capture(FutureTask.java:266)
at java.util.concurrent.FutureTask.run(FutureTask.java)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
```
**What are the current deficiencies and the benefits of improvement**
```java
public class MasterRegistryDataListener implements SubscribeListener {
private static final Logger logger = LoggerFactory.getLogger(MasterRegistryDataListener.class);
@Resource
private MasterRegistryClient masterRegistryClient;
```
The `MasterRegistryDataListener` class will be used by `MasterRegistryClient`, as shown below:
```java
// line 123
registryClient.subscribe(REGISTRY_DOLPHINSCHEDULER_NODE, new MasterRegistryDataListener());
```
**The @Resource annotation will not take effect in this case, so the field is left null**
**Which version of DolphinScheduler:**
latest dev branch
| https://github.com/apache/dolphinscheduler/issues/5786 | https://github.com/apache/dolphinscheduler/pull/5787 | 9fd5145b66646f3df847ea3c81bb272621ee86ca | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 2021-07-09T16:44:12Z | java | 2021-07-09T17:08:16Z | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/MasterRegistryDataListener.java | logger.info("master node added : {}", path);
break;
case REMOVE:
masterRegistryClient.removeNodePath(path, NodeType.MASTER, true);
break;
default:
break;
}
}
/**
* monitor worker
*
* @param event event
* @param path path
*/
public void handleWorkerEvent(DataChangeEvent event, String path) {
switch (event) {
case ADD:
logger.info("worker node added : {}", path);
break;
case REMOVE:
logger.info("worker node deleted : {}", path);
masterRegistryClient.removeNodePath(path, NodeType.WORKER, true);
break;
default:
break;
}
}
} |
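The root cause described in issue 5786 is that `MasterRegistryDataListener` is created with `new`, so Spring never injects the `@Resource` field and `masterRegistryClient` is still null when `handleWorkerEvent` or `handleMasterEvent` fires. The sketch below shows one possible remedy, resolving the bean explicitly through `SpringApplicationContext.getBean` (the same lookup style used elsewhere in this repository); it is an illustration only, not necessarily the change made in PR 5787.
```java
package org.apache.dolphinscheduler.server.master.registry;

import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;

// Sketch only: obtain MasterRegistryClient from the Spring context instead of
// relying on @Resource field injection, which never happens for objects
// created with `new`. The class name is hypothetical.
public class MasterRegistryDataListenerSketch {

    private final MasterRegistryClient masterRegistryClient;

    public MasterRegistryDataListenerSketch() {
        // explicit lookup works even though this object is not a Spring-managed bean
        this.masterRegistryClient = SpringApplicationContext.getBean(MasterRegistryClient.class);
    }

    public MasterRegistryClient getMasterRegistryClient() {
        return masterRegistryClient;
    }
}
```
Constructor injection, or registering the listener itself as a Spring bean, would be alternative ways to achieve the same result.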
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,775 | [Improvement][Worker] Task log may be lost | **Describe the question**
All the code in this issue is at `AbstractCommandExecutor`.
If you have seen the task logging code in DolphinScheduler, you probably know that we currently use a producer-consumer model for logging.
We use one thread to write the log to the collection.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L351-L371
And we use another thread to consume the log from the collection and write it to the log file.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L375-L390
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L549-L562
The problem is that any logs added to the collection while execution is between line 558 and line 560 will be lost (`logHandler.accept` will take some time).
**Which version of DolphinScheduler:**
-[1.3.6-release]
-[dev]
**Describe alternatives you've considered**
There are two ways to solve the issue:
1. We can use a blocking queue to store the log instead of using `Collections.synchronizedList`.
2. We can directly use a read-write lock in `AbstractCommandExecutor` to solve the concurrency problem, instead of using `Collections.synchronizedList`.
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task;
import static org.apache.dolphinscheduler.common.Constants.EXIT_CODE_FAILURE;
import static org.apache.dolphinscheduler.common.Constants.EXIT_CODE_KILL;
import static org.apache.dolphinscheduler.common.Constants.EXIT_CODE_SUCCESS;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.CommonUtils; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,775 | [Improvement][Worker] Task log may be lost | **Describe the question**
All the code in this issue is at `AbstractCommandExecutor`.
If you have seen the task logging code in DolphinScheduler, you probably know that we currently use a producer-consumer model for logging.
We use one thread to write the log to the collection.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L351-L371
And we use another thread to consume the log from the collection and write it to the log file.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L375-L390
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L549-L562
The problem is that any logs added to the collection while execution is between line 558 and line 560 will be lost (`logHandler.accept` will take some time).
**Which version of DolphinScheduler:**
-[1.3.6-release]
-[dev]
**Describe alternatives you've considered**
There are two ways to solve the issue:
1. We can use a blocking queue to store the log instead of using `Collections.synchronizedList`.
2. We can directly use a read-write lock in `AbstractCommandExecutor` to solve the concurrency problem, instead of using `Collections.synchronizedList`.
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java | import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.ProcessUtils;
import org.apache.dolphinscheduler.server.worker.cache.TaskExecutionContextCacheManager;
import org.apache.dolphinscheduler.server.worker.cache.impl.TaskExecutionContextCacheManagerImpl;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.reflect.Field;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.slf4j.Logger;
/**
* abstract command executor
*/
public abstract class AbstractCommandExecutor { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,775 | [Improvement][Worker] Task log may be lost | **Describe the question**
All the code in this issue is at `AbstractCommandExecutor`.
If you have seen the task logging code in DolphinScheduler, you probably know that we currently use a producer-consumer model for logging.
We use one thread to write the log to the collection.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L351-L371
And we use another thread to consume the log from the collection and write it to the log file.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L375-L390
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L549-L562
The problem is that any logs added to the collection while execution is between line 558 and line 560 will be lost (`logHandler.accept` will take some time).
**Which version of DolphinScheduler:**
-[1.3.6-release]
-[dev]
**Describe alternatives you've considered**
There are two ways to solve the issue:
1. We can use a blocking queue to store the log instead of using `Collections.synchronizedList`.
2. We can directly use a read-write lock in `AbstractCommandExecutor` to solve the concurrency problem, instead of using `Collections.synchronizedList`.
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java | /**
* rules for extracting application ID
*/
protected static final Pattern APPLICATION_REGEX = Pattern.compile(Constants.APPLICATION_REGEX);
protected StringBuilder varPool = new StringBuilder();
/**
* process
*/
private Process process;
/**
* log handler
*/
protected Consumer<List<String>> logHandler;
/**
* logger
*/
protected Logger logger;
/**
* log list
*/
protected final List<String> logBuffer;
protected boolean logOutputIsScuccess = false;
/**
* taskExecutionContext
*/
protected TaskExecutionContext taskExecutionContext;
/**
* taskExecutionContextCacheManager |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,775 | [Improvement][Worker] Task log may be lost | **Describe the question**
All the code in this issue is at `AbstractCommandExecutor`.
If you have seen the task logging code in DolphinScheduler, you probably know that we currently use a producer-consumer model for logging.
We use one thread to write the log to the collection.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L351-L371
And we use another thread to consume the log from the collection and write it to the log file.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L375-L390
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L549-L562
The problem is that any logs added to the collection while execution is between line 558 and line 560 will be lost (`logHandler.accept` will take some time).
**Which version of DolphinScheduler:**
-[1.3.6-release]
-[dev]
**Describe alternatives you've considered**
There are two ways to solve the issue:
1. We can use a blocking queue to store the log instead of using `Collections.synchronizedList`.
2. We can directly use a read-write lock in `AbstractCommandExecutor` to solve the concurrency problem, instead of using `Collections.synchronizedList`.
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java | */
private TaskExecutionContextCacheManager taskExecutionContextCacheManager;
public AbstractCommandExecutor(Consumer<List<String>> logHandler,
TaskExecutionContext taskExecutionContext,
Logger logger) {
this.logHandler = logHandler;
this.taskExecutionContext = taskExecutionContext;
this.logger = logger;
this.logBuffer = Collections.synchronizedList(new ArrayList<>());
this.taskExecutionContextCacheManager = SpringApplicationContext.getBean(TaskExecutionContextCacheManagerImpl.class);
}
protected AbstractCommandExecutor(List<String> logBuffer) {
this.logBuffer = logBuffer;
}
/**
* build process
*
* @param commandFile command file
* @throws IOException IO Exception
*/
private void buildProcess(String commandFile) throws IOException {
List<String> command = new LinkedList<>();
ProcessBuilder processBuilder = new ProcessBuilder();
processBuilder.directory(new File(taskExecutionContext.getExecutePath()));
processBuilder.redirectErrorStream(true); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,775 | [Improvement][Worker] Task log may be lost | **Describe the question**
All the code in this issue is at `AbstractCommandExecutor`.
If you have seen the task logging code in DolphinScheduler, you probably know that we currently use a producer-consumer model for logging.
We use one thread to write the log to the collection.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L351-L371
And we use another thread to consume the log from the collection and write it to the log file.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L375-L390
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L549-L562
The problem is that any logs added to the collection while execution is between line 558 and line 560 will be lost (`logHandler.accept` will take some time).
**Which version of DolphinScheduler:**
-[1.3.6-release]
-[dev]
**Describe alternatives you've considered**
There are two ways to solve the issue:
1. We can use a blocking queue to store the log instead of using `Collections.synchronizedList`.
2. We can directly use a read-write lock in `AbstractCommandExecutor` to solve the concurrency problem, instead of using `Collections.synchronizedList`.
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java | if (!OSUtils.isWindows() && CommonUtils.isSudoEnable()) {
command.add("sudo");
command.add("-u");
command.add(taskExecutionContext.getTenantCode());
}
command.add(commandInterpreter());
command.addAll(commandOptions());
command.add(commandFile);
processBuilder.command(command);
process = processBuilder.start();
printCommand(command);
}
/**
* task specific execution logic
*
* @param execCommand execCommand
* @return CommandExecuteResult
* @throws Exception if error throws Exception
*/
public CommandExecuteResult run(String execCommand) throws Exception {
CommandExecuteResult result = new CommandExecuteResult();
int taskInstanceId = taskExecutionContext.getTaskInstanceId();
if (null == taskExecutionContextCacheManager.getByTaskInstanceId(taskInstanceId)) {
result.setExitStatusCode(EXIT_CODE_KILL);
return result;
}
if (StringUtils.isEmpty(execCommand)) { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,775 | [Improvement][Worker] Task log may be lost | **Describe the question**
All the code in this issue is at `AbstractCommandExecutor`.
If you have seen the task logging code in DolphinScheduler, you probably know that we currently use a producer-consumer model for logging.
We use one thread to write the log to the collection.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L351-L371
And we use another thread to consume the log from the collection and write it to the log file.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L375-L390
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L549-L562
The problem is that any logs added to the collection while execution is between line 558 and line 560 will be lost (`logHandler.accept` will take some time).
**Which version of DolphinScheduler:**
-[1.3.6-release]
-[dev]
**Describe alternatives you've considered**
There are two ways to solve the issue:
1. We can use a blocking queue to store the log instead of using `Collections.synchronizedList`.
2. We can directly use a read-write lock in `AbstractCommandExecutor` to solve the concurrency problem, instead of using `Collections.synchronizedList`.
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java | taskExecutionContextCacheManager.removeByTaskInstanceId(taskInstanceId);
return result;
}
String commandFilePath = buildCommandFilePath();
createCommandFileIfNotExists(execCommand, commandFilePath);
buildProcess(commandFilePath);
parseProcessOutput(process);
Integer processId = getProcessId(process);
result.setProcessId(processId);
taskExecutionContext.setProcessId(processId);
boolean updateTaskExecutionContextStatus = taskExecutionContextCacheManager.updateTaskExecutionContext(taskExecutionContext);
if (Boolean.FALSE.equals(updateTaskExecutionContextStatus)) {
ProcessUtils.kill(taskExecutionContext);
result.setExitStatusCode(EXIT_CODE_KILL);
return result;
}
logger.info("process start, process id is: {}", processId);
long remainTime = getRemaintime();
boolean status = process.waitFor(remainTime, TimeUnit.SECONDS);
if (status) {
List<String> appIds = getAppIds(taskExecutionContext.getLogPath()); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,775 | [Improvement][Worker] Task log may be lost | **Describe the question**
All the code in this issue is at `AbstractCommandExecutor`.
If you have seen the task logging code in DolphinScheduler, you probably know that we currently use a producer-consumer model for logging.
We use one thread to write the log to the collection.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L351-L371
And we use another thread to consume the log from the collection and write it to the log file.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L375-L390
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L549-L562
The problem is that any logs added to the collection while execution is between line 558 and line 560 will be lost (`logHandler.accept` will take some time).
**Which version of DolphinScheduler:**
-[1.3.6-release]
-[dev]
**Describe alternatives you've considered**
There are two ways to solve the issue:
1. We can use a blocking queue to store the log instead of using `Collections.synchronizedList`.
2. We can directly use a read-write lock in `AbstractCommandExecutor` to solve the concurrency problem, instead of using `Collections.synchronizedList`.
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java | result.setAppIds(String.join(Constants.COMMA, appIds));
result.setExitStatusCode(process.exitValue());
if (process.exitValue() == 0) {
result.setExitStatusCode(isSuccessOfYarnState(appIds) ? EXIT_CODE_SUCCESS : EXIT_CODE_FAILURE);
}
} else {
logger.error("process has failure , exitStatusCode:{}, processExitValue:{}, ready to kill ...",
result.getExitStatusCode(), process.exitValue());
ProcessUtils.kill(taskExecutionContext);
result.setExitStatusCode(EXIT_CODE_FAILURE);
}
logger.info("process has exited, execute path:{}, processId:{} ,exitStatusCode:{} ,processWaitForStatus:{} ,processExitValue:{}",
taskExecutionContext.getExecutePath(), processId, result.getExitStatusCode(), status, process.exitValue());
return result;
}
public String getVarPool() {
return varPool.toString();
}
/**
* cancel application
*
* @throws Exception exception
*/
public void cancelApplication() throws Exception {
if (process == null) {
return;
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,775 | [Improvement][Worker] Task log may be lost | **Describe the question**
All the code in this issue is at `AbstractCommandExecutor`.
If you have seen the task logging code in DolphinScheduler, you probably know that we currently use a producer-consumer model for logging.
We use one thread to write the log to the collection.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L351-L371
And we use another thread to consume the log from the collection and write it to the log file.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L375-L390
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L549-L562
The problem is that any logs added to the collection while execution is between line 558 and line 560 will be lost (`logHandler.accept` will take some time).
**Which version of DolphinScheduler:**
-[1.3.6-release]
-[dev]
**Describe alternatives you've considered**
There are two ways to solve the issue:
1. We can use a blocking queue to store the log instead of using `Collections.synchronizedList`.
2. We can directly use a read-write lock in `AbstractCommandExecutor` to solve the concurrency problem, instead of using `Collections.synchronizedList`.
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java | clear();
int processId = getProcessId(process);
logger.info("cancel process: {}", processId);
boolean killed = softKill(processId);
if (!killed) {
hardKill(processId);
process.destroy();
process = null;
}
}
/**
* soft kill
*
* @param processId process id
* @return process is alive
* @throws InterruptedException interrupted exception
*/
private boolean softKill(int processId) {
if (processId != 0 && process.isAlive()) {
try {
String cmd = String.format("kill %d", processId);
cmd = OSUtils.getSudoCmd(taskExecutionContext.getTenantCode(), cmd);
logger.info("soft kill task:{}, process id:{}, cmd:{}", taskExecutionContext.getTaskAppId(), processId, cmd);
Runtime.getRuntime().exec(cmd);
} catch (IOException e) { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,775 | [Improvement][Worker] Task log may be lost | **Describe the question**
All the code in this issue is at `AbstractCommandExecutor`.
If you have seen the task logging code in DolphinScheduler, you probably know that we currently use a producer-consumer model for logging.
We use one thread to write the log to the collection.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L351-L371
And we use another thread to consume the log from the collection and write it to the log file.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L375-L390
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L549-L562
The problem is that any logs added to the collection while execution is between line 558 and line 560 will be lost (`logHandler.accept` will take some time).
**Which version of DolphinScheduler:**
-[1.3.6-release]
-[dev]
**Describe alternatives you've considered**
There are two ways to solve the issue:
1. We can use a blocking queue to store the log instead of using `Collections.synchronizedList`.
2. We can directly use a read-write lock in `AbstractCommandExecutor` to solve the concurrency problem, instead of using `Collections.synchronizedList`.
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java | logger.info("kill attempt failed", e);
}
}
return !process.isAlive();
}
/**
* hard kill
*
* @param processId process id
*/
private void hardKill(int processId) {
if (processId != 0 && process.isAlive()) {
try {
String cmd = String.format("kill -9 %d", processId);
cmd = OSUtils.getSudoCmd(taskExecutionContext.getTenantCode(), cmd);
logger.info("hard kill task:{}, process id:{}, cmd:{}", taskExecutionContext.getTaskAppId(), processId, cmd);
Runtime.getRuntime().exec(cmd);
} catch (IOException e) {
logger.error("kill attempt failed ", e);
}
}
}
/**
* print command
*
* @param commands process builder
*/
private void printCommand(List<String> commands) {
String cmdStr;
try { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,775 | [Improvement][Worker] Task log may be lost | **Describe the question**
All the code in this issue is at `AbstractCommandExecutor`.
If you have seen the task logging code in DolphinScheduler, you probably know that we currently use a producer-consumer model for logging.
We use one thread to write the log to the collection.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L351-L371
And we use another thread to consume the log from the collection and write it to the log file.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L375-L390
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L549-L562
The problem is that any logs added to the collection while execution is between line 558 and line 560 will be lost (`logHandler.accept` will take some time).
**Which version of DolphinScheduler:**
-[1.3.6-release]
-[dev]
**Describe alternatives you've considered**
There are two ways to solve the issue:
1. We can use a blocking queue to store the log instead of using `Collections.synchronizedList`.
2. We can directly use a read-write lock in `AbstractCommandExecutor` to solve the concurrency problem, instead of using `Collections.synchronizedList`.
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java | cmdStr = ProcessUtils.buildCommandStr(commands);
logger.info("task run command:\n{}", cmdStr);
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
/**
* clear
*/
private void clear() {
List<String> markerList = new ArrayList<>();
markerList.add(ch.qos.logback.classic.ClassicConstants.FINALIZE_SESSION_MARKER.toString());
if (!logBuffer.isEmpty()) {
logHandler.accept(logBuffer);
logBuffer.clear();
}
logHandler.accept(markerList);
}
/**
* get the standard output of the process
*
* @param process process
*/
private void parseProcessOutput(Process process) {
String threadLoggerInfoName = String.format(LoggerUtils.TASK_LOGGER_THREAD_NAME + "-%s", taskExecutionContext.getTaskAppId());
ExecutorService getOutputLogService = ThreadUtils.newDaemonSingleThreadExecutor(threadLoggerInfoName + "-" + "getOutputLogService");
getOutputLogService.submit(() -> {
BufferedReader inReader = null;
try { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,775 | [Improvement][Worker] Task log may be lost | **Describe the question**
All the code in this issue is at `AbstractCommandExecutor`.
If you have seen the task logging code in DolphinScheduler, you probably know that we currently use a producer-consumer model for logging.
We use one thread to write the log to the collection.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L351-L371
And we use another thread to consume the log from the collection and write it to the log file.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L375-L390
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L549-L562
The problem is that any logs added to the collection while execution is between line 558 and line 560 will be lost (`logHandler.accept` will take some time).
**Which version of DolphinScheduler:**
-[1.3.6-release]
-[dev]
**Describe alternatives you've considered**
There are two ways to solve the issue:
1. We can use a blocking queue to store the log instead of using `Collections.synchronizedList`.
2. We can directly use a read-write lock in `AbstractCommandExecutor` to solve the concurrency problem, instead of using `Collections.synchronizedList`.
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java | inReader = new BufferedReader(new InputStreamReader(process.getInputStream()));
String line;
logBuffer.add("welcome to use bigdata scheduling system...");
while ((line = inReader.readLine()) != null) {
if (line.startsWith("${setValue(")) {
varPool.append(line.substring("${setValue(".length(), line.length() - 2));
varPool.append("$VarPool$");
} else {
logBuffer.add(line);
}
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
} finally {
logOutputIsScuccess = true;
close(inReader);
}
});
getOutputLogService.shutdown();
ExecutorService parseProcessOutputExecutorService = ThreadUtils.newDaemonSingleThreadExecutor(threadLoggerInfoName);
parseProcessOutputExecutorService.submit(() -> {
try {
long lastFlushTime = System.currentTimeMillis();
while (logBuffer.size() > 0 || !logOutputIsScuccess) {
if (logBuffer.size() > 0) {
lastFlushTime = flush(lastFlushTime);
} else {
Thread.sleep(Constants.DEFAULT_LOG_FLUSH_INTERVAL);
}
} |
            } catch (Exception e) {
logger.error(e.getMessage(), e);
} finally {
clear();
}
});
parseProcessOutputExecutorService.shutdown();
}
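    // Both lambdas above share logBuffer: the first appends each line of process output, the second
    // periodically drains it through flush(long) and hands the batches to logHandler.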
/**
* check yarn state
*
* @param appIds application id list
     * @return true if every yarn application finished successfully
*/
public boolean isSuccessOfYarnState(List<String> appIds) {
boolean result = true;
try {
for (String appId : appIds) {
logger.info("check yarn application status, appId:{}", appId);
while (Stopper.isRunning()) {
ExecutionStatus applicationStatus = HadoopUtils.getInstance().getApplicationStatus(appId);
if (logger.isDebugEnabled()) {
logger.debug("check yarn application status, appId:{}, final state:{}", appId, applicationStatus.name());
}
if (applicationStatus.equals(ExecutionStatus.FAILURE)
|| applicationStatus.equals(ExecutionStatus.KILL)) {
return false;
}
if (applicationStatus.equals(ExecutionStatus.SUCCESS)) {
break; |
                    }
ThreadUtils.sleep(Constants.SLEEP_TIME_MILLIS);
}
}
} catch (Exception e) {
logger.error("yarn applications: {} , query status failed, exception:{}", StringUtils.join(appIds, ","), e);
result = false;
}
return result;
}
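    // For each application id this polls yarn once per SLEEP_TIME_MILLIS until a terminal state is
    // reached; FAILURE or KILL short-circuits the whole check to false.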
public int getProcessId() {
return getProcessId(process);
}
/**
* get app links
*
* @param logPath log path
* @return app id list
*/
private List<String> getAppIds(String logPath) {
List<String> logs = convertFile2List(logPath);
List<String> appIds = new ArrayList<>();
/**
     * analyse the log and collect the submitted yarn application ids
*/
for (String log : logs) {
String appId = findAppId(log);
if (StringUtils.isNotEmpty(appId) && !appIds.contains(appId)) {
logger.info("find app id: {}", appId);
appIds.add(appId); |
            }
}
return appIds;
}
/**
* convert file to list
*
* @param filename file name
* @return line list
*/
private List<String> convertFile2List(String filename) {
        List<String> lineList = new ArrayList<>(100);
File file = new File(filename);
if (!file.exists()) {
return lineList;
}
BufferedReader br = null;
try {
br = new BufferedReader(new InputStreamReader(new FileInputStream(filename), StandardCharsets.UTF_8));
String line = null;
while ((line = br.readLine()) != null) {
lineList.add(line);
}
} catch (Exception e) {
logger.error(String.format("read file: %s failed : ", filename), e);
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) { |
                    logger.error(e.getMessage(), e);
}
}
}
return lineList;
}
/**
* find app id
*
* @param line line
* @return appid
*/
private String findAppId(String line) {
Matcher matcher = APPLICATION_REGEX.matcher(line);
if (matcher.find()) {
return matcher.group();
}
return null;
}
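    // APPLICATION_REGEX (declared earlier in this class) matches yarn application ids of the usual
    // application_<clusterTimestamp>_<sequence> form inside a submission log line.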
/**
     * get the remaining execution time in seconds
*
* @return remain time
*/
private long getRemaintime() {
long usedTime = (System.currentTimeMillis() - taskExecutionContext.getStartTime().getTime()) / 1000;
long remainTime = taskExecutionContext.getTaskTimeout() - usedTime;
if (remainTime < 0) {
throw new RuntimeException("task execution time out");
} |
        return remainTime;
}
/**
* get process id
*
* @param process process
* @return process id
*/
private int getProcessId(Process process) {
int processId = 0;
try {
Field f = process.getClass().getDeclaredField(Constants.PID);
f.setAccessible(true);
processId = f.getInt(process);
} catch (Throwable e) {
logger.error(e.getMessage(), e);
}
return processId;
}
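    // getProcessId reflectively reads the field named by Constants.PID from java.lang.Process, which
    // assumes a UNIX-style Process implementation; Java 9+ exposes this portably as Process.pid().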
/**
     * flush when the log buffer size or the time since the last flush reaches its threshold
*
* @param lastFlushTime last flush time
* @return last flush time
*/
private long flush(long lastFlushTime) {
long now = System.currentTimeMillis();
/**
         * when the log buffer size or the flush interval reaches its threshold, flush
*/ |
        if (logBuffer.size() >= Constants.DEFAULT_LOG_ROWS_NUM || now - lastFlushTime > Constants.DEFAULT_LOG_FLUSH_INTERVAL) {
lastFlushTime = now;
            // hand the buffered lines to the log handler and reset the buffer
logHandler.accept(logBuffer);
logBuffer.clear();
}
return lastFlushTime;
}
/**
* close buffer reader
*
* @param inReader in reader
*/
private void close(BufferedReader inReader) {
if (inReader != null) {
try {
inReader.close();
} catch (IOException e) {
logger.error(e.getMessage(), e);
}
}
}
protected List<String> commandOptions() {
return Collections.emptyList();
}
protected abstract String buildCommandFilePath();
protected abstract String commandInterpreter();
protected abstract void createCommandFileIfNotExists(String execCommand, String commandFile) throws IOException;
} |
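A minimal sketch of the blocking-queue fix proposed in the quoted issue (illustrative only: BlockingQueueLogPump and its methods are assumptions, not DolphinScheduler APIs; the project itself keeps the lines in a synchronized list):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

// Hypothetical helper: a producer/consumer log pump built on LinkedBlockingQueue, so lines appended
// while the consumer is flushing cannot be observed-then-missed.
class BlockingQueueLogPump {
    private final BlockingQueue<String> logQueue = new LinkedBlockingQueue<>();
    private volatile boolean producerFinished = false;

    // called by the thread reading the process output
    void append(String line) {
        logQueue.offer(line);
    }

    // called once the process output stream is exhausted
    void markFinished() {
        producerFinished = true;
    }

    // called by the flushing thread; blocks briefly on the queue instead of busy-polling a shared list
    void drainTo(Consumer<List<String>> logHandler) throws InterruptedException {
        List<String> batch = new ArrayList<>();
        while (!producerFinished || !logQueue.isEmpty()) {
            String first = logQueue.poll(1, TimeUnit.SECONDS);
            if (first == null) {
                continue;
            }
            batch.add(first);
            logQueue.drainTo(batch);                  // grab whatever else is already queued
            logHandler.accept(new ArrayList<>(batch));
            batch.clear();
        }
    }
}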
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractTask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task;
import static ch.qos.logback.classic.ClassicConstants.FINALIZE_SESSION_MARKER;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import java.util.List;
import java.util.StringJoiner;
import org.slf4j.Logger;
/**
* executive task
*/
public abstract class AbstractTask { |
    /**
* taskExecutionContext
**/
TaskExecutionContext taskExecutionContext;
/**
* log record
*/
protected Logger logger;
/**
* SHELL process pid
*/
protected int processId;
/** |
     * other resource manager appId , for example : YARN etc
*/
protected String appIds;
/**
* cancel
*/
protected volatile boolean cancel = false;
/**
* exit code
*/
protected volatile int exitStatusCode = -1;
/**
* constructor
*
* @param taskExecutionContext taskExecutionContext
* @param logger logger
*/
protected AbstractTask(TaskExecutionContext taskExecutionContext, Logger logger) {
this.taskExecutionContext = taskExecutionContext;
this.logger = logger;
}
/**
* init task
*
* @throws Exception exception
*/
public void init() throws Exception {
}
/**
* task handle |
     *
* @throws Exception exception
*/
public abstract void handle() throws Exception;
/**
* result processing
*
* @throws Exception exception
*/
public void after() throws Exception {
}
/**
* cancel application
*
* @param status status
* @throws Exception exception
*/
public void cancelApplication(boolean status) throws Exception {
this.cancel = status;
}
/**
* log handle
*
* @param logs log list
*/
public void logHandle(List<String> logs) {
if (logs.contains(FINALIZE_SESSION_MARKER.toString())) {
logger.info(FINALIZE_SESSION_MARKER, FINALIZE_SESSION_MARKER.toString());
} else { |
            StringJoiner joiner = new StringJoiner("\n\t");
logs.forEach(joiner::add);
logger.info(" -> {}", joiner);
}
}
/**
* get exit status code
*
* @return exit status code
*/
public int getExitStatusCode() {
return exitStatusCode;
}
public void setExitStatusCode(int exitStatusCode) {
this.exitStatusCode = exitStatusCode;
}
public String getAppIds() {
return appIds;
}
public void setAppIds(String appIds) {
this.appIds = appIds;
}
public int getProcessId() {
return processId;
}
public void setProcessId(int processId) {
this.processId = processId;
} |
    /**
* get task parameters
*
* @return AbstractParameters
*/
public abstract AbstractParameters getParameters();
private boolean typeIsNormalTask(String taskType) {
return !(TaskType.SUB_PROCESS.getDesc().equalsIgnoreCase(taskType) || TaskType.DEPENDENT.getDesc().equalsIgnoreCase(taskType));
}
/**
* get exit status according to exitCode
*
* @return exit status
*/
public ExecutionStatus getExitStatus() {
ExecutionStatus status;
switch (getExitStatusCode()) {
case Constants.EXIT_CODE_SUCCESS:
status = ExecutionStatus.SUCCESS;
break;
case Constants.EXIT_CODE_KILL:
status = ExecutionStatus.KILL;
break;
default:
status = ExecutionStatus.FAILURE;
break;
}
return status;
}
} |
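As a usage sketch only (it assumes the DolphinScheduler classes above are on the classpath; NoOpTask is a made-up example, not a real task type):

package org.apache.dolphinscheduler.server.worker.task;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.slf4j.Logger;

// Hypothetical smallest possible concrete task, showing how handle() reports its result.
class NoOpTask extends AbstractTask {

    protected NoOpTask(TaskExecutionContext taskExecutionContext, Logger logger) {
        super(taskExecutionContext, logger);
    }

    @Override
    public void handle() throws Exception {
        logger.info("nothing to execute");
        // EXIT_CODE_SUCCESS makes getExitStatus() report ExecutionStatus.SUCCESS
        setExitStatusCode(Constants.EXIT_CODE_SUCCESS);
    }

    @Override
    public AbstractParameters getParameters() {
        return null; // a real task would hold its deserialized task parameters here
    }
}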
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/PythonCommandExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task;
import org.apache.dolphinscheduler.common.Constants; |
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Collections;
import java.util.List;
import java.util.function.Consumer;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* python command executor
*/
public class PythonCommandExecutor extends AbstractCommandExecutor {
/**
* logger
*/
private static final Logger logger = LoggerFactory.getLogger(PythonCommandExecutor.class);
/**
* python
*/
public static final String PYTHON = "python";
private static final Pattern PYTHON_PATH_PATTERN = Pattern.compile("/bin/python[\\d.]*$"); |
    /**
* constructor
* @param logHandler log handler
* @param taskExecutionContext taskExecutionContext
* @param logger logger
*/
public PythonCommandExecutor(Consumer<List<String>> logHandler,
TaskExecutionContext taskExecutionContext,
Logger logger) {
super(logHandler,taskExecutionContext,logger);
}
/**
* build command file path
*
* @return command file path
*/
@Override
protected String buildCommandFilePath() {
return String.format("%s/py_%s.command", taskExecutionContext.getExecutePath(), taskExecutionContext.getTaskAppId());
}
/**
* create command file if not exists
* @param execCommand exec command
* @param commandFile command file
* @throws IOException io exception
*/
@Override
protected void createCommandFileIfNotExists(String execCommand, String commandFile) throws IOException {
logger.info("tenantCode :{}, task dir:{}", taskExecutionContext.getTenantCode(), taskExecutionContext.getExecutePath());
if (!Files.exists(Paths.get(commandFile))) { |
            logger.info("generate command file:{}", commandFile);
StringBuilder sb = new StringBuilder();
sb.append("#-*- encoding=utf8 -*-\n");
sb.append("\n\n");
sb.append(execCommand);
logger.info(sb.toString());
FileUtils.writeStringToFile(new File(commandFile),
sb.toString(),
StandardCharsets.UTF_8);
}
}
/**
* get command options
* @return command options list
*/
@Override
protected List<String> commandOptions() {
return Collections.singletonList("-u");
}
/**
* Gets the command path to which Python can execute
* @return python command path
*/
@Override
protected String commandInterpreter() {
String pythonHome = getPythonHome(taskExecutionContext.getEnvFile());
return getPythonCommand(pythonHome);
} |
    /**
* get python command
*
* @param pythonHome python home
* @return python command
*/
public static String getPythonCommand(String pythonHome) {
if (StringUtils.isEmpty(pythonHome)) {
return PYTHON;
}
File file = new File(pythonHome);
if (file.exists() && file.isFile()) {
return pythonHome;
}
if (PYTHON_PATH_PATTERN.matcher(pythonHome).find()) {
return pythonHome;
}
return Paths.get(pythonHome, "/bin/python").toString();
}
/**
* get python home
*
* @param envPath env path
* @return python home
*/
public static String getPythonHome(String envPath) {
BufferedReader br = null;
StringBuilder sb = new StringBuilder();
try {
br = new BufferedReader(new InputStreamReader(new FileInputStream(envPath))); |
            String line;
while ((line = br.readLine()) != null) {
if (line.contains(Constants.PYTHON_HOME)) {
sb.append(line);
break;
}
}
String result = sb.toString();
if (StringUtils.isEmpty(result)) {
return null;
}
String[] arrs = result.split(Constants.EQUAL_SIGN);
if (arrs.length == 2) {
return arrs[1];
}
} catch (IOException e) {
logger.error("read file failure", e);
} finally {
try {
if (br != null) {
br.close();
}
} catch (IOException e) {
logger.error(e.getMessage(), e);
}
}
return null;
}
} |
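A small sketch of how the resolution rules in getPythonCommand behave (it assumes the class above is on the classpath and that the sample paths do not exist locally, so the file-existence branch is skipped):

import org.apache.dolphinscheduler.server.worker.task.PythonCommandExecutor;

class PythonCommandResolutionDemo {
    public static void main(String[] args) {
        // empty PYTHON_HOME -> fall back to plain "python" from the PATH
        System.out.println(PythonCommandExecutor.getPythonCommand(""));
        // a value already ending in /bin/python3 is used as-is
        System.out.println(PythonCommandExecutor.getPythonCommand("/opt/soft/python3/bin/python3"));
        // a plain installation directory gets "/bin/python" appended
        System.out.println(PythonCommandExecutor.getPythonCommand("/opt/soft/python3"));
    }
}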
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/ShellCommandExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ |
package org.apache.dolphinscheduler.server.worker.task;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.commons.io.FileUtils;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.List;
import java.util.function.Consumer;
import org.slf4j.Logger;
/**
* shell command executor
*/
public class ShellCommandExecutor extends AbstractCommandExecutor {
/**
* For Unix-like, using sh
*/
public static final String SH = "sh";
/**
* For Windows, using cmd.exe
*/
public static final String CMD = "cmd.exe";
/**
* constructor
* @param logHandler logHandler
* @param taskExecutionContext taskExecutionContext
* @param logger logger
*/ |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,775 | [Improvement][Worker] Task log may be lost | **Describe the question**
All the code in this issue is at `AbstractCommandExecutor`.
If you have seen the task logging code in DolphinScheduler, you probably know that we currently use a producer-consumer model for logging.
We use one thread to write the log to the collection.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L351-L371
And we use another thread to consume the log from the collection and write it to the log file.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L375-L390
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L549-L562
The problem is that while the code between line 558 and line 560 is executing, any log lines added to the collection will be lost (`logHandler.accept` takes some time).
**Which version of DolphinScheduler:**
-[1.3.6-release]
-[dev]
**Describe alternatives you've considered**
There are two ways to solve the issue:
1. We can use a blocking queue to store the log instead of using `Collections.synchronizedList`.
2. We can use a read-write lock directly in `AbstractCommandExecutor` to solve the concurrency problem, instead of using `Collections.synchronizedList`.
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/ShellCommandExecutor.java | public ShellCommandExecutor(Consumer<List<String>> logHandler,
TaskExecutionContext taskExecutionContext,
Logger logger) {
super(logHandler,taskExecutionContext,logger);
}
public ShellCommandExecutor(List<String> logBuffer) {
super(logBuffer);
}
@Override
protected String buildCommandFilePath() {
return String.format("%s/%s.%s"
, taskExecutionContext.getExecutePath()
, taskExecutionContext.getTaskAppId()
, OSUtils.isWindows() ? "bat" : "command");
}
/**
* get command type
* @return command type
*/
@Override
protected String commandInterpreter() {
return OSUtils.isWindows() ? CMD : SH;
}
/**
* create command file if not exists
* @param execCommand exec command
* @param commandFile command file
* @throws IOException io exception
*/ |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,775 | [Improvement][Worker] Task log may be lost | **Describe the question**
All the code in this issue is at `AbstractCommandExecutor`.
If you have seen the task logging code in DolphinScheduler, you probably know that we currently use a producer-consumer model for logging.
We use one thread to write the log to the collection.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L351-L371
And we use another thread to consume the log from the collection and write it to the log file.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L375-L390
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L549-L562
The problem is that while the code between line 558 and line 560 is executing, any log lines added to the collection will be lost (`logHandler.accept` takes some time).
**Which version of DolphinScheduler:**
-[1.3.6-release]
-[dev]
**Describe alternatives you've considered**
There are two ways to solve the issue:
1. We can use a blocking queue to store the log instead of using `Collections.synchronizedList`.
2. We can use a read-write lock directly in `AbstractCommandExecutor` to solve the concurrency problem, instead of using `Collections.synchronizedList`.
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/ShellCommandExecutor.java | @Override
protected void createCommandFileIfNotExists(String execCommand, String commandFile) throws IOException {
logger.info("tenantCode user:{}, task dir:{}", taskExecutionContext.getTenantCode(),
taskExecutionContext.getTaskAppId());
if (!Files.exists(Paths.get(commandFile))) {
logger.info("create command file:{}", commandFile);
StringBuilder sb = new StringBuilder();
if (OSUtils.isWindows()) {
sb.append("@echo off\n");
sb.append("cd /d %~dp0\n");
if (taskExecutionContext.getEnvFile() != null) {
sb.append("call ").append(taskExecutionContext.getEnvFile()).append("\n");
}
} else {
sb.append("#!/bin/sh\n");
sb.append("BASEDIR=$(cd `dirname $0`; pwd)\n");
sb.append("cd $BASEDIR\n");
if (taskExecutionContext.getEnvFile() != null) {
sb.append("source ").append(taskExecutionContext.getEnvFile()).append("\n");
}
}
sb.append(execCommand);
logger.info("command : {}", sb.toString());
FileUtils.writeStringToFile(new File(commandFile), sb.toString(), StandardCharsets.UTF_8);
}
}
} |
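To make the Unix branch above concrete, here is a hedged Java illustration of what the generated `<taskAppId>.command` wrapper ends up containing when an env file is configured; the env-file path and the final task command are hypothetical values, not taken from a real deployment:

```java
public class WrapperScriptExample {
    // Shape of the file written by createCommandFileIfNotExists on a non-Windows host.
    // The env path and the echo command are placeholders for illustration only.
    static final String UNIX_WRAPPER =
            "#!/bin/sh\n"
          + "BASEDIR=$(cd `dirname $0`; pwd)\n"
          + "cd $BASEDIR\n"
          + "source /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh\n"
          + "echo 'hello dolphinscheduler'";
}
```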
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,775 | [Improvement][Worker] Task log may be lost | **Describe the question**
All the code in this issue is at `AbstractCommandExecutor`.
If you have seen the task logging code in DolphinScheduler, you probably know that we currently use a producer-consumer model for logging.
We use one thread to write the log to the collection.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L351-L371
And we use another thread to consume the log from the collection and write it to the log file.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L375-L390
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L549-L562
The problem is that while the code between line 558 and line 560 is executing, any log lines added to the collection will be lost (`logHandler.accept` takes some time).
**Which version of DolphinScheduler:**
-[1.3.6-release]
-[dev]
**Describe alternatives you've considered**
There are two ways to solve the issue:
1. We can use a blocking queue to store the log instead of using `Collections.synchronizedList`.
2. We can use a read-write lock directly in `AbstractCommandExecutor` to solve the concurrency problem, instead of using `Collections.synchronizedList`.
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/sqoop/SqoopTaskTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task.sqoop;
import org.apache.dolphinscheduler.common.task.sqoop.SqoopParameters;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.server.entity.SqoopTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.worker.task.sqoop.generator.SqoopJobGenerator;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.process.ProcessService;
import java.util.ArrayList;
import java.util.Collections; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,775 | [Improvement][Worker] Task log may be lost | **Describe the question**
All the code in this issue is at `AbstractCommandExecutor`.
If you have seen the task logging code in DolphinScheduler, you probably know that we currently use a producer-consumer model for logging.
We use one thread to write the log to the collection.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L351-L371
And we use another thread to consume the log from the collection and write it to the log file.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L375-L390
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L549-L562
The problem is that while the code between line 558 and line 560 is executing, any log lines added to the collection will be lost (`logHandler.accept` takes some time).
**Which version of DolphinScheduler:**
-[1.3.6-release]
-[dev]
**Describe alternatives you've considered**
There are two ways to solve the issue:
1. We can use a blocking queue to store the log instead of using `Collections.synchronizedList`.
2. We can use a read-write lock directly in `AbstractCommandExecutor` to solve the concurrency problem, instead of using `Collections.synchronizedList`.
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/sqoop/SqoopTaskTest.java | import java.util.Date;
import java.util.List;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.ApplicationContext;
/**
* sqoop task test
*/
@RunWith(MockitoJUnitRunner.Silent.class)
public class SqoopTaskTest {
private static final Logger logger = LoggerFactory.getLogger(SqoopTaskTest.class);
private SqoopTask sqoopTask;
@Before
public void before() {
ProcessService processService = Mockito.mock(ProcessService.class);
ApplicationContext applicationContext = Mockito.mock(ApplicationContext.class);
SpringApplicationContext springApplicationContext = new SpringApplicationContext();
springApplicationContext.setApplicationContext(applicationContext);
Mockito.when(applicationContext.getBean(ProcessService.class)).thenReturn(processService);
TaskExecutionContext taskExecutionContext = new TaskExecutionContext();
taskExecutionContext.setTaskAppId(String.valueOf(System.currentTimeMillis()));
taskExecutionContext.setTenantCode("1");
taskExecutionContext.setEnvFile(".dolphinscheduler_env.sh");
taskExecutionContext.setStartTime(new Date()); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,775 | [Improvement][Worker] Task log may be lost | **Describe the question**
All the code in this issue is at `AbstractCommandExecutor`.
If you have seen the task logging code in DolphinScheduler, you probably know that we currently use a producer-consumer model for logging.
We use one thread to write the log to the collection.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L351-L371
And we use another thread to consume the log from the collection and write it to the log file.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L375-L390
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L549-L562
The problem is that while the code between line 558 and line 560 is executing, any log lines added to the collection will be lost (`logHandler.accept` takes some time).
**Which version of DolphinScheduler:**
-[1.3.6-release]
-[dev]
**Describe alternatives you've considered**
There are two ways to solve the issue:
1. We can use a blocking queue to store the log instead of using `Collections.synchronizedList`.
2. We can use a read-write lock directly in `AbstractCommandExecutor` to solve the concurrency problem, instead of using `Collections.synchronizedList`.
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/sqoop/SqoopTaskTest.java | taskExecutionContext.setTaskTimeout(0);
taskExecutionContext.setTaskParams("{\"jobName\":\"sqoop_import\",\"jobType\":\"TEMPLATE\",\"concurrency\":1,"
+ "\"modelType\":\"import\",\"sourceType\":\"MYSQL\",\"targetType\":\"HIVE\",\"sourceParams\":\"{\\\"srcDatasource\\\":2,\\\"srcTable\\\":\\\"person_2\\\","
+ "\\\"srcQueryType\\\":\\\"1\\\",\\\"srcQuerySql\\\":\\\"SELECT * FROM person_2\\\",\\\"srcColumnType\\\":\\\"0\\\","
+ "\\\"srcColumns\\\":\\\"\\\",\\\"srcConditionList\\\":[],\\\"mapColumnHive\\\":[],"
+ "\\\"mapColumnJava\\\":[{\\\"prop\\\":\\\"id\\\",\\\"direct\\\":\\\"IN\\\",\\\"type\\\":\\\"VARCHAR\\\",\\\"value\\\":\\\"Integer\\\"}]}\""
+ ",\"targetParams\":\"{\\\"hiveDatabase\\\":\\\"stg\\\",\\\"hiveTable\\\":\\\"person_internal_2\\\",\\\"createHiveTable\\\":true,"
+ "\\\"dropDelimiter\\\":false,\\\"hiveOverWrite\\\":true,\\\"replaceDelimiter\\\":\\\"\\\",\\\"hivePartitionKey\\\":\\\"date\\\","
+ "\\\"hivePartitionValue\\\":\\\"2020-02-16\\\"}\",\"localParams\":[]}");
sqoopTask = new SqoopTask(taskExecutionContext, logger);
sqoopTask.init();
}
/**
* test SqoopJobGenerator
*/
@Test
public void testGenerator() {
TaskExecutionContext mysqlTaskExecutionContext = getMysqlTaskExecutionContext();
String mysqlToHdfs =
"{\"jobName\":\"sqoop_import\",\"hadoopCustomParams\":[{\"prop\":\"mapreduce.map.memory.mb\",\"direct\":\"IN\",\"type\":\"VARCHAR\",\"value\":\"4096\"}],"
+ "\"sqoopAdvancedParams\":[{\"prop\":\"--direct\",\"direct\":\"IN\",\"type\":\"VARCHAR\",\"value\":\"\"}],\"jobType\":\"TEMPLATE\",\"concurrency\":1,"
+ "\"modelType\":\"import\",\"sourceType\":\"MYSQL\",\"targetType\":\"HDFS\","
+ "\"sourceParams\":\"{\\\"srcDatasource\\\":2,\\\"srcTable\\\":\\\"person_2\\\",\\\"srcQueryType\\\":\\\"0\\\",\\\"srcQuerySql\\\":\\\"\\\",\\\"srcColumnType\\\":\\\"0\\\","
+ "\\\"srcColumns\\\":\\\"\\\",\\\"srcConditionList\\\":[],\\\"mapColumnHive\\\":[],\\\"mapColumnJava\\\":[]}\",\"targetParams\":\"{\\\"targetPath\\\":\\\"/ods/tmp/test/person7\\\","
+ "\\\"deleteTargetDir\\\":true,\\\"fileType\\\":\\\"--as-textfile\\\",\\\"compressionCodec\\\":\\\"\\\",\\\"fieldsTerminated\\\":\\\"@\\\","
+ "\\\"linesTerminated\\\":\\\"\\\\\\\\n\\\"}\",\"localParams\":[]}";
SqoopParameters mysqlToHdfsParams = JSONUtils.parseObject(mysqlToHdfs, SqoopParameters.class); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,775 | [Improvement][Worker] Task log may be lost | **Describe the question**
All the code in this issue is at `AbstractCommandExecutor`.
If you have seen the task logging code in DolphinScheduler, you probably know that we currently use a producer-consumer model for logging.
We use one thread to write the log to the collection.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L351-L371
And we use another thread to consume the log from the collection and write it to the log file.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L375-L390
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L549-L562
The problem is that while the code between line 558 and line 560 is executing, any log lines added to the collection will be lost (`logHandler.accept` takes some time).
**Which version of DolphinScheduler:**
-[1.3.6-release]
-[dev]
**Describe alternatives you've considered**
There are two ways to solve the issue:
1. We can use a blocking queue to store the log instead of using `Collections.synchronizedList`.
2. We can use a read-write lock directly in `AbstractCommandExecutor` to solve the concurrency problem, instead of using `Collections.synchronizedList`.
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/sqoop/SqoopTaskTest.java | SqoopJobGenerator generator = new SqoopJobGenerator();
String mysqlToHdfsScript = generator.generateSqoopJob(mysqlToHdfsParams, mysqlTaskExecutionContext);
String mysqlToHdfsExpected =
"sqoop import -D mapred.job.name=sqoop_import -D mapreduce.map.memory.mb=4096 --direct -m 1 --connect "
+ "\"jdbc:mysql://192.168.0.111:3306/test?allowLoadLocalInfile=false&autoDeserialize=false&allowLocalInfile=false&allowUrlInLocalInfile=false\" "
+ "--username kylo --password \"123456\" --table person_2 --target-dir /ods/tmp/test/person7 --as-textfile "
+ "--delete-target-dir --fields-terminated-by '@' --lines-terminated-by '\\n' --null-non-string 'NULL' --null-string 'NULL'";
Assert.assertEquals(mysqlToHdfsExpected, mysqlToHdfsScript);
String hdfsToMysql = "{\"jobName\":\"sqoop_import\",\"jobType\":\"TEMPLATE\",\"concurrency\":1,\"modelType\":\"export\",\"sourceType\":\"HDFS\","
+ "\"targetType\":\"MYSQL\",\"sourceParams\":\"{\\\"exportDir\\\":\\\"/ods/tmp/test/person7\\\"}\","
+ "\"targetParams\":\"{\\\"targetDatasource\\\":2,\\\"targetTable\\\":\\\"person_3\\\",\\\"targetColumns\\\":\\\"id,name,age,sex,create_time\\\","
+ "\\\"preQuery\\\":\\\"\\\",\\\"isUpdate\\\":true,\\\"targetUpdateKey\\\":\\\"id\\\",\\\"targetUpdateMode\\\":\\\"allowinsert\\\","
+ "\\\"fieldsTerminated\\\":\\\"@\\\",\\\"linesTerminated\\\":\\\"\\\\\\\\n\\\"}\",\"localParams\":[]}";
SqoopParameters hdfsToMysqlParams = JSONUtils.parseObject(hdfsToMysql, SqoopParameters.class);
String hdfsToMysqlScript = generator.generateSqoopJob(hdfsToMysqlParams, mysqlTaskExecutionContext);
String hdfsToMysqlScriptExpected =
"sqoop export -D mapred.job.name=sqoop_import -m 1 --export-dir /ods/tmp/test/person7 --connect "
+ "\"jdbc:mysql://192.168.0.111:3306/test?allowLoadLocalInfile=false&autoDeserialize=false&allowLocalInfile=false&allowUrlInLocalInfile=false\" "
+ "--username kylo --password \"123456\" --table person_3 --columns id,name,age,sex,create_time --fields-terminated-by '@' "
+ "--lines-terminated-by '\\n' --update-key id --update-mode allowinsert";
Assert.assertEquals(hdfsToMysqlScriptExpected, hdfsToMysqlScript);
String hiveToMysql =
"{\"jobName\":\"sqoop_import\",\"jobType\":\"TEMPLATE\",\"concurrency\":1,\"modelType\":\"export\",\"sourceType\":\"HIVE\","
+ "\"targetType\":\"MYSQL\",\"sourceParams\":\"{\\\"hiveDatabase\\\":\\\"stg\\\",\\\"hiveTable\\\":\\\"person_internal\\\","
+ "\\\"hivePartitionKey\\\":\\\"date\\\",\\\"hivePartitionValue\\\":\\\"2020-02-17\\\"}\","
+ "\"targetParams\":\"{\\\"targetDatasource\\\":2,\\\"targetTable\\\":\\\"person_3\\\",\\\"targetColumns\\\":\\\"\\\",\\\"preQuery\\\":\\\"\\\","
+ "\\\"isUpdate\\\":false,\\\"targetUpdateKey\\\":\\\"\\\",\\\"targetUpdateMode\\\":\\\"allowinsert\\\",\\\"fieldsTerminated\\\":\\\"@\\\","
+ "\\\"linesTerminated\\\":\\\"\\\\\\\\n\\\"}\",\"localParams\":[]}"; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,775 | [Improvement][Worker] Task log may be lost | **Describe the question**
All the code in this issue is at `AbstractCommandExecutor`.
If you have seen the task logging code in DolphinScheduler, you probably know that we currently use a producer-consumer model for logging.
We use one thread to write the log to the collection.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L351-L371
And we use another thread to consume the log from the collection and write it to the log file.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L375-L390
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L549-L562
The problem is that while the code between line 558 and line 560 is executing, any log lines added to the collection will be lost (`logHandler.accept` takes some time).
**Which version of DolphinScheduler:**
-[1.3.6-release]
-[dev]
**Describe alternatives you've considered**
There are two ways to solve the issue:
1. We can use a blocking queue to store the log instead of using `Collections.synchronizedList`.
2. We can use a read-write lock directly in `AbstractCommandExecutor` to solve the concurrency problem, instead of using `Collections.synchronizedList`.
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/sqoop/SqoopTaskTest.java | SqoopParameters hiveToMysqlParams = JSONUtils.parseObject(hiveToMysql, SqoopParameters.class);
String hiveToMysqlScript = generator.generateSqoopJob(hiveToMysqlParams, mysqlTaskExecutionContext);
String hiveToMysqlExpected =
"sqoop export -D mapred.job.name=sqoop_import -m 1 --hcatalog-database stg --hcatalog-table person_internal --hcatalog-partition-keys date "
+ "--hcatalog-partition-values 2020-02-17 --connect \"jdbc:mysql://192.168.0.111:3306/test?allowLoadLocalInfile="
+ "false&autoDeserialize=false&allowLocalInfile=false&allowUrlInLocalInfile=false\" --username kylo --password \"123456\" --table person_3 "
+ "--fields-terminated-by '@' --lines-terminated-by '\\n'";
Assert.assertEquals(hiveToMysqlExpected, hiveToMysqlScript);
String mysqlToHive =
"{\"jobName\":\"sqoop_import\",\"jobType\":\"TEMPLATE\",\"concurrency\":1,\"modelType\":\"import\",\"sourceType\":\"MYSQL\",\"targetType\":\"HIVE\","
+ "\"sourceParams\":\"{\\\"srcDatasource\\\":2,\\\"srcTable\\\":\\\"person_2\\\",\\\"srcQueryType\\\":\\\"1\\\","
+ "\\\"srcQuerySql\\\":\\\"SELECT * FROM person_2\\\",\\\"srcColumnType\\\":\\\"0\\\",\\\"srcColumns\\\":\\\"\\\",\\\"srcConditionList\\\":[],"
+ "\\\"mapColumnHive\\\":[],\\\"mapColumnJava\\\":[{\\\"prop\\\":\\\"id\\\",\\\"direct\\\":\\\"IN\\\",\\\"type\\\":\\\"VARCHAR\\\",\\\"value\\\":\\\"Integer\\\"}]}\","
+ "\"targetParams\":\"{\\\"hiveDatabase\\\":\\\"stg\\\",\\\"hiveTable\\\":\\\"person_internal_2\\\",\\\"createHiveTable\\\":true,\\\"dropDelimiter\\\":false,"
+ "\\\"hiveOverWrite\\\":true,\\\"replaceDelimiter\\\":\\\"\\\",\\\"hivePartitionKey\\\":\\\"date\\\",\\\"hivePartitionValue\\\":\\\"2020-02-16\\\"}\",\"localParams\":[]}";
SqoopParameters mysqlToHiveParams = JSONUtils.parseObject(mysqlToHive, SqoopParameters.class);
String mysqlToHiveScript = generator.generateSqoopJob(mysqlToHiveParams, mysqlTaskExecutionContext);
String mysqlToHiveExpected =
"sqoop import -D mapred.job.name=sqoop_import -m 1 --connect \"jdbc:mysql://192.168.0.111:3306/"
+ "test?allowLoadLocalInfile=false&autoDeserialize=false&allowLocalInfile=false&allowUrlInLocalInfile=false\" "
+ "--username kylo --password \"123456\" "
+ "--query \"SELECT * FROM person_2 WHERE \\$CONDITIONS\" --map-column-java id=Integer --hive-import --hive-database stg --hive-table person_internal_2 "
+ "--create-hive-table --hive-overwrite --delete-target-dir --hive-partition-key date --hive-partition-value 2020-02-16";
Assert.assertEquals(mysqlToHiveExpected, mysqlToHiveScript);
String sqoopCustomString = "{\"jobType\":\"CUSTOM\",\"localParams\":[],\"customShell\":\"sqoop import\"}";
SqoopParameters sqoopCustomParams = JSONUtils.parseObject(sqoopCustomString, SqoopParameters.class);
String sqoopCustomScript = generator.generateSqoopJob(sqoopCustomParams, new TaskExecutionContext());
String sqoopCustomExpected = "sqoop import"; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,775 | [Improvement][Worker] Task log may be lost | **Describe the question**
All the code in this issue is at `AbstractCommandExecutor`.
If you have seen the task logging code in DolphinScheduler, you probably know that we currently use a producer-consumer model for logging.
We use one thread to write the log to the collection.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L351-L371
And we use another thread to consume the log from the collection and write it to the log file.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L375-L390
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L549-L562
The problem is that while the code between line 558 and line 560 is executing, any log lines added to the collection will be lost (`logHandler.accept` takes some time).
**Which version of DolphinScheduler:**
-[1.3.6-release]
-[dev]
**Describe alternatives you've considered**
There are two ways to solve the issue:
1. We can use a blocking queue to store the log instead of using `Collections.synchronizedList`.
2. We can use a read-write lock directly in `AbstractCommandExecutor` to solve the concurrency problem, instead of using `Collections.synchronizedList`.
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/sqoop/SqoopTaskTest.java | Assert.assertEquals(sqoopCustomExpected, sqoopCustomScript);
}
/**
* get taskExecutionContext include mysql
*
* @return TaskExecutionContext
*/
private TaskExecutionContext getMysqlTaskExecutionContext() {
TaskExecutionContext taskExecutionContext = new TaskExecutionContext();
SqoopTaskExecutionContext sqoopTaskExecutionContext = new SqoopTaskExecutionContext();
String mysqlSourceConnectionParams =
"{\"address\":\"jdbc:mysql://192.168.0.111:3306\",\"database\":\"test\",\"jdbcUrl\":\"jdbc:mysql://192.168.0.111:3306/test\",\"user\":\"kylo\",\"password\":\"123456\"}";
String mysqlTargetConnectionParams =
"{\"address\":\"jdbc:mysql://192.168.0.111:3306\",\"database\":\"test\",\"jdbcUrl\":\"jdbc:mysql://192.168.0.111:3306/test\",\"user\":\"kylo\",\"password\":\"123456\"}";
sqoopTaskExecutionContext.setDataSourceId(2);
sqoopTaskExecutionContext.setDataTargetId(2);
sqoopTaskExecutionContext.setSourcetype(0);
sqoopTaskExecutionContext.setTargetConnectionParams(mysqlTargetConnectionParams);
sqoopTaskExecutionContext.setSourceConnectionParams(mysqlSourceConnectionParams);
sqoopTaskExecutionContext.setTargetType(0);
taskExecutionContext.setSqoopTaskExecutionContext(sqoopTaskExecutionContext);
return taskExecutionContext;
}
@Test
public void testGetParameters() {
Assert.assertNotNull(sqoopTask.getParameters());
}
/**
* Method: init
*/ |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,775 | [Improvement][Worker] Task log may be lost | **Describe the question**
All the code in this issue is at `AbstractCommandExecutor`.
If you have seen the task logging code in DolphinScheduler, you probably know that we currently use a producer-consumer model for logging.
We use one thread to write the log to the collection.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L351-L371
And we use another thread to consume the log from the collection and write it to the log file.
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L375-L390
https://github.com/apache/dolphinscheduler/blob/b114d330ac1fa7de27e09cc73c0804a7536f3b28/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java#L549-L562
The problem is that while the code between line 558 and line 560 is executing, any log lines added to the collection will be lost (`logHandler.accept` takes some time).
**Which version of DolphinScheduler:**
-[1.3.6-release]
-[dev]
**Describe alternatives you've considered**
There are two ways to solve the issue:
1. We can use a blocking queue to store the log instead of using `Collections.synchronizedList`.
2. We can use a read-write lock directly in `AbstractCommandExecutor` to solve the concurrency problem, instead of using `Collections.synchronizedList`.
| https://github.com/apache/dolphinscheduler/issues/5775 | https://github.com/apache/dolphinscheduler/pull/5783 | 626c47399af4b7e8a839165d9d36fdbe04cc54cd | 30af55b82ae560a5300930af062ad3a88d542e3d | 2021-07-08T13:49:44Z | java | 2021-07-09T17:14:59Z | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/sqoop/SqoopTaskTest.java | @Test
public void testInit() {
try {
sqoopTask.init();
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
@Test
public void testLogHandler() throws InterruptedException {
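// Simulates the race described in this issue: thread1 keeps producing log lines while
// thread2 concurrently consumes them through logHandle on the same synchronizedList.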
List<String> list = Collections.synchronizedList(new ArrayList<>());
Thread thread1 = new Thread(() -> {
for (int i = 0; i < 10; i++) {
list.add("test add log");
}
});
Thread thread2 = new Thread(() -> {
for (int i = 0; i < 10; i++) {
sqoopTask.logHandle(list);
}
});
thread1.start();
thread2.start();
thread1.join();
thread2.join();
Assert.assertTrue(true);
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/SchedulerController.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.controller;
import static org.apache.dolphinscheduler.api.enums.Status.CREATE_SCHEDULE_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.DELETE_SCHEDULE_CRON_BY_ID_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.OFFLINE_SCHEDULE_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.PREVIEW_SCHEDULE_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.PUBLISH_SCHEDULE_ONLINE_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.QUERY_SCHEDULE_LIST_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.QUERY_SCHEDULE_LIST_PAGING_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.UPDATE_SCHEDULE_ERROR;
import static org.apache.dolphinscheduler.common.Constants.SESSION_USER;
import org.apache.dolphinscheduler.api.aspect.AccessLogAnnotation;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ApiException; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/SchedulerController.java | import org.apache.dolphinscheduler.api.service.SchedulerService;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.entity.User;
import java.util.Map;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestAttribute;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseStatus;
import org.springframework.web.bind.annotation.RestController;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
import io.swagger.annotations.ApiParam;
import springfox.documentation.annotations.ApiIgnore;
/**
* scheduler controller
*/
@Api(tags = "SCHEDULER_TAG") |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/SchedulerController.java | @RestController
@RequestMapping("/projects/{projectCode}/schedule")
public class SchedulerController extends BaseController {
public static final String DEFAULT_WARNING_TYPE = "NONE";
public static final String DEFAULT_NOTIFY_GROUP_ID = "1";
public static final String DEFAULT_FAILURE_POLICY = "CONTINUE";
public static final String DEFAULT_PROCESS_INSTANCE_PRIORITY = "MEDIUM";
@Autowired
private SchedulerService schedulerService;
/**
* create schedule
*
* @param loginUser login user
* @param projectCode project code
* @param processDefinitionCode process definition code
* @param schedule scheduler
* @param warningType warning type
* @param warningGroupId warning group id
* @param failureStrategy failure strategy
* @param processInstancePriority process instance priority
* @param workerGroup worker group
* @return create result code |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/SchedulerController.java | */
@ApiOperation(value = "createSchedule", notes = "CREATE_SCHEDULE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "processDefinitionCode", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "Long", example = "100"),
@ApiImplicitParam(name = "schedule", value = "SCHEDULE", dataType = "String",
example = "{'startTime':'2019-06-10 00:00:00','endTime':'2019-06-13 00:00:00','timezoneId':'America/Phoenix','crontab':'0 0 3/6 * * ? *'}"),
@ApiImplicitParam(name = "warningType", value = "WARNING_TYPE", type = "WarningType"),
@ApiImplicitParam(name = "warningGroupId", value = "WARNING_GROUP_ID", dataType = "Int", example = "100"),
@ApiImplicitParam(name = "failureStrategy", value = "FAILURE_STRATEGY", type = "FailureStrategy"),
@ApiImplicitParam(name = "workerGroupId", value = "WORKER_GROUP_ID", dataType = "Int", example = "100"),
@ApiImplicitParam(name = "processInstancePriority", value = "PROCESS_INSTANCE_PRIORITY", type = "Priority"),
})
@PostMapping("/create")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(CREATE_SCHEDULE_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result createSchedule(@ApiIgnore @RequestAttribute(value = SESSION_USER) User loginUser,
@ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
@RequestParam(value = "processDefinitionCode") long processDefinitionCode,
@RequestParam(value = "schedule") String schedule,
@RequestParam(value = "warningType", required = false, defaultValue = DEFAULT_WARNING_TYPE) WarningType warningType,
@RequestParam(value = "warningGroupId", required = false, defaultValue = DEFAULT_NOTIFY_GROUP_ID) int warningGroupId,
@RequestParam(value = "failureStrategy", required = false, defaultValue = DEFAULT_FAILURE_POLICY) FailureStrategy failureStrategy,
@RequestParam(value = "workerGroup", required = false, defaultValue = "default") String workerGroup,
@RequestParam(value = "processInstancePriority", required = false, defaultValue = DEFAULT_PROCESS_INSTANCE_PRIORITY) Priority processInstancePriority) {
Map<String, Object> result = schedulerService.insertSchedule(loginUser, projectCode, processDefinitionCode, schedule,
warningType, warningGroupId, failureStrategy, processInstancePriority, workerGroup);
return returnDataList(result);
}
/** |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/SchedulerController.java | * updateProcessInstance schedule
*
* @param loginUser login user
* @param projectCode project code
* @param id scheduler id
* @param schedule scheduler
* @param warningType warning type
* @param warningGroupId warning group id
* @param failureStrategy failure strategy
* @param workerGroup worker group
* @param processInstancePriority process instance priority
* @return update result code
*/
@ApiOperation(value = "updateSchedule", notes = "UPDATE_SCHEDULE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "SCHEDULE_ID", required = true, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "schedule", value = "SCHEDULE", dataType = "String", example = "{'startTime':'2019-06-10 00:00:00','endTime':'2019-06-13 00:00:00','crontab':'0 0 3/6 * * ? *'}"),
@ApiImplicitParam(name = "warningType", value = "WARNING_TYPE", type = "WarningType"),
@ApiImplicitParam(name = "warningGroupId", value = "WARNING_GROUP_ID", dataType = "Int", example = "100"),
@ApiImplicitParam(name = "failureStrategy", value = "FAILURE_STRATEGY", type = "FailureStrategy"),
@ApiImplicitParam(name = "workerGroupId", value = "WORKER_GROUP_ID", dataType = "Int", example = "100"),
@ApiImplicitParam(name = "processInstancePriority", value = "PROCESS_INSTANCE_PRIORITY", type = "Priority"),
})
@PostMapping("/update")
@ApiException(UPDATE_SCHEDULE_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result updateSchedule(@ApiIgnore @RequestAttribute(value = SESSION_USER) User loginUser,
@ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
@RequestParam(value = "id") Integer id,
@RequestParam(value = "schedule") String schedule, |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/SchedulerController.java | @RequestParam(value = "warningType", required = false, defaultValue = DEFAULT_WARNING_TYPE) WarningType warningType,
@RequestParam(value = "warningGroupId", required = false) int warningGroupId,
@RequestParam(value = "failureStrategy", required = false, defaultValue = "END") FailureStrategy failureStrategy,
@RequestParam(value = "workerGroup", required = false, defaultValue = "default") String workerGroup,
@RequestParam(value = "processInstancePriority", required = false) Priority processInstancePriority) {
Map<String, Object> result = schedulerService.updateSchedule(loginUser, projectCode, id, schedule,
warningType, warningGroupId, failureStrategy, processInstancePriority, workerGroup);
return returnDataList(result);
}
/**
* publish schedule setScheduleState
*
* @param loginUser login user
* @param projectCode project code
* @param id scheduler id
* @return publish result code
*/
@ApiOperation(value = "online", notes = "ONLINE_SCHEDULE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "SCHEDULE_ID", required = true, dataType = "Int", example = "100")
})
@PostMapping("/online")
@ApiException(PUBLISH_SCHEDULE_ONLINE_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result online(@ApiIgnore @RequestAttribute(value = SESSION_USER) User loginUser,
@ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
@RequestParam("id") Integer id) {
Map<String, Object> result = schedulerService.setScheduleState(loginUser, projectCode, id, ReleaseState.ONLINE);
return returnDataList(result);
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/SchedulerController.java | /**
* offline schedule
*
* @param loginUser login user
* @param projectCode project code
* @param id schedule id
* @return operation result code
*/
@ApiOperation(value = "offline", notes = "OFFLINE_SCHEDULE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "SCHEDULE_ID", required = true, dataType = "Int", example = "100")
})
@PostMapping("/offline")
@ApiException(OFFLINE_SCHEDULE_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result offline(@ApiIgnore @RequestAttribute(value = SESSION_USER) User loginUser,
@ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
@RequestParam("id") Integer id) {
Map<String, Object> result = schedulerService.setScheduleState(loginUser, projectCode, id, ReleaseState.OFFLINE);
return returnDataList(result);
}
/**
* query schedule list paging
*
* @param loginUser login user
* @param projectCode project code
* @param processDefinitionCode process definition code
* @param pageNo page number
* @param pageSize page size
* @param searchVal search value |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/SchedulerController.java | * @return schedule list page
*/
@ApiOperation(value = "queryScheduleListPaging", notes = "QUERY_SCHEDULE_LIST_PAGING_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "processDefinitionId", value = "PROCESS_DEFINITION_ID", required = true, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", type = "String"),
@ApiImplicitParam(name = "pageNo", value = "PAGE_NO", dataType = "Int", example = "100"),
@ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", dataType = "Int", example = "100")
})
@GetMapping("/list-paging")
@ApiException(QUERY_SCHEDULE_LIST_PAGING_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result queryScheduleListPaging(@ApiIgnore @RequestAttribute(value = SESSION_USER) User loginUser,
@ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
@RequestParam long processDefinitionCode,
@RequestParam(value = "searchVal", required = false) String searchVal,
@RequestParam("pageNo") Integer pageNo,
@RequestParam("pageSize") Integer pageSize) {
Map<String, Object> result = checkPageParams(pageNo, pageSize);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return returnDataListPaging(result);
}
searchVal = ParameterUtils.handleEscapes(searchVal);
result = schedulerService.querySchedule(loginUser, projectCode, processDefinitionCode, searchVal, pageNo, pageSize);
return returnDataListPaging(result);
}
/**
* delete schedule by id
*
* @param loginUser login user |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/SchedulerController.java | * @param projectName project name
* @param scheduleId schedule id
* @return delete result code
*/
@ApiOperation(value = "deleteScheduleById", notes = "OFFLINE_SCHEDULE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "scheduleId", value = "SCHEDULE_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/delete")
@ResponseStatus(HttpStatus.OK)
@ApiException(DELETE_SCHEDULE_CRON_BY_ID_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result deleteScheduleById(@RequestAttribute(value = SESSION_USER) User loginUser,
@PathVariable String projectName,
@RequestParam("scheduleId") Integer scheduleId
) {
Map<String, Object> result = schedulerService.deleteScheduleById(loginUser, projectName, scheduleId);
return returnDataList(result);
}
/**
* query schedule list
*
* @param loginUser login user
* @param projectName project name
* @return schedule list
*/
@ApiOperation(value = "queryScheduleList", notes = "QUERY_SCHEDULE_LIST_NOTES")
@PostMapping("/list")
@ApiException(QUERY_SCHEDULE_LIST_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser") |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/SchedulerController.java | public Result queryScheduleList(@ApiIgnore @RequestAttribute(value = SESSION_USER) User loginUser,
@ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName) {
Map<String, Object> result = schedulerService.queryScheduleList(loginUser, projectName);
return returnDataList(result);
}
/**
* preview schedule
*
* @param loginUser login user
* @param projectName project name
* @param schedule schedule expression
* @return the next five fire time
*/
@ApiOperation(value = "previewSchedule", notes = "PREVIEW_SCHEDULE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "schedule", value = "SCHEDULE", dataType = "String", example = "{'startTime':'2019-06-10 00:00:00','endTime':'2019-06-13 00:00:00','crontab':'0 0 3/6 * * ? *'}"),
})
@PostMapping("/preview")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(PREVIEW_SCHEDULE_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result previewSchedule(@ApiIgnore @RequestAttribute(value = SESSION_USER) User loginUser,
@ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName,
@RequestParam(value = "schedule") String schedule
) {
Map<String, Object> result = schedulerService.previewSchedule(loginUser, projectName, schedule);
return returnDataList(result);
}
} |
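Tying this back to issue #5778 above: the last three endpoints in this class still take `projectName` even though the class-level mapping is `/projects/{projectCode}/schedule`. A hedged sketch of how `deleteScheduleById` might look once migrated to the same pattern as the other handlers is shown below; it is illustrative only and assumes the matching `SchedulerService` method is changed to accept a project code as well (the real change is made in PR #5779):

```java
// Sketch only, mirroring the neighbouring handlers; not necessarily the exact code of PR #5779.
@GetMapping(value = "/delete")
@ResponseStatus(HttpStatus.OK)
@ApiException(DELETE_SCHEDULE_CRON_BY_ID_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result deleteScheduleById(@ApiIgnore @RequestAttribute(value = SESSION_USER) User loginUser,
                                 @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                 @RequestParam("scheduleId") Integer scheduleId) {
    // assumes SchedulerService#deleteScheduleById is updated to (User, long, Integer)
    Map<String, Object> result = schedulerService.deleteScheduleById(loginUser, projectCode, scheduleId);
    return returnDataList(result);
}
```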
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/SchedulerService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.dao.entity.User;
import java.util.Map;
/**
* scheduler service
*/
public interface SchedulerService { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/SchedulerService.java | /**
* save schedule
*
* @param loginUser login user
* @param projectCode project code
* @param processDefineCode process definition code
* @param schedule scheduler
* @param warningType warning type
* @param warningGroupId warning group id
* @param failureStrategy failure strategy
* @param processInstancePriority process instance priority
* @param workerGroup worker group
* @return create result code
*/
Map<String, Object> insertSchedule(User loginUser,
long projectCode,
long processDefineCode,
String schedule, |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/SchedulerService.java | WarningType warningType,
int warningGroupId,
FailureStrategy failureStrategy,
Priority processInstancePriority,
String workerGroup);
/**
* update schedule
*
* @param loginUser login user
* @param projectCode project code
* @param id scheduler id
* @param scheduleExpression scheduler
* @param warningType warning type
* @param warningGroupId warning group id
* @param failureStrategy failure strategy
* @param workerGroup worker group
* @param processInstancePriority process instance priority
* @return update result code
*/
Map<String, Object> updateSchedule(User loginUser,
long projectCode,
Integer id,
String scheduleExpression,
WarningType warningType,
int warningGroupId,
FailureStrategy failureStrategy,
Priority processInstancePriority,
String workerGroup);
/**
* set schedule online or offline |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/SchedulerService.java | *
* @param loginUser login user
* @param projectCode project code
* @param id scheduler id
* @param scheduleStatus schedule status
* @return publish result code
*/
Map<String, Object> setScheduleState(User loginUser,
long projectCode,
Integer id,
ReleaseState scheduleStatus);
/**
* query schedule
*
* @param loginUser login user
* @param projectCode project code
* @param processDefineCode process definition code
* @param pageNo page number
* @param pageSize page size
* @param searchVal search value
* @return schedule list page
*/
Map<String, Object> querySchedule(User loginUser, long projectCode, long processDefineCode, String searchVal,
Integer pageNo, Integer pageSize);
/**
* query schedule list
*
* @param loginUser login user
* @param projectName project name
* @return schedule list |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/SchedulerService.java | */
Map<String, Object> queryScheduleList(User loginUser, String projectName);
/**
* delete schedule
*
* @param projectId project id
* @param scheduleId schedule id
* @throws RuntimeException runtime exception
*/
void deleteSchedule(int projectId, int scheduleId);
/**
* delete schedule by id
*
* @param loginUser login user
* @param projectName project name
     * @param scheduleId schedule id
* @return delete result code
*/
Map<String, Object> deleteScheduleById(User loginUser, String projectName, Integer scheduleId);
/**
* preview schedule
*
* @param loginUser login user
* @param projectName project name
* @param schedule schedule expression
* @return the next five fire time
*/
Map<String, Object> previewSchedule(User loginUser, String projectName, String schedule);
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SchedulerServiceImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SchedulerServiceImpl.java | * The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service.impl;
import org.apache.dolphinscheduler.api.dto.ScheduleParam;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.service.ExecutorService;
import org.apache.dolphinscheduler.api.service.MonitorService;
import org.apache.dolphinscheduler.api.service.ProjectService;
import org.apache.dolphinscheduler.api.service.SchedulerService;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SchedulerServiceImpl.java | import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.ProcessScheduleJob;
import org.apache.dolphinscheduler.service.quartz.QuartzExecutors;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.quartz.CronExpression;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
/**
* scheduler service impl
*/
@Service |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SchedulerServiceImpl.java | public class SchedulerServiceImpl extends BaseServiceImpl implements SchedulerService {
private static final Logger logger = LoggerFactory.getLogger(SchedulerServiceImpl.class);
@Autowired
private ProjectService projectService;
@Autowired
private ExecutorService executorService;
@Autowired
private MonitorService monitorService;
@Autowired
private ProcessService processService;
@Autowired
private ScheduleMapper scheduleMapper;
@Autowired
private ProjectMapper projectMapper;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
/**
* save schedule
*
* @param loginUser login user
     * @param projectCode project code
* @param processDefineCode process definition code
* @param schedule scheduler
* @param warningType warning type
* @param warningGroupId warning group id
* @param failureStrategy failure strategy
* @param processInstancePriority process instance priority |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SchedulerServiceImpl.java | * @param workerGroup worker group
* @return create result code
*/
@Override
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> insertSchedule(User loginUser,
long projectCode,
long processDefineCode,
String schedule,
WarningType warningType,
int warningGroupId,
FailureStrategy failureStrategy,
Priority processInstancePriority,
String workerGroup) {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByCode(projectCode);
boolean hasProjectAndPerm = projectService.hasProjectAndPerm(loginUser, project, result);
if (!hasProjectAndPerm) {
return result;
}
ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(processDefineCode);
result = executorService.checkProcessDefinitionValid(processDefinition, processDefineCode);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
Schedule scheduleObj = new Schedule();
Date now = new Date();
scheduleObj.setProjectName(project.getName()); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SchedulerServiceImpl.java | scheduleObj.setProcessDefinitionId(processDefinition.getId());
scheduleObj.setProcessDefinitionName(processDefinition.getName());
ScheduleParam scheduleParam = JSONUtils.parseObject(schedule, ScheduleParam.class);
if (DateUtils.differSec(scheduleParam.getStartTime(), scheduleParam.getEndTime()) == 0) {
logger.warn("The start time must not be the same as the end");
putMsg(result, Status.SCHEDULE_START_TIME_END_TIME_SAME);
return result;
}
scheduleObj.setStartTime(scheduleParam.getStartTime());
scheduleObj.setEndTime(scheduleParam.getEndTime());
if (!org.quartz.CronExpression.isValidExpression(scheduleParam.getCrontab())) {
logger.error("{} verify failure", scheduleParam.getCrontab());
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, scheduleParam.getCrontab());
return result;
}
scheduleObj.setCrontab(scheduleParam.getCrontab());
scheduleObj.setTimezoneId(scheduleParam.getTimezoneId());
scheduleObj.setWarningType(warningType);
scheduleObj.setWarningGroupId(warningGroupId);
scheduleObj.setFailureStrategy(failureStrategy);
scheduleObj.setCreateTime(now);
scheduleObj.setUpdateTime(now);
scheduleObj.setUserId(loginUser.getId());
scheduleObj.setUserName(loginUser.getUserName());
scheduleObj.setReleaseState(ReleaseState.OFFLINE);
scheduleObj.setProcessInstancePriority(processInstancePriority);
scheduleObj.setWorkerGroup(workerGroup);
scheduleMapper.insert(scheduleObj);
/**
         * update the process definition's warning group id |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SchedulerServiceImpl.java | */
processDefinition.setWarningGroupId(warningGroupId);
processDefinitionMapper.updateById(processDefinition);
result.put(Constants.DATA_LIST, scheduleMapper.selectById(scheduleObj.getId()));
putMsg(result, Status.SUCCESS);
result.put("scheduleId", scheduleObj.getId());
return result;
}
/**
* updateProcessInstance schedule
*
* @param loginUser login user
* @param projectCode project code
* @param id scheduler id
* @param scheduleExpression scheduler
* @param warningType warning type
* @param warningGroupId warning group id
* @param failureStrategy failure strategy
* @param workerGroup worker group
* @param processInstancePriority process instance priority
* @return update result code
*/
@Override
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> updateSchedule(User loginUser,
long projectCode,
Integer id,
String scheduleExpression,
WarningType warningType, |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SchedulerServiceImpl.java | int warningGroupId,
FailureStrategy failureStrategy,
Priority processInstancePriority,
String workerGroup) {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByCode(projectCode);
boolean hasProjectAndPerm = projectService.hasProjectAndPerm(loginUser, project, result);
if (!hasProjectAndPerm) {
return result;
}
Schedule schedule = scheduleMapper.selectById(id);
if (schedule == null) {
putMsg(result, Status.SCHEDULE_CRON_NOT_EXISTS, id);
return result;
}
ProcessDefinition processDefinition = processService.findProcessDefineById(schedule.getProcessDefinitionId());
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, schedule.getProcessDefinitionId());
return result;
}
/**
* scheduling on-line status forbid modification
*/
if (checkValid(result, schedule.getReleaseState() == ReleaseState.ONLINE, Status.SCHEDULE_CRON_ONLINE_FORBID_UPDATE)) {
return result;
}
Date now = new Date(); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SchedulerServiceImpl.java | if (StringUtils.isNotEmpty(scheduleExpression)) {
ScheduleParam scheduleParam = JSONUtils.parseObject(scheduleExpression, ScheduleParam.class);
if (DateUtils.differSec(scheduleParam.getStartTime(), scheduleParam.getEndTime()) == 0) {
logger.warn("The start time must not be the same as the end");
putMsg(result, Status.SCHEDULE_START_TIME_END_TIME_SAME);
return result;
}
schedule.setStartTime(scheduleParam.getStartTime());
schedule.setEndTime(scheduleParam.getEndTime());
if (!org.quartz.CronExpression.isValidExpression(scheduleParam.getCrontab())) {
putMsg(result, Status.SCHEDULE_CRON_CHECK_FAILED, scheduleParam.getCrontab());
return result;
}
schedule.setCrontab(scheduleParam.getCrontab());
schedule.setTimezoneId(scheduleParam.getTimezoneId());
}
if (warningType != null) {
schedule.setWarningType(warningType);
}
schedule.setWarningGroupId(warningGroupId);
if (failureStrategy != null) {
schedule.setFailureStrategy(failureStrategy);
}
schedule.setWorkerGroup(workerGroup);
schedule.setUpdateTime(now);
schedule.setProcessInstancePriority(processInstancePriority);
scheduleMapper.updateById(schedule);
/**
         * update the process definition's warning group id
*/ |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SchedulerServiceImpl.java | processDefinition.setWarningGroupId(warningGroupId);
processDefinitionMapper.updateById(processDefinition);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* set schedule online or offline
*
* @param loginUser login user
* @param projectCode project code
* @param id scheduler id
* @param scheduleStatus schedule status
* @return publish result code
*/
@Override
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> setScheduleState(User loginUser,
long projectCode,
Integer id,
ReleaseState scheduleStatus) {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByCode(projectCode);
boolean hasProjectAndPerm = projectService.hasProjectAndPerm(loginUser, project, result);
if (!hasProjectAndPerm) {
return result;
}
Schedule scheduleObj = scheduleMapper.selectById(id);
if (scheduleObj == null) { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SchedulerServiceImpl.java | putMsg(result, Status.SCHEDULE_CRON_NOT_EXISTS, id);
return result;
}
if (scheduleObj.getReleaseState() == scheduleStatus) {
logger.info("schedule release is already {},needn't to change schedule id: {} from {} to {}",
scheduleObj.getReleaseState(), scheduleObj.getId(), scheduleObj.getReleaseState(), scheduleStatus);
putMsg(result, Status.SCHEDULE_CRON_REALEASE_NEED_NOT_CHANGE, scheduleStatus);
return result;
}
ProcessDefinition processDefinition = processService.findProcessDefineById(scheduleObj.getProcessDefinitionId());
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, scheduleObj.getProcessDefinitionId());
return result;
}
if (scheduleStatus == ReleaseState.ONLINE) {
if (processDefinition.getReleaseState() != ReleaseState.ONLINE) {
logger.info("not release process definition id: {} , name : {}",
processDefinition.getId(), processDefinition.getName());
putMsg(result, Status.PROCESS_DEFINE_NOT_RELEASE, processDefinition.getName());
return result;
}
List<Integer> subProcessDefineIds = new ArrayList<>();
processService.recurseFindSubProcessId(scheduleObj.getProcessDefinitionId(), subProcessDefineIds);
Integer[] idArray = subProcessDefineIds.toArray(new Integer[subProcessDefineIds.size()]);
if (!subProcessDefineIds.isEmpty()) {
List<ProcessDefinition> subProcessDefinitionList =
processDefinitionMapper.queryDefinitionListByIdList(idArray); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SchedulerServiceImpl.java | if (subProcessDefinitionList != null && !subProcessDefinitionList.isEmpty()) {
for (ProcessDefinition subProcessDefinition : subProcessDefinitionList) {
/**
* if there is no online process, exit directly
*/
if (subProcessDefinition.getReleaseState() != ReleaseState.ONLINE) {
logger.info("not release process definition id: {} , name : {}",
subProcessDefinition.getId(), subProcessDefinition.getName());
putMsg(result, Status.PROCESS_DEFINE_NOT_RELEASE, subProcessDefinition.getId());
return result;
}
}
}
}
}
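        // changing the schedule state requires at least one registered master server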
List<Server> masterServers = monitorService.getServerListFromRegistry(true);
if (masterServers.isEmpty()) {
putMsg(result, Status.MASTER_NOT_EXISTS);
return result;
}
scheduleObj.setReleaseState(scheduleStatus);
scheduleMapper.updateById(scheduleObj);
try {
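            // add or remove the quartz job according to the target release state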
switch (scheduleStatus) {
case ONLINE:
logger.info("Call master client set schedule online, project id: {}, flow id: {},host: {}", project.getId(), processDefinition.getId(), masterServers);
setSchedule(project.getId(), scheduleObj);
break; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SchedulerServiceImpl.java | case OFFLINE:
logger.info("Call master client set schedule offline, project id: {}, flow id: {},host: {}", project.getId(), processDefinition.getId(), masterServers);
deleteSchedule(project.getId(), id);
break;
default:
putMsg(result, Status.SCHEDULE_STATUS_UNKNOWN, scheduleStatus.toString());
return result;
}
} catch (Exception e) {
result.put(Constants.MSG, scheduleStatus == ReleaseState.ONLINE ? "set online failure" : "set offline failure");
throw new ServiceException(result.get(Constants.MSG).toString());
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* query schedule
*
* @param loginUser login user
* @param projectCode project code
* @param processDefineCode process definition code
* @param pageNo page number
* @param pageSize page size
* @param searchVal search value
* @return schedule list page
*/
@Override
public Map<String, Object> querySchedule(User loginUser, long projectCode, long processDefineCode, String searchVal,
Integer pageNo, Integer pageSize) {
HashMap<String, Object> result = new HashMap<>(); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SchedulerServiceImpl.java | Project project = projectMapper.queryByCode(projectCode);
boolean hasProjectAndPerm = projectService.hasProjectAndPerm(loginUser, project, result);
if (!hasProjectAndPerm) {
return result;
}
ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(processDefineCode);
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processDefineCode);
return result;
}
Page<Schedule> page = new Page<>(pageNo, pageSize);
IPage<Schedule> scheduleIPage = scheduleMapper.queryByProcessDefineIdPaging(page, processDefinition.getId(),
searchVal);
PageInfo<Schedule> pageInfo = new PageInfo<>(pageNo, pageSize);
pageInfo.setTotalCount((int)scheduleIPage.getTotal());
pageInfo.setLists(scheduleIPage.getRecords());
result.put(Constants.DATA_LIST, pageInfo);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* query schedule list
*
* @param loginUser login user
* @param projectName project name
* @return schedule list
*/
@Override
public Map<String, Object> queryScheduleList(User loginUser, String projectName) { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SchedulerServiceImpl.java | Map<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName);
boolean hasProjectAndPerm = projectService.hasProjectAndPerm(loginUser, project, result);
if (!hasProjectAndPerm) {
return result;
}
List<Schedule> schedules = scheduleMapper.querySchedulerListByProjectName(projectName);
result.put(Constants.DATA_LIST, schedules);
putMsg(result, Status.SUCCESS);
return result;
}
public void setSchedule(int projectId, Schedule schedule) {
logger.info("set schedule, project id: {}, scheduleId: {}", projectId, schedule.getId());
QuartzExecutors.getInstance().addJob(ProcessScheduleJob.class, projectId, schedule);
}
/**
* delete schedule
*
* @param projectId project id
* @param scheduleId schedule id
* @throws RuntimeException runtime exception
*/
@Override
public void deleteSchedule(int projectId, int scheduleId) {
logger.info("delete schedules of project id:{}, schedule id:{}", projectId, scheduleId);
String jobName = QuartzExecutors.buildJobName(scheduleId);
String jobGroupName = QuartzExecutors.buildJobGroupName(projectId);
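        // removing the quartz job stops this schedule from firing again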
if (!QuartzExecutors.getInstance().deleteJob(jobName, jobGroupName)) {
logger.warn("set offline failure:projectId:{},scheduleId:{}", projectId, scheduleId); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SchedulerServiceImpl.java | throw new ServiceException("set offline failure");
}
}
/**
* check valid
*
* @param result result
* @param bool bool
* @param status status
* @return check result code
*/
private boolean checkValid(Map<String, Object> result, boolean bool, Status status) {
if (bool) {
putMsg(result, status);
return true;
}
return false;
}
/**
* delete schedule by id
*
* @param loginUser login user
* @param projectName project name
     * @param scheduleId schedule id
* @return delete result code
*/
@Override
public Map<String, Object> deleteScheduleById(User loginUser, String projectName, Integer scheduleId) {
Map<String, Object> result = new HashMap<>(); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SchedulerServiceImpl.java | Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultEnum = (Status) checkResult.get(Constants.STATUS);
if (resultEnum != Status.SUCCESS) {
return checkResult;
}
Schedule schedule = scheduleMapper.selectById(scheduleId);
if (schedule == null) {
putMsg(result, Status.SCHEDULE_CRON_NOT_EXISTS, scheduleId);
return result;
}
if (loginUser.getId() != schedule.getUserId()
&& loginUser.getUserType() != UserType.ADMIN_USER) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
if (schedule.getReleaseState() == ReleaseState.ONLINE) {
putMsg(result, Status.SCHEDULE_CRON_STATE_ONLINE, schedule.getId());
return result;
}
int delete = scheduleMapper.deleteById(scheduleId);
if (delete > 0) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.DELETE_SCHEDULE_CRON_BY_ID_ERROR);
}
return result;
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SchedulerServiceImpl.java | /**
* preview schedule
*
* @param loginUser login user
* @param projectName project name
* @param schedule schedule expression
* @return the next five fire time
*/
@Override
public Map<String, Object> previewSchedule(User loginUser, String projectName, String schedule) {
Map<String, Object> result = new HashMap<>();
CronExpression cronExpression;
ScheduleParam scheduleParam = JSONUtils.parseObject(schedule, ScheduleParam.class);
Date now = new Date();
Date startTime = now.after(scheduleParam.getStartTime()) ? now : scheduleParam.getStartTime();
Date endTime = scheduleParam.getEndTime();
try {
cronExpression = CronUtils.parse2CronExpression(scheduleParam.getCrontab());
} catch (ParseException e) {
logger.error(e.getMessage(), e);
putMsg(result, Status.PARSE_TO_CRON_EXPRESSION_ERROR);
return result;
}
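        // compute the upcoming fire times (at most PREVIEW_SCHEDULE_EXECUTE_COUNT) within the schedule window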
List<Date> selfFireDateList = CronUtils.getSelfFireDateList(startTime, endTime, cronExpression, Constants.PREVIEW_SCHEDULE_EXECUTE_COUNT);
result.put(Constants.DATA_LIST, selfFireDateList.stream().map(DateUtils::dateToString));
putMsg(result, Status.SUCCESS);
return result;
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/SchedulerControllerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.controller;
import static org.mockito.ArgumentMatchers.isA;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/SchedulerControllerTest.java | import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;
import java.util.Map;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.SchedulerService;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.User;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.test.mock.mockito.MockBean;
import org.springframework.http.MediaType;
import org.springframework.test.web.servlet.MvcResult;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
/**
* scheduler controller test
*/
public class SchedulerControllerTest extends AbstractControllerTest { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/SchedulerControllerTest.java | private static Logger logger = LoggerFactory.getLogger(SchedulerControllerTest.class);
@MockBean
private SchedulerService schedulerService;
@Test
public void testCreateSchedule() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("processDefinitionCode","40");
paramsMap.add("schedule","{'startTime':'2019-12-16 00:00:00','endTime':'2019-12-17 00:00:00','crontab':'0 0 6 * * ? *'}");
paramsMap.add("warningType",String.valueOf(WarningType.NONE));
paramsMap.add("warningGroupId","1");
paramsMap.add("failureStrategy",String.valueOf(FailureStrategy.CONTINUE));
paramsMap.add("receivers","");
paramsMap.add("receiversCc","");
paramsMap.add("workerGroupId","1");
paramsMap.add("processInstancePriority",String.valueOf(Priority.HIGH));
Mockito.when(schedulerService.insertSchedule(isA(User.class), isA(Long.class), isA(Long.class),
isA(String.class), isA(WarningType.class), isA(int.class), isA(FailureStrategy.class),
isA(Priority.class), isA(String.class))).thenReturn(success());
MvcResult mvcResult = mockMvc.perform(post("/projects/{projectCode}/schedule/create",123)
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isCreated())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue()); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/SchedulerControllerTest.java | logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testUpdateSchedule() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("id","37");
paramsMap.add("schedule","{'startTime':'2019-12-16 00:00:00','endTime':'2019-12-17 00:00:00','crontab':'0 0 7 * * ? *'}");
paramsMap.add("warningType",String.valueOf(WarningType.NONE));
paramsMap.add("warningGroupId","1");
paramsMap.add("failureStrategy",String.valueOf(FailureStrategy.CONTINUE));
paramsMap.add("receivers","");
paramsMap.add("receiversCc","");
paramsMap.add("workerGroupId","1");
paramsMap.add("processInstancePriority",String.valueOf(Priority.HIGH));
Mockito.when(schedulerService.updateSchedule(isA(User.class), isA(Long.class), isA(Integer.class),
isA(String.class), isA(WarningType.class), isA(Integer.class), isA(FailureStrategy.class),
isA(Priority.class), isA(String.class))).thenReturn(success());
MvcResult mvcResult = mockMvc.perform(post("/projects/{projectCode}/schedule/update",123)
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testOnline() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/SchedulerControllerTest.java | paramsMap.add("id","37");
Mockito.when(schedulerService.setScheduleState(isA(User.class), isA(Long.class), isA(Integer.class),
isA(ReleaseState.class))).thenReturn(success());
MvcResult mvcResult = mockMvc.perform(post("/projects/{projectCode}/schedule/online",123)
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testOffline() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("id","28");
Mockito.when(schedulerService.setScheduleState(isA(User.class), isA(Long.class), isA(Integer.class),
isA(ReleaseState.class))).thenReturn(success());
MvcResult mvcResult = mockMvc.perform(post("/projects/{projectCode}/schedule/offline",123)
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/SchedulerControllerTest.java | public void testQueryScheduleListPaging() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("processDefinitionCode","40");
paramsMap.add("searchVal","test");
paramsMap.add("pageNo","1");
paramsMap.add("pageSize","30");
Map<String, Object> mockResult = success();
PageInfo<Resource> pageInfo = new PageInfo<>(1, 10);
mockResult.put(Constants.DATA_LIST, pageInfo);
Mockito.when(schedulerService.querySchedule(isA(User.class), isA(Long.class), isA(Long.class),
isA(String.class), isA(Integer.class), isA(Integer.class))).thenReturn(mockResult);
MvcResult mvcResult = mockMvc.perform(get("/projects/{projectCode}/schedule/list-paging",123)
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testQueryScheduleList() throws Exception {
MvcResult mvcResult = mockMvc.perform(post("/projects/{projectName}/schedule/list","cxc_1113")
.header(SESSION_ID, sessionId))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue()); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,778 | [Feature][JsonSplit-api]schedule list、preview、delete interface | from #5498
Change the request parameter projectName to projectCode,including the front end and controller interface | https://github.com/apache/dolphinscheduler/issues/5778 | https://github.com/apache/dolphinscheduler/pull/5779 | 72535a47e3dafc68c457996ea6e01b8da17685aa | d2a9e05a664a9bf12a29ae1873183b0173c0bf49 | 2021-07-09T08:56:03Z | java | 2021-07-12T02:21:17Z | dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/SchedulerControllerTest.java | logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testPreviewSchedule() throws Exception {
MvcResult mvcResult = mockMvc.perform(post("/projects/{projectName}/schedule/preview","cxc_1113")
.header(SESSION_ID, sessionId)
.param("schedule","{'startTime':'2019-06-10 00:00:00','endTime':'2019-06-13 00:00:00','crontab':'0 0 3/6 * * ? *'}"))
.andExpect(status().isCreated())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testDeleteScheduleById() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("scheduleId","37");
MvcResult mvcResult = mockMvc.perform(get("/projects/{projectName}/schedule/delete","cxc_1113")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,795 | [Improvement][Server] The starttime field in the HttpTask log is not displayed as expected. | *For better global communication, please give priority to using English description, thx! *
*Please review https://dolphinscheduler.apache.org/en-us/community/development/issue.html when describe an issue.*
**Describe the question**

```java
long costTime = System.currentTimeMillis() - startTime;
logger.info("startTime: {}, httpUrl: {}, httpMethod: {}, costTime : {}Millisecond, statusCode : {}, body : {}, log : {}",
DateUtils.format2Readable(startTime), httpParameters.getUrl(), httpParameters.getHttpMethod(), costTime, statusCode, body, output);
public static String format2Readable(long ms) {
long days = MILLISECONDS.toDays(ms);
long hours = MILLISECONDS.toDurationHours(ms);
long minutes = MILLISECONDS.toDurationMinutes(ms);
long seconds = MILLISECONDS.toDurationSeconds(ms);
return String.format("%02d %02d:%02d:%02d", days, hours, minutes, seconds);
}
```
The API `format2Readable` is intended to display the execution time of a task in a more readable way, for example how many days and hours it has been running.
It would be better to convert the timestamp to a formatted time according to a specified `DateTimeFormatter`.
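A minimal sketch of the suggested direction (the helper name `formatTimeStamp` and the reuse of the `yyyy-MM-dd HH:mm:ss` pattern are illustrative assumptions, not the project's confirmed API):

```java
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;

// Hypothetical helper: format a millisecond timestamp as a readable date string
// instead of treating it as a duration.
public static String formatTimeStamp(long timeMillis) {
    return LocalDateTime.ofInstant(Instant.ofEpochMilli(timeMillis), ZoneId.systemDefault())
            .format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
}

// The HttpTask log could then report the real start time, e.g.
// logger.info("startTime: {}, ...", formatTimeStamp(startTime), ...);
```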
**Which version of DolphinScheduler:**
latest dev branch
**Describe alternatives you've considered**
A clear and concise description of any alternative improvement solutions you've considered.
| https://github.com/apache/dolphinscheduler/issues/5795 | https://github.com/apache/dolphinscheduler/pull/5796 | 16986c3c651af38469c6d4cb03a587fd174c9a9b | 7bffe0ac85b0147210facdeedc531026b0022e6f | 2021-07-11T07:49:32Z | java | 2021-07-12T06:31:48Z | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/DateUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import org.apache.dolphinscheduler.common.Constants;
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.ZonedDateTime; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,795 | [Improvement][Server] The starttime field in the HttpTask log is not displayed as expected. | *For better global communication, please give priority to using English description, thx! *
*Please review https://dolphinscheduler.apache.org/en-us/community/development/issue.html when describe an issue.*
**Describe the question**

```java
long costTime = System.currentTimeMillis() - startTime;
logger.info("startTime: {}, httpUrl: {}, httpMethod: {}, costTime : {}Millisecond, statusCode : {}, body : {}, log : {}",
DateUtils.format2Readable(startTime), httpParameters.getUrl(), httpParameters.getHttpMethod(), costTime, statusCode, body, output);
public static String format2Readable(long ms) {
long days = MILLISECONDS.toDays(ms);
long hours = MILLISECONDS.toDurationHours(ms);
long minutes = MILLISECONDS.toDurationMinutes(ms);
long seconds = MILLISECONDS.toDurationSeconds(ms);
return String.format("%02d %02d:%02d:%02d", days, hours, minutes, seconds);
}
```
The API `format2Readable` is intended to display the execution time of a task in a more readable way, for example how many days and hours it has been running.
It would be better to convert the timestamp to a formatted time according to a specified `DateTimeFormatter`.
**Which version of DolphinScheduler:**
latest dev branch
**Describe alternatives you've considered**
A clear and concise description of any alternative improvement solutions you've considered.
| https://github.com/apache/dolphinscheduler/issues/5795 | https://github.com/apache/dolphinscheduler/pull/5796 | 16986c3c651af38469c6d4cb03a587fd174c9a9b | 7bffe0ac85b0147210facdeedc531026b0022e6f | 2021-07-11T07:49:32Z | java | 2021-07-12T06:31:48Z | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/DateUtils.java | import java.time.format.DateTimeFormatter;
import java.util.Calendar;
import java.util.Date;
import java.util.TimeZone;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* date utils
*/
public class DateUtils {
private static final Logger logger = LoggerFactory.getLogger(DateUtils.class);
private DateUtils() {
throw new UnsupportedOperationException("Construct DateUtils");
}
/**
* date to local datetime
*
* @param date date
* @return local datetime
*/
private static LocalDateTime date2LocalDateTime(Date date) {
return LocalDateTime.ofInstant(date.toInstant(), ZoneId.systemDefault());
}
/**
* local datetime to date
*
* @param localDateTime local datetime
* @return date
*/
private static Date localDateTime2Date(LocalDateTime localDateTime) { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,795 | [Improvement][Server] The starttime field in the HttpTask log is not displayed as expected. | *For better global communication, please give priority to using English description, thx! *
*Please review https://dolphinscheduler.apache.org/en-us/community/development/issue.html when describe an issue.*
**Describe the question**

```java
long costTime = System.currentTimeMillis() - startTime;
logger.info("startTime: {}, httpUrl: {}, httpMethod: {}, costTime : {}Millisecond, statusCode : {}, body : {}, log : {}",
DateUtils.format2Readable(startTime), httpParameters.getUrl(), httpParameters.getHttpMethod(), costTime, statusCode, body, output);
public static String format2Readable(long ms) {
long days = MILLISECONDS.toDays(ms);
long hours = MILLISECONDS.toDurationHours(ms);
long minutes = MILLISECONDS.toDurationMinutes(ms);
long seconds = MILLISECONDS.toDurationSeconds(ms);
return String.format("%02d %02d:%02d:%02d", days, hours, minutes, seconds);
}
```
The API `format2Readable` is intended to display the execution time of a task in a more readable way, for example how many days and hours it has been running.
It would be better to convert the timestamp to a formatted time according to a specified `DateTimeFormatter`.
**Which version of DolphinScheduler:**
latest dev branch
**Describe alternatives you've considered**
A clear and concise description of any alternative improvement solutions you've considered.
| https://github.com/apache/dolphinscheduler/issues/5795 | https://github.com/apache/dolphinscheduler/pull/5796 | 16986c3c651af38469c6d4cb03a587fd174c9a9b | 7bffe0ac85b0147210facdeedc531026b0022e6f | 2021-07-11T07:49:32Z | java | 2021-07-12T06:31:48Z | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/DateUtils.java | Instant instant = localDateTime.atZone(ZoneId.systemDefault()).toInstant();
return Date.from(instant);
}
/**
* get current date str
*
* @return date string
*/
public static String getCurrentTime() {
return getCurrentTime(Constants.YYYY_MM_DD_HH_MM_SS);
}
/**
* get the date string in the specified format of the current time
*
* @param format date format
* @return date string
*/
public static String getCurrentTime(String format) {
return LocalDateTime.now().format(DateTimeFormatter.ofPattern(format));
}
/**
* get the formatted date string
*
* @param date date
* @param format e.g. yyyy-MM-dd HH:mm:ss
* @return date string
*/
public static String format(Date date, String format) {
return format(date2LocalDateTime(date), format);
}
/**
* get the formatted date string
*
* @param localDateTime local data time
* @param format yyyy-MM-dd HH:mm:ss
* @return date string
*/
public static String format(LocalDateTime localDateTime, String format) {
return localDateTime.format(DateTimeFormatter.ofPattern(format));
}
/**
* convert time to yyyy-MM-dd HH:mm:ss format
*
* @param date date
* @return date string
*/
public static String dateToString(Date date) {
return format(date, Constants.YYYY_MM_DD_HH_MM_SS);
}
/**
* convert string to date and time
*
* @param date date
* @param format format
* @return date
*/
public static Date parse(String date, String format) {
try {
LocalDateTime ldt = LocalDateTime.parse(date, DateTimeFormatter.ofPattern(format));
return localDateTime2Date(ldt);
} catch (Exception e) {
logger.error("error while parse date:" + date, e);
}
return null;
}
/**
* convert date str to yyyy-MM-dd HH:mm:ss format
*
* @param str date string
* @return yyyy-MM-dd HH:mm:ss format
*/
public static Date stringToDate(String str) {
return parse(str, Constants.YYYY_MM_DD_HH_MM_SS);
}
/**
* get seconds between two dates
*
* @param d1 date1
* @param d2 date2
* @return differ seconds
*/
public static long differSec(Date d1, Date d2) {
if (d1 == null || d2 == null) {
return 0;
}
return (long) Math.ceil(differMs(d1, d2) / 1000.0);
}
/**
* get ms between two dates
*
* @param d1 date1
* @param d2 date2
* @return differ ms
*/
public static long differMs(Date d1, Date d2) {
return Math.abs(d1.getTime() - d2.getTime());
}
/**
* get hours between two dates
*
* @param d1 date1
* @param d2 date2
* @return differ hours
*/
public static long diffHours(Date d1, Date d2) {
return (long) Math.ceil(diffMin(d1, d2) / 60.0);
}
/**
* get minutes between two dates
*
* @param d1 date1
* @param d2 date2
* @return differ minutes
*/
public static long diffMin(Date d1, Date d2) {
return (long) Math.ceil(differSec(d1, d2) / 60.0);
}
/**
* get the date of the specified date in the days before and after
*
* @param date date
* @param day day
* @return the date of the specified date in the days before and after
*/
public static Date getSomeDay(Date date, int day) {
Calendar calendar = Calendar.getInstance();
calendar.setTime(date);
calendar.add(Calendar.DATE, day);
return calendar.getTime();
}
/**
* get the hour of day.
*
* @param date date
* @return hour of day
*/
public static int getHourIndex(Date date) {
Calendar calendar = Calendar.getInstance();
calendar.setTime(date);
return calendar.get(Calendar.HOUR_OF_DAY);
}
/**
* compare two dates
*
* @param future future date
* @param old old date
* @return true if future time greater than old time
*/
public static boolean compare(Date future, Date old) {
return future.getTime() > old.getTime();
}
/**
* convert schedule string to date
*
* @param schedule schedule
* @return convert schedule string to date
*/
public static Date getScheduleDate(String schedule) {
return stringToDate(schedule);
}
/**
* format time to readable
*
* @param ms ms
* @return format time
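* note: expects an elapsed duration in milliseconds, not an absolute epoch timestamp (see issue #5795 above);
* e.g. 90061000 ms renders as "01 01:01:01", assuming the custom MILLISECONDS unit returns per-unit remainders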
*/
public static String format2Readable(long ms) {
long days = MILLISECONDS.toDays(ms);
long hours = MILLISECONDS.toDurationHours(ms);
long minutes = MILLISECONDS.toDurationMinutes(ms);
long seconds = MILLISECONDS.toDurationSeconds(ms);
return String.format("%02d %02d:%02d:%02d", days, hours, minutes, seconds);
}
/**
*
* format time to duration
*
* @param d1 d1
* @param d2 d2
* @return format time
*/
public static String format2Duration(Date d1, Date d2) {
if (d1 == null || d2 == null) {
return null;
}
return format2Duration(differMs(d1, d2));
}
/**
* format time to duration
*
* @param ms ms
* @return format time
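* e.g. 93784000 ms renders as "1d 2h 3m 4s", assuming the custom MILLISECONDS unit returns per-unit remainders;
* durations shorter than one second yield an empty string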
*/
public static String format2Duration(long ms) {
long days = MILLISECONDS.toDays(ms);
long hours = MILLISECONDS.toDurationHours(ms);
long minutes = MILLISECONDS.toDurationMinutes(ms);
long seconds = MILLISECONDS.toDurationSeconds(ms);
StringBuilder strBuilder = new StringBuilder();
strBuilder = days > 0 ? strBuilder.append(days).append("d").append(" ") : strBuilder;
strBuilder = hours > 0 ? strBuilder.append(hours).append("h").append(" ") : strBuilder;
strBuilder = minutes > 0 ? strBuilder.append(minutes).append("m").append(" ") : strBuilder;
strBuilder = seconds > 0 ? strBuilder.append(seconds).append("s") : strBuilder;
return strBuilder.toString();
}
/**
* get monday
* <p>
* note: Set the first day of the week to Monday, the default is Sunday
*
* @param date date
* @return get monday
*/
public static Date getMonday(Date date) {
Calendar cal = Calendar.getInstance();
cal.setTime(date);
cal.setFirstDayOfWeek(Calendar.MONDAY);
cal.set(Calendar.DAY_OF_WEEK, Calendar.MONDAY);
return cal.getTime();
}
/**
* get sunday
* <p>
* note: Set the first day of the week to Monday, the default is Sunday
*
* @param date date
* @return get sunday
*/
public static Date getSunday(Date date) {
Calendar cal = Calendar.getInstance();
cal.setTime(date);
cal.setFirstDayOfWeek(Calendar.MONDAY);
cal.set(Calendar.DAY_OF_WEEK, Calendar.SUNDAY);
return cal.getTime();
}
/**
* get first day of month
*
* @param date date
* @return first day of month