status
stringclasses 1
value | repo_name
stringclasses 31
values | repo_url
stringclasses 31
values | issue_id
int64 1
104k
| title
stringlengths 4
233
| body
stringlengths 0
186k
⌀ | issue_url
stringlengths 38
56
| pull_url
stringlengths 37
54
| before_fix_sha
stringlengths 40
40
| after_fix_sha
stringlengths 40
40
| report_datetime
unknown | language
stringclasses 5
values | commit_datetime
unknown | updated_file
stringlengths 7
188
| chunk_content
stringlengths 1
1.03M
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,487 | [Improvement][Task] Remove TaskRecordDao And simply the after() in the AbstractTask class | Dolphin scheduler 目前已经移除了数据质量检测,
可见在配置文件中也已经移除了对 相关数据质量涉及的db的
但是代码中依旧存在TaskRecordDao对数据质量的query,
并且SELECT * FROM eamp_hive_log_hd WHERE PROC_NAME='%s' and PROC_DATE like '%s'"
中涉及的eamp_hive_log_hd db明显已经不存在于配置的默认数据库中,
但是在重要的抽象类AbstractTask 中依旧存在对
TaskRecordDao的数据质量检测逻辑的判定,建议移除来保持对重要抽象类的纯净
public void after() {
if (getExitStatusCode() == Constants.EXIT_CODE_SUCCESS) {
// task recor flat : if true , start up qianfan
if (TaskRecordDao.getTaskRecordFlag()
&& TaskType.typeIsNormalTask(taskExecutionContext.getTaskType())) {
AbstractParameters params = TaskParametersUtils.getParameters(taskExecutionContext.getTaskType(), taskExecutionContext.getTaskParams());
// replace placeholder
Map<String, Property> paramsMap = ParamUtils.convert(ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
taskExecutionContext.getDefinedParams(),
params.getLocalParametersMap(),
CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
taskExecutionContext.getScheduleTime());
if (paramsMap != null && !paramsMap.isEmpty()
&& paramsMap.containsKey("v_proc_date")) {
String vProcDate = paramsMap.get("v_proc_date").getValue();
if (!StringUtils.isEmpty(vProcDate)) {
TaskRecordStatus taskRecordState = TaskRecordDao.getTaskRecordState(taskExecutionContext.getTaskName(), vProcDate);
logger.info("task record status : {}", taskRecordState);
if (taskRecordState == TaskRecordStatus.FAILURE) {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
}
}
}
}
} else if (getExitStatusCode() == Constants.EXIT_CODE_KILL) {
setExitStatusCode(Constants.EXIT_CODE_KILL);
} else {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
}
}
| https://github.com/apache/dolphinscheduler/issues/5487 | https://github.com/apache/dolphinscheduler/pull/5492 | 018f5c89f6ee1dbb8259a6036c4beb1874cd3f5c | bc22ae7c91c9cbd7c971796ba3a45358c2f11864 | "2021-05-17T09:46:25Z" | java | "2021-05-18T09:00:03Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractTask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,487 | [Improvement][Task] Remove TaskRecordDao And simply the after() in the AbstractTask class | Dolphin scheduler 目前已经移除了数据质量检测,
可见在配置文件中也已经移除了对 相关数据质量涉及的db的
但是代码中依旧存在TaskRecordDao对数据质量的query,
并且SELECT * FROM eamp_hive_log_hd WHERE PROC_NAME='%s' and PROC_DATE like '%s'"
中涉及的eamp_hive_log_hd db明显已经不存在于配置的默认数据库中,
但是在重要的抽象类AbstractTask 中依旧存在对
TaskRecordDao的数据质量检测逻辑的判定,建议移除来保持对重要抽象类的纯净
public void after() {
if (getExitStatusCode() == Constants.EXIT_CODE_SUCCESS) {
// task recor flat : if true , start up qianfan
if (TaskRecordDao.getTaskRecordFlag()
&& TaskType.typeIsNormalTask(taskExecutionContext.getTaskType())) {
AbstractParameters params = TaskParametersUtils.getParameters(taskExecutionContext.getTaskType(), taskExecutionContext.getTaskParams());
// replace placeholder
Map<String, Property> paramsMap = ParamUtils.convert(ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
taskExecutionContext.getDefinedParams(),
params.getLocalParametersMap(),
CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
taskExecutionContext.getScheduleTime());
if (paramsMap != null && !paramsMap.isEmpty()
&& paramsMap.containsKey("v_proc_date")) {
String vProcDate = paramsMap.get("v_proc_date").getValue();
if (!StringUtils.isEmpty(vProcDate)) {
TaskRecordStatus taskRecordState = TaskRecordDao.getTaskRecordState(taskExecutionContext.getTaskName(), vProcDate);
logger.info("task record status : {}", taskRecordState);
if (taskRecordState == TaskRecordStatus.FAILURE) {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
}
}
}
}
} else if (getExitStatusCode() == Constants.EXIT_CODE_KILL) {
setExitStatusCode(Constants.EXIT_CODE_KILL);
} else {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
}
}
| https://github.com/apache/dolphinscheduler/issues/5487 | https://github.com/apache/dolphinscheduler/pull/5492 | 018f5c89f6ee1dbb8259a6036c4beb1874cd3f5c | bc22ae7c91c9cbd7c971796ba3a45358c2f11864 | "2021-05-17T09:46:25Z" | java | "2021-05-18T09:00:03Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractTask.java | *
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task;
import static ch.qos.logback.classic.ClassicConstants.FINALIZE_SESSION_MARKER;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.TaskRecordStatus;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.common.utils.TaskParametersUtils;
import org.apache.dolphinscheduler.dao.TaskRecordDao;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
/**
* executive task
*/
public abstract class AbstractTask { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,487 | [Improvement][Task] Remove TaskRecordDao And simply the after() in the AbstractTask class | Dolphin scheduler 目前已经移除了数据质量检测,
可见在配置文件中也已经移除了对 相关数据质量涉及的db的
但是代码中依旧存在TaskRecordDao对数据质量的query,
并且SELECT * FROM eamp_hive_log_hd WHERE PROC_NAME='%s' and PROC_DATE like '%s'"
中涉及的eamp_hive_log_hd db明显已经不存在于配置的默认数据库中,
但是在重要的抽象类AbstractTask 中依旧存在对
TaskRecordDao的数据质量检测逻辑的判定,建议移除来保持对重要抽象类的纯净
public void after() {
if (getExitStatusCode() == Constants.EXIT_CODE_SUCCESS) {
// task recor flat : if true , start up qianfan
if (TaskRecordDao.getTaskRecordFlag()
&& TaskType.typeIsNormalTask(taskExecutionContext.getTaskType())) {
AbstractParameters params = TaskParametersUtils.getParameters(taskExecutionContext.getTaskType(), taskExecutionContext.getTaskParams());
// replace placeholder
Map<String, Property> paramsMap = ParamUtils.convert(ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
taskExecutionContext.getDefinedParams(),
params.getLocalParametersMap(),
CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
taskExecutionContext.getScheduleTime());
if (paramsMap != null && !paramsMap.isEmpty()
&& paramsMap.containsKey("v_proc_date")) {
String vProcDate = paramsMap.get("v_proc_date").getValue();
if (!StringUtils.isEmpty(vProcDate)) {
TaskRecordStatus taskRecordState = TaskRecordDao.getTaskRecordState(taskExecutionContext.getTaskName(), vProcDate);
logger.info("task record status : {}", taskRecordState);
if (taskRecordState == TaskRecordStatus.FAILURE) {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
}
}
}
}
} else if (getExitStatusCode() == Constants.EXIT_CODE_KILL) {
setExitStatusCode(Constants.EXIT_CODE_KILL);
} else {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
}
}
| https://github.com/apache/dolphinscheduler/issues/5487 | https://github.com/apache/dolphinscheduler/pull/5492 | 018f5c89f6ee1dbb8259a6036c4beb1874cd3f5c | bc22ae7c91c9cbd7c971796ba3a45358c2f11864 | "2021-05-17T09:46:25Z" | java | "2021-05-18T09:00:03Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractTask.java | /**
* varPool string
*/
protected String varPool;
/**
* taskExecutionContext
**/
TaskExecutionContext taskExecutionContext;
/**
* log record
*/
protected Logger logger;
/**
* SHELL process pid
*/
protected int processId;
/**
* SHELL result string
*/
protected String resultString;
/**
* other resource manager appId , for example : YARN etc
*/
protected String appIds; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,487 | [Improvement][Task] Remove TaskRecordDao And simply the after() in the AbstractTask class | Dolphin scheduler 目前已经移除了数据质量检测,
可见在配置文件中也已经移除了对 相关数据质量涉及的db的
但是代码中依旧存在TaskRecordDao对数据质量的query,
并且SELECT * FROM eamp_hive_log_hd WHERE PROC_NAME='%s' and PROC_DATE like '%s'"
中涉及的eamp_hive_log_hd db明显已经不存在于配置的默认数据库中,
但是在重要的抽象类AbstractTask 中依旧存在对
TaskRecordDao的数据质量检测逻辑的判定,建议移除来保持对重要抽象类的纯净
public void after() {
if (getExitStatusCode() == Constants.EXIT_CODE_SUCCESS) {
// task recor flat : if true , start up qianfan
if (TaskRecordDao.getTaskRecordFlag()
&& TaskType.typeIsNormalTask(taskExecutionContext.getTaskType())) {
AbstractParameters params = TaskParametersUtils.getParameters(taskExecutionContext.getTaskType(), taskExecutionContext.getTaskParams());
// replace placeholder
Map<String, Property> paramsMap = ParamUtils.convert(ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
taskExecutionContext.getDefinedParams(),
params.getLocalParametersMap(),
CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
taskExecutionContext.getScheduleTime());
if (paramsMap != null && !paramsMap.isEmpty()
&& paramsMap.containsKey("v_proc_date")) {
String vProcDate = paramsMap.get("v_proc_date").getValue();
if (!StringUtils.isEmpty(vProcDate)) {
TaskRecordStatus taskRecordState = TaskRecordDao.getTaskRecordState(taskExecutionContext.getTaskName(), vProcDate);
logger.info("task record status : {}", taskRecordState);
if (taskRecordState == TaskRecordStatus.FAILURE) {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
}
}
}
}
} else if (getExitStatusCode() == Constants.EXIT_CODE_KILL) {
setExitStatusCode(Constants.EXIT_CODE_KILL);
} else {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
}
}
| https://github.com/apache/dolphinscheduler/issues/5487 | https://github.com/apache/dolphinscheduler/pull/5492 | 018f5c89f6ee1dbb8259a6036c4beb1874cd3f5c | bc22ae7c91c9cbd7c971796ba3a45358c2f11864 | "2021-05-17T09:46:25Z" | java | "2021-05-18T09:00:03Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractTask.java | /**
* cancel
*/
protected volatile boolean cancel = false;
/**
* exit code
*/
protected volatile int exitStatusCode = -1;
/**
* constructor
*
* @param taskExecutionContext taskExecutionContext
* @param logger logger
*/
protected AbstractTask(TaskExecutionContext taskExecutionContext, Logger logger) {
this.taskExecutionContext = taskExecutionContext;
this.logger = logger;
}
/**
* init task
*
* @throws Exception exception
*/
public void init() throws Exception {
}
/**
* task handle
*
* @throws Exception exception
*/ |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,487 | [Improvement][Task] Remove TaskRecordDao And simply the after() in the AbstractTask class | Dolphin scheduler 目前已经移除了数据质量检测,
可见在配置文件中也已经移除了对 相关数据质量涉及的db的
但是代码中依旧存在TaskRecordDao对数据质量的query,
并且SELECT * FROM eamp_hive_log_hd WHERE PROC_NAME='%s' and PROC_DATE like '%s'"
中涉及的eamp_hive_log_hd db明显已经不存在于配置的默认数据库中,
但是在重要的抽象类AbstractTask 中依旧存在对
TaskRecordDao的数据质量检测逻辑的判定,建议移除来保持对重要抽象类的纯净
public void after() {
if (getExitStatusCode() == Constants.EXIT_CODE_SUCCESS) {
// task recor flat : if true , start up qianfan
if (TaskRecordDao.getTaskRecordFlag()
&& TaskType.typeIsNormalTask(taskExecutionContext.getTaskType())) {
AbstractParameters params = TaskParametersUtils.getParameters(taskExecutionContext.getTaskType(), taskExecutionContext.getTaskParams());
// replace placeholder
Map<String, Property> paramsMap = ParamUtils.convert(ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
taskExecutionContext.getDefinedParams(),
params.getLocalParametersMap(),
CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
taskExecutionContext.getScheduleTime());
if (paramsMap != null && !paramsMap.isEmpty()
&& paramsMap.containsKey("v_proc_date")) {
String vProcDate = paramsMap.get("v_proc_date").getValue();
if (!StringUtils.isEmpty(vProcDate)) {
TaskRecordStatus taskRecordState = TaskRecordDao.getTaskRecordState(taskExecutionContext.getTaskName(), vProcDate);
logger.info("task record status : {}", taskRecordState);
if (taskRecordState == TaskRecordStatus.FAILURE) {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
}
}
}
}
} else if (getExitStatusCode() == Constants.EXIT_CODE_KILL) {
setExitStatusCode(Constants.EXIT_CODE_KILL);
} else {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
}
}
| https://github.com/apache/dolphinscheduler/issues/5487 | https://github.com/apache/dolphinscheduler/pull/5492 | 018f5c89f6ee1dbb8259a6036c4beb1874cd3f5c | bc22ae7c91c9cbd7c971796ba3a45358c2f11864 | "2021-05-17T09:46:25Z" | java | "2021-05-18T09:00:03Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractTask.java | public abstract void handle() throws Exception;
/**
* cancel application
*
* @param status status
* @throws Exception exception
*/
public void cancelApplication(boolean status) throws Exception {
this.cancel = status;
}
/**
* log handle
*
* @param logs log list
*/
public void logHandle(List<String> logs) {
if (logs.contains(FINALIZE_SESSION_MARKER.toString())) {
logger.info(FINALIZE_SESSION_MARKER, FINALIZE_SESSION_MARKER.toString());
} else {
logger.info(" -> {}", String.join("\n\t", logs));
}
}
public void setVarPool(String varPool) {
this.varPool = varPool;
}
public String getVarPool() {
return varPool;
}
/** |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,487 | [Improvement][Task] Remove TaskRecordDao And simply the after() in the AbstractTask class | Dolphin scheduler 目前已经移除了数据质量检测,
可见在配置文件中也已经移除了对 相关数据质量涉及的db的
但是代码中依旧存在TaskRecordDao对数据质量的query,
并且SELECT * FROM eamp_hive_log_hd WHERE PROC_NAME='%s' and PROC_DATE like '%s'"
中涉及的eamp_hive_log_hd db明显已经不存在于配置的默认数据库中,
但是在重要的抽象类AbstractTask 中依旧存在对
TaskRecordDao的数据质量检测逻辑的判定,建议移除来保持对重要抽象类的纯净
public void after() {
if (getExitStatusCode() == Constants.EXIT_CODE_SUCCESS) {
// task recor flat : if true , start up qianfan
if (TaskRecordDao.getTaskRecordFlag()
&& TaskType.typeIsNormalTask(taskExecutionContext.getTaskType())) {
AbstractParameters params = TaskParametersUtils.getParameters(taskExecutionContext.getTaskType(), taskExecutionContext.getTaskParams());
// replace placeholder
Map<String, Property> paramsMap = ParamUtils.convert(ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
taskExecutionContext.getDefinedParams(),
params.getLocalParametersMap(),
CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
taskExecutionContext.getScheduleTime());
if (paramsMap != null && !paramsMap.isEmpty()
&& paramsMap.containsKey("v_proc_date")) {
String vProcDate = paramsMap.get("v_proc_date").getValue();
if (!StringUtils.isEmpty(vProcDate)) {
TaskRecordStatus taskRecordState = TaskRecordDao.getTaskRecordState(taskExecutionContext.getTaskName(), vProcDate);
logger.info("task record status : {}", taskRecordState);
if (taskRecordState == TaskRecordStatus.FAILURE) {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
}
}
}
}
} else if (getExitStatusCode() == Constants.EXIT_CODE_KILL) {
setExitStatusCode(Constants.EXIT_CODE_KILL);
} else {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
}
}
| https://github.com/apache/dolphinscheduler/issues/5487 | https://github.com/apache/dolphinscheduler/pull/5492 | 018f5c89f6ee1dbb8259a6036c4beb1874cd3f5c | bc22ae7c91c9cbd7c971796ba3a45358c2f11864 | "2021-05-17T09:46:25Z" | java | "2021-05-18T09:00:03Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractTask.java | * get exit status code
*
* @return exit status code
*/
public int getExitStatusCode() {
return exitStatusCode;
}
public void setExitStatusCode(int exitStatusCode) {
this.exitStatusCode = exitStatusCode;
}
public String getAppIds() {
return appIds;
}
public void setAppIds(String appIds) {
this.appIds = appIds;
}
public int getProcessId() {
return processId;
}
public void setProcessId(int processId) {
this.processId = processId;
}
public String getResultString() {
return resultString;
}
public void setResultString(String resultString) {
this.resultString = resultString;
}
/**
* get task parameters |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,487 | [Improvement][Task] Remove TaskRecordDao And simply the after() in the AbstractTask class | Dolphin scheduler 目前已经移除了数据质量检测,
可见在配置文件中也已经移除了对 相关数据质量涉及的db的
但是代码中依旧存在TaskRecordDao对数据质量的query,
并且SELECT * FROM eamp_hive_log_hd WHERE PROC_NAME='%s' and PROC_DATE like '%s'"
中涉及的eamp_hive_log_hd db明显已经不存在于配置的默认数据库中,
但是在重要的抽象类AbstractTask 中依旧存在对
TaskRecordDao的数据质量检测逻辑的判定,建议移除来保持对重要抽象类的纯净
public void after() {
if (getExitStatusCode() == Constants.EXIT_CODE_SUCCESS) {
// task recor flat : if true , start up qianfan
if (TaskRecordDao.getTaskRecordFlag()
&& TaskType.typeIsNormalTask(taskExecutionContext.getTaskType())) {
AbstractParameters params = TaskParametersUtils.getParameters(taskExecutionContext.getTaskType(), taskExecutionContext.getTaskParams());
// replace placeholder
Map<String, Property> paramsMap = ParamUtils.convert(ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
taskExecutionContext.getDefinedParams(),
params.getLocalParametersMap(),
CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
taskExecutionContext.getScheduleTime());
if (paramsMap != null && !paramsMap.isEmpty()
&& paramsMap.containsKey("v_proc_date")) {
String vProcDate = paramsMap.get("v_proc_date").getValue();
if (!StringUtils.isEmpty(vProcDate)) {
TaskRecordStatus taskRecordState = TaskRecordDao.getTaskRecordState(taskExecutionContext.getTaskName(), vProcDate);
logger.info("task record status : {}", taskRecordState);
if (taskRecordState == TaskRecordStatus.FAILURE) {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
}
}
}
}
} else if (getExitStatusCode() == Constants.EXIT_CODE_KILL) {
setExitStatusCode(Constants.EXIT_CODE_KILL);
} else {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
}
}
| https://github.com/apache/dolphinscheduler/issues/5487 | https://github.com/apache/dolphinscheduler/pull/5492 | 018f5c89f6ee1dbb8259a6036c4beb1874cd3f5c | bc22ae7c91c9cbd7c971796ba3a45358c2f11864 | "2021-05-17T09:46:25Z" | java | "2021-05-18T09:00:03Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractTask.java | *
* @return AbstractParameters
*/
public abstract AbstractParameters getParameters();
/**
* result processing
*/
public void after() {
if (getExitStatusCode() == Constants.EXIT_CODE_SUCCESS) {
if (TaskRecordDao.getTaskRecordFlag() && typeIsNormalTask(taskExecutionContext.getTaskType())) {
AbstractParameters params = TaskParametersUtils.getParameters(taskExecutionContext.getTaskType(), taskExecutionContext.getTaskParams());
Map<String, Property> paramsMap = ParamUtils.convert(ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
taskExecutionContext.getDefinedParams(),
params.getLocalParametersMap(),
CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
taskExecutionContext.getScheduleTime());
if (paramsMap != null && !paramsMap.isEmpty()
&& paramsMap.containsKey("v_proc_date")) {
String vProcDate = paramsMap.get("v_proc_date").getValue();
if (!StringUtils.isEmpty(vProcDate)) {
TaskRecordStatus taskRecordState = TaskRecordDao.getTaskRecordState(taskExecutionContext.getTaskName(), vProcDate);
logger.info("task record status : {}", taskRecordState);
if (taskRecordState == TaskRecordStatus.FAILURE) {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
}
}
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,487 | [Improvement][Task] Remove TaskRecordDao And simply the after() in the AbstractTask class | Dolphin scheduler 目前已经移除了数据质量检测,
可见在配置文件中也已经移除了对 相关数据质量涉及的db的
但是代码中依旧存在TaskRecordDao对数据质量的query,
并且SELECT * FROM eamp_hive_log_hd WHERE PROC_NAME='%s' and PROC_DATE like '%s'"
中涉及的eamp_hive_log_hd db明显已经不存在于配置的默认数据库中,
但是在重要的抽象类AbstractTask 中依旧存在对
TaskRecordDao的数据质量检测逻辑的判定,建议移除来保持对重要抽象类的纯净
public void after() {
if (getExitStatusCode() == Constants.EXIT_CODE_SUCCESS) {
// task recor flat : if true , start up qianfan
if (TaskRecordDao.getTaskRecordFlag()
&& TaskType.typeIsNormalTask(taskExecutionContext.getTaskType())) {
AbstractParameters params = TaskParametersUtils.getParameters(taskExecutionContext.getTaskType(), taskExecutionContext.getTaskParams());
// replace placeholder
Map<String, Property> paramsMap = ParamUtils.convert(ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
taskExecutionContext.getDefinedParams(),
params.getLocalParametersMap(),
CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
taskExecutionContext.getScheduleTime());
if (paramsMap != null && !paramsMap.isEmpty()
&& paramsMap.containsKey("v_proc_date")) {
String vProcDate = paramsMap.get("v_proc_date").getValue();
if (!StringUtils.isEmpty(vProcDate)) {
TaskRecordStatus taskRecordState = TaskRecordDao.getTaskRecordState(taskExecutionContext.getTaskName(), vProcDate);
logger.info("task record status : {}", taskRecordState);
if (taskRecordState == TaskRecordStatus.FAILURE) {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
}
}
}
}
} else if (getExitStatusCode() == Constants.EXIT_CODE_KILL) {
setExitStatusCode(Constants.EXIT_CODE_KILL);
} else {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
}
}
| https://github.com/apache/dolphinscheduler/issues/5487 | https://github.com/apache/dolphinscheduler/pull/5492 | 018f5c89f6ee1dbb8259a6036c4beb1874cd3f5c | bc22ae7c91c9cbd7c971796ba3a45358c2f11864 | "2021-05-17T09:46:25Z" | java | "2021-05-18T09:00:03Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractTask.java | } else if (getExitStatusCode() == Constants.EXIT_CODE_KILL) {
setExitStatusCode(Constants.EXIT_CODE_KILL);
} else {
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
}
}
private boolean typeIsNormalTask(String taskType) {
return !(TaskType.SUB_PROCESS.getDesc().equalsIgnoreCase(taskType) || TaskType.DEPENDENT.getDesc().equalsIgnoreCase(taskType));
}
/**
* get exit status according to exitCode
*
* @return exit status
*/
public ExecutionStatus getExitStatus() {
ExecutionStatus status;
switch (getExitStatusCode()) {
case Constants.EXIT_CODE_SUCCESS:
status = ExecutionStatus.SUCCESS;
break;
case Constants.EXIT_CODE_KILL:
status = ExecutionStatus.KILL;
break;
default:
status = ExecutionStatus.FAILURE;
break;
}
return status;
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/CommonUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResUploadType;
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/CommonUtils.java | import java.io.IOException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* common utils
*/
public class CommonUtils {
private static final Logger logger = LoggerFactory.getLogger(CommonUtils.class);
private static final Base64 BASE64 = new Base64();
private CommonUtils() {
throw new UnsupportedOperationException("Construct CommonUtils");
}
/**
* @return get the path of system environment variables
*/
public static String getSystemEnvPath() {
String envPath = PropertyUtils.getString(Constants.DOLPHINSCHEDULER_ENV_PATH);
if (StringUtils.isEmpty(envPath)) {
URL envDefaultPath = CommonUtils.class.getClassLoader().getResource(Constants.ENV_PATH);
if (envDefaultPath != null) {
envPath = envDefaultPath.getPath();
logger.debug("env path :{}", envPath);
} else {
envPath = "/etc/profile";
}
}
return envPath;
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/CommonUtils.java | /**
* @return is develop mode
*/
public static boolean isDevelopMode() {
return PropertyUtils.getBoolean(Constants.DEVELOPMENT_STATE, true);
}
/**
* @return sudo enable
*/
public static boolean isSudoEnable() {
return PropertyUtils.getBoolean(Constants.SUDO_ENABLE, true);
}
/**
* if upload resource is HDFS and kerberos startup is true , else false
*
* @return true if upload resource is HDFS and kerberos startup
*/
public static boolean getKerberosStartupState() {
String resUploadStartupType = PropertyUtils.getUpperCaseString(Constants.RESOURCE_STORAGE_TYPE);
ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
Boolean kerberosStartupState = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false);
return resUploadType == ResUploadType.HDFS && kerberosStartupState;
}
/**
* load kerberos configuration
*
* @throws Exception errors
*/
public static void loadKerberosConf() throws Exception {
loadKerberosConf(PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH), |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/CommonUtils.java | PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME),
PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH));
}
/**
* load kerberos configuration
* @param javaSecurityKrb5Conf javaSecurityKrb5Conf
* @param loginUserKeytabUsername loginUserKeytabUsername
* @param loginUserKeytabPath loginUserKeytabPath
* @throws IOException errors
*/
public static void loadKerberosConf(String javaSecurityKrb5Conf, String loginUserKeytabUsername, String loginUserKeytabPath) throws IOException {
if (CommonUtils.getKerberosStartupState()) {
System.setProperty(Constants.JAVA_SECURITY_KRB5_CONF, StringUtils.defaultIfBlank(javaSecurityKrb5Conf, PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH)));
Configuration configuration = new Configuration();
configuration.set(Constants.HADOOP_SECURITY_AUTHENTICATION, Constants.KERBEROS);
UserGroupInformation.setConfiguration(configuration);
UserGroupInformation.loginUserFromKeytab(StringUtils.defaultIfBlank(loginUserKeytabUsername, PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME)),
StringUtils.defaultIfBlank(loginUserKeytabPath, PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH)));
}
}
/**
* encode password
*/
public static String encodePassword(String password) {
if (StringUtils.isEmpty(password)) {
return StringUtils.EMPTY;
}
boolean encryptionEnable = PropertyUtils.getBoolean(Constants.DATASOURCE_ENCRYPTION_ENABLE, false);
if (!encryptionEnable) { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/CommonUtils.java | return password;
}
String salt = PropertyUtils.getString(Constants.DATASOURCE_ENCRYPTION_SALT, Constants.DATASOURCE_ENCRYPTION_SALT_DEFAULT);
String passwordWithSalt = salt + new String(BASE64.encode(password.getBytes(StandardCharsets.UTF_8)));
return new String(BASE64.encode(passwordWithSalt.getBytes(StandardCharsets.UTF_8)));
}
/**
 * Decode a password stored by {@code encodePassword}:
 * base64 -&gt; strip the configured salt prefix -&gt; base64 -&gt; plain text.
 * The input is returned unchanged when it is empty, when datasource
 * encryption is disabled, or when the decoded value does not carry the
 * expected salt prefix (e.g. a legacy plain-text value).
 *
 * @param password stored (possibly encoded) password
 * @return the plain-text password
 */
public static String decodePassword(String password) {
    if (StringUtils.isEmpty(password)) {
        return StringUtils.EMPTY;
    }
    boolean encryptionEnable = PropertyUtils.getBoolean(Constants.DATASOURCE_ENCRYPTION_ENABLE, false);
    if (!encryptionEnable) {
        // encryption switched off: the stored value is already plain text
        return password;
    }
    String salt = PropertyUtils.getString(Constants.DATASOURCE_ENCRYPTION_SALT, Constants.DATASOURCE_ENCRYPTION_SALT_DEFAULT);
    String passwordWithSalt = new String(BASE64.decode(password), StandardCharsets.UTF_8);
    if (!passwordWithSalt.startsWith(salt)) {
        // SECURITY: do not log the password value itself — in this branch the
        // stored value may well be a legacy plain-text credential, and the
        // encoded form is trivially reversible anyway.
        logger.warn("There is a password and salt mismatch, returning the stored value unchanged");
        return password;
    }
    return new String(BASE64.decode(passwordWithSalt.substring(salt.length())), StandardCharsets.UTF_8);
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import static org.apache.dolphinscheduler.common.Constants.RESOURCE_UPLOAD_PATH;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.ResUploadType;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.exception.BaseException;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.client.cli.RMAdminCLI;
import java.io.BufferedReader;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.security.PrivilegedExceptionAction;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
/**
* hadoop utils
* single instance
*/
public class HadoopUtils implements Closeable { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | private static final Logger logger = LoggerFactory.getLogger(HadoopUtils.class);
private static String hdfsUser = PropertyUtils.getString(Constants.HDFS_ROOT_USER);
public static final String resourceUploadPath = PropertyUtils.getString(RESOURCE_UPLOAD_PATH, "/dolphinscheduler");
public static final String rmHaIds = PropertyUtils.getString(Constants.YARN_RESOURCEMANAGER_HA_RM_IDS);
public static final String appAddress = PropertyUtils.getString(Constants.YARN_APPLICATION_STATUS_ADDRESS);
public static final String jobHistoryAddress = PropertyUtils.getString(Constants.YARN_JOB_HISTORY_STATUS_ADDRESS);
private static final String HADOOP_UTILS_KEY = "HADOOP_UTILS_KEY";
private static final LoadingCache<String, HadoopUtils> cache = CacheBuilder
.newBuilder()
.expireAfterWrite(PropertyUtils.getInt(Constants.KERBEROS_EXPIRE_TIME, 2), TimeUnit.HOURS)
.build(new CacheLoader<String, HadoopUtils>() {
@Override
public HadoopUtils load(String key) throws Exception {
return new HadoopUtils();
}
});
private static volatile boolean yarnEnabled = false;
private Configuration configuration;
private FileSystem fs;
private HadoopUtils() { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | init();
initHdfsPath();
}
/**
 * Obtain the shared {@code HadoopUtils} instance from the guava cache.
 * Cache entries expire after {@code kerberos.expire.time} hours (see the
 * {@code expireAfterWrite} builder above), so a fresh instance — and,
 * presumably, a fresh kerberos login via init() — is constructed after
 * expiry. NOTE(review): "failed find any kerberos" has been reported after
 * several days even with a 1h expire time; relogin behaviour should be
 * verified rather than assumed.
 */
public static HadoopUtils getInstance() {
    return cache.getUnchecked(HADOOP_UTILS_KEY);
}
/**
 * Ensure the dolphinscheduler resource root directory exists on hdfs.
 * Failures are only logged, never propagated (best effort).
 */
private void initHdfsPath() {
    Path rootPath = new Path(resourceUploadPath);
    try {
        if (fs.exists(rootPath)) {
            return;
        }
        fs.mkdirs(rootPath);
    } catch (Exception e) {
        logger.error(e.getMessage(), e);
    }
}
/**
* init hadoop configuration
*/
private void init() {
try {
configuration = new HdfsConfiguration();
String resourceStorageType = PropertyUtils.getUpperCaseString(Constants.RESOURCE_STORAGE_TYPE);
ResUploadType resUploadType = ResUploadType.valueOf(resourceStorageType);
if (resUploadType == ResUploadType.HDFS) {
if (PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false)) {
System.setProperty(Constants.JAVA_SECURITY_KRB5_CONF, |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH));
configuration.set(Constants.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
hdfsUser = "";
UserGroupInformation.setConfiguration(configuration);
UserGroupInformation.loginUserFromKeytab(PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME),
PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH));
}
String defaultFS = configuration.get(Constants.FS_DEFAULTFS);
if (defaultFS.startsWith("file")) {
String defaultFSProp = PropertyUtils.getString(Constants.FS_DEFAULTFS);
if (StringUtils.isNotBlank(defaultFSProp)) {
Map<String, String> fsRelatedProps = PropertyUtils.getPrefixedProperties("fs.");
configuration.set(Constants.FS_DEFAULTFS, defaultFSProp);
fsRelatedProps.forEach((key, value) -> configuration.set(key, value));
} else {
logger.error("property:{} can not to be empty, please set!", Constants.FS_DEFAULTFS);
throw new RuntimeException(
String.format("property: %s can not to be empty, please set!", Constants.FS_DEFAULTFS)
);
}
} else {
logger.info("get property:{} -> {}, from core-site.xml hdfs-site.xml ", Constants.FS_DEFAULTFS, defaultFS);
}
if (fs == null) {
if (StringUtils.isNotEmpty(hdfsUser)) {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(hdfsUser);
ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
@Override |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | public Boolean run() throws Exception {
fs = FileSystem.get(configuration);
return true;
}
});
} else {
logger.warn("hdfs.root.user is not set value!");
fs = FileSystem.get(configuration);
}
}
} else if (resUploadType == ResUploadType.S3) {
System.setProperty(Constants.AWS_S3_V4, Constants.STRING_TRUE);
configuration.set(Constants.FS_DEFAULTFS, PropertyUtils.getString(Constants.FS_DEFAULTFS));
configuration.set(Constants.FS_S3A_ENDPOINT, PropertyUtils.getString(Constants.FS_S3A_ENDPOINT));
configuration.set(Constants.FS_S3A_ACCESS_KEY, PropertyUtils.getString(Constants.FS_S3A_ACCESS_KEY));
configuration.set(Constants.FS_S3A_SECRET_KEY, PropertyUtils.getString(Constants.FS_S3A_SECRET_KEY));
fs = FileSystem.get(configuration);
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
/**
 * Expose the hadoop configuration assembled by {@code init()}.
 *
 * @return the active hadoop {@link Configuration} of this instance
 */
public Configuration getConfiguration() {
    return configuration;
}
/**
* get application url |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | *
* @param applicationId application id
* @return url of application
*/
public String getApplicationUrl(String applicationId) throws Exception {
    // Requesting an application url marks yarn as in use for this process.
    yarnEnabled = true;
    // Empty rmHaIds -> single resource manager; otherwise resolve the active
    // member of the HA pair from the configured id list.
    String urlTemplate;
    if (StringUtils.isEmpty(rmHaIds)) {
        urlTemplate = appAddress;
    } else {
        urlTemplate = getAppAddress(appAddress, rmHaIds);
    }
    if (StringUtils.isBlank(urlTemplate)) {
        throw new BaseException("yarn application url generation failed");
    }
    if (logger.isDebugEnabled()) {
        logger.debug("yarn application url:{}, applicationId:{}", urlTemplate, applicationId);
    }
    int rmHttpPort = PropertyUtils.getInt(Constants.HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT, 8088);
    return String.format(urlTemplate, String.valueOf(rmHttpPort), applicationId);
}
/**
 * Build the job-history server URL for a yarn application; the history
 * server addresses jobs with a "job_..." id instead of "application_...".
 *
 * @param applicationId yarn application id
 * @return job history status url for that application
 */
public String getJobHistoryUrl(String applicationId) {
    return String.format(jobHistoryAddress, applicationId.replace("application", "job"));
}
/**
* cat file on hdfs
* |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | * @param hdfsFilePath hdfs file path
* @return byte[] byte array
* @throws IOException errors
*/
public byte[] catFile(String hdfsFilePath) throws IOException {
    // A blank path is treated as "no content" rather than as an error.
    if (StringUtils.isBlank(hdfsFilePath)) {
        logger.error("hdfs file path:{} is blank", hdfsFilePath);
        return new byte[0];
    }
    try (FSDataInputStream in = fs.open(new Path(hdfsFilePath))) {
        return IOUtils.toByteArray(in);
    }
}
/**
* cat file on hdfs
*
* @param hdfsFilePath hdfs file path
* @param skipLineNums skip line numbers
* @param limit read how many lines
* @return content of file
* @throws IOException errors
*/
public List<String> catFile(String hdfsFilePath, int skipLineNums, int limit) throws IOException {
if (StringUtils.isBlank(hdfsFilePath)) {
logger.error("hdfs file path:{} is blank", hdfsFilePath);
return Collections.emptyList();
}
try (FSDataInputStream in = fs.open(new Path(hdfsFilePath))) {
BufferedReader br = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8));
Stream<String> stream = br.lines().skip(skipLineNums).limit(limit); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | return stream.collect(Collectors.toList());
}
}
/**
 * Create the given directory and all missing parents on hdfs, with the
 * semantics of unix 'mkdir -p': an already existing hierarchy is not an
 * error.
 *
 * @param hdfsPath path to create
 * @return true if the directory now exists
 * @throws IOException on hdfs failure
 */
public boolean mkdir(String hdfsPath) throws IOException {
    Path dir = new Path(hdfsPath);
    return fs.mkdirs(dir);
}
/**
 * Copy a file from one hdfs location to another.
 *
 * @param srcPath      source hdfs path
 * @param dstPath      destination hdfs path
 * @param deleteSource whether the source is removed after a successful copy
 * @param overwrite    whether an existing destination file may be replaced
 * @return true on success
 * @throws IOException on hdfs failure
 */
public boolean copy(String srcPath, String dstPath, boolean deleteSource, boolean overwrite) throws IOException {
    Path src = new Path(srcPath);
    Path dst = new Path(dstPath);
    return FileUtil.copy(fs, src, fs, dst, deleteSource, overwrite, fs.getConf());
}
/**
* the src file is on the local disk. Add it to FS at |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | * the given dst name.
*
* @param srcFile local file
* @param dstHdfsPath destination hdfs path
* @param deleteSource whether to delete the src
* @param overwrite whether to overwrite an existing file
* @return if success or not
* @throws IOException errors
*/
public boolean copyLocalToHdfs(String srcFile, String dstHdfsPath, boolean deleteSource, boolean overwrite) throws IOException {
    // Upload from the local filesystem straight into hdfs.
    fs.copyFromLocalFile(deleteSource, overwrite, new Path(srcFile), new Path(dstHdfsPath));
    return true;
}
/**
* copy hdfs file to local
*
* @param srcHdfsFilePath source hdfs file path
* @param dstFile destination file
* @param deleteSource delete source
* @param overwrite overwrite
* @return result of copy hdfs file to local
* @throws IOException errors
*/
public boolean copyHdfsToLocal(String srcHdfsFilePath, String dstFile, boolean deleteSource, boolean overwrite) throws IOException {
Path srcPath = new Path(srcHdfsFilePath);
File dstPath = new File(dstFile);
if (dstPath.exists()) {
if (dstPath.isFile()) { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | if (overwrite) {
Files.delete(dstPath.toPath());
}
} else {
logger.error("destination file must be a file");
}
}
if (!dstPath.getParentFile().exists()) {
dstPath.getParentFile().mkdirs();
}
return FileUtil.copy(fs, srcPath, dstPath, deleteSource, fs.getConf());
}
/**
 * Delete a file or directory on hdfs.
 *
 * @param hdfsFilePath path to delete
 * @param recursive    for a directory, true deletes the whole tree and
 *                     false makes the call throw; for a plain file either
 *                     value is accepted
 * @return true if the delete succeeded, false otherwise
 * @throws IOException on hdfs failure
 */
public boolean delete(String hdfsFilePath, boolean recursive) throws IOException {
    Path target = new Path(hdfsFilePath);
    return fs.delete(target, recursive);
}
/**
* check if exists
*
* @param hdfsFilePath source file path
* @return result of exists or not |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | * @throws IOException errors
*/
public boolean exists(String hdfsFilePath) throws IOException {
    // Simple existence probe on hdfs.
    Path target = new Path(hdfsFilePath);
    return fs.exists(target);
}
/**
 * List the status of every entry directly under the given hdfs path.
 *
 * @param filePath path to list
 * @return one {@link FileStatus} per entry
 * @throws Exception when the hdfs listing fails (original cause preserved)
 */
public FileStatus[] listFileStatus(String filePath) throws Exception {
    Path dir = new Path(filePath);
    try {
        return fs.listStatus(dir);
    } catch (IOException e) {
        logger.error("Get file list exception", e);
        throw new Exception("Get file list exception", e);
    }
}
/**
* Renames Path src to Path dst. Can take place on local fs
* or remote DFS.
*
* @param src path to be renamed
* @param dst new path after rename
* @return true if rename is successful
* @throws IOException on failure
*/
public boolean rename(String src, String dst) throws IOException { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | return fs.rename(new Path(src), new Path(dst));
}
/**
 * Whether a yarn application url has been requested from this process.
 * The flag is flipped to true by {@code getApplicationUrl} and never reset.
 *
 * @return true once yarn has been used via {@code getApplicationUrl}
 */
public boolean isYarnEnabled() {
    return yarnEnabled;
}
/**
* get the state of an application
*
* @param applicationId application id
* @return the return may be null or there may be other parse exceptions
*/
public ExecutionStatus getApplicationStatus(String applicationId) throws Exception {
if (StringUtils.isEmpty(applicationId)) {
return null;
}
String result = Constants.FAILED;
String applicationUrl = getApplicationUrl(applicationId);
if (logger.isDebugEnabled()) {
logger.debug("generate yarn application url, applicationUrl={}", applicationUrl);
}
String responseContent = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false) ? KerberosHttpClient.get(applicationUrl) : HttpUtils.get(applicationUrl);
if (responseContent != null) {
ObjectNode jsonObject = JSONUtils.parseObject(responseContent);
if (!jsonObject.has("app")) {
return ExecutionStatus.FAILURE; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | }
result = jsonObject.path("app").path("finalStatus").asText();
} else {
String jobHistoryUrl = getJobHistoryUrl(applicationId);
if (logger.isDebugEnabled()) {
logger.debug("generate yarn job history application url, jobHistoryUrl={}", jobHistoryUrl);
}
responseContent = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false) ? KerberosHttpClient.get(jobHistoryUrl) : HttpUtils.get(jobHistoryUrl);
if (null != responseContent) {
ObjectNode jsonObject = JSONUtils.parseObject(responseContent);
if (!jsonObject.has("job")) {
return ExecutionStatus.FAILURE;
}
result = jsonObject.path("job").path("state").asText();
} else {
return ExecutionStatus.FAILURE;
}
}
switch (result) {
case Constants.ACCEPTED:
return ExecutionStatus.SUBMITTED_SUCCESS;
case Constants.SUCCEEDED:
return ExecutionStatus.SUCCESS;
case Constants.NEW:
case Constants.NEW_SAVING:
case Constants.SUBMITTED:
case Constants.FAILED:
return ExecutionStatus.FAILURE;
case Constants.KILLED: |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | return ExecutionStatus.KILL;
case Constants.RUNNING:
default:
return ExecutionStatus.RUNNING_EXECUTION;
}
}
/**
 * @return the configured hdfs base path for dolphinscheduler data, or an
 *         empty string when resources live directly under "/" (prevents
 *         callers from building paths with a double leading slash)
 */
public static String getHdfsDataBasePath() {
    return "/".equals(resourceUploadPath) ? "" : resourceUploadPath;
}
/**
* hdfs resource dir
*
* @param tenantCode tenant code
* @param resourceType resource type
* @return hdfs resource dir
*/
public static String getHdfsDir(ResourceType resourceType, String tenantCode) {
String hdfsDir = "";
if (resourceType.equals(ResourceType.FILE)) {
hdfsDir = getHdfsResDir(tenantCode); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | } else if (resourceType.equals(ResourceType.UDF)) {
hdfsDir = getHdfsUdfDir(tenantCode);
}
return hdfsDir;
}
/**
 * @param tenantCode tenant code
 * @return the tenant's resource directory on hdfs
 */
public static String getHdfsResDir(String tenantCode) {
    return getHdfsTenantDir(tenantCode) + "/resources";
}
/**
 * @param tenantCode tenant code
 * @param userId     user id
 * @return the user's home directory under the tenant directory on hdfs
 */
public static String getHdfsUserDir(String tenantCode, int userId) {
    return getHdfsTenantDir(tenantCode) + "/home/" + userId;
}
/**
* hdfs udf dir
*
* @param tenantCode tenant code
* @return get udf dir on hdfs
*/ |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | public static String getHdfsUdfDir(String tenantCode) {
return String.format("%s/udfs", getHdfsTenantDir(tenantCode));
}
/**
 * Build the full hdfs path of a file for a tenant.
 *
 * @param resourceType FILE or UDF; selects the base directory
 * @param tenantCode   tenant code
 * @param fileName     file name, with or without a single leading slash
 * @return full hdfs file name
 */
public static String getHdfsFileName(ResourceType resourceType, String tenantCode, String fileName) {
    // Drop one leading slash so the name is relative to the base directory.
    String relativeName = fileName.startsWith("/") ? fileName.substring(1) : fileName;
    return getHdfsDir(resourceType, tenantCode) + "/" + relativeName;
}
/**
* get absolute path and name for resource file on hdfs
*
* @param tenantCode tenant code
* @param fileName file name
* @return get absolute path and name for file on hdfs
*/
public static String getHdfsResourceFileName(String tenantCode, String fileName) {
if (fileName.startsWith("/")) {
fileName = fileName.replaceFirst("/", "");
}
return String.format("%s/%s", getHdfsResDir(tenantCode), fileName);
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | /**
* get absolute path and name for udf file on hdfs
*
* @param tenantCode tenant code
* @param fileName file name
* @return get absolute path and name for udf file on hdfs
*/
public static String getHdfsUdfFileName(String tenantCode, String fileName) {
    // Drop one leading slash so the name is relative to the udf directory.
    String relativeName = fileName.startsWith("/") ? fileName.substring(1) : fileName;
    return getHdfsUdfDir(tenantCode) + "/" + relativeName;
}
/**
 * @param tenantCode tenant code
 * @return the tenant's base directory on hdfs
 */
public static String getHdfsTenantDir(String tenantCode) {
    return getHdfsDataBasePath() + "/" + tenantCode;
}
/**
* getAppAddress
*
* @param appAddress app address
* @param rmHa resource manager ha
* @return app address
*/
public static String getAppAddress(String appAddress, String rmHa) {
String activeRM = YarnHAAdminUtils.getAcitveRMName(rmHa); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | if (StringUtils.isEmpty(activeRM)) {
return null;
}
String[] split1 = appAddress.split(Constants.DOUBLE_SLASH);
if (split1.length != 2) {
return null;
}
String start = split1[0] + Constants.DOUBLE_SLASH;
String[] split2 = split1[1].split(Constants.COLON);
if (split2.length != 2) {
return null;
}
String end = Constants.COLON + split2[1];
return start + activeRM + end;
}
/**
 * Release the underlying hdfs {@code FileSystem}, if one was opened.
 *
 * @throws IOException when closing the filesystem fails (cause preserved)
 */
@Override
public void close() throws IOException {
    if (fs == null) {
        return;
    }
    try {
        fs.close();
    } catch (IOException e) {
        logger.error("Close HadoopUtils instance failed", e);
        throw new IOException("Close HadoopUtils instance failed", e);
    }
}
/**
* yarn ha admin utils
*/
private static final class YarnHAAdminUtils extends RMAdminCLI { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | /**
* get active resourcemanager
*/
public static String getAcitveRMName(String rmIds) {
String[] rmIdArr = rmIds.split(Constants.COMMA);
int activeResourceManagerPort = PropertyUtils.getInt(Constants.HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT, 8088);
String yarnUrl = "http://%s:" + activeResourceManagerPort + "/ws/v1/cluster/info";
try {
/**
* send http get request to rm
*/ |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | for (String rmId : rmIdArr) {
String state = getRMState(String.format(yarnUrl, rmId));
if (Constants.HADOOP_RM_STATE_ACTIVE.equals(state)) {
return rmId;
}
}
} catch (Exception e) {
logger.error("yarn ha application url generation failed, message:{}", e.getMessage());
}
return null;
}
/**
* get ResourceManager state
*/
public static String getRMState(String url) {
String retStr = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false) ? KerberosHttpClient.get(url) : HttpUtils.get(url);
if (StringUtils.isEmpty(retStr)) {
return null;
}
ObjectNode jsonObject = JSONUtils.parseObject(retStr);
if (!jsonObject.has("clusterInfo")) {
return null;
}
return jsonObject.get("clusterInfo").path("haState").asText();
}
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/CommonUtilsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import org.apache.dolphinscheduler.common.Constants;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.InetAddress;
import java.net.UnknownHostException;
/**
* configuration test
*/
public class CommonUtilsTest { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/CommonUtilsTest.java | private static final Logger logger = LoggerFactory.getLogger(CommonUtilsTest.class);
@Test
public void getSystemEnvPath() {
logger.info(CommonUtils.getSystemEnvPath());
Assert.assertTrue(true);
}
@Test
public void isDevelopMode() {
logger.info("develop mode: {}",CommonUtils.isDevelopMode());
Assert.assertTrue(true);
}
@Test
public void getKerberosStartupState(){
logger.info("kerberos startup state: {}",CommonUtils.getKerberosStartupState());
Assert.assertTrue(true);
}
@Test
public void loadKerberosConf(){
try {
CommonUtils.loadKerberosConf();
Assert.assertTrue(true);
} catch (Exception e) {
Assert.fail("load Kerberos Conf failed"); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/CommonUtilsTest.java | }
}
@Test
public void getHdfsDataBasePath() {
logger.info(HadoopUtils.getHdfsDataBasePath());
Assert.assertTrue(true);
}
@Test
public void getDownloadFilename() {
logger.info(FileUtils.getDownloadFilename("a.txt"));
Assert.assertTrue(true);
}
@Test
public void getUploadFilename() {
logger.info(FileUtils.getUploadFilename("1234", "a.txt"));
Assert.assertTrue(true);
}
@Test
public void getHdfsDir() {
logger.info(HadoopUtils.getHdfsResDir("1234"));
Assert.assertTrue(true);
}
@Test
public void test(){
InetAddress IP = null;
try {
IP = InetAddress.getLocalHost();
logger.info(IP.getHostAddress());
} catch (UnknownHostException e) {
e.printStackTrace(); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,527 | [Improvement][api-server] failed find any kerberos | kerberos.expire.time 已经设置成了1
但是,每隔几天后,资源中心的资源就无法下载了,提示,failed find any kerberos ,重启后才可用。

建议优化使之能够长期生效,避免重启 | https://github.com/apache/dolphinscheduler/issues/5527 | https://github.com/apache/dolphinscheduler/pull/5533 | 46660b58ed82d76904f26c5b869f3aa96e50727a | 9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86 | "2021-05-20T06:44:32Z" | java | "2021-05-23T15:43:59Z" | dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/CommonUtilsTest.java | }
Assert.assertTrue(true);
}
@Test
public void encodePassword() {
PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE,"true");
Assert.assertEquals("",CommonUtils.encodePassword(""));
Assert.assertEquals("IUAjJCVeJipNVEl6TkRVMg==",CommonUtils.encodePassword("123456"));
Assert.assertEquals("IUAjJCVeJipJVkZCV2xoVFYwQT0=",CommonUtils.encodePassword("!QAZXSW@"));
Assert.assertEquals("IUAjJCVeJipOV1JtWjJWeUtFQT0=",CommonUtils.encodePassword("5dfger(@"));
PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE,"false");
Assert.assertEquals("",CommonUtils.encodePassword(""));
Assert.assertEquals("123456",CommonUtils.encodePassword("123456"));
Assert.assertEquals("!QAZXSW@",CommonUtils.encodePassword("!QAZXSW@"));
Assert.assertEquals("5dfger(@",CommonUtils.encodePassword("5dfger(@"));
}
@Test
public void decodePassword() {
PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE, "true");
Assert.assertEquals("", CommonUtils.decodePassword(""));
Assert.assertEquals("123456", CommonUtils.decodePassword("IUAjJCVeJipNVEl6TkRVMg=="));
Assert.assertEquals("!QAZXSW@", CommonUtils.decodePassword("IUAjJCVeJipJVkZCV2xoVFYwQT0="));
Assert.assertEquals("5dfger(@", CommonUtils.decodePassword("IUAjJCVeJipOV1JtWjJWeUtFQT0="));
PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE, "false");
Assert.assertEquals("", CommonUtils.decodePassword(""));
Assert.assertEquals("123456", CommonUtils.decodePassword("123456"));
Assert.assertEquals("!QAZXSW@", CommonUtils.decodePassword("!QAZXSW@"));
Assert.assertEquals("5dfger(@", CommonUtils.decodePassword("5dfger(@"));
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,210 | [Improvement][Server] Must restart master if Zk reconnect | **Describe the question**
If zookeeper is disconnect, the master server stop work, and when the zookeeper reconnect, we need to restart the master server, the master server can't automatic recovery.
Because when the zookeeper reconnect, treeCache will produce NODE_REMOVED event, `ZKMasterClient` receive this event and add the server to deadNode. When the `HeartBeatTask` check the master is on dead path, it will stop the MasterServer.
**What are the current deficiencies and the benefits of improvement**
Improving the server availability.
**Which version of DolphinScheduler:**
-[1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5210 | https://github.com/apache/dolphinscheduler/pull/5211 | 9d0c816cee102edbba2ac080f483c8a73a0b7b30 | 842c5400e605a8b8eb0d8fdc78701f10222063fd | "2021-04-04T14:23:48Z" | java | "2021-05-24T21:03:29Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/MasterRegistry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0 |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,210 | [Improvement][Server] Must restart master if Zk reconnect | **Describe the question**
If zookeeper is disconnect, the master server stop work, and when the zookeeper reconnect, we need to restart the master server, the master server can't automatic recovery.
Because when the zookeeper reconnect, treeCache will produce NODE_REMOVED event, `ZKMasterClient` receive this event and add the server to deadNode. When the `HeartBeatTask` check the master is on dead path, it will stop the MasterServer.
**What are the current deficiencies and the benefits of improvement**
Improving the server availability.
**Which version of DolphinScheduler:**
-[1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5210 | https://github.com/apache/dolphinscheduler/pull/5211 | 9d0c816cee102edbba2ac080f483c8a73a0b7b30 | 842c5400e605a8b8eb0d8fdc78701f10222063fd | "2021-04-04T14:23:48Z" | java | "2021-05-24T21:03:29Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/MasterRegistry.java | *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.registry;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.remote.utils.NamedThreadFactory;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.registry.HeartBeatTask;
import org.apache.dolphinscheduler.server.registry.ZookeeperRegistryCenter;
import org.apache.curator.framework.state.ConnectionState;
import java.util.Date;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import javax.annotation.PostConstruct;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import com.google.common.collect.Sets;
/**
* master registry
*/
@Service |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,210 | [Improvement][Server] Must restart master if Zk reconnect | **Describe the question**
If zookeeper is disconnect, the master server stop work, and when the zookeeper reconnect, we need to restart the master server, the master server can't automatic recovery.
Because when the zookeeper reconnect, treeCache will produce NODE_REMOVED event, `ZKMasterClient` receive this event and add the server to deadNode. When the `HeartBeatTask` check the master is on dead path, it will stop the MasterServer.
**What are the current deficiencies and the benefits of improvement**
Improving the server availability.
**Which version of DolphinScheduler:**
-[1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5210 | https://github.com/apache/dolphinscheduler/pull/5211 | 9d0c816cee102edbba2ac080f483c8a73a0b7b30 | 842c5400e605a8b8eb0d8fdc78701f10222063fd | "2021-04-04T14:23:48Z" | java | "2021-05-24T21:03:29Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/MasterRegistry.java | public class MasterRegistry {
private final Logger logger = LoggerFactory.getLogger(MasterRegistry.class);
/**
* zookeeper registry center
*/
@Autowired
private ZookeeperRegistryCenter zookeeperRegistryCenter;
/**
* master config
*/
@Autowired
private MasterConfig masterConfig;
/**
* heartbeat executor
*/
private ScheduledExecutorService heartBeatExecutor;
/**
* master start time
*/
private String startTime;
@PostConstruct
public void init() {
this.startTime = DateUtils.dateToString(new Date());
this.heartBeatExecutor = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("HeartBeatExecutor"));
}
/**
* registry |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,210 | [Improvement][Server] Must restart master if Zk reconnect | **Describe the question**
If zookeeper is disconnect, the master server stop work, and when the zookeeper reconnect, we need to restart the master server, the master server can't automatic recovery.
Because when the zookeeper reconnect, treeCache will produce NODE_REMOVED event, `ZKMasterClient` receive this event and add the server to deadNode. When the `HeartBeatTask` check the master is on dead path, it will stop the MasterServer.
**What are the current deficiencies and the benefits of improvement**
Improving the server availability.
**Which version of DolphinScheduler:**
-[1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5210 | https://github.com/apache/dolphinscheduler/pull/5211 | 9d0c816cee102edbba2ac080f483c8a73a0b7b30 | 842c5400e605a8b8eb0d8fdc78701f10222063fd | "2021-04-04T14:23:48Z" | java | "2021-05-24T21:03:29Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/MasterRegistry.java | */
public void registry() {
String address = NetUtils.getAddr(masterConfig.getListenPort());
String localNodePath = getMasterPath();
zookeeperRegistryCenter.getRegisterOperator().persistEphemeral(localNodePath, "");
zookeeperRegistryCenter.getRegisterOperator().getZkClient().getConnectionStateListenable().addListener(
(client, newState) -> {
if (newState == ConnectionState.LOST) {
logger.error("master : {} connection lost from zookeeper", address);
} else if (newState == ConnectionState.RECONNECTED) {
logger.info("master : {} reconnected to zookeeper", address);
zookeeperRegistryCenter.getRegisterOperator().persistEphemeral(localNodePath, "");
} else if (newState == ConnectionState.SUSPENDED) {
logger.warn("master : {} connection SUSPENDED ", address);
zookeeperRegistryCenter.getRegisterOperator().persistEphemeral(localNodePath, "");
}
});
int masterHeartbeatInterval = masterConfig.getMasterHeartbeatInterval();
HeartBeatTask heartBeatTask = new HeartBeatTask(startTime,
masterConfig.getMasterMaxCpuloadAvg(),
masterConfig.getMasterReservedMemory(),
Sets.newHashSet(getMasterPath()),
Constants.MASTER_TYPE,
zookeeperRegistryCenter);
this.heartBeatExecutor.scheduleAtFixedRate(heartBeatTask, masterHeartbeatInterval, masterHeartbeatInterval, TimeUnit.SECONDS);
logger.info("master node : {} registry to ZK successfully with heartBeatInterval : {}s", address, masterHeartbeatInterval);
}
/**
* remove registry info
*/ |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,210 | [Improvement][Server] Must restart master if Zk reconnect | **Describe the question**
If zookeeper is disconnect, the master server stop work, and when the zookeeper reconnect, we need to restart the master server, the master server can't automatic recovery.
Because when the zookeeper reconnect, treeCache will produce NODE_REMOVED event, `ZKMasterClient` receive this event and add the server to deadNode. When the `HeartBeatTask` check the master is on dead path, it will stop the MasterServer.
**What are the current deficiencies and the benefits of improvement**
Improving the server availability.
**Which version of DolphinScheduler:**
-[1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5210 | https://github.com/apache/dolphinscheduler/pull/5211 | 9d0c816cee102edbba2ac080f483c8a73a0b7b30 | 842c5400e605a8b8eb0d8fdc78701f10222063fd | "2021-04-04T14:23:48Z" | java | "2021-05-24T21:03:29Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/MasterRegistry.java | public void unRegistry() {
String address = getLocalAddress();
String localNodePath = getMasterPath();
zookeeperRegistryCenter.getRegisterOperator().remove(localNodePath);
logger.info("master node : {} unRegistry to ZK.", address);
heartBeatExecutor.shutdown();
logger.info("heartbeat executor shutdown");
}
/**
* get master path
*/
public String getMasterPath() {
String address = getLocalAddress();
return this.zookeeperRegistryCenter.getMasterPath() + "/" + address;
}
/**
* get local address
*/
private String getLocalAddress() {
return NetUtils.getAddr(masterConfig.getListenPort());
}
/**
* get zookeeper registry center
* @return ZookeeperRegistryCenter
*/
public ZookeeperRegistryCenter getZookeeperRegistryCenter() {
return zookeeperRegistryCenter;
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,210 | [Improvement][Server] Must restart master if Zk reconnect | **Describe the question**
If zookeeper is disconnect, the master server stop work, and when the zookeeper reconnect, we need to restart the master server, the master server can't automatic recovery.
Because when the zookeeper reconnect, treeCache will produce NODE_REMOVED event, `ZKMasterClient` receive this event and add the server to deadNode. When the `HeartBeatTask` check the master is on dead path, it will stop the MasterServer.
**What are the current deficiencies and the benefits of improvement**
Improving the server availability.
**Which version of DolphinScheduler:**
-[1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5210 | https://github.com/apache/dolphinscheduler/pull/5211 | 9d0c816cee102edbba2ac080f483c8a73a0b7b30 | 842c5400e605a8b8eb0d8fdc78701f10222063fd | "2021-04-04T14:23:48Z" | java | "2021-05-24T21:03:29Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/registry/WorkerRegistry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,210 | [Improvement][Server] Must restart master if Zk reconnect | **Describe the question**
If zookeeper is disconnect, the master server stop work, and when the zookeeper reconnect, we need to restart the master server, the master server can't automatic recovery.
Because when the zookeeper reconnect, treeCache will produce NODE_REMOVED event, `ZKMasterClient` receive this event and add the server to deadNode. When the `HeartBeatTask` check the master is on dead path, it will stop the MasterServer.
**What are the current deficiencies and the benefits of improvement**
Improving the server availability.
**Which version of DolphinScheduler:**
-[1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5210 | https://github.com/apache/dolphinscheduler/pull/5211 | 9d0c816cee102edbba2ac080f483c8a73a0b7b30 | 842c5400e605a8b8eb0d8fdc78701f10222063fd | "2021-04-04T14:23:48Z" | java | "2021-05-24T21:03:29Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/registry/WorkerRegistry.java | * limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.registry;
import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP;
import static org.apache.dolphinscheduler.common.Constants.SLASH;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.remote.utils.NamedThreadFactory;
import org.apache.dolphinscheduler.server.registry.HeartBeatTask;
import org.apache.dolphinscheduler.server.registry.ZookeeperRegistryCenter;
import org.apache.dolphinscheduler.server.worker.config.WorkerConfig;
import org.apache.curator.framework.state.ConnectionState;
import java.util.Date;
import java.util.Set;
import java.util.StringJoiner;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import javax.annotation.PostConstruct;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import com.google.common.collect.Sets;
/**
* worker registry
*/
@Service |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,210 | [Improvement][Server] Must restart master if Zk reconnect | **Describe the question**
If zookeeper is disconnect, the master server stop work, and when the zookeeper reconnect, we need to restart the master server, the master server can't automatic recovery.
Because when the zookeeper reconnect, treeCache will produce NODE_REMOVED event, `ZKMasterClient` receive this event and add the server to deadNode. When the `HeartBeatTask` check the master is on dead path, it will stop the MasterServer.
**What are the current deficiencies and the benefits of improvement**
Improving the server availability.
**Which version of DolphinScheduler:**
-[1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5210 | https://github.com/apache/dolphinscheduler/pull/5211 | 9d0c816cee102edbba2ac080f483c8a73a0b7b30 | 842c5400e605a8b8eb0d8fdc78701f10222063fd | "2021-04-04T14:23:48Z" | java | "2021-05-24T21:03:29Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/registry/WorkerRegistry.java | public class WorkerRegistry {
private final Logger logger = LoggerFactory.getLogger(WorkerRegistry.class);
/**
* zookeeper registry center
*/
@Autowired
private ZookeeperRegistryCenter zookeeperRegistryCenter;
/**
* worker config
*/
@Autowired
private WorkerConfig workerConfig;
/**
* heartbeat executor
*/
private ScheduledExecutorService heartBeatExecutor;
/**
* worker start time |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,210 | [Improvement][Server] Must restart master if Zk reconnect | **Describe the question**
If zookeeper is disconnect, the master server stop work, and when the zookeeper reconnect, we need to restart the master server, the master server can't automatic recovery.
Because when the zookeeper reconnect, treeCache will produce NODE_REMOVED event, `ZKMasterClient` receive this event and add the server to deadNode. When the `HeartBeatTask` check the master is on dead path, it will stop the MasterServer.
**What are the current deficiencies and the benefits of improvement**
Improving the server availability.
**Which version of DolphinScheduler:**
-[1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5210 | https://github.com/apache/dolphinscheduler/pull/5211 | 9d0c816cee102edbba2ac080f483c8a73a0b7b30 | 842c5400e605a8b8eb0d8fdc78701f10222063fd | "2021-04-04T14:23:48Z" | java | "2021-05-24T21:03:29Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/registry/WorkerRegistry.java | */
private String startTime;
private Set<String> workerGroups;
@PostConstruct
public void init() {
this.workerGroups = workerConfig.getWorkerGroups();
this.startTime = DateUtils.dateToString(new Date());
this.heartBeatExecutor = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("HeartBeatExecutor"));
}
/**
* get zookeeper registry center
* @return ZookeeperRegistryCenter
*/
public ZookeeperRegistryCenter getZookeeperRegistryCenter() {
return zookeeperRegistryCenter;
}
/**
* registry
*/
public void registry() {
String address = NetUtils.getAddr(workerConfig.getListenPort());
Set<String> workerZkPaths = getWorkerZkPaths();
int workerHeartbeatInterval = workerConfig.getWorkerHeartbeatInterval();
for (String workerZKPath : workerZkPaths) {
zookeeperRegistryCenter.getRegisterOperator().persistEphemeral(workerZKPath, "");
zookeeperRegistryCenter.getRegisterOperator().getZkClient().getConnectionStateListenable().addListener(
(client,newState) -> {
if (newState == ConnectionState.LOST) {
logger.error("worker : {} connection lost from zookeeper", address);
} else if (newState == ConnectionState.RECONNECTED) { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,210 | [Improvement][Server] Must restart master if Zk reconnect | **Describe the question**
If zookeeper is disconnect, the master server stop work, and when the zookeeper reconnect, we need to restart the master server, the master server can't automatic recovery.
Because when the zookeeper reconnect, treeCache will produce NODE_REMOVED event, `ZKMasterClient` receive this event and add the server to deadNode. When the `HeartBeatTask` check the master is on dead path, it will stop the MasterServer.
**What are the current deficiencies and the benefits of improvement**
Improving the server availability.
**Which version of DolphinScheduler:**
-[1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5210 | https://github.com/apache/dolphinscheduler/pull/5211 | 9d0c816cee102edbba2ac080f483c8a73a0b7b30 | 842c5400e605a8b8eb0d8fdc78701f10222063fd | "2021-04-04T14:23:48Z" | java | "2021-05-24T21:03:29Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/registry/WorkerRegistry.java | logger.info("worker : {} reconnected to zookeeper", address);
zookeeperRegistryCenter.getRegisterOperator().persistEphemeral(workerZKPath, "");
} else if (newState == ConnectionState.SUSPENDED) {
logger.warn("worker : {} connection SUSPENDED ", address);
zookeeperRegistryCenter.getRegisterOperator().persistEphemeral(workerZKPath, "");
}
});
logger.info("worker node : {} registry to ZK {} successfully", address, workerZKPath);
}
HeartBeatTask heartBeatTask = new HeartBeatTask(startTime,
workerConfig.getWorkerMaxCpuloadAvg(),
workerConfig.getWorkerReservedMemory(),
workerConfig.getHostWeight(),
workerZkPaths,
Constants.WORKER_TYPE,
zookeeperRegistryCenter);
this.heartBeatExecutor.scheduleAtFixedRate(heartBeatTask, workerHeartbeatInterval, workerHeartbeatInterval, TimeUnit.SECONDS);
logger.info("worker node : {} heartbeat interval {} s", address, workerHeartbeatInterval);
}
/**
* remove registry info
*/
public void unRegistry() {
String address = getLocalAddress();
Set<String> workerZkPaths = getWorkerZkPaths();
for (String workerZkPath : workerZkPaths) {
zookeeperRegistryCenter.getRegisterOperator().remove(workerZkPath);
logger.info("worker node : {} unRegistry from ZK {}.", address, workerZkPath);
}
this.heartBeatExecutor.shutdownNow(); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,210 | [Improvement][Server] Must restart master if Zk reconnect | **Describe the question**
If zookeeper is disconnect, the master server stop work, and when the zookeeper reconnect, we need to restart the master server, the master server can't automatic recovery.
Because when the zookeeper reconnect, treeCache will produce NODE_REMOVED event, `ZKMasterClient` receive this event and add the server to deadNode. When the `HeartBeatTask` check the master is on dead path, it will stop the MasterServer.
**What are the current deficiencies and the benefits of improvement**
Improving the server availability.
**Which version of DolphinScheduler:**
-[1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5210 | https://github.com/apache/dolphinscheduler/pull/5211 | 9d0c816cee102edbba2ac080f483c8a73a0b7b30 | 842c5400e605a8b8eb0d8fdc78701f10222063fd | "2021-04-04T14:23:48Z" | java | "2021-05-24T21:03:29Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/registry/WorkerRegistry.java | logger.info("heartbeat executor shutdown");
}
/**
* get worker path
*/
public Set<String> getWorkerZkPaths() {
Set<String> workerZkPaths = Sets.newHashSet();
String address = getLocalAddress();
String workerZkPathPrefix = this.zookeeperRegistryCenter.getWorkerPath();
for (String workGroup : this.workerGroups) {
StringJoiner workerZkPathJoiner = new StringJoiner(SLASH);
workerZkPathJoiner.add(workerZkPathPrefix);
if (StringUtils.isEmpty(workGroup)) {
workGroup = DEFAULT_WORKER_GROUP;
}
workerZkPathJoiner.add(workGroup.trim().toLowerCase());
workerZkPathJoiner.add(address);
workerZkPaths.add(workerZkPathJoiner.toString());
}
return workerZkPaths;
}
/**
* get local address
*/
private String getLocalAddress() {
return NetUtils.getAddr(workerConfig.getListenPort());
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,210 | [Improvement][Server] Must restart master if Zk reconnect | **Describe the question**
If zookeeper is disconnect, the master server stop work, and when the zookeeper reconnect, we need to restart the master server, the master server can't automatic recovery.
Because when the zookeeper reconnect, treeCache will produce NODE_REMOVED event, `ZKMasterClient` receive this event and add the server to deadNode. When the `HeartBeatTask` check the master is on dead path, it will stop the MasterServer.
**What are the current deficiencies and the benefits of improvement**
Improving the server availability.
**Which version of DolphinScheduler:**
-[1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5210 | https://github.com/apache/dolphinscheduler/pull/5211 | 9d0c816cee102edbba2ac080f483c8a73a0b7b30 | 842c5400e605a8b8eb0d8fdc78701f10222063fd | "2021-04-04T14:23:48Z" | java | "2021-05-24T21:03:29Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZookeeperOperator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,210 | [Improvement][Server] Must restart master if Zk reconnect | **Describe the question**
If zookeeper is disconnect, the master server stop work, and when the zookeeper reconnect, we need to restart the master server, the master server can't automatic recovery.
Because when the zookeeper reconnect, treeCache will produce NODE_REMOVED event, `ZKMasterClient` receive this event and add the server to deadNode. When the `HeartBeatTask` check the master is on dead path, it will stop the MasterServer.
**What are the current deficiencies and the benefits of improvement**
Improving the server availability.
**Which version of DolphinScheduler:**
-[1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5210 | https://github.com/apache/dolphinscheduler/pull/5211 | 9d0c816cee102edbba2ac080f483c8a73a0b7b30 | 842c5400e605a8b8eb0d8fdc78701f10222063fd | "2021-04-04T14:23:48Z" | java | "2021-05-24T21:03:29Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZookeeperOperator.java | * See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.service.zk;
import static org.apache.dolphinscheduler.common.utils.Preconditions.checkNotNull;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.service.exceptions.ServiceException;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.api.ACLProvider;
import org.apache.curator.framework.state.ConnectionState;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.curator.utils.CloseableUtils;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
/**
* zk base operator
*/
@Component |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,210 | [Improvement][Server] Must restart master if Zk reconnect | **Describe the question**
If zookeeper is disconnect, the master server stop work, and when the zookeeper reconnect, we need to restart the master server, the master server can't automatic recovery.
Because when the zookeeper reconnect, treeCache will produce NODE_REMOVED event, `ZKMasterClient` receive this event and add the server to deadNode. When the `HeartBeatTask` check the master is on dead path, it will stop the MasterServer.
**What are the current deficiencies and the benefits of improvement**
Improving the server availability.
**Which version of DolphinScheduler:**
-[1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5210 | https://github.com/apache/dolphinscheduler/pull/5211 | 9d0c816cee102edbba2ac080f483c8a73a0b7b30 | 842c5400e605a8b8eb0d8fdc78701f10222063fd | "2021-04-04T14:23:48Z" | java | "2021-05-24T21:03:29Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZookeeperOperator.java | public class ZookeeperOperator implements InitializingBean {
private final Logger logger = LoggerFactory.getLogger(ZookeeperOperator.class);
@Autowired
private ZookeeperConfig zookeeperConfig;
protected CuratorFramework zkClient;
@Override
public void afterPropertiesSet() {
this.zkClient = buildClient();
initStateListener();
treeCacheStart();
}
/**
* this method is for sub class,
*/
protected void registerListener() {
}
protected void treeCacheStart() {
}
public void initStateListener() {
checkNotNull(zkClient);
zkClient.getConnectionStateListenable().addListener((client, newState) -> {
if (newState == ConnectionState.LOST) {
logger.error("connection lost from zookeeper");
} else if (newState == ConnectionState.RECONNECTED) {
logger.info("reconnected to zookeeper"); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,210 | [Improvement][Server] Must restart master if Zk reconnect | **Describe the question**
If zookeeper is disconnect, the master server stop work, and when the zookeeper reconnect, we need to restart the master server, the master server can't automatic recovery.
Because when the zookeeper reconnect, treeCache will produce NODE_REMOVED event, `ZKMasterClient` receive this event and add the server to deadNode. When the `HeartBeatTask` check the master is on dead path, it will stop the MasterServer.
**What are the current deficiencies and the benefits of improvement**
Improving the server availability.
**Which version of DolphinScheduler:**
-[1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5210 | https://github.com/apache/dolphinscheduler/pull/5211 | 9d0c816cee102edbba2ac080f483c8a73a0b7b30 | 842c5400e605a8b8eb0d8fdc78701f10222063fd | "2021-04-04T14:23:48Z" | java | "2021-05-24T21:03:29Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZookeeperOperator.java | } else if (newState == ConnectionState.SUSPENDED) {
logger.warn("connection SUSPENDED to zookeeper");
}
});
}
private CuratorFramework buildClient() {
logger.info("zookeeper registry center init, server lists is: {}.", zookeeperConfig.getServerList());
CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder().ensembleProvider(new DefaultEnsembleProvider(checkNotNull(zookeeperConfig.getServerList(),
"zookeeper quorum can't be null")))
.retryPolicy(new ExponentialBackoffRetry(zookeeperConfig.getBaseSleepTimeMs(), zookeeperConfig.getMaxRetries(), zookeeperConfig.getMaxSleepMs()));
if (0 != zookeeperConfig.getSessionTimeoutMs()) {
builder.sessionTimeoutMs(zookeeperConfig.getSessionTimeoutMs());
}
if (0 != zookeeperConfig.getConnectionTimeoutMs()) {
builder.connectionTimeoutMs(zookeeperConfig.getConnectionTimeoutMs());
}
if (StringUtils.isNotBlank(zookeeperConfig.getDigest())) {
builder.authorization("digest", zookeeperConfig.getDigest().getBytes(StandardCharsets.UTF_8)).aclProvider(new ACLProvider() {
@Override
public List<ACL> getDefaultAcl() {
return ZooDefs.Ids.CREATOR_ALL_ACL;
}
@Override
public List<ACL> getAclForPath(final String path) {
return ZooDefs.Ids.CREATOR_ALL_ACL;
}
});
}
zkClient = builder.build(); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,210 | [Improvement][Server] Must restart master if Zk reconnect | **Describe the question**
If zookeeper is disconnect, the master server stop work, and when the zookeeper reconnect, we need to restart the master server, the master server can't automatic recovery.
Because when the zookeeper reconnect, treeCache will produce NODE_REMOVED event, `ZKMasterClient` receive this event and add the server to deadNode. When the `HeartBeatTask` check the master is on dead path, it will stop the MasterServer.
**What are the current deficiencies and the benefits of improvement**
Improving the server availability.
**Which version of DolphinScheduler:**
-[1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5210 | https://github.com/apache/dolphinscheduler/pull/5211 | 9d0c816cee102edbba2ac080f483c8a73a0b7b30 | 842c5400e605a8b8eb0d8fdc78701f10222063fd | "2021-04-04T14:23:48Z" | java | "2021-05-24T21:03:29Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZookeeperOperator.java | zkClient.start();
try {
zkClient.blockUntilConnected();
} catch (final Exception ex) {
throw new ServiceException(ex);
}
return zkClient;
}
public String get(final String key) {
try {
return new String(zkClient.getData().forPath(key), StandardCharsets.UTF_8);
} catch (Exception ex) {
logger.error("get key : {}", key, ex);
}
return null;
}
public List<String> getChildrenKeys(final String key) {
try {
return zkClient.getChildren().forPath(key);
} catch (NoNodeException ex) {
return new ArrayList<>();
} catch (InterruptedException ex) {
logger.error("getChildrenKeys key : {} InterruptedException", key);
throw new IllegalStateException(ex);
} catch (Exception ex) {
logger.error("getChildrenKeys key : {}", key, ex);
throw new ServiceException(ex);
}
}
public boolean hasChildren(final String key) { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,210 | [Improvement][Server] Must restart master if Zk reconnect | **Describe the question**
If zookeeper is disconnect, the master server stop work, and when the zookeeper reconnect, we need to restart the master server, the master server can't automatic recovery.
Because when the zookeeper reconnect, treeCache will produce NODE_REMOVED event, `ZKMasterClient` receive this event and add the server to deadNode. When the `HeartBeatTask` check the master is on dead path, it will stop the MasterServer.
**What are the current deficiencies and the benefits of improvement**
Improving the server availability.
**Which version of DolphinScheduler:**
-[1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5210 | https://github.com/apache/dolphinscheduler/pull/5211 | 9d0c816cee102edbba2ac080f483c8a73a0b7b30 | 842c5400e605a8b8eb0d8fdc78701f10222063fd | "2021-04-04T14:23:48Z" | java | "2021-05-24T21:03:29Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZookeeperOperator.java | Stat stat;
try {
stat = zkClient.checkExists().forPath(key);
return stat.getNumChildren() >= 1;
} catch (Exception ex) {
throw new IllegalStateException(ex);
}
}
public boolean isExisted(final String key) {
try {
return zkClient.checkExists().forPath(key) != null;
} catch (Exception ex) {
logger.error("isExisted key : {}", key, ex);
}
return false;
}
public void persist(final String key, final String value) {
try {
if (!isExisted(key)) {
zkClient.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).forPath(key, value.getBytes(StandardCharsets.UTF_8));
} else {
update(key, value);
}
} catch (Exception ex) {
logger.error("persist key : {} , value : {}", key, value, ex);
}
}
public void update(final String key, final String value) {
try {
zkClient.inTransaction().check().forPath(key).and().setData().forPath(key, value.getBytes(StandardCharsets.UTF_8)).and().commit(); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,210 | [Improvement][Server] Must restart master if Zk reconnect | **Describe the question**
If zookeeper is disconnect, the master server stop work, and when the zookeeper reconnect, we need to restart the master server, the master server can't automatic recovery.
Because when the zookeeper reconnect, treeCache will produce NODE_REMOVED event, `ZKMasterClient` receive this event and add the server to deadNode. When the `HeartBeatTask` check the master is on dead path, it will stop the MasterServer.
**What are the current deficiencies and the benefits of improvement**
Improving the server availability.
**Which version of DolphinScheduler:**
-[1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5210 | https://github.com/apache/dolphinscheduler/pull/5211 | 9d0c816cee102edbba2ac080f483c8a73a0b7b30 | 842c5400e605a8b8eb0d8fdc78701f10222063fd | "2021-04-04T14:23:48Z" | java | "2021-05-24T21:03:29Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZookeeperOperator.java | } catch (Exception ex) {
logger.error("update key : {} , value : {}", key, value, ex);
}
}
public void persistEphemeral(final String key, final String value) {
try {
if (isExisted(key)) {
try {
zkClient.delete().deletingChildrenIfNeeded().forPath(key);
} catch (NoNodeException ignore) {
}
}
zkClient.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL).forPath(key, value.getBytes(StandardCharsets.UTF_8));
} catch (final Exception ex) {
logger.error("persistEphemeral key : {} , value : {}", key, value, ex);
}
}
public void persistEphemeral(String key, String value, boolean overwrite) {
try {
if (overwrite) {
persistEphemeral(key, value);
} else {
if (!isExisted(key)) {
zkClient.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL).forPath(key, value.getBytes(StandardCharsets.UTF_8));
}
}
} catch (final Exception ex) {
logger.error("persistEphemeral key : {} , value : {}, overwrite : {}", key, value, overwrite, ex);
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,210 | [Improvement][Server] Must restart master if Zk reconnect | **Describe the question**
If zookeeper is disconnect, the master server stop work, and when the zookeeper reconnect, we need to restart the master server, the master server can't automatic recovery.
Because when the zookeeper reconnect, treeCache will produce NODE_REMOVED event, `ZKMasterClient` receive this event and add the server to deadNode. When the `HeartBeatTask` check the master is on dead path, it will stop the MasterServer.
**What are the current deficiencies and the benefits of improvement**
Improving the server availability.
**Which version of DolphinScheduler:**
-[1.3.6-preview]
| https://github.com/apache/dolphinscheduler/issues/5210 | https://github.com/apache/dolphinscheduler/pull/5211 | 9d0c816cee102edbba2ac080f483c8a73a0b7b30 | 842c5400e605a8b8eb0d8fdc78701f10222063fd | "2021-04-04T14:23:48Z" | java | "2021-05-24T21:03:29Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZookeeperOperator.java | }
public void persistEphemeralSequential(final String key, String value) {
try {
zkClient.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL_SEQUENTIAL).forPath(key, value.getBytes(StandardCharsets.UTF_8));
} catch (final Exception ex) {
logger.error("persistEphemeralSequential key : {}", key, ex);
}
}
public void remove(final String key) {
try {
if (isExisted(key)) {
zkClient.delete().deletingChildrenIfNeeded().forPath(key);
}
} catch (NoNodeException ignore) {
} catch (final Exception ex) {
logger.error("remove key : {}", key, ex);
}
}
public CuratorFramework getZkClient() {
return zkClient;
}
public ZookeeperConfig getZookeeperConfig() {
return zookeeperConfig;
}
public void close() {
CloseableUtils.closeQuietly(zkClient);
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,525 | server down will send repetitive message | The master and worker will listen to other master or worker services down from zk, and will send message notifications, but this message will send one for each monitored service, so repeat.
For example, I have 3 masters and 3 workers, and one of the masters is down. The remaining five services will insert an alert data
in the database.
I haven't a good plan to fix this issue ,shoule we consider the influence of normal version iterations? | https://github.com/apache/dolphinscheduler/issues/5525 | https://github.com/apache/dolphinscheduler/pull/5529 | 60af52fb2bbf5f0fcab072024f44b01d85a8d620 | f8ecb536b71d6f33b71c73930832b62890b84ea1 | "2021-05-19T06:41:24Z" | java | "2021-06-01T02:21:46Z" | dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/AlertDao.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao;
import org.apache.dolphinscheduler.common.enums.AlertEvent;
import org.apache.dolphinscheduler.common.enums.AlertStatus; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,525 | server down will send repetitive message | The master and worker will listen to other master or worker services down from zk, and will send message notifications, but this message will send one for each monitored service, so repeat.
For example, I have 3 masters and 3 workers, and one of the masters is down. The remaining five services will insert an alert data
in the database.
I haven't a good plan to fix this issue ,shoule we consider the influence of normal version iterations? | https://github.com/apache/dolphinscheduler/issues/5525 | https://github.com/apache/dolphinscheduler/pull/5529 | 60af52fb2bbf5f0fcab072024f44b01d85a8d620 | f8ecb536b71d6f33b71c73930832b62890b84ea1 | "2021-05-19T06:41:24Z" | java | "2021-06-01T02:21:46Z" | dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/AlertDao.java | import org.apache.dolphinscheduler.common.enums.AlertWarnLevel;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.datasource.ConnectionFactory;
import org.apache.dolphinscheduler.dao.entity.Alert;
import org.apache.dolphinscheduler.dao.entity.AlertPluginInstance;
import org.apache.dolphinscheduler.dao.entity.ProcessAlertContent;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ServerAlertContent;
import org.apache.dolphinscheduler.dao.mapper.AlertGroupMapper;
import org.apache.dolphinscheduler.dao.mapper.AlertMapper;
import org.apache.dolphinscheduler.dao.mapper.AlertPluginInstanceMapper;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
@Component
public class AlertDao extends AbstractBaseDao {
private final Logger logger = LoggerFactory.getLogger(getClass());
@Autowired
private AlertMapper alertMapper;
@Autowired
private AlertPluginInstanceMapper alertPluginInstanceMapper;
@Autowired |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,525 | server down will send repetitive message | The master and worker will listen to other master or worker services down from zk, and will send message notifications, but this message will send one for each monitored service, so repeat.
For example, I have 3 masters and 3 workers, and one of the masters is down. The remaining five services will insert an alert data
in the database.
I haven't a good plan to fix this issue ,shoule we consider the influence of normal version iterations? | https://github.com/apache/dolphinscheduler/issues/5525 | https://github.com/apache/dolphinscheduler/pull/5529 | 60af52fb2bbf5f0fcab072024f44b01d85a8d620 | f8ecb536b71d6f33b71c73930832b62890b84ea1 | "2021-05-19T06:41:24Z" | java | "2021-06-01T02:21:46Z" | dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/AlertDao.java | private AlertGroupMapper alertGroupMapper;
@Override
protected void init() {
alertMapper = ConnectionFactory.getInstance().getMapper(AlertMapper.class);
alertPluginInstanceMapper = ConnectionFactory.getInstance().getMapper(AlertPluginInstanceMapper.class);
alertGroupMapper = ConnectionFactory.getInstance().getMapper(AlertGroupMapper.class);
}
/**
* insert alert
*
* @param alert alert
* @return add alert result
*/
public int addAlert(Alert alert) {
return alertMapper.insert(alert);
}
/**
* update alert
*
* @param alertStatus alertStatus
* @param log log
* @param id id
* @return update alert result
*/
public int updateAlert(AlertStatus alertStatus, String log, int id) {
Alert alert = alertMapper.selectById(id);
alert.setAlertStatus(alertStatus);
alert.setUpdateTime(new Date());
alert.setLog(log);
return alertMapper.updateById(alert); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,525 | server down will send repetitive message | The master and worker will listen to other master or worker services down from zk, and will send message notifications, but this message will send one for each monitored service, so repeat.
For example, I have 3 masters and 3 workers, and one of the masters is down. The remaining five services will insert an alert data
in the database.
I haven't a good plan to fix this issue ,shoule we consider the influence of normal version iterations? | https://github.com/apache/dolphinscheduler/issues/5525 | https://github.com/apache/dolphinscheduler/pull/5529 | 60af52fb2bbf5f0fcab072024f44b01d85a8d620 | f8ecb536b71d6f33b71c73930832b62890b84ea1 | "2021-05-19T06:41:24Z" | java | "2021-06-01T02:21:46Z" | dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/AlertDao.java | }
/**
* MasterServer or WorkerServer stoped
*
* @param alertGroupId alertGroupId
* @param host host
* @param serverType serverType
*/
public void sendServerStopedAlert(int alertGroupId, String host, String serverType) {
Alert alert = new Alert();
List<ServerAlertContent> serverAlertContents = new ArrayList<>(1);
ServerAlertContent serverStopAlertContent = ServerAlertContent.newBuilder().
type(serverType).host(host).event(AlertEvent.SERVER_DOWN).warningLevel(AlertWarnLevel.SERIOUS).
build();
serverAlertContents.add(serverStopAlertContent);
String content = JSONUtils.toJsonString(serverAlertContents);
alert.setTitle("Fault tolerance warning");
saveTaskTimeoutAlert(alert, content, alertGroupId);
}
/**
* process time out alert
*
* @param processInstance processInstance
* @param processDefinition processDefinition
*/
public void sendProcessTimeoutAlert(ProcessInstance processInstance, ProcessDefinition processDefinition) {
int alertGroupId = processInstance.getWarningGroupId();
Alert alert = new Alert();
List<ProcessAlertContent> processAlertContentList = new ArrayList<>(1);
ProcessAlertContent processAlertContent = ProcessAlertContent.newBuilder() |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,525 | server down will send repetitive message | The master and worker will listen to other master or worker services down from zk, and will send message notifications, but this message will send one for each monitored service, so repeat.
For example, I have 3 masters and 3 workers, and one of the masters is down. The remaining five services will insert an alert data
in the database.
I haven't a good plan to fix this issue ,shoule we consider the influence of normal version iterations? | https://github.com/apache/dolphinscheduler/issues/5525 | https://github.com/apache/dolphinscheduler/pull/5529 | 60af52fb2bbf5f0fcab072024f44b01d85a8d620 | f8ecb536b71d6f33b71c73930832b62890b84ea1 | "2021-05-19T06:41:24Z" | java | "2021-06-01T02:21:46Z" | dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/AlertDao.java | .processId(processInstance.getId())
.processName(processInstance.getName())
.event(AlertEvent.TIME_OUT)
.warningLevel(AlertWarnLevel.MIDDLE)
.build();
processAlertContentList.add(processAlertContent);
String content = JSONUtils.toJsonString(processAlertContentList);
alert.setTitle("Process Timeout Warn");
saveTaskTimeoutAlert(alert, content, alertGroupId);
}
private void saveTaskTimeoutAlert(Alert alert, String content, int alertGroupId) {
alert.setAlertGroupId(alertGroupId);
alert.setContent(content);
alert.setCreateTime(new Date());
alert.setUpdateTime(new Date());
alertMapper.insert(alert);
}
/**
* task timeout warn
*
* @param alertGroupId alertGroupId
* @param processInstanceId processInstanceId
* @param processInstanceName processInstanceName
* @param taskId taskId
* @param taskName taskName
*/
public void sendTaskTimeoutAlert(int alertGroupId, int processInstanceId,
String processInstanceName, int taskId, String taskName) {
Alert alert = new Alert();
List<ProcessAlertContent> processAlertContentList = new ArrayList<>(1); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,525 | server down will send repetitive message | The master and worker will listen to other master or worker services down from zk, and will send message notifications, but this message will send one for each monitored service, so repeat.
For example, I have 3 masters and 3 workers, and one of the masters is down. The remaining five services will insert an alert data
in the database.
I haven't a good plan to fix this issue ,shoule we consider the influence of normal version iterations? | https://github.com/apache/dolphinscheduler/issues/5525 | https://github.com/apache/dolphinscheduler/pull/5529 | 60af52fb2bbf5f0fcab072024f44b01d85a8d620 | f8ecb536b71d6f33b71c73930832b62890b84ea1 | "2021-05-19T06:41:24Z" | java | "2021-06-01T02:21:46Z" | dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/AlertDao.java | ProcessAlertContent processAlertContent = ProcessAlertContent.newBuilder()
.processId(processInstanceId)
.processName(processInstanceName)
.taskId(taskId)
.taskName(taskName)
.event(AlertEvent.TIME_OUT)
.warningLevel(AlertWarnLevel.MIDDLE)
.build();
processAlertContentList.add(processAlertContent);
String content = JSONUtils.toJsonString(processAlertContentList);
alert.setTitle("Task Timeout Warn");
saveTaskTimeoutAlert(alert, content, alertGroupId);
}
/**
* list the alert information of waiting to be executed
*
* @return alert list
*/
public List<Alert> listWaitExecutionAlert() {
return alertMapper.listAlertByStatus(AlertStatus.WAIT_EXECUTION);
}
/**
* for test
*
* @return AlertMapper
*/
public AlertMapper getAlertMapper() {
return alertMapper;
}
/** |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,525 | server down will send repetitive message | The master and worker will listen to other master or worker services down from zk, and will send message notifications, but this message will send one for each monitored service, so repeat.
For example, I have 3 masters and 3 workers, and one of the masters is down. The remaining five services will insert an alert data
in the database.
I haven't a good plan to fix this issue ,shoule we consider the influence of normal version iterations? | https://github.com/apache/dolphinscheduler/issues/5525 | https://github.com/apache/dolphinscheduler/pull/5529 | 60af52fb2bbf5f0fcab072024f44b01d85a8d620 | f8ecb536b71d6f33b71c73930832b62890b84ea1 | "2021-05-19T06:41:24Z" | java | "2021-06-01T02:21:46Z" | dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/AlertDao.java | * list all alert plugin instance by alert group id
*
* @param alertGroupId alert group id
* @return AlertPluginInstance list
*/
public List<AlertPluginInstance> listInstanceByAlertGroupId(int alertGroupId) {
String alertInstanceIdsParam = alertGroupMapper.queryAlertGroupInstanceIdsById(alertGroupId);
if (StringUtils.isNotBlank(alertInstanceIdsParam)) {
String[] idsArray = alertInstanceIdsParam.split(",");
List<Integer> ids = Arrays.stream(idsArray)
.map(s -> Integer.parseInt(s.trim()))
.collect(Collectors.toList());
return alertPluginInstanceMapper.queryByIds(ids);
}
return null;
}
public AlertPluginInstanceMapper getAlertPluginInstanceMapper() {
return alertPluginInstanceMapper;
}
public void setAlertPluginInstanceMapper(AlertPluginInstanceMapper alertPluginInstanceMapper) {
this.alertPluginInstanceMapper = alertPluginInstanceMapper;
}
public AlertGroupMapper getAlertGroupMapper() {
return alertGroupMapper;
}
public void setAlertGroupMapper(AlertGroupMapper alertGroupMapper) {
this.alertGroupMapper = alertGroupMapper;
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,525 | server down will send repetitive message | The master and worker will listen to other master or worker services down from zk, and will send message notifications, but this message will send one for each monitored service, so repeat.
For example, I have 3 masters and 3 workers, and one of the masters is down. The remaining five services will insert an alert data
in the database.
I haven't a good plan to fix this issue ,shoule we consider the influence of normal version iterations? | https://github.com/apache/dolphinscheduler/issues/5525 | https://github.com/apache/dolphinscheduler/pull/5529 | 60af52fb2bbf5f0fcab072024f44b01d85a8d620 | f8ecb536b71d6f33b71c73930832b62890b84ea1 | "2021-05-19T06:41:24Z" | java | "2021-06-01T02:21:46Z" | dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/AlertMapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.mapper;
import org.apache.dolphinscheduler.common.enums.AlertStatus;
import org.apache.dolphinscheduler.dao.entity.Alert;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import org.apache.ibatis.annotations.Param;
import java.util.List;
/**
* alert mapper interface
*/
public interface AlertMapper extends BaseMapper<Alert> { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,525 | server down will send repetitive message | The master and worker will listen to other master or worker services down from zk, and will send message notifications, but this message will send one for each monitored service, so repeat.
For example, I have 3 masters and 3 workers, and one of the masters is down. The remaining five services will insert an alert data
in the database.
I haven't a good plan to fix this issue ,shoule we consider the influence of normal version iterations? | https://github.com/apache/dolphinscheduler/issues/5525 | https://github.com/apache/dolphinscheduler/pull/5529 | 60af52fb2bbf5f0fcab072024f44b01d85a8d620 | f8ecb536b71d6f33b71c73930832b62890b84ea1 | "2021-05-19T06:41:24Z" | java | "2021-06-01T02:21:46Z" | dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/AlertMapper.java | /**
* list alert by status
* @param alertStatus alertStatus
* @return alert list
*/
List<Alert> listAlertByStatus(@Param("alertStatus") AlertStatus alertStatus);
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,525 | server down will send repetitive message | The master and worker will listen to other master or worker services down from zk, and will send message notifications, but this message will send one for each monitored service, so repeat.
For example, I have 3 masters and 3 workers, and one of the masters is down. The remaining five services will insert an alert data
in the database.
I haven't a good plan to fix this issue ,shoule we consider the influence of normal version iterations? | https://github.com/apache/dolphinscheduler/issues/5525 | https://github.com/apache/dolphinscheduler/pull/5529 | 60af52fb2bbf5f0fcab072024f44b01d85a8d620 | f8ecb536b71d6f33b71c73930832b62890b84ea1 | "2021-05-19T06:41:24Z" | java | "2021-06-01T02:21:46Z" | dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/AlertDaoTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao;
import org.apache.dolphinscheduler.common.enums.AlertStatus;
import org.apache.dolphinscheduler.dao.entity.Alert;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
public class AlertDaoTest { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,525 | server down will send repetitive message | The master and worker will listen to other master or worker services down from zk, and will send message notifications, but this message will send one for each monitored service, so repeat.
For example, I have 3 masters and 3 workers, and one of the masters is down. The remaining five services will insert an alert data
in the database.
I haven't a good plan to fix this issue ,shoule we consider the influence of normal version iterations? | https://github.com/apache/dolphinscheduler/issues/5525 | https://github.com/apache/dolphinscheduler/pull/5529 | 60af52fb2bbf5f0fcab072024f44b01d85a8d620 | f8ecb536b71d6f33b71c73930832b62890b84ea1 | "2021-05-19T06:41:24Z" | java | "2021-06-01T02:21:46Z" | dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/AlertDaoTest.java | @Test
public void testAlertDao() {
AlertDao alertDao = DaoFactory.getDaoInstance(AlertDao.class);
Alert alert = new Alert();
alert.setTitle("Mysql Exception");
alert.setContent("[\"alarm time:2018-02-05\", \"service name:MYSQL_ALTER\", \"alarm name:MYSQL_ALTER_DUMP\", "
+ "\"get the alarm exception.!,interface error,exception information:timed out\", \"request address:http://blog.csdn.net/dreamInTheWorld/article/details/78539286\"]");
alert.setAlertGroupId(1);
alert.setAlertStatus(AlertStatus.WAIT_EXECUTION);
alertDao.addAlert(alert);
List<Alert> alerts = alertDao.listWaitExecutionAlert();
Assert.assertNotNull(alerts);
Assert.assertNotEquals(0, alerts.size());
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service.impl;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID;
import org.apache.dolphinscheduler.api.dto.ProcessMeta;
import org.apache.dolphinscheduler.api.dto.treeview.Instance; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | import org.apache.dolphinscheduler.api.dto.treeview.TreeViewDto;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.ProcessDefinitionService;
import org.apache.dolphinscheduler.api.service.ProcessInstanceService;
import org.apache.dolphinscheduler.api.service.ProjectService;
import org.apache.dolphinscheduler.api.service.SchedulerService;
import org.apache.dolphinscheduler.api.utils.CheckUtils;
import org.apache.dolphinscheduler.api.utils.FileUtils;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.exportprocess.ProcessAddTaskParam;
import org.apache.dolphinscheduler.api.utils.exportprocess.TaskNodeParamFactory;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils;
import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException;
import org.apache.dolphinscheduler.common.utils.StreamUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | import org.apache.dolphinscheduler.dao.entity.ProcessData;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskDefinition;
import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.UserMapper;
import org.apache.dolphinscheduler.service.permission.PermissionCheck;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.commons.collections.map.HashedMap;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
import javax.servlet.ServletOutputStream;
import javax.servlet.http.HttpServletResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.MediaType;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.collect.ImmutableMap;
/**
 * Process definition service implementation.
 *
 * <p>Provides create/query/update operations for process definitions
 * within a project, delegating persistence to the MyBatis mappers and
 * shared logic to {@code ProcessService}.
 */
@Service
public class ProcessDefinitionServiceImpl extends BaseServiceImpl implements ProcessDefinitionService {

    private static final Logger logger = LoggerFactory.getLogger(ProcessDefinitionServiceImpl.class);

    // key names used in request/response maps
    private static final String PROCESSDEFINITIONCODE = "processDefinitionCode";
    private static final String RELEASESTATE = "releaseState";
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | private static final String TASKS = "tasks";
@Autowired
private ProjectMapper projectMapper;
@Autowired
private ProjectService projectService;
@Autowired
private UserMapper userMapper;
@Autowired
private ProcessDefinitionLogMapper processDefinitionLogMapper;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
@Autowired
private ProcessInstanceService processInstanceService;
@Autowired
private TaskInstanceMapper taskInstanceMapper;
@Autowired
private ScheduleMapper scheduleMapper;
@Autowired
private ProcessService processService;
@Autowired
private ProcessTaskRelationMapper processTaskRelationMapper;
@Autowired
TaskDefinitionLogMapper taskDefinitionLogMapper;
@Autowired
private SchedulerService schedulerService;
/**
* create process definition
*
* @param loginUser login user
* @param projectName project name |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | * @param processDefinitionName process definition name
* @param processDefinitionJson process definition json
* @param desc description
* @param locations locations for nodes
* @param connects connects for nodes
* @return create result code
*/
@Override
@Transactional(rollbackFor = Exception.class)
public Map<String, Object> createProcessDefinition(User loginUser,
String projectName,
String processDefinitionName,
String processDefinitionJson,
String desc,
String locations,
String connects) {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
}
ProcessDefinition processDefinition = new ProcessDefinition();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
Map<String, Object> checkProcessJson = checkProcessNodeList(processData, processDefinitionJson);
if (checkProcessJson.get(Constants.STATUS) != Status.SUCCESS) {
return checkProcessJson;
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | try {
long processDefinitionCode = SnowFlakeUtils.getInstance().nextId();
processDefinition.setCode(processDefinitionCode);
processDefinition.setVersion(1);
} catch (SnowFlakeException e) {
putMsg(result, Status.CREATE_PROCESS_DEFINITION);
return result;
}
int saveResult = processService.saveProcessDefinition(loginUser, project, processDefinitionName, desc,
locations, connects, processData, processDefinition, true);
if (saveResult > 0) {
putMsg(result, Status.SUCCESS);
result.put(Constants.DATA_LIST, processDefinition.getId());
} else {
putMsg(result, Status.CREATE_PROCESS_DEFINITION);
}
return result;
}
/**
* query process definition list
*
* @param loginUser login user
* @param projectName project name
* @return definition list
*/
@Override
public Map<String, Object> queryProcessDefinitionList(User loginUser, String projectName) {
HashMap<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
}
List<ProcessDefinition> resourceList = processDefinitionMapper.queryAllDefinitionList(project.getCode());
resourceList.forEach(processDefinition -> {
ProcessData processData = processService.genProcessData(processDefinition);
processDefinition.setProcessDefinitionJson(JSONUtils.toJsonString(processData));
});
result.put(Constants.DATA_LIST, resourceList);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* query process definition list paging
*
* @param loginUser login user
* @param projectName project name
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @param userId user id
* @return process definition page
*/
@Override
public Map<String, Object> queryProcessDefinitionListPaging(User loginUser, String projectName, String searchVal, Integer pageNo, Integer pageSize, Integer userId) {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
}
Page<ProcessDefinition> page = new Page<>(pageNo, pageSize);
IPage<ProcessDefinition> processDefinitionIPage = processDefinitionMapper.queryDefineListPaging(
page, searchVal, userId, project.getCode(), isAdmin(loginUser));
List<ProcessDefinition> records = processDefinitionIPage.getRecords();
for (ProcessDefinition pd : records) {
ProcessDefinitionLog processDefinitionLog = processDefinitionLogMapper.queryMaxVersionDefinitionLog(pd.getCode());
int operator = processDefinitionLog.getOperator();
User user = userMapper.selectById(operator);
pd.setModifyBy(user.getUserName());
pd.setProjectId(project.getId());
}
processDefinitionIPage.setRecords(records);
PageInfo<ProcessDefinition> pageInfo = new PageInfo<>(pageNo, pageSize);
pageInfo.setTotalCount((int) processDefinitionIPage.getTotal());
pageInfo.setLists(records);
result.put(Constants.DATA_LIST, pageInfo);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* query datail of process definition
*
* @param loginUser login user
* @param projectName project name
* @param processId process definition id
* @return process definition detail |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | */
@Override
public Map<String, Object> queryProcessDefinitionById(User loginUser, String projectName, Integer processId) {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
}
ProcessDefinition processDefinition = processDefinitionMapper.selectById(processId);
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processId);
} else {
ProcessData processData = processService.genProcessData(processDefinition);
processDefinition.setProcessDefinitionJson(JSONUtils.toJsonString(processData));
result.put(Constants.DATA_LIST, processDefinition);
putMsg(result, Status.SUCCESS);
}
return result;
}
@Override
public Map<String, Object> queryProcessDefinitionByName(User loginUser, String projectName, String processDefinitionName) {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(project.getCode(), processDefinitionName);
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processDefinitionName);
} else {
ProcessData processData = processService.genProcessData(processDefinition);
processDefinition.setProcessDefinitionJson(JSONUtils.toJsonString(processData));
result.put(Constants.DATA_LIST, processDefinition);
putMsg(result, Status.SUCCESS);
}
return result;
}
/**
* update process definition
*
* @param loginUser login user
* @param projectName project name
* @param name process definition name
* @param id process definition id
* @param processDefinitionJson process definition json
* @param desc description
* @param locations locations for nodes
* @param connects connects for nodes
* @return update result code
*/
@Override
public Map<String, Object> updateProcessDefinition(User loginUser,
String projectName,
int id,
String name,
String processDefinitionJson, |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | String desc,
String locations,
String connects) {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
}
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
Map<String, Object> checkProcessJson = checkProcessNodeList(processData, processDefinitionJson);
if ((checkProcessJson.get(Constants.STATUS) != Status.SUCCESS)) {
return checkProcessJson;
}
ProcessDefinition processDefinition = processService.findProcessDefineById(id);
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, id);
return result;
}
if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
putMsg(result, Status.PROCESS_DEFINE_NOT_ALLOWED_EDIT, processDefinition.getName());
return result;
}
if (!name.equals(processDefinition.getName())) {
ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | if (definition != null) {
putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name);
return result;
}
}
ProcessData newProcessData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
int saveResult = processService.saveProcessDefinition(loginUser, project, name, desc,
locations, connects, newProcessData, processDefinition, true);
if (saveResult > 0) {
putMsg(result, Status.SUCCESS);
result.put(Constants.DATA_LIST, processDefinition);
} else {
putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR);
}
return result;
}
/**
* verify process definition name unique
*
* @param loginUser login user
* @param projectName project name
* @param name name
* @return true if process definition name not exists, otherwise false
*/
@Override
public Map<String, Object> verifyProcessDefinitionName(User loginUser, String projectName, String name) {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultEnum = (Status) checkResult.get(Constants.STATUS); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | if (resultEnum != Status.SUCCESS) {
return checkResult;
}
ProcessDefinition processDefinition = processDefinitionMapper.verifyByDefineName(project.getCode(), name);
if (processDefinition == null) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name);
}
return result;
}
/**
* delete process definition by id
*
* @param loginUser login user
* @param projectName project name
* @param processDefinitionId process definition id
* @return delete result code
*/
@Override
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> deleteProcessDefinitionById(User loginUser, String projectName, Integer processDefinitionId) {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultEnum = (Status) checkResult.get(Constants.STATUS);
if (resultEnum != Status.SUCCESS) {
return checkResult;
}
ProcessDefinition processDefinition = processDefinitionMapper.selectById(processDefinitionId); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processDefinitionId);
return result;
}
if (loginUser.getId() != processDefinition.getUserId() && loginUser.getUserType() != UserType.ADMIN_USER) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE, processDefinitionId);
return result;
}
List<ProcessInstance> processInstances = processInstanceService.queryByProcessDefineCodeAndStatus(processDefinition.getCode(), Constants.NOT_TERMINATED_STATES);
if (CollectionUtils.isNotEmpty(processInstances)) {
putMsg(result, Status.DELETE_PROCESS_DEFINITION_BY_ID_FAIL, processInstances.size());
return result;
}
List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionId(processDefinitionId);
if (!schedules.isEmpty() && schedules.size() > 1) {
logger.warn("scheduler num is {},Greater than 1", schedules.size());
putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_ID_ERROR);
return result;
} else if (schedules.size() == 1) {
Schedule schedule = schedules.get(0); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | if (schedule.getReleaseState() == ReleaseState.OFFLINE) {
scheduleMapper.deleteById(schedule.getId());
} else if (schedule.getReleaseState() == ReleaseState.ONLINE) {
putMsg(result, Status.SCHEDULE_CRON_STATE_ONLINE, schedule.getId());
return result;
}
}
int delete = processDefinitionMapper.deleteById(processDefinitionId);
processTaskRelationMapper.deleteByCode(project.getCode(), processDefinition.getCode());
if (delete > 0) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_ID_ERROR);
}
return result;
}
/**
* release process definition: online / offline
*
* @param loginUser login user
* @param projectName project name
* @param id process definition id
* @param releaseState release state
* @return release result code
*/
@Override
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> releaseProcessDefinition(User loginUser, String projectName, int id, ReleaseState releaseState) {
HashMap<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultEnum = (Status) checkResult.get(Constants.STATUS);
if (resultEnum != Status.SUCCESS) {
return checkResult;
}
if (null == releaseState) {
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE);
return result;
}
ProcessDefinition processDefinition = processDefinitionMapper.selectById(id);
switch (releaseState) {
case ONLINE:
String resourceIds = processDefinition.getResourceIds();
if (StringUtils.isNotBlank(resourceIds)) {
Integer[] resourceIdArray = Arrays.stream(resourceIds.split(Constants.COMMA)).map(Integer::parseInt).toArray(Integer[]::new);
PermissionCheck<Integer> permissionCheck = new PermissionCheck<>(AuthorizationType.RESOURCE_FILE_ID, processService, resourceIdArray, loginUser.getId(), logger);
try {
permissionCheck.checkPermission();
} catch (Exception e) {
logger.error(e.getMessage(), e);
putMsg(result, Status.RESOURCE_NOT_EXIST_OR_NO_PERMISSION, RELEASESTATE);
return result;
}
}
processDefinition.setReleaseState(releaseState);
processDefinitionMapper.updateById(processDefinition);
break;
case OFFLINE: |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | processDefinition.setReleaseState(releaseState);
processDefinitionMapper.updateById(processDefinition);
List<Schedule> scheduleList = scheduleMapper.selectAllByProcessDefineArray(
new int[]{processDefinition.getId()}
);
for (Schedule schedule : scheduleList) {
logger.info("set schedule offline, project id: {}, schedule id: {}, process definition id: {}", project.getId(), schedule.getId(), id);
schedule.setReleaseState(ReleaseState.OFFLINE);
scheduleMapper.updateById(schedule);
schedulerService.deleteSchedule(project.getId(), schedule.getId());
}
break;
default:
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE);
return result;
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* batch export process definition by ids
*/
@Override
public void batchExportProcessDefinitionByIds(User loginUser, String projectName, String processDefinitionIds, HttpServletResponse response) {
if (StringUtils.isEmpty(processDefinitionIds)) {
return;
}
Project project = projectMapper.queryByName(projectName); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return;
}
List<ProcessMeta> processDefinitionList =
getProcessDefinitionList(processDefinitionIds);
if (CollectionUtils.isNotEmpty(processDefinitionList)) {
downloadProcessDefinitionFile(response, processDefinitionList);
}
}
/**
 * Resolve a comma separated id string into the matching process definition
 * export metadata, preserving the order the ids were given in.
 *
 * @param processDefinitionIds comma separated process definition ids (caller guarantees non-empty)
 * @return export metadata for every id
 */
private List<ProcessMeta> getProcessDefinitionList(String processDefinitionIds) {
    List<ProcessMeta> processDefinitionList = new ArrayList<>();
    // use the shared separator constant, consistent with the resource-id split elsewhere in this class
    for (String strProcessDefinitionId : processDefinitionIds.split(Constants.COMMA)) {
        // a malformed id surfaces here as NumberFormatException
        int processDefinitionId = Integer.parseInt(strProcessDefinitionId);
        // NOTE(review): presumably every id references an existing definition;
        // a null from queryByDefineId would NPE inside exportProcessMetaData — confirm upstream validation
        ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineId(processDefinitionId);
        processDefinitionList.add(exportProcessMetaData(processDefinition));
    }
    return processDefinitionList;
}
/**
* download the process definition file
*/
private void downloadProcessDefinitionFile(HttpServletResponse response, List<ProcessMeta> processDefinitionList) { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | response.setContentType(MediaType.APPLICATION_JSON_UTF8_VALUE);
BufferedOutputStream buff = null;
ServletOutputStream out = null;
try {
out = response.getOutputStream();
buff = new BufferedOutputStream(out);
buff.write(JSONUtils.toJsonString(processDefinitionList).getBytes(StandardCharsets.UTF_8));
buff.flush();
buff.close();
} catch (IOException e) {
logger.warn("export process fail", e);
} finally {
if (null != buff) {
try {
buff.close();
} catch (Exception e) {
logger.warn("export process buffer not close", e);
}
}
if (null != out) {
try {
out.close();
} catch (Exception e) {
logger.warn("export process output stream not close", e);
}
}
}
}
/**
* get export process metadata string |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | *
* @param processDefinition process definition
* @return export process metadata string
*/
public ProcessMeta exportProcessMetaData(ProcessDefinition processDefinition) {
ProcessData processData = processService.genProcessData(processDefinition);
addExportTaskNodeSpecialParam(processData);
ProcessMeta exportProcessMeta = new ProcessMeta();
exportProcessMeta.setProjectName(processDefinition.getProjectName());
exportProcessMeta.setProcessDefinitionName(processDefinition.getName());
exportProcessMeta.setProcessDefinitionJson(JSONUtils.toJsonString(processService.genProcessData(processDefinition)));
exportProcessMeta.setProcessDefinitionDescription(processDefinition.getDescription());
exportProcessMeta.setProcessDefinitionLocations(processDefinition.getLocations());
exportProcessMeta.setProcessDefinitionConnects(processDefinition.getConnects());
List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionId(processDefinition.getId());
if (!schedules.isEmpty()) {
Schedule schedule = schedules.get(0);
exportProcessMeta.setScheduleWarningType(schedule.getWarningType().toString());
exportProcessMeta.setScheduleWarningGroupId(schedule.getWarningGroupId());
exportProcessMeta.setScheduleStartTime(DateUtils.dateToString(schedule.getStartTime()));
exportProcessMeta.setScheduleEndTime(DateUtils.dateToString(schedule.getEndTime()));
exportProcessMeta.setScheduleCrontab(schedule.getCrontab());
exportProcessMeta.setScheduleFailureStrategy(String.valueOf(schedule.getFailureStrategy()));
exportProcessMeta.setScheduleReleaseState(String.valueOf(ReleaseState.OFFLINE));
exportProcessMeta.setScheduleProcessInstancePriority(String.valueOf(schedule.getProcessInstancePriority()));
exportProcessMeta.setScheduleWorkerGroupName(schedule.getWorkerGroup());
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | return exportProcessMeta;
}
/**
 * Rewrite every task node of the given process data so that type specific
 * export parameters (e.g. for datasource or dependent tasks) are embedded
 * before the definition is exported. Nodes whose type has no registered
 * handler are passed through unchanged (round-tripped via json).
 *
 * @param processData process data whose task list is rewritten in place
 */
private void addExportTaskNodeSpecialParam(ProcessData processData) {
    List<TaskNode> rewrittenNodes = new ArrayList<>();
    for (TaskNode node : processData.getTasks()) {
        // work on the json form so a handler can splice extra fields in
        JsonNode nodeJson = JSONUtils.toJsonNode(node);
        ProcessAddTaskParam specialParamHandler = TaskNodeParamFactory.getByTaskType(node.getType());
        if (specialParamHandler != null) {
            specialParamHandler.addExportSpecialParam(nodeJson);
        }
        rewrittenNodes.add(JSONUtils.parseObject(nodeJson.toString(), TaskNode.class));
    }
    processData.setTasks(rewrittenNodes);
}
/**
* check task if has sub process
*
* @param taskType task type
* @return if task has sub process return true else false
*/
private boolean checkTaskHasSubProcess(String taskType) {
return taskType.equals(TaskType.SUB_PROCESS.getDesc()); |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | }
/**
 * Import process definitions from an uploaded metadata json file.
 *
 * The file is expected to hold a json list of {@link ProcessMeta} entries.
 * Entries are imported one by one; the first entry that fails stops the
 * import and its failure status is returned. Annotated
 * {@code @Transactional(rollbackFor = RuntimeException.class)}, so a runtime
 * exception during the import rolls the transaction back.
 *
 * @param loginUser login user
 * @param file uploaded process metadata json file
 * @param currentProjectName project to import the definitions into
 * @return result map carrying the import status
 */
@Override
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> importProcessDefinition(User loginUser, MultipartFile file, String currentProjectName) {
    Map<String, Object> importResult = new HashMap<>();
    String uploadedJson = FileUtils.file2String(file);
    List<ProcessMeta> metaEntries = JSONUtils.toList(uploadedJson, ProcessMeta.class);
    // an empty or unparsable upload produces an empty list -> report missing content
    if (CollectionUtils.isEmpty(metaEntries)) {
        putMsg(importResult, Status.DATA_IS_NULL, "fileContent");
        return importResult;
    }
    // abort on the first entry that fails so the caller sees that entry's status
    for (ProcessMeta metaEntry : metaEntries) {
        if (!checkAndImportProcessDefinition(loginUser, currentProjectName, importResult, metaEntry)) {
            return importResult;
        }
    }
    return importResult;
}
/**
* check and import process definition
*/ |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | private boolean checkAndImportProcessDefinition(User loginUser, String currentProjectName, Map<String, Object> result, ProcessMeta processMeta) {
if (!checkImportanceParams(processMeta, result)) {
return false;
}
String processDefinitionName = processMeta.getProcessDefinitionName();
Project targetProject = projectMapper.queryByName(currentProjectName);
if (null != targetProject) {
processDefinitionName = recursionProcessDefinitionName(targetProject.getCode(),
processDefinitionName, 1);
}
Map<String, Object> checkResult = verifyProcessDefinitionName(loginUser, currentProjectName, processDefinitionName);
Status status = (Status) checkResult.get(Constants.STATUS);
if (Status.SUCCESS.equals(status)) {
putMsg(result, Status.SUCCESS);
} else {
result.putAll(checkResult);
return false;
}
Map<String, Object> createProcessResult =
getCreateProcessResult(loginUser,
currentProjectName,
result,
processMeta,
processDefinitionName,
addImportTaskNodeParam(loginUser, processMeta.getProcessDefinitionJson(), targetProject));
if (createProcessResult == null) { |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | return false;
}
Integer processDefinitionId =
Objects.isNull(createProcessResult.get(Constants.DATA_LIST))
? null : Integer.parseInt(createProcessResult.get(Constants.DATA_LIST).toString());
return getImportProcessScheduleResult(loginUser,
currentProjectName,
result,
processMeta,
processDefinitionName,
processDefinitionId);
}
/**
* get create process result
*/
private Map<String, Object> getCreateProcessResult(User loginUser,
String currentProjectName,
Map<String, Object> result,
ProcessMeta processMeta,
String processDefinitionName,
String importProcessParam) {
Map<String, Object> createProcessResult = null;
try {
createProcessResult = createProcessDefinition(loginUser
, currentProjectName,
processDefinitionName + "_import_" + DateUtils.getCurrentTimeStamp(),
importProcessParam,
processMeta.getProcessDefinitionDescription(), |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | processMeta.getProcessDefinitionLocations(),
processMeta.getProcessDefinitionConnects());
putMsg(result, Status.SUCCESS);
} catch (Exception e) {
logger.error("import process meta json data: {}", e.getMessage(), e);
putMsg(result, Status.IMPORT_PROCESS_DEFINE_ERROR);
}
return createProcessResult;
}
/**
* get import process schedule result
*/
private boolean getImportProcessScheduleResult(User loginUser,
String currentProjectName,
Map<String, Object> result,
ProcessMeta processMeta,
String processDefinitionName,
Integer processDefinitionId) {
if (null != processMeta.getScheduleCrontab() && null != processDefinitionId) {
int scheduleInsert = importProcessSchedule(loginUser,
currentProjectName,
processMeta,
processDefinitionName,
processDefinitionId);
if (0 == scheduleInsert) {
putMsg(result, Status.IMPORT_PROCESS_DEFINE_ERROR);
return false;
}
}
return true; |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | }
/**
* check importance params
*/
private boolean checkImportanceParams(ProcessMeta processMeta, Map<String, Object> result) {
if (StringUtils.isEmpty(processMeta.getProjectName())) {
putMsg(result, Status.DATA_IS_NULL, "projectName");
return false;
}
if (StringUtils.isEmpty(processMeta.getProcessDefinitionName())) {
putMsg(result, Status.DATA_IS_NULL, "processDefinitionName");
return false;
}
if (StringUtils.isEmpty(processMeta.getProcessDefinitionJson())) {
putMsg(result, Status.DATA_IS_NULL, "processDefinitionJson");
return false;
}
return true;
}
    /**
     * import process add special task param
     *
     * <p>Rewrites the imported process definition json so it is valid in the target
     * project: each task's type-specific parameters are adjusted via the matching
     * {@link ProcessAddTaskParam} handler, and any referenced sub processes are
     * (recursively) created in the target project by {@code importSubProcess}.
     *
     * @param loginUser login user performing the import
     * @param processDefinitionJson process definition json as exported
     * @param targetProject project the definition is imported into
     * @return the adjusted process definition json
     */
    private String addImportTaskNodeParam(User loginUser, String processDefinitionJson, Project targetProject) {
        ObjectNode jsonObject = JSONUtils.parseObject(processDefinitionJson);
        ArrayNode jsonArray = (ArrayNode) jsonObject.get(TASKS);
        // let each task type rewrite its own special parameters (if a handler is registered for it)
        for (int i = 0; i < jsonArray.size(); i++) {
            JsonNode taskNode = jsonArray.path(i);
            String taskType = taskNode.path("type").asText();
            ProcessAddTaskParam addTaskParam = TaskNodeParamFactory.getByTaskType(taskType);
            if (null != addTaskParam) {
                addTaskParam.addImportSpecialParam(taskNode);
            }
        }
        // old sub-process code -> newly created sub-process code, filled by importSubProcess
        Map<Long, Long> subProcessCodeMap = new HashMap<>();
        // NOTE(review): each element is re-serialized and re-parsed just to read "type";
        // presumably elem is already a JsonNode — confirm StreamUtils.asStream's element type
        List<Object> subProcessList = StreamUtils.asStream(jsonArray.elements())
                .filter(elem -> checkTaskHasSubProcess(JSONUtils.parseObject(elem.toString()).path("type").asText()))
                .collect(Collectors.toList());
        if (CollectionUtils.isNotEmpty(subProcessList)) {
            // create the referenced sub processes in the target project first (mutates jsonArray in place)
            importSubProcess(loginUser, targetProject, jsonArray, subProcessCodeMap);
        }
        jsonObject.set(TASKS, jsonArray);
        return jsonObject.toString();
    }
/**
* import process schedule
*
* @param loginUser login user
* @param currentProjectName current project name
* @param processMeta process meta data
* @param processDefinitionName process definition name
* @param processDefinitionId process definition id
* @return insert schedule flag
*/ |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | public int importProcessSchedule(User loginUser, String currentProjectName, ProcessMeta processMeta,
String processDefinitionName, Integer processDefinitionId) {
Date now = new Date();
Schedule scheduleObj = new Schedule();
scheduleObj.setProjectName(currentProjectName);
scheduleObj.setProcessDefinitionId(processDefinitionId);
scheduleObj.setProcessDefinitionName(processDefinitionName);
scheduleObj.setCreateTime(now);
scheduleObj.setUpdateTime(now);
scheduleObj.setUserId(loginUser.getId());
scheduleObj.setUserName(loginUser.getUserName());
scheduleObj.setCrontab(processMeta.getScheduleCrontab());
if (null != processMeta.getScheduleStartTime()) {
scheduleObj.setStartTime(DateUtils.stringToDate(processMeta.getScheduleStartTime()));
}
if (null != processMeta.getScheduleEndTime()) {
scheduleObj.setEndTime(DateUtils.stringToDate(processMeta.getScheduleEndTime()));
}
if (null != processMeta.getScheduleWarningType()) {
scheduleObj.setWarningType(WarningType.valueOf(processMeta.getScheduleWarningType()));
}
if (null != processMeta.getScheduleWarningGroupId()) {
scheduleObj.setWarningGroupId(processMeta.getScheduleWarningGroupId());
}
if (null != processMeta.getScheduleFailureStrategy()) {
scheduleObj.setFailureStrategy(FailureStrategy.valueOf(processMeta.getScheduleFailureStrategy()));
}
if (null != processMeta.getScheduleReleaseState()) {
scheduleObj.setReleaseState(ReleaseState.valueOf(processMeta.getScheduleReleaseState()));
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 5,569 | [Bug][dolphinscheduler-api] verify proccess definition name fail | **For better global communication, Please describe it in English. If you feel the description in English is not clear, then you can append description in Chinese(just for Mandarin(CN)), thx! **
**Describe the bug**
when use spaces in proccess definition name, it will check fail!
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to create a process, edit a simple task;
2. Save process definition name as “new_process”;
3. Go to create a new process, edit a simple task;
4. Save process definition name as “ new_process”. This time, the name verification will also succeed;
5. Then two process definitions with the same name are generated.
**Which version of Dolphin Scheduler:**
-[1.3.5]
**Requirement or improvement**
- Name verification can be solved by adding and removing the first and last spaces.
| https://github.com/apache/dolphinscheduler/issues/5569 | https://github.com/apache/dolphinscheduler/pull/5574 | a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57 | cc9e5d5d34fcf2279b267cca7df37a9e80eeba07 | "2021-06-01T11:46:21Z" | java | "2021-06-02T04:01:01Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | if (null != processMeta.getScheduleProcessInstancePriority()) {
scheduleObj.setProcessInstancePriority(Priority.valueOf(processMeta.getScheduleProcessInstancePriority()));
}
if (null != processMeta.getScheduleWorkerGroupName()) {
scheduleObj.setWorkerGroup(processMeta.getScheduleWorkerGroupName());
}
return scheduleMapper.insert(scheduleObj);
}
    /**
     * check import process has sub process
     * recursion create sub process
     *
     * <p>For every sub-process task node in {@code jsonArray}, looks up the referenced
     * definition by code and, when it does not yet exist in the target project,
     * recursively imports its own sub processes and then creates it there. The task
     * node's params are updated in place to point at the newly created definition.
     *
     * @param loginUser login user performing the import
     * @param targetProject target project the sub processes are created in
     * @param jsonArray process task array, mutated in place
     * @param subProcessCodeMap maps original sub process code to the newly created code;
     *                          shared across the recursion and cleared after each substitution pass
     */
    private void importSubProcess(User loginUser, Project targetProject, ArrayNode jsonArray, Map<Long, Long> subProcessCodeMap) {
        for (int i = 0; i < jsonArray.size(); i++) {
            ObjectNode taskNode = (ObjectNode) jsonArray.path(i);
            String taskType = taskNode.path("type").asText();
            // only sub-process task nodes are of interest here
            if (!checkTaskHasSubProcess(taskType)) {
                continue;
            }
            ObjectNode subParams = (ObjectNode) taskNode.path("params");
            Long subProcessCode = subParams.path(PROCESSDEFINITIONCODE).asLong();
            ProcessDefinition subProcess = processDefinitionMapper.queryByCode(subProcessCode);
            // referenced definition no longer exists: nothing to import for this node
            if (null == subProcess) {
                continue;
            }
            String subProcessJson = JSONUtils.toJsonString(processService.genProcessData(subProcess));
            ProcessDefinition currentProjectSubProcess = processDefinitionMapper.queryByDefineName(targetProject.getCode(), subProcess.getName());
            if (null == currentProjectSubProcess) {
                ArrayNode subJsonArray = (ArrayNode) JSONUtils.parseObject(subProcessJson).get(TASKS);
                List<Object> subProcessList = StreamUtils.asStream(subJsonArray.elements())
                        .filter(item -> checkTaskHasSubProcess(JSONUtils.parseObject(item.toString()).path("type").asText()))
                        .collect(Collectors.toList());
                if (CollectionUtils.isNotEmpty(subProcessList)) {
                    // recurse first so nested sub processes exist before this one is created
                    importSubProcess(loginUser, targetProject, subJsonArray, subProcessCodeMap);
                    if (!subProcessCodeMap.isEmpty()) {
                        for (Map.Entry<Long, Long> entry : subProcessCodeMap.entrySet()) {
                            String oldSubProcessCode = "\"processDefinitionCode\":" + entry.getKey();
                            String newSubProcessCode = "\"processDefinitionCode\":" + entry.getValue();
                            // NOTE(review): replaceAll is regex-based and matches raw text here; a code
                            // that is a decimal prefix of another (e.g. 123 vs 1234) could be partially
                            // rewritten — confirm codes cannot prefix each other, or bound the match
                            subProcessJson = subProcessJson.replaceAll(oldSubProcessCode, newSubProcessCode);
                        }
                        subProcessCodeMap.clear();
                    }
                }
                try {
                    createProcessDefinition(loginUser
                            , targetProject.getName(),
                            subProcess.getName(),
                            subProcessJson,
                            subProcess.getDescription(),
                            subProcess.getLocations(),
                            subProcess.getConnects());
                    logger.info("create sub process, project: {}, process name: {}", targetProject.getName(), subProcess.getName());
                } catch (Exception e) {
                    // best effort: a failed sub-process creation is logged and the import continues
                    logger.error("import process meta json data: {}", e.getMessage(), e);
                }
                // NOTE(review): the earlier existence check passes targetProject.getCode() as the first
                // argument, but this lookup passes subProcess.getCode() — looks inconsistent; confirm
                // queryByDefineName's first parameter is the project code
                ProcessDefinition newSubProcessDefine = processDefinitionMapper.queryByDefineName(subProcess.getCode(), subProcess.getName());
                if (null != newSubProcessDefine) {
                    subProcessCodeMap.put(subProcessCode, newSubProcessDefine.getCode());
                    // NOTE(review): getId() is stored under the "processDefinitionCode" key while the
                    // map above records getCode() — presumably this should also be getCode(); verify
                    subParams.put(PROCESSDEFINITIONCODE, newSubProcessDefine.getId());
                    taskNode.set("params", subParams);
                }
            }
        }
    }
/**
* check the process definition node meets the specifications
*
* @param processData process data
* @param processDefinitionJson process definition json
* @return check result code
*/
@Override
public Map<String, Object> checkProcessNodeList(ProcessData processData, String processDefinitionJson) {
Map<String, Object> result = new HashMap<>();
try {
if (processData == null) {
logger.error("process data is null");
putMsg(result, Status.DATA_IS_NOT_VALID, processDefinitionJson);
return result;
} |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.