status
stringclasses
1 value
repo_name
stringclasses
31 values
repo_url
stringclasses
31 values
issue_id
int64
1
104k
title
stringlengths
4
233
body
stringlengths
0
186k
issue_url
stringlengths
38
56
pull_url
stringlengths
37
54
before_fix_sha
stringlengths
40
40
after_fix_sha
stringlengths
40
40
report_datetime
timestamp[us, tz=UTC]
language
stringclasses
5 values
commit_datetime
timestamp[us, tz=UTC]
updated_file
stringlengths
7
188
chunk_content
stringlengths
1
1.03M
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
public List<TaskInstance> findPreviousTaskListByWorkProcessId(Integer processInstanceId) { return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.NO); } /** * update work process instance map * * @param processInstanceMap processInstanceMap * @return update process instance result */ @Override public int updateWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) { return processInstanceMapMapper.updateById(processInstanceMap); } /** * create work process instance map * * @param processInstanceMap processInstanceMap * @return create process instance result */ @Override public int createWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) { int count = 0; if (processInstanceMap != null) { return processInstanceMapMapper.insert(processInstanceMap); } return count; } /** * find work process map by parent process id and parent task id. *
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
* @param parentWorkProcessId parentWorkProcessId * @param parentTaskId parentTaskId * @return process instance map */ @Override public ProcessInstanceMap findWorkProcessMapByParent(Integer parentWorkProcessId, Integer parentTaskId) { return processInstanceMapMapper.queryByParentId(parentWorkProcessId, parentTaskId); } /** * delete work process map by parent process id * * @param parentWorkProcessId parentWorkProcessId * @return delete process map result */ @Override public int deleteWorkProcessMapByParentId(int parentWorkProcessId) { return processInstanceMapMapper.deleteByParentProcessId(parentWorkProcessId); } /** * find sub process instance * * @param parentProcessId parentProcessId * @param parentTaskId parentTaskId * @return process instance */ @Override public ProcessInstance findSubProcessInstance(Integer parentProcessId, Integer parentTaskId) { ProcessInstance processInstance = null; ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryByParentId(parentProcessId, parentTaskId); if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
return processInstance; } processInstance = findProcessInstanceById(processInstanceMap.getProcessInstanceId()); return processInstance; } /** * find parent process instance * * @param subProcessId subProcessId * @return process instance */ @Override public ProcessInstance findParentProcessInstance(Integer subProcessId) { ProcessInstance processInstance = null; ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(subProcessId); if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) { return processInstance; } processInstance = findProcessInstanceById(processInstanceMap.getParentProcessInstanceId()); return processInstance; } /** * update process instance * * @param processInstance processInstance * @return update process instance result */ @Override public int updateProcessInstance(ProcessInstance processInstance) { return processInstanceMapper.updateById(processInstance);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
} /** * for show in page of taskInstance */ @Override public void changeOutParam(TaskInstance taskInstance) { if (StringUtils.isEmpty(taskInstance.getVarPool())) { return; } List<Property> properties = JSONUtils.toList(taskInstance.getVarPool(), Property.class); if (CollectionUtils.isEmpty(properties)) { return; } Map<String, Object> taskParams = JSONUtils.parseObject(taskInstance.getTaskParams(), new TypeReference<Map<String, Object>>() { }); Object localParams = taskParams.get(LOCAL_PARAMS); if (localParams == null) { return; } List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class); Map<String, String> outProperty = new HashMap<>(); for (Property info : properties) { if (info.getDirect() == Direct.OUT) { outProperty.put(info.getProp(), info.getValue()); } } for (Property info : allParam) { if (info.getDirect() == Direct.OUT) { String paramName = info.getProp();
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
info.setValue(outProperty.get(paramName)); } } taskParams.put(LOCAL_PARAMS, allParam); taskInstance.setTaskParams(JSONUtils.toJsonString(taskParams)); } /** * convert integer list to string list * * @param intList intList * @return string list */ @Override public List<String> convertIntListToString(List<Integer> intList) { if (intList == null) { return new ArrayList<>(); } List<String> result = new ArrayList<>(intList.size()); for (Integer intVar : intList) { result.add(String.valueOf(intVar)); } return result; } /** * query schedule by id * * @param id id * @return schedule */ @Override
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
public Schedule querySchedule(int id) { return scheduleMapper.selectById(id); } /** * query Schedule by processDefinitionCode * * @param processDefinitionCode processDefinitionCode * @see Schedule */ @Override public List<Schedule> queryReleaseSchedulerListByProcessDefinitionCode(long processDefinitionCode) { return scheduleMapper.queryReleaseSchedulerListByProcessDefinitionCode(processDefinitionCode); } /** * query Schedule by processDefinitionCode * * @param processDefinitionCodeList processDefinitionCodeList * @see Schedule */ @Override public Map<Long, String> queryWorkerGroupByProcessDefinitionCodes(List<Long> processDefinitionCodeList) { List<Schedule> processDefinitionScheduleList = scheduleMapper.querySchedulesByProcessDefinitionCodes(processDefinitionCodeList); return processDefinitionScheduleList.stream().collect(Collectors.toMap(Schedule::getProcessDefinitionCode, Schedule::getWorkerGroup)); } /** * query dependent process definition by process definition code * * @param processDefinitionCode processDefinitionCode * @see DependentProcessDefinition
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
*/ @Override public List<DependentProcessDefinition> queryDependentProcessDefinitionByProcessDefinitionCode(long processDefinitionCode) { return workFlowLineageMapper.queryDependentProcessDefinitionByProcessDefinitionCode(processDefinitionCode); } /** * query need failover process instance * * @param host host * @return process instance list */ @Override public List<ProcessInstance> queryNeedFailoverProcessInstances(String host) { return processInstanceMapper.queryByHostAndStatus(host, stateArray); } @Override public List<String> queryNeedFailoverProcessInstanceHost() { return processInstanceMapper.queryNeedFailoverProcessInstanceHost(stateArray); } /** * process need failover process instance * * @param processInstance processInstance */ @Override @Transactional(rollbackFor = RuntimeException.class) public void processNeedFailoverProcessInstances(ProcessInstance processInstance) { processInstance.setHost(Constants.NULL); processInstanceMapper.updateById(processInstance);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
ProcessDefinition processDefinition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); Command cmd = new Command(); cmd.setProcessDefinitionCode(processDefinition.getCode()); cmd.setProcessDefinitionVersion(processDefinition.getVersion()); cmd.setProcessInstanceId(processInstance.getId()); cmd.setCommandParam(String.format("{\"%s\":%d}", Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING, processInstance.getId())); cmd.setExecutorId(processInstance.getExecutorId()); cmd.setCommandType(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS); createCommand(cmd); } /** * query all need failover task instances by host * * @param host host * @return task instance list */ @Override public List<TaskInstance> queryNeedFailoverTaskInstances(String host) { return taskInstanceMapper.queryByHostAndStatus(host, stateArray); } /** * find data source by id * * @param id id * @return datasource */ @Override public DataSource findDataSourceById(int id) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
return dataSourceMapper.selectById(id); } /** * update process instance state by id * * @param processInstanceId processInstanceId * @param executionStatus executionStatus * @return update process result */ @Override public int updateProcessInstanceState(Integer processInstanceId, ExecutionStatus executionStatus) { ProcessInstance instance = processInstanceMapper.selectById(processInstanceId); instance.setState(executionStatus); return processInstanceMapper.updateById(instance); } /** * find process instance by the task id * * @param taskId taskId * @return process instance */ @Override public ProcessInstance findProcessInstanceByTaskId(int taskId) { TaskInstance taskInstance = taskInstanceMapper.selectById(taskId); if (taskInstance != null) { return processInstanceMapper.selectById(taskInstance.getProcessInstanceId()); } return null; } /**
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
* find udf function list by id list string * * @param ids ids * @return udf function list */ @Override public List<UdfFunc> queryUdfFunListByIds(Integer[] ids) { return udfFuncMapper.queryUdfByIdStr(ids, null); } /** * find tenant code by resource name * * @param resName resource name * @param resourceType resource type * @return tenant code */ @Override public String queryTenantCodeByResName(String resName, ResourceType resourceType) { String fullName = resName.startsWith("/") ? resName : String.format("/%s", resName); List<Resource> resourceList = resourceMapper.queryResource(fullName, resourceType.ordinal()); if (CollectionUtils.isEmpty(resourceList)) { return StringUtils.EMPTY; } int userId = resourceList.get(0).getUserId(); User user = userMapper.selectById(userId); if (Objects.isNull(user)) { return StringUtils.EMPTY; } Tenant tenant = tenantMapper.queryById(user.getTenantId());
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
if (Objects.isNull(tenant)) { return StringUtils.EMPTY; } return tenant.getTenantCode(); } /** * find schedule list by process define codes. * * @param codes codes * @return schedule list */ @Override public List<Schedule> selectAllByProcessDefineCode(long[] codes) { return scheduleMapper.selectAllByProcessDefineArray(codes); } /** * find last scheduler process instance in the date interval * * @param definitionCode definitionCode * @param dateInterval dateInterval * @return process instance */ @Override public ProcessInstance findLastSchedulerProcessInterval(Long definitionCode, DateInterval dateInterval) { return processInstanceMapper.queryLastSchedulerProcess(definitionCode, dateInterval.getStartTime(), dateInterval.getEndTime()); } /** * find last manual process instance interval
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
* * @param definitionCode process definition code * @param dateInterval dateInterval * @return process instance */ @Override public ProcessInstance findLastManualProcessInterval(Long definitionCode, DateInterval dateInterval) { return processInstanceMapper.queryLastManualProcess(definitionCode, dateInterval.getStartTime(), dateInterval.getEndTime()); } /** * find last running process instance * * @param definitionCode process definition code * @param startTime start time * @param endTime end time * @return process instance */ @Override public ProcessInstance findLastRunningProcess(Long definitionCode, Date startTime, Date endTime) { return processInstanceMapper.queryLastRunningProcess(definitionCode, startTime, endTime, stateArray); } /** * query user queue by process instance * * @param processInstance processInstance
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
* @return queue */ @Override public String queryUserQueueByProcessInstance(ProcessInstance processInstance) { String queue = ""; if (processInstance == null) { return queue; } User executor = userMapper.selectById(processInstance.getExecutorId()); if (executor != null) { queue = executor.getQueue(); } return queue; } /** * query project name and user name by processInstanceId. * * @param processInstanceId processInstanceId * @return projectName and userName */ @Override public ProjectUser queryProjectWithUserByProcessInstanceId(int processInstanceId) { return projectMapper.queryProjectWithUserByProcessInstanceId(processInstanceId); } /** * get task worker group * * @param taskInstance taskInstance * @return workerGroupId */
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
@Override public String getTaskWorkerGroup(TaskInstance taskInstance) { String workerGroup = taskInstance.getWorkerGroup(); if (StringUtils.isNotBlank(workerGroup)) { return workerGroup; } int processInstanceId = taskInstance.getProcessInstanceId(); ProcessInstance processInstance = findProcessInstanceById(processInstanceId); if (processInstance != null) { return processInstance.getWorkerGroup(); } logger.info("task : {} will use default worker group", taskInstance.getId()); return Constants.DEFAULT_WORKER_GROUP; } /** * get have perm project list * * @param userId userId * @return project list */ @Override public List<Project> getProjectListHavePerm(int userId) { List<Project> createProjects = projectMapper.queryProjectCreatedByUser(userId); List<Project> authedProjects = projectMapper.queryAuthedProjectListByUserId(userId); if (createProjects == null) { createProjects = new ArrayList<>(); } if (authedProjects != null) { createProjects.addAll(authedProjects); }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
return createProjects; } /** * list unauthorized udf function * * @param userId user id * @param needChecks data source id array * @return unauthorized udf function list */ @Override public <T> List<T> listUnauthorized(int userId, T[] needChecks, AuthorizationType authorizationType) { List<T> resultList = new ArrayList<>(); if (Objects.nonNull(needChecks) && needChecks.length > 0) { Set<T> originResSet = new HashSet<>(Arrays.asList(needChecks)); switch (authorizationType) { case RESOURCE_FILE_ID: case UDF_FILE: List<Resource> ownUdfResources = resourceMapper.listAuthorizedResourceById(userId, needChecks); addAuthorizedResources(ownUdfResources, userId); Set<Integer> authorizedResourceFiles = ownUdfResources.stream().map(Resource::getId).collect(toSet()); originResSet.removeAll(authorizedResourceFiles); break; case RESOURCE_FILE_NAME: List<Resource> ownResources = resourceMapper.listAuthorizedResource(userId, needChecks); addAuthorizedResources(ownResources, userId); Set<String> authorizedResources = ownResources.stream().map(Resource::getFullName).collect(toSet()); originResSet.removeAll(authorizedResources); break; case DATASOURCE: Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId, needChecks).stream().map(DataSource::getId).collect(toSet());
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
originResSet.removeAll(authorizedDatasources); break; case UDF: Set<Integer> authorizedUdfs = udfFuncMapper.listAuthorizedUdfFunc(userId, needChecks).stream().map(UdfFunc::getId).collect(toSet()); originResSet.removeAll(authorizedUdfs); break; default: break; } resultList.addAll(originResSet); } return resultList; } /** * get user by user id * * @param userId user id * @return User */ @Override public User getUserById(int userId) { return userMapper.selectById(userId); } /** * get resource by resource id * * @param resourceId resource id * @return Resource */ @Override
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
public Resource getResourceById(int resourceId) { return resourceMapper.selectById(resourceId); } /** * list resources by ids * * @param resIds resIds * @return resource list */ @Override public List<Resource> listResourceByIds(Integer[] resIds) { return resourceMapper.listResourceByIds(resIds); } /** * format task app id in task instance */ @Override public String formatTaskAppId(TaskInstance taskInstance) { ProcessInstance processInstance = findProcessInstanceById(taskInstance.getProcessInstanceId()); if (processInstance == null) { return ""; } ProcessDefinition definition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); if (definition == null) { return ""; } return String.format("%s_%s_%s", definition.getId(), processInstance.getId(), taskInstance.getId()); } /** * switch process definition version to process definition log version
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
*/ @Override public int switchVersion(ProcessDefinition processDefinition, ProcessDefinitionLog processDefinitionLog) { if (null == processDefinition || null == processDefinitionLog) { return Constants.DEFINITION_FAILURE; } processDefinitionLog.setId(processDefinition.getId()); processDefinitionLog.setReleaseState(ReleaseState.OFFLINE); processDefinitionLog.setFlag(Flag.YES); int result = processDefineMapper.updateById(processDefinitionLog); if (result > 0) { result = switchProcessTaskRelationVersion(processDefinitionLog); if (result <= 0) { return Constants.EXIT_CODE_FAILURE; } } return result; } @Override public int switchProcessTaskRelationVersion(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); if (!processTaskRelationList.isEmpty()) { processTaskRelationMapper.deleteByCode(processDefinition.getProjectCode(), processDefinition.getCode()); } List<ProcessTaskRelationLog> processTaskRelationLogList = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion()); int batchInsert = processTaskRelationMapper.batchInsert(processTaskRelationLogList); if (batchInsert == 0) { return Constants.EXIT_CODE_FAILURE; } else { int result = 0;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
for (ProcessTaskRelationLog taskRelationLog : processTaskRelationLogList) { int switchResult = switchTaskDefinitionVersion(taskRelationLog.getPostTaskCode(), taskRelationLog.getPostTaskVersion()); if (switchResult != Constants.EXIT_CODE_FAILURE) { result++; } } return result; } } @Override public int switchTaskDefinitionVersion(long taskCode, int taskVersion) { TaskDefinition taskDefinition = taskDefinitionMapper.queryByCode(taskCode); if (taskDefinition == null) { return Constants.EXIT_CODE_FAILURE; } if (taskDefinition.getVersion() == taskVersion) { return Constants.EXIT_CODE_SUCCESS; } TaskDefinitionLog taskDefinitionUpdate = taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(taskCode, taskVersion); if (taskDefinitionUpdate == null) { return Constants.EXIT_CODE_FAILURE; } taskDefinitionUpdate.setUpdateTime(new Date()); taskDefinitionUpdate.setId(taskDefinition.getId()); return taskDefinitionMapper.updateById(taskDefinitionUpdate); } /** * get resource ids * * @param taskDefinition taskDefinition
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
* @return resource ids */ @Override public String getResourceIds(TaskDefinition taskDefinition) { Set<Integer> resourceIds = null; AbstractParameters params = taskPluginManager.getParameters(ParametersNode.builder().taskType(taskDefinition.getTaskType()).taskParams(taskDefinition.getTaskParams()).build()); if (params != null && CollectionUtils.isNotEmpty(params.getResourceFilesList())) { resourceIds = params.getResourceFilesList(). stream() .filter(t -> t.getId() != 0) .map(ResourceInfo::getId) .collect(toSet()); } if (CollectionUtils.isEmpty(resourceIds)) { return StringUtils.EMPTY; } return StringUtils.join(resourceIds, ","); } @Override public int saveTaskDefine(User operator, long projectCode, List<TaskDefinitionLog> taskDefinitionLogs, Boolean syncDefine) { Date now = new Date(); List<TaskDefinitionLog> newTaskDefinitionLogs = new ArrayList<>(); List<TaskDefinitionLog> updateTaskDefinitionLogs = new ArrayList<>(); for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) { taskDefinitionLog.setProjectCode(projectCode); taskDefinitionLog.setUpdateTime(now); taskDefinitionLog.setOperateTime(now); taskDefinitionLog.setOperator(operator.getId()); taskDefinitionLog.setResourceIds(getResourceIds(taskDefinitionLog)); if (taskDefinitionLog.getCode() == 0) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
try { taskDefinitionLog.setCode(CodeGenerateUtils.getInstance().genCode()); } catch (CodeGenerateException e) { logger.error("Task code get error, ", e); return Constants.DEFINITION_FAILURE; } } if (taskDefinitionLog.getVersion() == 0) { taskDefinitionLog.setVersion(Constants.VERSION_FIRST); } TaskDefinitionLog definitionCodeAndVersion = taskDefinitionLogMapper .queryByDefinitionCodeAndVersion(taskDefinitionLog.getCode(), taskDefinitionLog.getVersion()); if (definitionCodeAndVersion == null) { taskDefinitionLog.setUserId(operator.getId()); taskDefinitionLog.setCreateTime(now); newTaskDefinitionLogs.add(taskDefinitionLog); continue; } if (taskDefinitionLog.equals(definitionCodeAndVersion)) { continue; } taskDefinitionLog.setUserId(definitionCodeAndVersion.getUserId()); Integer version = taskDefinitionLogMapper.queryMaxVersionForDefinition(taskDefinitionLog.getCode()); taskDefinitionLog.setVersion(version + 1); taskDefinitionLog.setCreateTime(definitionCodeAndVersion.getCreateTime()); updateTaskDefinitionLogs.add(taskDefinitionLog); } int insertResult = 0;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
int updateResult = 0; for (TaskDefinitionLog taskDefinitionToUpdate : updateTaskDefinitionLogs) { TaskDefinition task = taskDefinitionMapper.queryByCode(taskDefinitionToUpdate.getCode()); if (task == null) { newTaskDefinitionLogs.add(taskDefinitionToUpdate); } else { insertResult += taskDefinitionLogMapper.insert(taskDefinitionToUpdate); if (Boolean.TRUE.equals(syncDefine)) { taskDefinitionToUpdate.setId(task.getId()); updateResult += taskDefinitionMapper.updateById(taskDefinitionToUpdate); } else { updateResult++; } } } if (!newTaskDefinitionLogs.isEmpty()) { insertResult += taskDefinitionLogMapper.batchInsert(newTaskDefinitionLogs); if (Boolean.TRUE.equals(syncDefine)) { updateResult += taskDefinitionMapper.batchInsert(newTaskDefinitionLogs); } else { updateResult += newTaskDefinitionLogs.size(); } } return (insertResult & updateResult) > 0 ? 1 : Constants.EXIT_CODE_SUCCESS; } /** * save processDefinition (including create or update processDefinition) */ @Override public int saveProcessDefine(User operator, ProcessDefinition processDefinition, Boolean syncDefine, Boolean isFromProcessDefine) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
ProcessDefinitionLog processDefinitionLog = new ProcessDefinitionLog(processDefinition); Integer version = processDefineLogMapper.queryMaxVersionForDefinition(processDefinition.getCode()); int insertVersion = version == null || version == 0 ? Constants.VERSION_FIRST : version + 1; processDefinitionLog.setVersion(insertVersion); processDefinitionLog.setReleaseState(!isFromProcessDefine || processDefinitionLog.getReleaseState() == ReleaseState.ONLINE ? ReleaseState.ONLINE : ReleaseState.OFFLINE); processDefinitionLog.setOperator(operator.getId()); processDefinitionLog.setOperateTime(processDefinition.getUpdateTime()); int insertLog = processDefineLogMapper.insert(processDefinitionLog); int result = 1; if (Boolean.TRUE.equals(syncDefine)) { if (0 == processDefinition.getId()) { result = processDefineMapper.insert(processDefinitionLog); } else { processDefinitionLog.setId(processDefinition.getId()); result = processDefineMapper.updateById(processDefinitionLog); } } return (insertLog & result) > 0 ? insertVersion : 0; } /** * save task relations */ @Override public int saveTaskRelation(User operator, long projectCode, long processDefinitionCode, int processDefinitionVersion, List<ProcessTaskRelationLog> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs, Boolean syncDefine) { if (taskRelationList.isEmpty()) { return Constants.EXIT_CODE_SUCCESS; } Map<Long, TaskDefinitionLog> taskDefinitionLogMap = null;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
if (CollectionUtils.isNotEmpty(taskDefinitionLogs)) { taskDefinitionLogMap = taskDefinitionLogs.stream() .collect(Collectors.toMap(TaskDefinition::getCode, taskDefinitionLog -> taskDefinitionLog)); } Date now = new Date(); for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) { processTaskRelationLog.setProjectCode(projectCode); processTaskRelationLog.setProcessDefinitionCode(processDefinitionCode); processTaskRelationLog.setProcessDefinitionVersion(processDefinitionVersion); if (taskDefinitionLogMap != null) { TaskDefinitionLog preTaskDefinitionLog = taskDefinitionLogMap.get(processTaskRelationLog.getPreTaskCode()); if (preTaskDefinitionLog != null) { processTaskRelationLog.setPreTaskVersion(preTaskDefinitionLog.getVersion()); } TaskDefinitionLog postTaskDefinitionLog = taskDefinitionLogMap.get(processTaskRelationLog.getPostTaskCode()); if (postTaskDefinitionLog != null) { processTaskRelationLog.setPostTaskVersion(postTaskDefinitionLog.getVersion()); } } processTaskRelationLog.setCreateTime(now); processTaskRelationLog.setUpdateTime(now); processTaskRelationLog.setOperator(operator.getId()); processTaskRelationLog.setOperateTime(now); } int insert = taskRelationList.size(); if (Boolean.TRUE.equals(syncDefine)) { List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode); if (!processTaskRelationList.isEmpty()) { Set<Integer> processTaskRelationSet = processTaskRelationList.stream().map(ProcessTaskRelation::hashCode).collect(toSet()); Set<Integer> taskRelationSet = taskRelationList.stream().map(ProcessTaskRelationLog::hashCode).collect(toSet());
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
boolean result = CollectionUtils.isEqualCollection(processTaskRelationSet, taskRelationSet); if (result) { return Constants.EXIT_CODE_SUCCESS; } processTaskRelationMapper.deleteByCode(projectCode, processDefinitionCode); } insert = processTaskRelationMapper.batchInsert(taskRelationList); } int resultLog = processTaskRelationLogMapper.batchInsert(taskRelationList); return (insert & resultLog) > 0 ? Constants.EXIT_CODE_SUCCESS : Constants.EXIT_CODE_FAILURE; } @Override public boolean isTaskOnline(long taskCode) { List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByTaskCode(taskCode); if (!processTaskRelationList.isEmpty()) { Set<Long> processDefinitionCodes = processTaskRelationList .stream() .map(ProcessTaskRelation::getProcessDefinitionCode) .collect(toSet()); List<ProcessDefinition> processDefinitionList = processDefineMapper.queryByCodes(processDefinitionCodes); for (ProcessDefinition processDefinition : processDefinitionList) { if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { return true; } } } return false; } /**
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
* Generate the DAG Graph based on the process definition id * Use temporarily before refactoring taskNode * * @param processDefinition process definition * @return dag graph */ @Override public DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) { List<ProcessTaskRelation> taskRelations = this.findRelationByCode(processDefinition.getCode(), processDefinition.getVersion()); List<TaskNode> taskNodeList = transformTask(taskRelations, Lists.newArrayList()); ProcessDag processDag = DagHelper.getProcessDag(taskNodeList, new ArrayList<>(taskRelations)); return DagHelper.buildDagGraph(processDag); } /** * generate DagData */ @Override public DagData genDagData(ProcessDefinition processDefinition) { List<ProcessTaskRelation> taskRelations = this.findRelationByCode(processDefinition.getCode(), processDefinition.getVersion()); List<TaskDefinitionLog> taskDefinitionLogList = genTaskDefineList(taskRelations); List<TaskDefinition> taskDefinitions = taskDefinitionLogList.stream().map(t -> (TaskDefinition) t).collect(Collectors.toList()); return new DagData(processDefinition, taskRelations, taskDefinitions); } @Override public List<TaskDefinitionLog> genTaskDefineList(List<ProcessTaskRelation> processTaskRelations) { Set<TaskDefinition> taskDefinitionSet = new HashSet<>(); for (ProcessTaskRelation processTaskRelation : processTaskRelations) { if (processTaskRelation.getPreTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPreTaskCode(), processTaskRelation.getPreTaskVersion()));
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
} if (processTaskRelation.getPostTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion())); } } if (taskDefinitionSet.isEmpty()) { return Lists.newArrayList(); } return taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet); } @Override public List<TaskDefinitionLog> getTaskDefineLogListByRelation(List<ProcessTaskRelation> processTaskRelations) { List<TaskDefinitionLog> taskDefinitionLogs = new ArrayList<>(); Map<Long, Integer> taskCodeVersionMap = new HashMap<>(); for (ProcessTaskRelation processTaskRelation : processTaskRelations) { if (processTaskRelation.getPreTaskCode() > 0) { taskCodeVersionMap.put(processTaskRelation.getPreTaskCode(), processTaskRelation.getPreTaskVersion()); } if (processTaskRelation.getPostTaskCode() > 0) { taskCodeVersionMap.put(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion()); } } taskCodeVersionMap.forEach((code, version) -> { taskDefinitionLogs.add((TaskDefinitionLog) this.findTaskDefinition(code, version)); }); return taskDefinitionLogs; } /** * find task definition by code and version */
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
@Override public TaskDefinition findTaskDefinition(long taskCode, int taskDefinitionVersion) { return taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(taskCode, taskDefinitionVersion); } /** * find process task relation list by process */ @Override public List<ProcessTaskRelation> findRelationByCode(long processDefinitionCode, int processDefinitionVersion) { List<ProcessTaskRelationLog> processTaskRelationLogList = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinitionCode, processDefinitionVersion); return processTaskRelationLogList.stream().map(r -> (ProcessTaskRelation) r).collect(Collectors.toList()); } /** * add authorized resources * * @param ownResources own resources * @param userId userId */ private void addAuthorizedResources(List<Resource> ownResources, int userId) { List<Integer> relationResourceIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, 7); List<Resource> relationResources = CollectionUtils.isNotEmpty(relationResourceIds) ? resourceMapper.queryResourceListById(relationResourceIds) : new ArrayList<>(); ownResources.addAll(relationResources); } /** * Use temporarily before refactoring taskNode */ @Override public List<TaskNode> transformTask(List<ProcessTaskRelation> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) { Map<Long, List<Long>> taskCodeMap = new HashMap<>(); for (ProcessTaskRelation processTaskRelation : taskRelationList) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
taskCodeMap.compute(processTaskRelation.getPostTaskCode(), (k, v) -> { if (v == null) { v = new ArrayList<>(); } if (processTaskRelation.getPreTaskCode() != 0L) { v.add(processTaskRelation.getPreTaskCode()); } return v; }); } if (CollectionUtils.isEmpty(taskDefinitionLogs)) { taskDefinitionLogs = genTaskDefineList(taskRelationList); } Map<Long, TaskDefinitionLog> taskDefinitionLogMap = taskDefinitionLogs.stream() .collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog)); List<TaskNode> taskNodeList = new ArrayList<>(); for (Entry<Long, List<Long>> code : taskCodeMap.entrySet()) { TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(code.getKey()); if (taskDefinitionLog != null) { TaskNode taskNode = new TaskNode(); taskNode.setCode(taskDefinitionLog.getCode()); taskNode.setVersion(taskDefinitionLog.getVersion()); taskNode.setName(taskDefinitionLog.getName()); taskNode.setDesc(taskDefinitionLog.getDescription()); taskNode.setType(taskDefinitionLog.getTaskType().toUpperCase()); taskNode.setRunFlag(taskDefinitionLog.getFlag() == Flag.YES ? Constants.FLOWNODE_RUN_FLAG_NORMAL : Constants.FLOWNODE_RUN_FLAG_FORBIDDEN); taskNode.setMaxRetryTimes(taskDefinitionLog.getFailRetryTimes()); taskNode.setRetryInterval(taskDefinitionLog.getFailRetryInterval()); Map<String, Object> taskParamsMap = taskNode.taskParamsToJsonObj(taskDefinitionLog.getTaskParams()); taskNode.setConditionResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.CONDITION_RESULT)));
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
taskNode.setSwitchResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.SWITCH_RESULT))); taskNode.setDependence(JSONUtils.toJsonString(taskParamsMap.get(Constants.DEPENDENCE))); taskParamsMap.remove(Constants.CONDITION_RESULT); taskParamsMap.remove(Constants.DEPENDENCE); taskNode.setParams(JSONUtils.toJsonString(taskParamsMap)); taskNode.setTaskInstancePriority(taskDefinitionLog.getTaskPriority()); taskNode.setWorkerGroup(taskDefinitionLog.getWorkerGroup()); taskNode.setEnvironmentCode(taskDefinitionLog.getEnvironmentCode()); taskNode.setTimeout(JSONUtils.toJsonString(new TaskTimeoutParameter(taskDefinitionLog.getTimeoutFlag() == TimeoutFlag.OPEN, taskDefinitionLog.getTimeoutNotifyStrategy(), taskDefinitionLog.getTimeout()))); taskNode.setDelayTime(taskDefinitionLog.getDelayTime()); taskNode.setPreTasks(JSONUtils.toJsonString(code.getValue().stream().map(taskDefinitionLogMap::get).map(TaskDefinition::getCode).collect(Collectors.toList()))); taskNode.setTaskGroupId(taskDefinitionLog.getTaskGroupId()); taskNode.setTaskGroupPriority(taskDefinitionLog.getTaskGroupPriority()); taskNodeList.add(taskNode); } } return taskNodeList; } @Override public Map<ProcessInstance, TaskInstance> notifyProcessList(int processId) { HashMap<ProcessInstance, TaskInstance> processTaskMap = new HashMap<>(); ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(processId); if (processInstanceMap == null) { return processTaskMap; } ProcessInstance fatherProcess = this.findProcessInstanceById(processInstanceMap.getParentProcessInstanceId()); TaskInstance fatherTask = this.findTaskInstanceById(processInstanceMap.getParentTaskInstanceId());
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
if (fatherProcess != null) { processTaskMap.put(fatherProcess, fatherTask); } return processTaskMap; } @Override public DqExecuteResult getDqExecuteResultByTaskInstanceId(int taskInstanceId) { return dqExecuteResultMapper.getExecuteResultById(taskInstanceId); } @Override public int updateDqExecuteResultUserId(int taskInstanceId) { DqExecuteResult dqExecuteResult = dqExecuteResultMapper.selectOne(new QueryWrapper<DqExecuteResult>().eq(TASK_INSTANCE_ID, taskInstanceId)); if (dqExecuteResult == null) { return -1; } ProcessInstance processInstance = processInstanceMapper.selectById(dqExecuteResult.getProcessInstanceId()); if (processInstance == null) { return -1; } ProcessDefinition processDefinition = processDefineMapper.queryByCode(processInstance.getProcessDefinitionCode()); if (processDefinition == null) { return -1; } dqExecuteResult.setProcessDefinitionId(processDefinition.getId()); dqExecuteResult.setUserId(processDefinition.getUserId()); dqExecuteResult.setState(DqTaskState.DEFAULT.getCode()); return dqExecuteResultMapper.updateById(dqExecuteResult); } @Override
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
public int updateDqExecuteResultState(DqExecuteResult dqExecuteResult) { return dqExecuteResultMapper.updateById(dqExecuteResult); } @Override public int deleteDqExecuteResultByTaskInstanceId(int taskInstanceId) { return dqExecuteResultMapper.delete( new QueryWrapper<DqExecuteResult>() .eq(TASK_INSTANCE_ID, taskInstanceId)); } @Override public int deleteTaskStatisticsValueByTaskInstanceId(int taskInstanceId) { return dqTaskStatisticsValueMapper.delete( new QueryWrapper<DqTaskStatisticsValue>() .eq(TASK_INSTANCE_ID, taskInstanceId)); } @Override public DqRule getDqRule(int ruleId) { return dqRuleMapper.selectById(ruleId); } @Override public List<DqRuleInputEntry> getRuleInputEntry(int ruleId) { return DqRuleUtils.transformInputEntry(dqRuleInputEntryMapper.getRuleInputEntryList(ruleId)); } @Override public List<DqRuleExecuteSql> getDqExecuteSql(int ruleId) { return dqRuleExecuteSqlMapper.getExecuteSqlList(ruleId); } @Override public DqComparisonType getComparisonTypeById(int id) { return dqComparisonTypeMapper.selectById(id);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
} /** * the first time (when submit the task ) get the resource of the task group * * @param taskId task id */ @Override public boolean acquireTaskGroup(int taskId, String taskName, int groupId, int processId, int priority) { TaskGroup taskGroup = taskGroupMapper.selectById(groupId); if (taskGroup == null) { return true; } if (taskGroup.getStatus() == Flag.NO.getCode()) { return true; } TaskGroupQueue taskGroupQueue = this.taskGroupQueueMapper.queryByTaskId(taskId); if (taskGroupQueue == null) { taskGroupQueue = insertIntoTaskGroupQueue(taskId, taskName, groupId, processId, priority, TaskGroupQueueStatus.WAIT_QUEUE); } else { if (taskGroupQueue.getStatus() == TaskGroupQueueStatus.ACQUIRE_SUCCESS) { return true; } taskGroupQueue.setInQueue(Flag.NO.getCode()); taskGroupQueue.setStatus(TaskGroupQueueStatus.WAIT_QUEUE); this.taskGroupQueueMapper.updateById(taskGroupQueue); }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
List<TaskGroupQueue> highPriorityTasks = taskGroupQueueMapper.queryHighPriorityTasks(groupId, priority, TaskGroupQueueStatus.WAIT_QUEUE.getCode()); if (CollectionUtils.isNotEmpty(highPriorityTasks)) { this.taskGroupQueueMapper.updateInQueue(Flag.NO.getCode(), taskGroupQueue.getId()); return false; } int count = taskGroupMapper.selectAvailableCountById(groupId); if (count == 1 && robTaskGroupResouce(taskGroupQueue)) { return true; } this.taskGroupQueueMapper.updateInQueue(Flag.NO.getCode(), taskGroupQueue.getId()); return false; } /** * try to get the task group resource(when other task release the resource) */ @Override public boolean robTaskGroupResouce(TaskGroupQueue taskGroupQueue) { TaskGroup taskGroup = taskGroupMapper.selectById(taskGroupQueue.getGroupId()); int affectedCount = taskGroupMapper.updateTaskGroupResource(taskGroup.getId(), taskGroupQueue.getId(), TaskGroupQueueStatus.WAIT_QUEUE.getCode()); if (affectedCount > 0) { taskGroupQueue.setStatus(TaskGroupQueueStatus.ACQUIRE_SUCCESS); this.taskGroupQueueMapper.updateById(taskGroupQueue); this.taskGroupQueueMapper.updateInQueue(Flag.NO.getCode(), taskGroupQueue.getId()); return true; } return false; } @Override
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
public boolean acquireTaskGroupAgain(TaskGroupQueue taskGroupQueue) { return robTaskGroupResouce(taskGroupQueue); } @Override public void releaseAllTaskGroup(int processInstanceId) { List<TaskInstance> taskInstances = this.taskInstanceMapper.loadAllInfosNoRelease(processInstanceId, TaskGroupQueueStatus.ACQUIRE_SUCCESS.getCode()); for (TaskInstance info : taskInstances) { releaseTaskGroup(info); } } /** * release the TGQ resource when the corresponding task is finished. * * @return the result code and msg */ @Override public TaskInstance releaseTaskGroup(TaskInstance taskInstance) { TaskGroup taskGroup = taskGroupMapper.selectById(taskInstance.getTaskGroupId()); if (taskGroup == null) { return null; } TaskGroupQueue thisTaskGroupQueue = this.taskGroupQueueMapper.queryByTaskId(taskInstance.getId()); if (thisTaskGroupQueue.getStatus() == TaskGroupQueueStatus.RELEASE) { return null; } try { while (taskGroupMapper.releaseTaskGroupResource(taskGroup.getId(), taskGroup.getUseSize() , thisTaskGroupQueue.getId(), TaskGroupQueueStatus.ACQUIRE_SUCCESS.getCode()) != 1) { thisTaskGroupQueue = this.taskGroupQueueMapper.queryByTaskId(taskInstance.getId()); if (thisTaskGroupQueue.getStatus() == TaskGroupQueueStatus.RELEASE) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
return null; } taskGroup = taskGroupMapper.selectById(taskInstance.getTaskGroupId()); } } catch (Exception e) { logger.error("release the task group error", e); } logger.info("updateTask:{}", taskInstance.getName()); changeTaskGroupQueueStatus(taskInstance.getId(), TaskGroupQueueStatus.RELEASE); TaskGroupQueue taskGroupQueue = this.taskGroupQueueMapper.queryTheHighestPriorityTasks(taskGroup.getId(), TaskGroupQueueStatus.WAIT_QUEUE.getCode(), Flag.NO.getCode(), Flag.NO.getCode()); if (taskGroupQueue == null) { return null; } while (this.taskGroupQueueMapper.updateInQueueCAS(Flag.NO.getCode(), Flag.YES.getCode(), taskGroupQueue.getId()) != 1) { taskGroupQueue = this.taskGroupQueueMapper.queryTheHighestPriorityTasks(taskGroup.getId(), TaskGroupQueueStatus.WAIT_QUEUE.getCode(), Flag.NO.getCode(), Flag.NO.getCode()); if (taskGroupQueue == null) { return null; } } return this.taskInstanceMapper.selectById(taskGroupQueue.getTaskId()); } /** * release the TGQ resource when the corresponding task is finished. * * @param taskId task id * @return the result code and msg */ @Override
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
public void changeTaskGroupQueueStatus(int taskId, TaskGroupQueueStatus status) { TaskGroupQueue taskGroupQueue = taskGroupQueueMapper.queryByTaskId(taskId); taskGroupQueue.setStatus(status); taskGroupQueue.setUpdateTime(new Date(System.currentTimeMillis())); taskGroupQueueMapper.updateById(taskGroupQueue); } /** * insert into task group queue * * @param taskId task id * @param taskName task name * @param groupId group id * @param processId process id * @param priority priority * @return result and msg code */ @Override public TaskGroupQueue insertIntoTaskGroupQueue(Integer taskId, String taskName, Integer groupId, Integer processId, Integer priority, TaskGroupQueueStatus status) { TaskGroupQueue taskGroupQueue = new TaskGroupQueue(taskId, taskName, groupId, processId, priority, status); taskGroupQueue.setCreateTime(new Date()); taskGroupQueue.setUpdateTime(new Date()); taskGroupQueueMapper.insert(taskGroupQueue); return taskGroupQueue; } @Override public int updateTaskGroupQueueStatus(Integer taskId, int status) { return taskGroupQueueMapper.updateStatusByTaskId(taskId, status); }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,776
[Bug-RD] Serial wait state task instance cannot be executed
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Serial wait state task instance cannot be executed. Serial wait state task instance is missing instance ID parameter when querying ### What you expected to happen Serial wait state task instances can be executed normally. ### How to reproduce Create a task flow definition whose execution strategy is serial wait (nodes with wait time), execute multiple times. ### Anything else No Response ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9776
https://github.com/apache/dolphinscheduler/pull/9777
637028735dbf8a78efa63cf8358d2bead7bb5c6e
5c0be8a3d78bfb97fb4a599979ce3e77ee429784
2022-04-26T06:16:39Z
java
2022-04-26T07:47:01Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessServiceImpl.java
@Override public int updateTaskGroupQueue(TaskGroupQueue taskGroupQueue) { return taskGroupQueueMapper.updateById(taskGroupQueue); } @Override public TaskGroupQueue loadTaskGroupQueue(int taskId) { return this.taskGroupQueueMapper.queryByTaskId(taskId); } @Override public void sendStartTask2Master(ProcessInstance processInstance, int taskId, org.apache.dolphinscheduler.remote.command.CommandType taskType) { String host = processInstance.getHost(); String address = host.split(":")[0]; int port = Integer.parseInt(host.split(":")[1]); TaskEventChangeCommand taskEventChangeCommand = new TaskEventChangeCommand( processInstance.getId(), taskId ); stateEventCallbackService.sendResult(address, port, taskEventChangeCommand.convert2Command(taskType)); } @Override public ProcessInstance loadNextProcess4Serial(long code, int state) { return this.processInstanceMapper.loadNextProcess4Serial(code, state); } protected void deleteCommandWithCheck(int commandId) { int delete = this.commandMapper.deleteById(commandId); if (delete != 1) { throw new ServiceException("delete command fail, id:" + commandId); } } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,786
[Bug] [Alert] Http alert Get method not working
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened http aler GET method not working, forget to escape symbol characters, will throw URI cannot extract when running ### What you expected to happen http alert GET method working ### How to reproduce create a new http GET method alert ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9786
https://github.com/apache/dolphinscheduler/pull/9787
e3070d9fa3fdccacb3d42380862c85a8b1320f8b
6788c995781f74e3321ebc1ad33050fff5fe94a1
2022-04-26T09:20:25Z
java
2022-04-26T09:55:34Z
dolphinscheduler-alert/dolphinscheduler-alert-plugins/dolphinscheduler-alert-http/src/main/java/org/apache/dolphinscheduler/plugin/alert/http/HttpAlertChannelFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.plugin.alert.http; import org.apache.dolphinscheduler.alert.api.AlertChannel; import org.apache.dolphinscheduler.alert.api.AlertChannelFactory; import org.apache.dolphinscheduler.spi.params.base.PluginParams; import org.apache.dolphinscheduler.spi.params.base.Validate; import org.apache.dolphinscheduler.spi.params.input.InputParam; import java.util.Arrays; import java.util.List; import com.google.auto.service.AutoService; @AutoService(AlertChannelFactory.class)
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,786
[Bug] [Alert] Http alert Get method not working
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened http aler GET method not working, forget to escape symbol characters, will throw URI cannot extract when running ### What you expected to happen http alert GET method working ### How to reproduce create a new http GET method alert ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9786
https://github.com/apache/dolphinscheduler/pull/9787
e3070d9fa3fdccacb3d42380862c85a8b1320f8b
6788c995781f74e3321ebc1ad33050fff5fe94a1
2022-04-26T09:20:25Z
java
2022-04-26T09:55:34Z
dolphinscheduler-alert/dolphinscheduler-alert-plugins/dolphinscheduler-alert-http/src/main/java/org/apache/dolphinscheduler/plugin/alert/http/HttpAlertChannelFactory.java
public final class HttpAlertChannelFactory implements AlertChannelFactory { @Override public String name() { return "Http"; } @Override public List<PluginParams> params() { InputParam url = InputParam.newBuilder(HttpAlertConstants.NAME_URL, HttpAlertConstants.URL) .addValidate(Validate.newBuilder() .setRequired(true)
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,786
[Bug] [Alert] Http alert Get method not working
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened http aler GET method not working, forget to escape symbol characters, will throw URI cannot extract when running ### What you expected to happen http alert GET method working ### How to reproduce create a new http GET method alert ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9786
https://github.com/apache/dolphinscheduler/pull/9787
e3070d9fa3fdccacb3d42380862c85a8b1320f8b
6788c995781f74e3321ebc1ad33050fff5fe94a1
2022-04-26T09:20:25Z
java
2022-04-26T09:55:34Z
dolphinscheduler-alert/dolphinscheduler-alert-plugins/dolphinscheduler-alert-http/src/main/java/org/apache/dolphinscheduler/plugin/alert/http/HttpAlertChannelFactory.java
.build()) .build(); InputParam headerParams = InputParam.newBuilder(HttpAlertConstants.NAME_HEADER_PARAMS, HttpAlertConstants.HEADER_PARAMS) .addValidate(Validate.newBuilder() .setRequired(true) .build()) .build(); InputParam bodyParams = InputParam.newBuilder(HttpAlertConstants.NAME_BODY_PARAMS, HttpAlertConstants.BODY_PARAMS) .addValidate(Validate.newBuilder() .setRequired(true) .build()) .build(); InputParam contentField = InputParam.newBuilder(HttpAlertConstants.NAME_CONTENT_FIELD, HttpAlertConstants.CONTENT_FIELD) .addValidate(Validate.newBuilder() .setRequired(true) .build()) .build(); InputParam requestType = InputParam.newBuilder(HttpAlertConstants.NAME_REQUEST_TYPE, HttpAlertConstants.REQUEST_TYPE) .addValidate(Validate.newBuilder() .setRequired(true) .build()) .build(); return Arrays.asList(url, requestType, headerParams, bodyParams, contentField); } @Override public AlertChannel create() { return new HttpAlertChannel(); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,786
[Bug] [Alert] Http alert Get method not working
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened http aler GET method not working, forget to escape symbol characters, will throw URI cannot extract when running ### What you expected to happen http alert GET method working ### How to reproduce create a new http GET method alert ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9786
https://github.com/apache/dolphinscheduler/pull/9787
e3070d9fa3fdccacb3d42380862c85a8b1320f8b
6788c995781f74e3321ebc1ad33050fff5fe94a1
2022-04-26T09:20:25Z
java
2022-04-26T09:55:34Z
dolphinscheduler-alert/dolphinscheduler-alert-plugins/dolphinscheduler-alert-http/src/main/java/org/apache/dolphinscheduler/plugin/alert/http/HttpSender.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License.
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,786
[Bug] [Alert] Http alert Get method not working
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened http aler GET method not working, forget to escape symbol characters, will throw URI cannot extract when running ### What you expected to happen http alert GET method working ### How to reproduce create a new http GET method alert ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9786
https://github.com/apache/dolphinscheduler/pull/9787
e3070d9fa3fdccacb3d42380862c85a8b1320f8b
6788c995781f74e3321ebc1ad33050fff5fe94a1
2022-04-26T09:20:25Z
java
2022-04-26T09:55:34Z
dolphinscheduler-alert/dolphinscheduler-alert-plugins/dolphinscheduler-alert-http/src/main/java/org/apache/dolphinscheduler/plugin/alert/http/HttpSender.java
*/ package org.apache.dolphinscheduler.plugin.alert.http; import org.apache.dolphinscheduler.alert.api.AlertResult; import org.apache.dolphinscheduler.spi.utils.JSONUtils; import org.apache.dolphinscheduler.spi.utils.StringUtils; import org.apache.http.HttpEntity; import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.util.EntityUtils; import java.util.HashMap; import java.util.Map; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.fasterxml.jackson.databind.node.ObjectNode; public final class HttpSender { private static final Logger logger = LoggerFactory.getLogger(HttpSender.class); private static final String URL_SPLICE_CHAR = "?"; /** * request type post */ private static final String REQUEST_TYPE_POST = "POST"; /** * request type get */ private static final String REQUEST_TYPE_GET = "GET";
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,786
[Bug] [Alert] Http alert Get method not working
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened http aler GET method not working, forget to escape symbol characters, will throw URI cannot extract when running ### What you expected to happen http alert GET method working ### How to reproduce create a new http GET method alert ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9786
https://github.com/apache/dolphinscheduler/pull/9787
e3070d9fa3fdccacb3d42380862c85a8b1320f8b
6788c995781f74e3321ebc1ad33050fff5fe94a1
2022-04-26T09:20:25Z
java
2022-04-26T09:55:34Z
dolphinscheduler-alert/dolphinscheduler-alert-plugins/dolphinscheduler-alert-http/src/main/java/org/apache/dolphinscheduler/plugin/alert/http/HttpSender.java
private static final String DEFAULT_CHARSET = "utf-8"; private final String headerParams; private final String bodyParams; private final String contentField; private final String requestType; private String url; private HttpRequestBase httpRequest; public HttpSender(Map<String, String> paramsMap) { url = paramsMap.get(HttpAlertConstants.NAME_URL); headerParams = paramsMap.get(HttpAlertConstants.NAME_HEADER_PARAMS); bodyParams = paramsMap.get(HttpAlertConstants.NAME_BODY_PARAMS); contentField = paramsMap.get(HttpAlertConstants.NAME_CONTENT_FIELD); requestType = paramsMap.get(HttpAlertConstants.NAME_REQUEST_TYPE); } public AlertResult send(String msg) { AlertResult alertResult = new AlertResult(); createHttpRequest(msg); if (httpRequest == null) { alertResult.setStatus("false"); alertResult.setMessage("Request types are not supported"); return alertResult; } try { CloseableHttpClient httpClient = HttpClientBuilder.create().build(); CloseableHttpResponse response = httpClient.execute(httpRequest); HttpEntity entity = response.getEntity(); String resp = EntityUtils.toString(entity, DEFAULT_CHARSET); alertResult.setStatus("true"); alertResult.setMessage(resp); } catch (Exception e) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,786
[Bug] [Alert] Http alert Get method not working
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened http aler GET method not working, forget to escape symbol characters, will throw URI cannot extract when running ### What you expected to happen http alert GET method working ### How to reproduce create a new http GET method alert ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9786
https://github.com/apache/dolphinscheduler/pull/9787
e3070d9fa3fdccacb3d42380862c85a8b1320f8b
6788c995781f74e3321ebc1ad33050fff5fe94a1
2022-04-26T09:20:25Z
java
2022-04-26T09:55:34Z
dolphinscheduler-alert/dolphinscheduler-alert-plugins/dolphinscheduler-alert-http/src/main/java/org/apache/dolphinscheduler/plugin/alert/http/HttpSender.java
logger.error("send http alert msg exception : {}", e.getMessage()); alertResult.setStatus("false"); alertResult.setMessage("send http request alert fail."); } return alertResult; } private void createHttpRequest(String msg) { if (REQUEST_TYPE_POST.equals(requestType)) { httpRequest = new HttpPost(url); setHeader(); setMsgInRequestBody(msg); } else if (REQUEST_TYPE_GET.equals(requestType)) { setMsgInUrl(msg); httpRequest = new HttpGet(url); setHeader(); } } /** * add msg param in url */ private void setMsgInUrl(String msg) { if (StringUtils.isNotBlank(contentField)) { String type = "&"; if (!url.contains(URL_SPLICE_CHAR)) { type = URL_SPLICE_CHAR; } url = String.format("%s%s%s=%s", url, type, contentField, msg);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,786
[Bug] [Alert] Http alert Get method not working
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened http aler GET method not working, forget to escape symbol characters, will throw URI cannot extract when running ### What you expected to happen http alert GET method working ### How to reproduce create a new http GET method alert ### Anything else _No response_ ### Version dev ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9786
https://github.com/apache/dolphinscheduler/pull/9787
e3070d9fa3fdccacb3d42380862c85a8b1320f8b
6788c995781f74e3321ebc1ad33050fff5fe94a1
2022-04-26T09:20:25Z
java
2022-04-26T09:55:34Z
dolphinscheduler-alert/dolphinscheduler-alert-plugins/dolphinscheduler-alert-http/src/main/java/org/apache/dolphinscheduler/plugin/alert/http/HttpSender.java
} } /** * set header params */ private void setHeader() { if (httpRequest == null) { return; } HashMap<String, Object> map = JSONUtils.parseObject(headerParams, HashMap.class); for (Map.Entry<String, Object> entry : map.entrySet()) { httpRequest.setHeader(entry.getKey(), String.valueOf(entry.getValue())); } } /** * set body params */ private void setMsgInRequestBody(String msg) { try { ObjectNode objectNode = JSONUtils.parseObject(bodyParams); objectNode.put(contentField, msg); StringEntity entity = new StringEntity(JSONUtils.toJsonString(objectNode), DEFAULT_CHARSET); ((HttpPost) httpRequest).setEntity(entity); } catch (Exception e) { logger.error("send http alert msg exception : {}", e.getMessage()); } } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http:www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service.impl;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.StringUtils; import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.exceptions.ServiceException; import org.apache.dolphinscheduler.api.service.UsersService; import org.apache.dolphinscheduler.api.utils.CheckUtils; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.UserType; import org.apache.dolphinscheduler.common.storage.StorageOperate; import org.apache.dolphinscheduler.common.utils.EncryptionUtils; import org.apache.dolphinscheduler.common.utils.PropertyUtils; import org.apache.dolphinscheduler.dao.entity.AlertGroup; import org.apache.dolphinscheduler.dao.entity.DatasourceUser; import org.apache.dolphinscheduler.dao.entity.K8sNamespaceUser; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.ProjectUser; import org.apache.dolphinscheduler.dao.entity.Resource; import org.apache.dolphinscheduler.dao.entity.ResourcesUser; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.UDFUser; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.AccessTokenMapper; import org.apache.dolphinscheduler.dao.mapper.AlertGroupMapper; import org.apache.dolphinscheduler.dao.mapper.DataSourceUserMapper;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
import org.apache.dolphinscheduler.dao.mapper.K8sNamespaceUserMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectUserMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UDFUserMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import java.io.IOException; import java.text.MessageFormat; import java.util.ArrayList; import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TimeZone; import java.util.stream.Collectors; /** * users service impl */ @Service
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
public class UsersServiceImpl extends BaseServiceImpl implements UsersService { private static final Logger logger = LoggerFactory.getLogger(UsersServiceImpl.class); @Autowired private AccessTokenMapper accessTokenMapper; @Autowired private UserMapper userMapper; @Autowired private TenantMapper tenantMapper; @Autowired private ProjectUserMapper projectUserMapper; @Autowired private ResourceUserMapper resourceUserMapper; @Autowired private ResourceMapper resourceMapper; @Autowired private DataSourceUserMapper datasourceUserMapper; @Autowired private UDFUserMapper udfUserMapper; @Autowired private AlertGroupMapper alertGroupMapper; @Autowired private ProcessDefinitionMapper processDefinitionMapper;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
@Autowired private ProjectMapper projectMapper; @Autowired(required = false) private StorageOperate storageOperate; @Autowired private K8sNamespaceUserMapper k8sNamespaceUserMapper; /** * create user, only system admin have permission * * @param loginUser login user * @param userName user name * @param userPassword user password * @param email email * @param tenantId tenant id * @param phone phone * @param queue queue * @return create result code * @throws Exception exception */ @Override @Transactional(rollbackFor = Exception.class) public Map<String, Object> createUser(User loginUser, String userName, String userPassword, String email, int tenantId, String phone, String queue, int state) throws Exception { Map<String, Object> result = new HashMap<>();
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
String msg = this.checkUserParams(userName, userPassword, email, phone); if (!StringUtils.isEmpty(msg)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, msg); return result; } if (!isAdmin(loginUser)) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } if (!checkTenantExists(tenantId)) { putMsg(result, Status.TENANT_NOT_EXIST); return result; } User user = createUser(userName, userPassword, email, tenantId, phone, queue, state); Tenant tenant = tenantMapper.queryById(tenantId); if (PropertyUtils.getResUploadStartupState()) { storageOperate.createTenantDirIfNotExists(tenant.getTenantCode()); } result.put(Constants.DATA_LIST, user); putMsg(result, Status.SUCCESS); return result; } @Override @Transactional(rollbackFor = RuntimeException.class) public User createUser(String userName, String userPassword, String email, int tenantId,
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
String phone, String queue, int state) { User user = new User(); Date now = new Date(); user.setUserName(userName); user.setUserPassword(EncryptionUtils.getMd5(userPassword)); user.setEmail(email); user.setTenantId(tenantId); user.setPhone(phone); user.setState(state); user.setUserType(UserType.GENERAL_USER); user.setCreateTime(now); user.setUpdateTime(now); if (StringUtils.isEmpty(queue)) { queue = ""; } user.setQueue(queue); userMapper.insert(user); return user; } /*** * create User for ldap login */ @Override @Transactional(rollbackFor = Exception.class) public User createUser(UserType userType, String userId, String email) { User user = new User();
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
Date now = new Date(); user.setUserName(userId); user.setEmail(email); user.setUserType(userType); user.setCreateTime(now); user.setUpdateTime(now); user.setQueue(""); userMapper.insert(user); return user; } /** * get user by user name * * @param userName user name * @return exist user or null */ @Override public User getUserByUserName(String userName) { return userMapper.queryByUserNameAccurately(userName); } /** * query user by id * * @param id id * @return user info */ @Override public User queryUser(int id) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
return userMapper.selectById(id); } @Override public List<User> queryUser(List<Integer> ids) { if (CollectionUtils.isEmpty(ids)) { return new ArrayList<>(); } return userMapper.selectByIds(ids); } /** * query user * * @param name name * @return user info */ @Override public User queryUser(String name) { return userMapper.queryByUserNameAccurately(name); } /** * query user * * @param name name * @param password password * @return user info */ @Override public User queryUser(String name, String password) { String md5 = EncryptionUtils.getMd5(password); return userMapper.queryUserByNamePassword(name, md5);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
} /** * get user id by user name * * @param name user name * @return if name empty 0, user not exists -1, user exist user id */ @Override public int getUserIdByName(String name) { int executorId = 0; if (StringUtils.isNotEmpty(name)) { User executor = queryUser(name); if (null != executor) { executorId = executor.getId(); } else { executorId = -1; } } return executorId; } /** * query user list * * @param loginUser login user * @param pageNo page number * @param searchVal search value * @param pageSize page size * @return user list page */
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
@Override public Result<Object> queryUserList(User loginUser, String searchVal, Integer pageNo, Integer pageSize) { Result<Object> result = new Result<>(); if (!isAdmin(loginUser)) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } Page<User> page = new Page<>(pageNo, pageSize); IPage<User> scheduleList = userMapper.queryUserPaging(page, searchVal); PageInfo<User> pageInfo = new PageInfo<>(pageNo, pageSize); pageInfo.setTotal((int) scheduleList.getTotal()); pageInfo.setTotalList(scheduleList.getRecords()); result.setData(pageInfo); putMsg(result, Status.SUCCESS); return result; } /** * updateProcessInstance user * * @param userId user id * @param userName user name * @param userPassword user password * @param email email * @param tenantId tenant id * @param phone phone * @param queue queue * @param state state * @param timeZone timeZone * @return update result code * @throws Exception exception
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
*/ @Override public Map<String, Object> updateUser(User loginUser, int userId, String userName, String userPassword, String email, int tenantId, String phone, String queue, int state, String timeZone) throws IOException { Map<String, Object> result = new HashMap<>(); result.put(Constants.STATUS, false); if (check(result, !canOperator(loginUser, userId), Status.USER_NO_OPERATION_PERM)) { return result; } User user = userMapper.selectById(userId); if (user == null) { putMsg(result, Status.USER_NOT_EXIST, userId); return result; } if (StringUtils.isNotEmpty(userName)) { if (!CheckUtils.checkUserName(userName)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, userName); return result; } User tempUser = userMapper.queryByUserNameAccurately(userName); if (tempUser != null && tempUser.getId() != userId) { putMsg(result, Status.USER_NAME_EXIST); return result;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
} user.setUserName(userName); } if (StringUtils.isNotEmpty(userPassword)) { if (!CheckUtils.checkPassword(userPassword)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, userPassword); return result; } user.setUserPassword(EncryptionUtils.getMd5(userPassword)); } if (StringUtils.isNotEmpty(email)) { if (!CheckUtils.checkEmail(email)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, email); return result; } user.setEmail(email); } if (StringUtils.isNotEmpty(phone) && !CheckUtils.checkPhone(phone)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, phone); return result; } if (state == 0 && user.getState() != state && loginUser.getId() == user.getId()) { putMsg(result, Status.NOT_ALLOW_TO_DISABLE_OWN_ACCOUNT); return result; } if (StringUtils.isNotEmpty(timeZone)) { if (!CheckUtils.checkTimeZone(timeZone)) { putMsg(result, Status.TIME_ZONE_ILLEGAL, timeZone); return result; }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
user.setTimeZone(timeZone); } user.setPhone(phone); user.setQueue(queue); user.setState(state); Date now = new Date(); user.setUpdateTime(now); user.setTenantId(tenantId); userMapper.updateById(user); putMsg(result, Status.SUCCESS); return result; } /** * delete user * * @param loginUser login user * @param id user id * @return delete result code * @throws Exception exception when operate hdfs */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> deleteUserById(User loginUser, int id) throws IOException { Map<String, Object> result = new HashMap<>(); if (!isAdmin(loginUser)) { putMsg(result, Status.USER_NO_OPERATION_PERM, id); return result;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
} User tempUser = userMapper.selectById(id); if (tempUser == null) { putMsg(result, Status.USER_NOT_EXIST, id); return result; } List<Project> projects = projectMapper.queryProjectCreatedByUser(id); if (CollectionUtils.isNotEmpty(projects)) { String projectNames = projects.stream().map(Project::getName).collect(Collectors.joining(",")); putMsg(result, Status.TRANSFORM_PROJECT_OWNERSHIP, projectNames); return result; } userMapper.queryTenantCodeByUserId(id); accessTokenMapper.deleteAccessTokenByUserId(id); userMapper.deleteById(id); putMsg(result, Status.SUCCESS); return result; } /** * grant project * * @param loginUser login user * @param userId user id * @param projectIds project id array * @return grant result code */ @Override
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
@Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> grantProject(User loginUser, int userId, String projectIds) { Map<String, Object> result = new HashMap<>(); result.put(Constants.STATUS, false); User tempUser = userMapper.selectById(userId); if (tempUser == null) { putMsg(result, Status.USER_NOT_EXIST, userId); return result; } projectUserMapper.deleteProjectRelation(0, userId); if (check(result, StringUtils.isEmpty(projectIds), Status.SUCCESS)) { return result; } String[] projectIdArr = projectIds.split(","); for (String projectId : projectIdArr) { Date now = new Date(); ProjectUser projectUser = new ProjectUser(); projectUser.setUserId(userId); projectUser.setProjectId(Integer.parseInt(projectId)); projectUser.setPerm(Constants.AUTHORIZE_WRITABLE_PERM); projectUser.setCreateTime(now); projectUser.setUpdateTime(now); projectUserMapper.insert(projectUser); } putMsg(result, Status.SUCCESS); return result; } /** * grant project by code
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
* * @param loginUser login user * @param userId user id * @param projectCode project code * @return grant result code */ @Override public Map<String, Object> grantProjectByCode(final User loginUser, final int userId, final long projectCode) { Map<String, Object> result = new HashMap<>(); result.put(Constants.STATUS, false); User tempUser = this.userMapper.selectById(userId); if (tempUser == null) { this.putMsg(result, Status.USER_NOT_EXIST, userId); return result; } Project project = this.projectMapper.queryByCode(projectCode); if (project == null) { this.putMsg(result, Status.PROJECT_NOT_FOUND, projectCode); return result; } if (!this.canOperator(loginUser, project.getUserId())) { this.putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } final Date today = new Date(); ProjectUser projectUser = new ProjectUser();
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
projectUser.setUserId(userId); projectUser.setProjectId(project.getId()); projectUser.setPerm(7); projectUser.setCreateTime(today); projectUser.setUpdateTime(today); this.projectUserMapper.insert(projectUser); this.putMsg(result, Status.SUCCESS); return result; } /** * revoke the project permission for specified user. * * @param loginUser Login user * @param userId User id * @param projectCode Project Code * @return */ @Override public Map<String, Object> revokeProject(User loginUser, int userId, long projectCode) { Map<String, Object> result = new HashMap<>(); result.put(Constants.STATUS, false); if (this.check(result, !this.isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } User user = this.userMapper.selectById(userId); if (user == null) { this.putMsg(result, Status.USER_NOT_EXIST, userId); return result;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
} Project project = this.projectMapper.queryByCode(projectCode); if (project == null) { this.putMsg(result, Status.PROJECT_NOT_FOUND, projectCode); return result; } this.projectUserMapper.deleteProjectRelation(project.getId(), user.getId()); this.putMsg(result, Status.SUCCESS); return result; } /** * grant resource * * @param loginUser login user * @param userId user id * @param resourceIds resource id array * @return grant result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> grantResources(User loginUser, int userId, String resourceIds) { Map<String, Object> result = new HashMap<>(); User user = userMapper.selectById(userId); if (user == null) { putMsg(result, Status.USER_NOT_EXIST, userId); return result; } Set<Integer> needAuthorizeResIds = new HashSet<>();
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
if (StringUtils.isNotBlank(resourceIds)) { String[] resourceFullIdArr = resourceIds.split(","); for (String resourceFullId : resourceFullIdArr) { String[] resourceIdArr = resourceFullId.split("-"); for (int i = 0; i <= resourceIdArr.length - 1; i++) { int resourceIdValue = Integer.parseInt(resourceIdArr[i]); needAuthorizeResIds.add(resourceIdValue); } } } List<Integer> resIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, Constants.AUTHORIZE_WRITABLE_PERM); List<Resource> oldAuthorizedRes = CollectionUtils.isEmpty(resIds) ? new ArrayList<>() : resourceMapper.queryResourceListById(resIds); Set<Integer> oldAuthorizedResIds = oldAuthorizedRes.stream().map(Resource::getId).collect(Collectors.toSet()); oldAuthorizedResIds.removeAll(needAuthorizeResIds); if (CollectionUtils.isNotEmpty(oldAuthorizedResIds)) { List<Map<String, Object>> list = processDefinitionMapper.listResourcesByUser(userId); Map<Integer, Set<Long>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list); Set<Integer> resourceIdSet = resourceProcessMap.keySet(); resourceIdSet.retainAll(oldAuthorizedResIds); if (CollectionUtils.isNotEmpty(resourceIdSet)) { logger.error("can't be deleted,because it is used of process definition"); for (Integer resId : resourceIdSet) { logger.error("resource id:{} is used of process definition {}", resId, resourceProcessMap.get(resId)); } putMsg(result, Status.RESOURCE_IS_USED);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
return result; } } resourceUserMapper.deleteResourceUser(userId, 0); if (check(result, StringUtils.isEmpty(resourceIds), Status.SUCCESS)) { return result; } for (int resourceIdValue : needAuthorizeResIds) { Resource resource = resourceMapper.selectById(resourceIdValue); if (resource == null) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } Date now = new Date(); ResourcesUser resourcesUser = new ResourcesUser(); resourcesUser.setUserId(userId); resourcesUser.setResourcesId(resourceIdValue); if (resource.isDirectory()) { resourcesUser.setPerm(Constants.AUTHORIZE_READABLE_PERM); } else { resourcesUser.setPerm(Constants.AUTHORIZE_WRITABLE_PERM); } resourcesUser.setCreateTime(now); resourcesUser.setUpdateTime(now); resourceUserMapper.insert(resourcesUser); } putMsg(result, Status.SUCCESS); return result; } /**
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
* grant udf function * * @param loginUser login user * @param userId user id * @param udfIds udf id array * @return grant result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> grantUDFFunction(User loginUser, int userId, String udfIds) { Map<String, Object> result = new HashMap<>(); User user = userMapper.selectById(userId); if (user == null) { putMsg(result, Status.USER_NOT_EXIST, userId); return result; } udfUserMapper.deleteByUserId(userId); if (check(result, StringUtils.isEmpty(udfIds), Status.SUCCESS)) { return result; } String[] resourcesIdArr = udfIds.split(","); for (String udfId : resourcesIdArr) { Date now = new Date(); UDFUser udfUser = new UDFUser(); udfUser.setUserId(userId); udfUser.setUdfId(Integer.parseInt(udfId)); udfUser.setPerm(Constants.AUTHORIZE_WRITABLE_PERM); udfUser.setCreateTime(now); udfUser.setUpdateTime(now); udfUserMapper.insert(udfUser);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
} putMsg(result, Status.SUCCESS); return result; } /** * grant namespace * * @param loginUser login user * @param userId user id * @param namespaceIds namespace id array * @return grant result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> grantNamespaces(User loginUser, int userId, String namespaceIds) { Map<String, Object> result = new HashMap<>(); result.put(Constants.STATUS, false); if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } User tempUser = userMapper.selectById(userId); if (tempUser == null) { putMsg(result, Status.USER_NOT_EXIST, userId); return result; } k8sNamespaceUserMapper.deleteNamespaceRelation(0, userId); if (StringUtils.isNotEmpty(namespaceIds)) { String[] namespaceIdArr = namespaceIds.split(",");
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
for (String namespaceId : namespaceIdArr) { Date now = new Date(); K8sNamespaceUser namespaceUser = new K8sNamespaceUser(); namespaceUser.setUserId(userId); namespaceUser.setNamespaceId(Integer.parseInt(namespaceId)); namespaceUser.setPerm(7); namespaceUser.setCreateTime(now); namespaceUser.setUpdateTime(now); k8sNamespaceUserMapper.insert(namespaceUser); } } putMsg(result, Status.SUCCESS); return result; } /** * grant datasource * * @param loginUser login user * @param userId user id * @param datasourceIds data source id array * @return grant result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> grantDataSource(User loginUser, int userId, String datasourceIds) { Map<String, Object> result = new HashMap<>(); result.put(Constants.STATUS, false); User user = userMapper.selectById(userId); if (user == null) { putMsg(result, Status.USER_NOT_EXIST, userId);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
return result; } datasourceUserMapper.deleteByUserId(userId); if (check(result, StringUtils.isEmpty(datasourceIds), Status.SUCCESS)) { return result; } String[] datasourceIdArr = datasourceIds.split(","); for (String datasourceId : datasourceIdArr) { Date now = new Date(); DatasourceUser datasourceUser = new DatasourceUser(); datasourceUser.setUserId(userId); datasourceUser.setDatasourceId(Integer.parseInt(datasourceId)); datasourceUser.setPerm(Constants.AUTHORIZE_WRITABLE_PERM); datasourceUser.setCreateTime(now); datasourceUser.setUpdateTime(now); datasourceUserMapper.insert(datasourceUser); } putMsg(result, Status.SUCCESS); return result; } /** * query user info * * @param loginUser login user * @return user info */ @Override public Map<String, Object> getUserInfo(User loginUser) { Map<String, Object> result = new HashMap<>(); User user = null;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
if (loginUser.getUserType() == UserType.ADMIN_USER) { user = loginUser; } else { user = userMapper.queryDetailsById(loginUser.getId()); List<AlertGroup> alertGroups = alertGroupMapper.queryByUserId(loginUser.getId()); StringBuilder sb = new StringBuilder(); if (alertGroups != null && !alertGroups.isEmpty()) { for (int i = 0; i < alertGroups.size() - 1; i++) { sb.append(alertGroups.get(i).getGroupName()).append(","); } sb.append(alertGroups.get(alertGroups.size() - 1)); user.setAlertGroup(sb.toString()); } } if (StringUtils.isEmpty(user.getTimeZone())) { user.setTimeZone(TimeZone.getDefault().toZoneId().getId()); } result.put(Constants.DATA_LIST, user); putMsg(result, Status.SUCCESS); return result; } /** * query user list * * @param loginUser login user * @return user list */ @Override public Map<String, Object> queryAllGeneralUsers(User loginUser) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
Map<String, Object> result = new HashMap<>(); if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } List<User> userList = userMapper.queryAllGeneralUser(); result.put(Constants.DATA_LIST, userList); putMsg(result, Status.SUCCESS); return result; } /** * query user list * * @param loginUser login user * @return user list */ @Override public Map<String, Object> queryUserList(User loginUser) { Map<String, Object> result = new HashMap<>(); if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } List<User> userList = userMapper.selectList(null); result.put(Constants.DATA_LIST, userList); putMsg(result, Status.SUCCESS); return result; } /** * verify user name exists
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
* * @param userName user name * @return true if user name not exists, otherwise return false */ @Override public Result<Object> verifyUserName(String userName) { Result<Object> result = new Result<>(); User user = userMapper.queryByUserNameAccurately(userName); if (user != null) { putMsg(result, Status.USER_NAME_EXIST); } else { putMsg(result, Status.SUCCESS); } return result; } /** * unauthorized user * * @param loginUser login user * @param alertgroupId alert group id * @return unauthorize result code */ @Override public Map<String, Object> unauthorizedUser(User loginUser, Integer alertgroupId) { Map<String, Object> result = new HashMap<>(); if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } List<User> userList = userMapper.selectList(null);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
List<User> resultUsers = new ArrayList<>(); Set<User> userSet = null; if (userList != null && !userList.isEmpty()) { userSet = new HashSet<>(userList); List<User> authedUserList = userMapper.queryUserListByAlertGroupId(alertgroupId); Set<User> authedUserSet = null; if (authedUserList != null && !authedUserList.isEmpty()) { authedUserSet = new HashSet<>(authedUserList); userSet.removeAll(authedUserSet); } resultUsers = new ArrayList<>(userSet); } result.put(Constants.DATA_LIST, resultUsers); putMsg(result, Status.SUCCESS); return result; } /** * authorized user * * @param loginUser login user * @param alertGroupId alert group id * @return authorized result code */ @Override public Map<String, Object> authorizedUser(User loginUser, Integer alertGroupId) { Map<String, Object> result = new HashMap<>(); if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
List<User> userList = userMapper.queryUserListByAlertGroupId(alertGroupId); result.put(Constants.DATA_LIST, userList); putMsg(result, Status.SUCCESS); return result; } /** * @param tenantId tenant id * @return true if tenant exists, otherwise return false */ private boolean checkTenantExists(int tenantId) { return tenantMapper.queryById(tenantId) != null; } /** * @return if check failed return the field, otherwise return null */ private String checkUserParams(String userName, String password, String email, String phone) { String msg = null; if (!CheckUtils.checkUserName(userName)) { msg = userName; } else if (!CheckUtils.checkPassword(password)) { msg = password; } else if (!CheckUtils.checkEmail(email)) { msg = email; } else if (!CheckUtils.checkPhone(phone)) { msg = phone; } return msg; } /** * copy resource files
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
* xxx unchecked * * @param resourceComponent resource component * @param srcBasePath src base path * @param dstBasePath dst base path * @throws IOException io exception */ private void copyResourceFiles(String oldTenantCode, String newTenantCode, ResourceComponent resourceComponent, String srcBasePath, String dstBasePath) { List<ResourceComponent> components = resourceComponent.getChildren(); try { if (CollectionUtils.isNotEmpty(components)) { for (ResourceComponent component : components) { if (!storageOperate.exists(oldTenantCode, String.format(Constants.FORMAT_S_S, srcBasePath, component.getFullName()))) { logger.error("resource file: {} not exist,copy error", component.getFullName()); throw new ServiceException(Status.RESOURCE_NOT_EXIST); } if (!component.isDirctory()) { storageOperate.copy(String.format(Constants.FORMAT_S_S, srcBasePath, component.getFullName()), String.format(Constants.FORMAT_S_S, dstBasePath, component.getFullName()), false, true); continue; } if (CollectionUtils.isEmpty(component.getChildren())) { if (!storageOperate.exists(oldTenantCode, String.format(Constants.FORMAT_S_S, dstBasePath, component.getFullName()))) { storageOperate.mkdir(newTenantCode, String.format(Constants.FORMAT_S_S, dstBasePath, component.getFullName())); } } else { copyResourceFiles(oldTenantCode, newTenantCode, component, srcBasePath, dstBasePath); }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
} } } catch (IOException e) { logger.error("copy the resources failed,the error message is {}", e.getMessage()); } } /** * registry user, default state is 0, default tenant_id is 1, no phone, no queue * * @param userName user name * @param userPassword user password * @param repeatPassword repeat password * @param email email * @return registry result code * @throws Exception exception */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> registerUser(String userName, String userPassword, String repeatPassword, String email) { Map<String, Object> result = new HashMap<>(); String msg = this.checkUserParams(userName, userPassword, email, ""); if (!StringUtils.isEmpty(msg)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, msg); return result; } if (!userPassword.equals(repeatPassword)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "two passwords are not same"); return result; }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
User user = createUser(userName, userPassword, email, 1, "", "", Flag.NO.ordinal()); putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, user); return result; } /** * activate user, only system admin have permission, change user state code 0 to 1 * * @param loginUser login user * @param userName user name * @return create result code */ @Override public Map<String, Object> activateUser(User loginUser, String userName) { Map<String, Object> result = new HashMap<>(); result.put(Constants.STATUS, false); if (!isAdmin(loginUser)) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } if (!CheckUtils.checkUserName(userName)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, userName); return result; } User user = userMapper.queryByUserNameAccurately(userName); if (user == null) { putMsg(result, Status.USER_NOT_EXIST, userName); return result; } if (user.getState() != Flag.NO.ordinal()) {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, userName); return result; } user.setState(Flag.YES.ordinal()); Date now = new Date(); user.setUpdateTime(now); userMapper.updateById(user); User responseUser = userMapper.queryByUserNameAccurately(userName); putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, responseUser); return result; } /** * activate user, only system admin have permission, change users state code 0 to 1 * * @param loginUser login user * @param userNames user name * @return create result code */ @Override public Map<String, Object> batchActivateUser(User loginUser, List<String> userNames) { Map<String, Object> result = new HashMap<>(); if (!isAdmin(loginUser)) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } int totalSuccess = 0; List<String> successUserNames = new ArrayList<>(); Map<String, Object> successRes = new HashMap<>(); int totalFailed = 0;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,290
[Bug] [t_ds_relation_project_user] Duplicate data exists
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Duplicate data exists in the database when authorizing projects to users; There is a cache when writing, suspected to write the first project repeatedly ![image](https://user-images.githubusercontent.com/52615903/160808415-d67ee38c-035e-412f-87ce-b244823fb9b2.png) ![image](https://user-images.githubusercontent.com/52615903/160809161-f614da97-40ec-4de4-81b4-50f69ee24230.png) ### What you expected to happen when authorizing projects to users ### How to reproduce authorizing projects to users,When an item is selected and then deleted and then selected again ### Anything else nothing ### Version 2.0.3 ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [ ] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9290
https://github.com/apache/dolphinscheduler/pull/9536
8fab44cb934b6b4cf66b9b3130f9e170da168f3c
c837580c681ee2ea8f62a3a13d9fe8b95423bbdb
2022-03-30T10:20:49Z
java
2022-04-27T04:04:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
List<Map<String, String>> failedInfo = new ArrayList<>(); Map<String, Object> failedRes = new HashMap<>(); for (String userName : userNames) { Map<String, Object> tmpResult = activateUser(loginUser, userName); if (tmpResult.get(Constants.STATUS) != Status.SUCCESS) { totalFailed++; Map<String, String> failedBody = new HashMap<>(); failedBody.put("userName", userName); Status status = (Status) tmpResult.get(Constants.STATUS); String errorMessage = MessageFormat.format(status.getMsg(), userName); failedBody.put("msg", errorMessage); failedInfo.add(failedBody); } else { totalSuccess++; successUserNames.add(userName); } } successRes.put("sum", totalSuccess); successRes.put("userName", successUserNames); failedRes.put("sum", totalFailed); failedRes.put("info", failedInfo); Map<String, Object> res = new HashMap<>(); res.put("success", successRes); res.put("failed", failedRes); putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, res); return result; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,352
[Bug] [datasource-api] Hive datasource connection failed when kerberos renew ticket lifetime expire
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened in krb5.conf file, >>> ticket_lifetime = 24h renew_lifetime = 7d >>> when after renew lifetime expired, connecting to the hive data source failed ,because the cached hive data source client uses the old ticket ,it cannot get the new ticket error log ``` [WARN] 2022-04-01 16:59:53.260 org.apache.hive.jdbc.HiveConnection:[237] - Failed to connect to **.**.**.**:**** [ERROR] 2022-04-01 16:59:56.127 org.apache.thrift.transport.TSaslTransport:[315] - SASL negotiation failure javax.security.sasl.SaslException: GSS initiate failed at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:211) at org.apache.thrift.transport.TSaslClientTransport.handleSaslStartMessage(TSaslClientTransport.java:94) at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:271) at org.apache.thrift.transport.TSaslClientTransport.open(TSaslClientTransport.java:37) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:51) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:48) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport.open(TUGIAssumingTransport.java:48) at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:343) at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:228) at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:107) at com.zaxxer.hikari.util.DriverDataSource.getConnection(DriverDataSource.java:138) at com.zaxxer.hikari.pool.PoolBase.newConnection(PoolBase.java:364) at 
com.zaxxer.hikari.pool.PoolBase.newPoolEntry(PoolBase.java:206) at com.zaxxer.hikari.pool.HikariPool.createPoolEntry(HikariPool.java:476) at com.zaxxer.hikari.pool.HikariPool.access$100(HikariPool.java:71) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:726) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:712) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: org.ietf.jgss.GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt) at sun.security.jgss.krb5.Krb5InitCredential.getInstance(Krb5InitCredential.java:162) at sun.security.jgss.krb5.Krb5MechFactory.getCredentialElement(Krb5MechFactory.java:122) at sun.security.jgss.krb5.Krb5MechFactory.getMechanismContext(Krb5MechFactory.java:189) at sun.security.jgss.GSSManagerImpl.getMechanismContext(GSSManagerImpl.java:224) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:212) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179) at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:192) ... 23 common frames omitted ``` ### What you expected to happen I think the data source client cache should have an expiration time, for example, use the Guava Cache instead of CurrentHashMap. ### How to reproduce 1. in krb5.conf file, change config renew life to short time, e.g 1h, then restart KDC 2. restart api server 3. check hive connection, now it's a success 4. after renew life expired, check hive connection, now it's a failed ### Anything else _No response_ ### Version 2.0.5 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! 
### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9352
https://github.com/apache/dolphinscheduler/pull/9353
70dae6945f126414e36ffc28c99e1bf6e2ac7428
206b7c1c51e84f2e937748bc94191b23f95298e9
2022-04-05T09:23:28Z
java
2022-04-27T08:56:24Z
dolphinscheduler-datasource-plugin/dolphinscheduler-datasource-api/src/main/java/org/apache/dolphinscheduler/plugin/datasource/api/plugin/DataSourceClientProvider.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.plugin.datasource.api.plugin;

import org.apache.dolphinscheduler.plugin.datasource.api.utils.DataSourceUtils;
import org.apache.dolphinscheduler.spi.datasource.BaseConnectionParam;
import org.apache.dolphinscheduler.spi.datasource.ConnectionParam;
import org.apache.dolphinscheduler.spi.datasource.DataSourceChannel;
import org.apache.dolphinscheduler.spi.datasource.DataSourceClient;
import org.apache.dolphinscheduler.spi.enums.DbType;

import java.sql.Connection;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Singleton that installs the datasource channel plugins and hands out JDBC
 * {@link Connection}s, caching one {@link DataSourceClient} per unique
 * datasource (see {@code getConnection}).
 */
public class DataSourceClientProvider {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,352
[Bug] [datasource-api] Hive datasource connection failed when kerberos renew ticket lifetime expire
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened in krb5.conf file, >>> ticket_lifetime = 24h renew_lifetime = 7d >>> when after renew lifetime expired, connecting to the hive data source failed ,because the cached hive data source client uses the old ticket ,it cannot get the new ticket error log ``` [WARN] 2022-04-01 16:59:53.260 org.apache.hive.jdbc.HiveConnection:[237] - Failed to connect to **.**.**.**:**** [ERROR] 2022-04-01 16:59:56.127 org.apache.thrift.transport.TSaslTransport:[315] - SASL negotiation failure javax.security.sasl.SaslException: GSS initiate failed at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:211) at org.apache.thrift.transport.TSaslClientTransport.handleSaslStartMessage(TSaslClientTransport.java:94) at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:271) at org.apache.thrift.transport.TSaslClientTransport.open(TSaslClientTransport.java:37) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:51) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:48) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport.open(TUGIAssumingTransport.java:48) at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:343) at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:228) at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:107) at com.zaxxer.hikari.util.DriverDataSource.getConnection(DriverDataSource.java:138) at com.zaxxer.hikari.pool.PoolBase.newConnection(PoolBase.java:364) at 
com.zaxxer.hikari.pool.PoolBase.newPoolEntry(PoolBase.java:206) at com.zaxxer.hikari.pool.HikariPool.createPoolEntry(HikariPool.java:476) at com.zaxxer.hikari.pool.HikariPool.access$100(HikariPool.java:71) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:726) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:712) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: org.ietf.jgss.GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt) at sun.security.jgss.krb5.Krb5InitCredential.getInstance(Krb5InitCredential.java:162) at sun.security.jgss.krb5.Krb5MechFactory.getCredentialElement(Krb5MechFactory.java:122) at sun.security.jgss.krb5.Krb5MechFactory.getMechanismContext(Krb5MechFactory.java:189) at sun.security.jgss.GSSManagerImpl.getMechanismContext(GSSManagerImpl.java:224) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:212) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179) at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:192) ... 23 common frames omitted ``` ### What you expected to happen I think the data source client cache should have an expiration time, for example, use the Guava Cache instead of CurrentHashMap. ### How to reproduce 1. in krb5.conf file, change config renew life to short time, e.g 1h, then restart KDC 2. restart api server 3. check hive connection, now it's a success 4. after renew life expired, check hive connection, now it's a failed ### Anything else _No response_ ### Version 2.0.5 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! 
### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9352
https://github.com/apache/dolphinscheduler/pull/9353
70dae6945f126414e36ffc28c99e1bf6e2ac7428
206b7c1c51e84f2e937748bc94191b23f95298e9
2022-04-05T09:23:28Z
java
2022-04-27T08:56:24Z
dolphinscheduler-datasource-plugin/dolphinscheduler-datasource-api/src/main/java/org/apache/dolphinscheduler/plugin/datasource/api/plugin/DataSourceClientProvider.java
private static final Logger logger = LoggerFactory.getLogger(DataSourceClientProvider.class);

// Cache of DataSourceClient instances keyed by the datasource's unique id.
// NOTE(review): entries are never evicted; as reported in issue #9352, a cached
// client can outlive the Kerberos renew lifetime and then fail to connect with
// "No valid credentials provided" -- an expiring cache would avoid this.
private static final Map<String, DataSourceClient> uniqueId2dataSourceClientMap = new ConcurrentHashMap<>();

// Discovers and holds the installed datasource channel plugins (set once in the ctor).
private DataSourcePluginManager dataSourcePluginManager;

// Private: instances are only created through the holder-based singleton.
private DataSourceClientProvider() {
    initDataSourcePlugin();
}

// Initialization-on-demand holder idiom: INSTANCE is created lazily and thread-safely
// by the JVM class loader on first access of getInstance().
private static class DataSourceClientProviderHolder {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,352
[Bug] [datasource-api] Hive datasource connection failed when kerberos renew ticket lifetime expire
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened in krb5.conf file, >>> ticket_lifetime = 24h renew_lifetime = 7d >>> when after renew lifetime expired, connecting to the hive data source failed ,because the cached hive data source client uses the old ticket ,it cannot get the new ticket error log ``` [WARN] 2022-04-01 16:59:53.260 org.apache.hive.jdbc.HiveConnection:[237] - Failed to connect to **.**.**.**:**** [ERROR] 2022-04-01 16:59:56.127 org.apache.thrift.transport.TSaslTransport:[315] - SASL negotiation failure javax.security.sasl.SaslException: GSS initiate failed at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:211) at org.apache.thrift.transport.TSaslClientTransport.handleSaslStartMessage(TSaslClientTransport.java:94) at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:271) at org.apache.thrift.transport.TSaslClientTransport.open(TSaslClientTransport.java:37) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:51) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:48) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport.open(TUGIAssumingTransport.java:48) at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:343) at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:228) at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:107) at com.zaxxer.hikari.util.DriverDataSource.getConnection(DriverDataSource.java:138) at com.zaxxer.hikari.pool.PoolBase.newConnection(PoolBase.java:364) at 
com.zaxxer.hikari.pool.PoolBase.newPoolEntry(PoolBase.java:206) at com.zaxxer.hikari.pool.HikariPool.createPoolEntry(HikariPool.java:476) at com.zaxxer.hikari.pool.HikariPool.access$100(HikariPool.java:71) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:726) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:712) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: org.ietf.jgss.GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt) at sun.security.jgss.krb5.Krb5InitCredential.getInstance(Krb5InitCredential.java:162) at sun.security.jgss.krb5.Krb5MechFactory.getCredentialElement(Krb5MechFactory.java:122) at sun.security.jgss.krb5.Krb5MechFactory.getMechanismContext(Krb5MechFactory.java:189) at sun.security.jgss.GSSManagerImpl.getMechanismContext(GSSManagerImpl.java:224) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:212) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179) at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:192) ... 23 common frames omitted ``` ### What you expected to happen I think the data source client cache should have an expiration time, for example, use the Guava Cache instead of CurrentHashMap. ### How to reproduce 1. in krb5.conf file, change config renew life to short time, e.g 1h, then restart KDC 2. restart api server 3. check hive connection, now it's a success 4. after renew life expired, check hive connection, now it's a failed ### Anything else _No response_ ### Version 2.0.5 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! 
### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9352
https://github.com/apache/dolphinscheduler/pull/9353
70dae6945f126414e36ffc28c99e1bf6e2ac7428
206b7c1c51e84f2e937748bc94191b23f95298e9
2022-04-05T09:23:28Z
java
2022-04-27T08:56:24Z
dolphinscheduler-datasource-plugin/dolphinscheduler-datasource-api/src/main/java/org/apache/dolphinscheduler/plugin/datasource/api/plugin/DataSourceClientProvider.java
private static final DataSourceClientProvider INSTANCE = new DataSourceClientProvider();
}

/**
 * @return the lazily-initialized singleton provider
 */
public static DataSourceClientProvider getInstance() {
    return DataSourceClientProviderHolder.INSTANCE;
}

/**
 * Returns a JDBC connection for the given datasource type and connection parameters.
 * <p>
 * A {@link DataSourceClient} is created on first use for each unique datasource id
 * and cached for all subsequent calls.
 *
 * @param dbType          datasource type whose plugin channel will build the client
 * @param connectionParam connection parameters; must be a {@link BaseConnectionParam}
 * @return a connection obtained from the (possibly cached) client
 * @throws RuntimeException if no plugin is installed for {@code dbType}
 */
public Connection getConnection(DbType dbType, ConnectionParam connectionParam) {
    BaseConnectionParam baseConnectionParam = (BaseConnectionParam) connectionParam;
    // The unique id identifies the cached client for these exact connection params.
    String datasourceUniqueId = DataSourceUtils.getDatasourceUniqueId(baseConnectionParam, dbType);
    logger.info("getConnection datasourceUniqueId {}", datasourceUniqueId);
    // Create-on-first-use. NOTE(review): cached clients are never refreshed, which is
    // the root cause of the Kerberos renew-lifetime failure described in issue #9352.
    DataSourceClient dataSourceClient = uniqueId2dataSourceClientMap.computeIfAbsent(datasourceUniqueId, $ -> {
        Map<String, DataSourceChannel> dataSourceChannelMap = dataSourcePluginManager.getDataSourceChannelMap();
        DataSourceChannel dataSourceChannel = dataSourceChannelMap.get(dbType.getDescp());
        if (null == dataSourceChannel) {
            throw new RuntimeException(String.format("datasource plugin '%s' is not found", dbType.getDescp()));
        }
        return dataSourceChannel.createDataSourceClient(baseConnectionParam, dbType);
    });
    return dataSourceClient.getConnection();
}

// Discovers and installs all datasource channel plugins; called once from the ctor.
private void initDataSourcePlugin() {
    dataSourcePluginManager = new DataSourcePluginManager();
    dataSourcePluginManager.installPlugin();
}
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,352
[Bug] [datasource-api] Hive datasource connection failed when kerberos renew ticket lifetime expire
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened in krb5.conf file, >>> ticket_lifetime = 24h renew_lifetime = 7d >>> when after renew lifetime expired, connecting to the hive data source failed ,because the cached hive data source client uses the old ticket ,it cannot get the new ticket error log ``` [WARN] 2022-04-01 16:59:53.260 org.apache.hive.jdbc.HiveConnection:[237] - Failed to connect to **.**.**.**:**** [ERROR] 2022-04-01 16:59:56.127 org.apache.thrift.transport.TSaslTransport:[315] - SASL negotiation failure javax.security.sasl.SaslException: GSS initiate failed at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:211) at org.apache.thrift.transport.TSaslClientTransport.handleSaslStartMessage(TSaslClientTransport.java:94) at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:271) at org.apache.thrift.transport.TSaslClientTransport.open(TSaslClientTransport.java:37) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:51) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:48) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport.open(TUGIAssumingTransport.java:48) at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:343) at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:228) at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:107) at com.zaxxer.hikari.util.DriverDataSource.getConnection(DriverDataSource.java:138) at com.zaxxer.hikari.pool.PoolBase.newConnection(PoolBase.java:364) at 
com.zaxxer.hikari.pool.PoolBase.newPoolEntry(PoolBase.java:206) at com.zaxxer.hikari.pool.HikariPool.createPoolEntry(HikariPool.java:476) at com.zaxxer.hikari.pool.HikariPool.access$100(HikariPool.java:71) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:726) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:712) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: org.ietf.jgss.GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt) at sun.security.jgss.krb5.Krb5InitCredential.getInstance(Krb5InitCredential.java:162) at sun.security.jgss.krb5.Krb5MechFactory.getCredentialElement(Krb5MechFactory.java:122) at sun.security.jgss.krb5.Krb5MechFactory.getMechanismContext(Krb5MechFactory.java:189) at sun.security.jgss.GSSManagerImpl.getMechanismContext(GSSManagerImpl.java:224) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:212) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179) at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:192) ... 23 common frames omitted ``` ### What you expected to happen I think the data source client cache should have an expiration time, for example, use the Guava Cache instead of CurrentHashMap. ### How to reproduce 1. in krb5.conf file, change config renew life to short time, e.g 1h, then restart KDC 2. restart api server 3. check hive connection, now it's a success 4. after renew life expired, check hive connection, now it's a failed ### Anything else _No response_ ### Version 2.0.5 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! 
### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9352
https://github.com/apache/dolphinscheduler/pull/9353
70dae6945f126414e36ffc28c99e1bf6e2ac7428
206b7c1c51e84f2e937748bc94191b23f95298e9
2022-04-05T09:23:28Z
java
2022-04-27T08:56:24Z
dolphinscheduler-datasource-plugin/dolphinscheduler-datasource-api/src/test/java/org/apache/dolphinscheduler/plugin/datasource/api/utils/DataSourceUtilsTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,352
[Bug] [datasource-api] Hive datasource connection failed when kerberos renew ticket lifetime expire
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened in krb5.conf file, >>> ticket_lifetime = 24h renew_lifetime = 7d >>> when after renew lifetime expired, connecting to the hive data source failed ,because the cached hive data source client uses the old ticket ,it cannot get the new ticket error log ``` [WARN] 2022-04-01 16:59:53.260 org.apache.hive.jdbc.HiveConnection:[237] - Failed to connect to **.**.**.**:**** [ERROR] 2022-04-01 16:59:56.127 org.apache.thrift.transport.TSaslTransport:[315] - SASL negotiation failure javax.security.sasl.SaslException: GSS initiate failed at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:211) at org.apache.thrift.transport.TSaslClientTransport.handleSaslStartMessage(TSaslClientTransport.java:94) at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:271) at org.apache.thrift.transport.TSaslClientTransport.open(TSaslClientTransport.java:37) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:51) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:48) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport.open(TUGIAssumingTransport.java:48) at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:343) at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:228) at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:107) at com.zaxxer.hikari.util.DriverDataSource.getConnection(DriverDataSource.java:138) at com.zaxxer.hikari.pool.PoolBase.newConnection(PoolBase.java:364) at 
com.zaxxer.hikari.pool.PoolBase.newPoolEntry(PoolBase.java:206) at com.zaxxer.hikari.pool.HikariPool.createPoolEntry(HikariPool.java:476) at com.zaxxer.hikari.pool.HikariPool.access$100(HikariPool.java:71) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:726) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:712) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: org.ietf.jgss.GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt) at sun.security.jgss.krb5.Krb5InitCredential.getInstance(Krb5InitCredential.java:162) at sun.security.jgss.krb5.Krb5MechFactory.getCredentialElement(Krb5MechFactory.java:122) at sun.security.jgss.krb5.Krb5MechFactory.getMechanismContext(Krb5MechFactory.java:189) at sun.security.jgss.GSSManagerImpl.getMechanismContext(GSSManagerImpl.java:224) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:212) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179) at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:192) ... 23 common frames omitted ``` ### What you expected to happen I think the data source client cache should have an expiration time, for example, use the Guava Cache instead of CurrentHashMap. ### How to reproduce 1. in krb5.conf file, change config renew life to short time, e.g 1h, then restart KDC 2. restart api server 3. check hive connection, now it's a success 4. after renew life expired, check hive connection, now it's a failed ### Anything else _No response_ ### Version 2.0.5 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! 
### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9352
https://github.com/apache/dolphinscheduler/pull/9353
70dae6945f126414e36ffc28c99e1bf6e2ac7428
206b7c1c51e84f2e937748bc94191b23f95298e9
2022-04-05T09:23:28Z
java
2022-04-27T08:56:24Z
dolphinscheduler-datasource-plugin/dolphinscheduler-datasource-api/src/test/java/org/apache/dolphinscheduler/plugin/datasource/api/utils/DataSourceUtilsTest.java
* the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.plugin.datasource.api.utils; import org.apache.dolphinscheduler.plugin.datasource.api.datasource.mysql.MySQLConnectionParam; import org.apache.dolphinscheduler.plugin.datasource.api.datasource.mysql.MySQLDataSourceParamDTO; import org.apache.dolphinscheduler.plugin.datasource.api.datasource.mysql.MySQLDataSourceProcessor; import org.apache.dolphinscheduler.plugin.datasource.api.plugin.DataSourceClientProvider; import org.apache.dolphinscheduler.spi.datasource.ConnectionParam; import org.apache.dolphinscheduler.spi.enums.DbType; import org.apache.dolphinscheduler.spi.utils.JSONUtils; import java.sql.Connection; import java.sql.DriverManager; import java.util.HashMap; import java.util.Map; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mockito; import org.powermock.api.mockito.PowerMockito; import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.modules.junit4.PowerMockRunner; @RunWith(PowerMockRunner.class)
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,352
[Bug] [datasource-api] Hive datasource connection failed when kerberos renew ticket lifetime expire
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened in krb5.conf file, >>> ticket_lifetime = 24h renew_lifetime = 7d >>> when after renew lifetime expired, connecting to the hive data source failed ,because the cached hive data source client uses the old ticket ,it cannot get the new ticket error log ``` [WARN] 2022-04-01 16:59:53.260 org.apache.hive.jdbc.HiveConnection:[237] - Failed to connect to **.**.**.**:**** [ERROR] 2022-04-01 16:59:56.127 org.apache.thrift.transport.TSaslTransport:[315] - SASL negotiation failure javax.security.sasl.SaslException: GSS initiate failed at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:211) at org.apache.thrift.transport.TSaslClientTransport.handleSaslStartMessage(TSaslClientTransport.java:94) at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:271) at org.apache.thrift.transport.TSaslClientTransport.open(TSaslClientTransport.java:37) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:51) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:48) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport.open(TUGIAssumingTransport.java:48) at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:343) at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:228) at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:107) at com.zaxxer.hikari.util.DriverDataSource.getConnection(DriverDataSource.java:138) at com.zaxxer.hikari.pool.PoolBase.newConnection(PoolBase.java:364) at 
com.zaxxer.hikari.pool.PoolBase.newPoolEntry(PoolBase.java:206) at com.zaxxer.hikari.pool.HikariPool.createPoolEntry(HikariPool.java:476) at com.zaxxer.hikari.pool.HikariPool.access$100(HikariPool.java:71) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:726) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:712) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: org.ietf.jgss.GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt) at sun.security.jgss.krb5.Krb5InitCredential.getInstance(Krb5InitCredential.java:162) at sun.security.jgss.krb5.Krb5MechFactory.getCredentialElement(Krb5MechFactory.java:122) at sun.security.jgss.krb5.Krb5MechFactory.getMechanismContext(Krb5MechFactory.java:189) at sun.security.jgss.GSSManagerImpl.getMechanismContext(GSSManagerImpl.java:224) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:212) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179) at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:192) ... 23 common frames omitted ``` ### What you expected to happen I think the data source client cache should have an expiration time, for example, use the Guava Cache instead of CurrentHashMap. ### How to reproduce 1. in krb5.conf file, change config renew life to short time, e.g 1h, then restart KDC 2. restart api server 3. check hive connection, now it's a success 4. after renew life expired, check hive connection, now it's a failed ### Anything else _No response_ ### Version 2.0.5 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! 
### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9352
https://github.com/apache/dolphinscheduler/pull/9353
70dae6945f126414e36ffc28c99e1bf6e2ac7428
206b7c1c51e84f2e937748bc94191b23f95298e9
2022-04-05T09:23:28Z
java
2022-04-27T08:56:24Z
dolphinscheduler-datasource-plugin/dolphinscheduler-datasource-api/src/test/java/org/apache/dolphinscheduler/plugin/datasource/api/utils/DataSourceUtilsTest.java
@PrepareForTest({Class.class, DriverManager.class, MySQLDataSourceProcessor.class, DataSourceClientProvider.class, PasswordUtils.class, CommonUtils.class}) public class DataSourceUtilsTest { @Test public void testCheckDatasourceParam() { MySQLDataSourceParamDTO mysqlDatasourceParamDTO = new MySQLDataSourceParamDTO(); mysqlDatasourceParamDTO.setHost("localhost"); mysqlDatasourceParamDTO.setDatabase("default"); Map<String, String> other = new HashMap<>(); other.put("serverTimezone", "Asia/Shanghai"); other.put("queryTimeout", "-1"); other.put("characterEncoding", "utf8"); mysqlDatasourceParamDTO.setOther(other); DataSourceUtils.checkDatasourceParam(mysqlDatasourceParamDTO); Assert.assertTrue(true); } @Test public void testBuildConnectionParams() { MySQLDataSourceParamDTO mysqlDatasourceParamDTO = new MySQLDataSourceParamDTO(); mysqlDatasourceParamDTO.setHost("localhost"); mysqlDatasourceParamDTO.setDatabase("default"); mysqlDatasourceParamDTO.setUserName("root"); mysqlDatasourceParamDTO.setPort(3306); mysqlDatasourceParamDTO.setPassword("123456"); PowerMockito.mockStatic(PasswordUtils.class); PowerMockito.when(PasswordUtils.encodePassword(Mockito.anyString())).thenReturn("123456"); PowerMockito.mockStatic(CommonUtils.class); PowerMockito.when(CommonUtils.getKerberosStartupState()).thenReturn(false); ConnectionParam connectionParam = DataSourceUtils.buildConnectionParams(mysqlDatasourceParamDTO);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,352
[Bug] [datasource-api] Hive datasource connection failed when kerberos renew ticket lifetime expire
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened in krb5.conf file, >>> ticket_lifetime = 24h renew_lifetime = 7d >>> when after renew lifetime expired, connecting to the hive data source failed ,because the cached hive data source client uses the old ticket ,it cannot get the new ticket error log ``` [WARN] 2022-04-01 16:59:53.260 org.apache.hive.jdbc.HiveConnection:[237] - Failed to connect to **.**.**.**:**** [ERROR] 2022-04-01 16:59:56.127 org.apache.thrift.transport.TSaslTransport:[315] - SASL negotiation failure javax.security.sasl.SaslException: GSS initiate failed at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:211) at org.apache.thrift.transport.TSaslClientTransport.handleSaslStartMessage(TSaslClientTransport.java:94) at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:271) at org.apache.thrift.transport.TSaslClientTransport.open(TSaslClientTransport.java:37) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:51) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:48) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport.open(TUGIAssumingTransport.java:48) at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:343) at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:228) at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:107) at com.zaxxer.hikari.util.DriverDataSource.getConnection(DriverDataSource.java:138) at com.zaxxer.hikari.pool.PoolBase.newConnection(PoolBase.java:364) at 
com.zaxxer.hikari.pool.PoolBase.newPoolEntry(PoolBase.java:206) at com.zaxxer.hikari.pool.HikariPool.createPoolEntry(HikariPool.java:476) at com.zaxxer.hikari.pool.HikariPool.access$100(HikariPool.java:71) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:726) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:712) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: org.ietf.jgss.GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt) at sun.security.jgss.krb5.Krb5InitCredential.getInstance(Krb5InitCredential.java:162) at sun.security.jgss.krb5.Krb5MechFactory.getCredentialElement(Krb5MechFactory.java:122) at sun.security.jgss.krb5.Krb5MechFactory.getMechanismContext(Krb5MechFactory.java:189) at sun.security.jgss.GSSManagerImpl.getMechanismContext(GSSManagerImpl.java:224) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:212) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179) at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:192) ... 23 common frames omitted ``` ### What you expected to happen I think the data source client cache should have an expiration time, for example, use the Guava Cache instead of CurrentHashMap. ### How to reproduce 1. in krb5.conf file, change config renew life to short time, e.g 1h, then restart KDC 2. restart api server 3. check hive connection, now it's a success 4. after renew life expired, check hive connection, now it's a failed ### Anything else _No response_ ### Version 2.0.5 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! 
### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9352
https://github.com/apache/dolphinscheduler/pull/9353
70dae6945f126414e36ffc28c99e1bf6e2ac7428
206b7c1c51e84f2e937748bc94191b23f95298e9
2022-04-05T09:23:28Z
java
2022-04-27T08:56:24Z
dolphinscheduler-datasource-plugin/dolphinscheduler-datasource-api/src/test/java/org/apache/dolphinscheduler/plugin/datasource/api/utils/DataSourceUtilsTest.java
Assert.assertNotNull(connectionParam); } @Test public void testBuildConnectionParams2() { MySQLDataSourceParamDTO mysqlDatasourceParamDTO = new MySQLDataSourceParamDTO(); mysqlDatasourceParamDTO.setHost("localhost"); mysqlDatasourceParamDTO.setDatabase("default"); mysqlDatasourceParamDTO.setUserName("root"); mysqlDatasourceParamDTO.setPort(3306); mysqlDatasourceParamDTO.setPassword("123456"); ConnectionParam connectionParam = DataSourceUtils.buildConnectionParams(DbType.MYSQL, JSONUtils.toJsonString(mysqlDatasourceParamDTO)); Assert.assertNotNull(connectionParam); } @Test public void testGetConnection() { PowerMockito.mockStatic(DataSourceClientProvider.class); DataSourceClientProvider clientProvider = PowerMockito.mock(DataSourceClientProvider.class); PowerMockito.when(DataSourceClientProvider.getInstance()).thenReturn(clientProvider); Connection connection = PowerMockito.mock(Connection.class); PowerMockito.when(clientProvider.getConnection(Mockito.any(), Mockito.any())).thenReturn(connection); MySQLConnectionParam connectionParam = new MySQLConnectionParam(); connectionParam.setUser("root"); connectionParam.setPassword("123456"); connection = DataSourceClientProvider.getInstance().getConnection(DbType.MYSQL, connectionParam); Assert.assertNotNull(connection); } @Test public void testGetJdbcUrl() { MySQLConnectionParam mysqlConnectionParam = new MySQLConnectionParam(); mysqlConnectionParam.setJdbcUrl("jdbc:mysql://localhost:3308");
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,352
[Bug] [datasource-api] Hive datasource connection failed when kerberos renew ticket lifetime expire
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened in krb5.conf file, >>> ticket_lifetime = 24h renew_lifetime = 7d >>> when after renew lifetime expired, connecting to the hive data source failed ,because the cached hive data source client uses the old ticket ,it cannot get the new ticket error log ``` [WARN] 2022-04-01 16:59:53.260 org.apache.hive.jdbc.HiveConnection:[237] - Failed to connect to **.**.**.**:**** [ERROR] 2022-04-01 16:59:56.127 org.apache.thrift.transport.TSaslTransport:[315] - SASL negotiation failure javax.security.sasl.SaslException: GSS initiate failed at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:211) at org.apache.thrift.transport.TSaslClientTransport.handleSaslStartMessage(TSaslClientTransport.java:94) at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:271) at org.apache.thrift.transport.TSaslClientTransport.open(TSaslClientTransport.java:37) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:51) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:48) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport.open(TUGIAssumingTransport.java:48) at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:343) at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:228) at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:107) at com.zaxxer.hikari.util.DriverDataSource.getConnection(DriverDataSource.java:138) at com.zaxxer.hikari.pool.PoolBase.newConnection(PoolBase.java:364) at 
com.zaxxer.hikari.pool.PoolBase.newPoolEntry(PoolBase.java:206) at com.zaxxer.hikari.pool.HikariPool.createPoolEntry(HikariPool.java:476) at com.zaxxer.hikari.pool.HikariPool.access$100(HikariPool.java:71) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:726) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:712) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: org.ietf.jgss.GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt) at sun.security.jgss.krb5.Krb5InitCredential.getInstance(Krb5InitCredential.java:162) at sun.security.jgss.krb5.Krb5MechFactory.getCredentialElement(Krb5MechFactory.java:122) at sun.security.jgss.krb5.Krb5MechFactory.getMechanismContext(Krb5MechFactory.java:189) at sun.security.jgss.GSSManagerImpl.getMechanismContext(GSSManagerImpl.java:224) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:212) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179) at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:192) ... 23 common frames omitted ``` ### What you expected to happen I think the data source client cache should have an expiration time, for example, use the Guava Cache instead of CurrentHashMap. ### How to reproduce 1. in krb5.conf file, change config renew life to short time, e.g 1h, then restart KDC 2. restart api server 3. check hive connection, now it's a success 4. after renew life expired, check hive connection, now it's a failed ### Anything else _No response_ ### Version 2.0.5 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! 
### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9352
https://github.com/apache/dolphinscheduler/pull/9353
70dae6945f126414e36ffc28c99e1bf6e2ac7428
206b7c1c51e84f2e937748bc94191b23f95298e9
2022-04-05T09:23:28Z
java
2022-04-27T08:56:24Z
dolphinscheduler-datasource-plugin/dolphinscheduler-datasource-api/src/test/java/org/apache/dolphinscheduler/plugin/datasource/api/utils/DataSourceUtilsTest.java
String jdbcUrl = DataSourceUtils.getJdbcUrl(DbType.MYSQL, mysqlConnectionParam); Assert.assertEquals("jdbc:mysql://localhost:3308?allowLoadLocalInfile=false&autoDeserialize=false&allowLocalInfile=false&allowUrlInLocalInfile=false", jdbcUrl); } @Test public void testBuildDatasourceParamDTO() { MySQLConnectionParam connectionParam = new MySQLConnectionParam(); connectionParam.setJdbcUrl("jdbc:mysql://localhost:3308?allowLoadLocalInfile=false&autoDeserialize=false&allowLocalInfile=false&allowUrlInLocalInfile=false"); connectionParam.setAddress("jdbc:mysql://localhost:3308"); connectionParam.setUser("root"); connectionParam.setPassword("123456"); Assert.assertNotNull(DataSourceUtils.buildDatasourceParamDTO(DbType.MYSQL, JSONUtils.toJsonString(connectionParam))); } @Test public void testGetDatasourceProcessor() { Assert.assertNotNull(DataSourceUtils.getDatasourceProcessor(DbType.MYSQL)); Assert.assertNotNull(DataSourceUtils.getDatasourceProcessor(DbType.POSTGRESQL)); Assert.assertNotNull(DataSourceUtils.getDatasourceProcessor(DbType.HIVE)); Assert.assertNotNull(DataSourceUtils.getDatasourceProcessor(DbType.SPARK)); Assert.assertNotNull(DataSourceUtils.getDatasourceProcessor(DbType.CLICKHOUSE)); Assert.assertNotNull(DataSourceUtils.getDatasourceProcessor(DbType.ORACLE)); Assert.assertNotNull(DataSourceUtils.getDatasourceProcessor(DbType.SQLSERVER)); Assert.assertNotNull(DataSourceUtils.getDatasourceProcessor(DbType.DB2)); Assert.assertNotNull(DataSourceUtils.getDatasourceProcessor(DbType.PRESTO)); } @Test(expected = Exception.class) public void testGetDatasourceProcessorError() { DataSourceUtils.getDatasourceProcessor(null); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,352
[Bug] [datasource-api] Hive datasource connection failed when kerberos renew ticket lifetime expire
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened in krb5.conf file, >>> ticket_lifetime = 24h renew_lifetime = 7d >>> when after renew lifetime expired, connecting to the hive data source failed ,because the cached hive data source client uses the old ticket ,it cannot get the new ticket error log ``` [WARN] 2022-04-01 16:59:53.260 org.apache.hive.jdbc.HiveConnection:[237] - Failed to connect to **.**.**.**:**** [ERROR] 2022-04-01 16:59:56.127 org.apache.thrift.transport.TSaslTransport:[315] - SASL negotiation failure javax.security.sasl.SaslException: GSS initiate failed at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:211) at org.apache.thrift.transport.TSaslClientTransport.handleSaslStartMessage(TSaslClientTransport.java:94) at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:271) at org.apache.thrift.transport.TSaslClientTransport.open(TSaslClientTransport.java:37) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:51) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:48) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport.open(TUGIAssumingTransport.java:48) at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:343) at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:228) at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:107) at com.zaxxer.hikari.util.DriverDataSource.getConnection(DriverDataSource.java:138) at com.zaxxer.hikari.pool.PoolBase.newConnection(PoolBase.java:364) at 
com.zaxxer.hikari.pool.PoolBase.newPoolEntry(PoolBase.java:206) at com.zaxxer.hikari.pool.HikariPool.createPoolEntry(HikariPool.java:476) at com.zaxxer.hikari.pool.HikariPool.access$100(HikariPool.java:71) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:726) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:712) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: org.ietf.jgss.GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt) at sun.security.jgss.krb5.Krb5InitCredential.getInstance(Krb5InitCredential.java:162) at sun.security.jgss.krb5.Krb5MechFactory.getCredentialElement(Krb5MechFactory.java:122) at sun.security.jgss.krb5.Krb5MechFactory.getMechanismContext(Krb5MechFactory.java:189) at sun.security.jgss.GSSManagerImpl.getMechanismContext(GSSManagerImpl.java:224) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:212) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179) at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:192) ... 23 common frames omitted ``` ### What you expected to happen I think the data source client cache should have an expiration time, for example, use the Guava Cache instead of CurrentHashMap. ### How to reproduce 1. in krb5.conf file, change config renew life to short time, e.g 1h, then restart KDC 2. restart api server 3. check hive connection, now it's a success 4. after renew life expired, check hive connection, now it's a failed ### Anything else _No response_ ### Version 2.0.5 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! 
### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9352
https://github.com/apache/dolphinscheduler/pull/9353
70dae6945f126414e36ffc28c99e1bf6e2ac7428
206b7c1c51e84f2e937748bc94191b23f95298e9
2022-04-05T09:23:28Z
java
2022-04-27T08:56:24Z
dolphinscheduler-datasource-plugin/dolphinscheduler-datasource-hive/src/main/java/org/apache/dolphinscheduler/plugin/datasource/hive/HiveDataSourceClient.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.plugin.datasource.hive; import static org.apache.dolphinscheduler.plugin.task.api.TaskConstants.JAVA_SECURITY_KRB5_CONF; import static org.apache.dolphinscheduler.plugin.task.api.TaskConstants.JAVA_SECURITY_KRB5_CONF_PATH; import static org.apache.dolphinscheduler.plugin.task.api.TaskConstants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE; import org.apache.dolphinscheduler.plugin.datasource.api.client.CommonDataSourceClient;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,352
[Bug] [datasource-api] Hive datasource connection failed when kerberos renew ticket lifetime expire
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened in krb5.conf file, >>> ticket_lifetime = 24h renew_lifetime = 7d >>> when after renew lifetime expired, connecting to the hive data source failed ,because the cached hive data source client uses the old ticket ,it cannot get the new ticket error log ``` [WARN] 2022-04-01 16:59:53.260 org.apache.hive.jdbc.HiveConnection:[237] - Failed to connect to **.**.**.**:**** [ERROR] 2022-04-01 16:59:56.127 org.apache.thrift.transport.TSaslTransport:[315] - SASL negotiation failure javax.security.sasl.SaslException: GSS initiate failed at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:211) at org.apache.thrift.transport.TSaslClientTransport.handleSaslStartMessage(TSaslClientTransport.java:94) at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:271) at org.apache.thrift.transport.TSaslClientTransport.open(TSaslClientTransport.java:37) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:51) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:48) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport.open(TUGIAssumingTransport.java:48) at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:343) at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:228) at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:107) at com.zaxxer.hikari.util.DriverDataSource.getConnection(DriverDataSource.java:138) at com.zaxxer.hikari.pool.PoolBase.newConnection(PoolBase.java:364) at 
com.zaxxer.hikari.pool.PoolBase.newPoolEntry(PoolBase.java:206) at com.zaxxer.hikari.pool.HikariPool.createPoolEntry(HikariPool.java:476) at com.zaxxer.hikari.pool.HikariPool.access$100(HikariPool.java:71) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:726) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:712) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: org.ietf.jgss.GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt) at sun.security.jgss.krb5.Krb5InitCredential.getInstance(Krb5InitCredential.java:162) at sun.security.jgss.krb5.Krb5MechFactory.getCredentialElement(Krb5MechFactory.java:122) at sun.security.jgss.krb5.Krb5MechFactory.getMechanismContext(Krb5MechFactory.java:189) at sun.security.jgss.GSSManagerImpl.getMechanismContext(GSSManagerImpl.java:224) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:212) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179) at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:192) ... 23 common frames omitted ``` ### What you expected to happen I think the data source client cache should have an expiration time, for example, use the Guava Cache instead of CurrentHashMap. ### How to reproduce 1. in krb5.conf file, change config renew life to short time, e.g 1h, then restart KDC 2. restart api server 3. check hive connection, now it's a success 4. after renew life expired, check hive connection, now it's a failed ### Anything else _No response_ ### Version 2.0.5 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! 
### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9352
https://github.com/apache/dolphinscheduler/pull/9353
70dae6945f126414e36ffc28c99e1bf6e2ac7428
206b7c1c51e84f2e937748bc94191b23f95298e9
2022-04-05T09:23:28Z
java
2022-04-27T08:56:24Z
dolphinscheduler-datasource-plugin/dolphinscheduler-datasource-hive/src/main/java/org/apache/dolphinscheduler/plugin/datasource/hive/HiveDataSourceClient.java
import org.apache.dolphinscheduler.plugin.datasource.api.provider.JDBCDataSourceProvider; import org.apache.dolphinscheduler.plugin.datasource.utils.CommonUtil; import org.apache.dolphinscheduler.spi.datasource.BaseConnectionParam; import org.apache.dolphinscheduler.spi.enums.DbType; import org.apache.dolphinscheduler.spi.utils.Constants; import org.apache.dolphinscheduler.spi.utils.PropertyUtils; import org.apache.dolphinscheduler.spi.utils.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import java.io.IOException; import java.lang.reflect.Field; import java.sql.Connection; import java.sql.SQLException; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.zaxxer.hikari.HikariDataSource; import sun.security.krb5.Config; public class HiveDataSourceClient extends CommonDataSourceClient { private static final Logger logger = LoggerFactory.getLogger(HiveDataSourceClient.class); private ScheduledExecutorService kerberosRenewalService; private Configuration hadoopConf; protected HikariDataSource oneSessionDataSource; private UserGroupInformation ugi; public HiveDataSourceClient(BaseConnectionParam baseConnectionParam, DbType dbType) { super(baseConnectionParam, dbType); } @Override
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,352
[Bug] [datasource-api] Hive datasource connection failed when kerberos renew ticket lifetime expire
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened in krb5.conf file, >>> ticket_lifetime = 24h renew_lifetime = 7d >>> when after renew lifetime expired, connecting to the hive data source failed ,because the cached hive data source client uses the old ticket ,it cannot get the new ticket error log ``` [WARN] 2022-04-01 16:59:53.260 org.apache.hive.jdbc.HiveConnection:[237] - Failed to connect to **.**.**.**:**** [ERROR] 2022-04-01 16:59:56.127 org.apache.thrift.transport.TSaslTransport:[315] - SASL negotiation failure javax.security.sasl.SaslException: GSS initiate failed at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:211) at org.apache.thrift.transport.TSaslClientTransport.handleSaslStartMessage(TSaslClientTransport.java:94) at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:271) at org.apache.thrift.transport.TSaslClientTransport.open(TSaslClientTransport.java:37) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:51) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:48) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport.open(TUGIAssumingTransport.java:48) at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:343) at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:228) at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:107) at com.zaxxer.hikari.util.DriverDataSource.getConnection(DriverDataSource.java:138) at com.zaxxer.hikari.pool.PoolBase.newConnection(PoolBase.java:364) at 
com.zaxxer.hikari.pool.PoolBase.newPoolEntry(PoolBase.java:206) at com.zaxxer.hikari.pool.HikariPool.createPoolEntry(HikariPool.java:476) at com.zaxxer.hikari.pool.HikariPool.access$100(HikariPool.java:71) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:726) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:712) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: org.ietf.jgss.GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt) at sun.security.jgss.krb5.Krb5InitCredential.getInstance(Krb5InitCredential.java:162) at sun.security.jgss.krb5.Krb5MechFactory.getCredentialElement(Krb5MechFactory.java:122) at sun.security.jgss.krb5.Krb5MechFactory.getMechanismContext(Krb5MechFactory.java:189) at sun.security.jgss.GSSManagerImpl.getMechanismContext(GSSManagerImpl.java:224) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:212) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179) at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:192) ... 23 common frames omitted ``` ### What you expected to happen I think the data source client cache should have an expiration time, for example, use the Guava Cache instead of CurrentHashMap. ### How to reproduce 1. in krb5.conf file, change config renew life to short time, e.g 1h, then restart KDC 2. restart api server 3. check hive connection, now it's a success 4. after renew life expired, check hive connection, now it's a failed ### Anything else _No response_ ### Version 2.0.5 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! 
### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9352
https://github.com/apache/dolphinscheduler/pull/9353
70dae6945f126414e36ffc28c99e1bf6e2ac7428
206b7c1c51e84f2e937748bc94191b23f95298e9
2022-04-05T09:23:28Z
java
2022-04-27T08:56:24Z
dolphinscheduler-datasource-plugin/dolphinscheduler-datasource-hive/src/main/java/org/apache/dolphinscheduler/plugin/datasource/hive/HiveDataSourceClient.java
protected void preInit() { logger.info("PreInit in {}", getClass().getName()); this.kerberosRenewalService = Executors.newSingleThreadScheduledExecutor(); } @Override protected void initClient(BaseConnectionParam baseConnectionParam, DbType dbType) { logger.info("Create Configuration for hive configuration."); this.hadoopConf = createHadoopConf(); logger.info("Create Configuration success."); logger.info("Create UserGroupInformation."); this.ugi = createUserGroupInformation(baseConnectionParam.getUser()); logger.info("Create ugi success."); super.initClient(baseConnectionParam, dbType); this.oneSessionDataSource = JDBCDataSourceProvider.createOneSessionJdbcDataSource(baseConnectionParam, dbType); logger.info("Init {} success.", getClass().getName()); } @Override protected void checkEnv(BaseConnectionParam baseConnectionParam) { super.checkEnv(baseConnectionParam); checkKerberosEnv(); } private void checkKerberosEnv() { String krb5File = PropertyUtils.getString(JAVA_SECURITY_KRB5_CONF_PATH); Boolean kerberosStartupState = PropertyUtils.getBoolean(HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false); if (kerberosStartupState && StringUtils.isNotBlank(krb5File)) { System.setProperty(JAVA_SECURITY_KRB5_CONF, krb5File); try { Config.refresh(); Class<?> kerberosName = Class.forName("org.apache.hadoop.security.authentication.util.KerberosName"); Field field = kerberosName.getDeclaredField("defaultRealm");
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,352
[Bug] [datasource-api] Hive datasource connection failed when kerberos renew ticket lifetime expire
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened in krb5.conf file, >>> ticket_lifetime = 24h renew_lifetime = 7d >>> when after renew lifetime expired, connecting to the hive data source failed ,because the cached hive data source client uses the old ticket ,it cannot get the new ticket error log ``` [WARN] 2022-04-01 16:59:53.260 org.apache.hive.jdbc.HiveConnection:[237] - Failed to connect to **.**.**.**:**** [ERROR] 2022-04-01 16:59:56.127 org.apache.thrift.transport.TSaslTransport:[315] - SASL negotiation failure javax.security.sasl.SaslException: GSS initiate failed at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:211) at org.apache.thrift.transport.TSaslClientTransport.handleSaslStartMessage(TSaslClientTransport.java:94) at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:271) at org.apache.thrift.transport.TSaslClientTransport.open(TSaslClientTransport.java:37) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:51) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:48) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport.open(TUGIAssumingTransport.java:48) at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:343) at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:228) at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:107) at com.zaxxer.hikari.util.DriverDataSource.getConnection(DriverDataSource.java:138) at com.zaxxer.hikari.pool.PoolBase.newConnection(PoolBase.java:364) at 
com.zaxxer.hikari.pool.PoolBase.newPoolEntry(PoolBase.java:206) at com.zaxxer.hikari.pool.HikariPool.createPoolEntry(HikariPool.java:476) at com.zaxxer.hikari.pool.HikariPool.access$100(HikariPool.java:71) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:726) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:712) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: org.ietf.jgss.GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt) at sun.security.jgss.krb5.Krb5InitCredential.getInstance(Krb5InitCredential.java:162) at sun.security.jgss.krb5.Krb5MechFactory.getCredentialElement(Krb5MechFactory.java:122) at sun.security.jgss.krb5.Krb5MechFactory.getMechanismContext(Krb5MechFactory.java:189) at sun.security.jgss.GSSManagerImpl.getMechanismContext(GSSManagerImpl.java:224) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:212) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179) at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:192) ... 23 common frames omitted ``` ### What you expected to happen I think the data source client cache should have an expiration time, for example, use the Guava Cache instead of CurrentHashMap. ### How to reproduce 1. in krb5.conf file, change config renew life to short time, e.g 1h, then restart KDC 2. restart api server 3. check hive connection, now it's a success 4. after renew life expired, check hive connection, now it's a failed ### Anything else _No response_ ### Version 2.0.5 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! 
### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9352
https://github.com/apache/dolphinscheduler/pull/9353
70dae6945f126414e36ffc28c99e1bf6e2ac7428
206b7c1c51e84f2e937748bc94191b23f95298e9
2022-04-05T09:23:28Z
java
2022-04-27T08:56:24Z
dolphinscheduler-datasource-plugin/dolphinscheduler-datasource-hive/src/main/java/org/apache/dolphinscheduler/plugin/datasource/hive/HiveDataSourceClient.java
field.setAccessible(true); field.set(null, Config.getInstance().getDefaultRealm()); } catch (Exception e) { throw new RuntimeException("Update Kerberos environment failed.", e); } } } private UserGroupInformation createUserGroupInformation(String username) { String krb5File = PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH); String keytab = PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH); String principal = PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME); try { UserGroupInformation ugi = CommonUtil.createUGI(getHadoopConf(), principal, keytab, krb5File, username); try { Field isKeytabField = ugi.getClass().getDeclaredField("isKeytab"); isKeytabField.setAccessible(true); isKeytabField.set(ugi, true); } catch (NoSuchFieldException | IllegalAccessException e) { logger.warn(e.getMessage()); } kerberosRenewalService.scheduleWithFixedDelay(() -> { try { ugi.checkTGTAndReloginFromKeytab(); } catch (IOException e) { logger.error("Check TGT and Renewal from Keytab error", e); } }, 5, 5, TimeUnit.MINUTES); return ugi; } catch (IOException e) { throw new RuntimeException("createUserGroupInformation fail. ", e);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,352
[Bug] [datasource-api] Hive datasource connection failed when kerberos renew ticket lifetime expire
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened in krb5.conf file, >>> ticket_lifetime = 24h renew_lifetime = 7d >>> when after renew lifetime expired, connecting to the hive data source failed ,because the cached hive data source client uses the old ticket ,it cannot get the new ticket error log ``` [WARN] 2022-04-01 16:59:53.260 org.apache.hive.jdbc.HiveConnection:[237] - Failed to connect to **.**.**.**:**** [ERROR] 2022-04-01 16:59:56.127 org.apache.thrift.transport.TSaslTransport:[315] - SASL negotiation failure javax.security.sasl.SaslException: GSS initiate failed at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:211) at org.apache.thrift.transport.TSaslClientTransport.handleSaslStartMessage(TSaslClientTransport.java:94) at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:271) at org.apache.thrift.transport.TSaslClientTransport.open(TSaslClientTransport.java:37) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:51) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:48) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport.open(TUGIAssumingTransport.java:48) at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:343) at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:228) at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:107) at com.zaxxer.hikari.util.DriverDataSource.getConnection(DriverDataSource.java:138) at com.zaxxer.hikari.pool.PoolBase.newConnection(PoolBase.java:364) at 
com.zaxxer.hikari.pool.PoolBase.newPoolEntry(PoolBase.java:206) at com.zaxxer.hikari.pool.HikariPool.createPoolEntry(HikariPool.java:476) at com.zaxxer.hikari.pool.HikariPool.access$100(HikariPool.java:71) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:726) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:712) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: org.ietf.jgss.GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt) at sun.security.jgss.krb5.Krb5InitCredential.getInstance(Krb5InitCredential.java:162) at sun.security.jgss.krb5.Krb5MechFactory.getCredentialElement(Krb5MechFactory.java:122) at sun.security.jgss.krb5.Krb5MechFactory.getMechanismContext(Krb5MechFactory.java:189) at sun.security.jgss.GSSManagerImpl.getMechanismContext(GSSManagerImpl.java:224) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:212) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179) at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:192) ... 23 common frames omitted ``` ### What you expected to happen I think the data source client cache should have an expiration time, for example, use the Guava Cache instead of CurrentHashMap. ### How to reproduce 1. in krb5.conf file, change config renew life to short time, e.g 1h, then restart KDC 2. restart api server 3. check hive connection, now it's a success 4. after renew life expired, check hive connection, now it's a failed ### Anything else _No response_ ### Version 2.0.5 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! 
### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9352
https://github.com/apache/dolphinscheduler/pull/9353
70dae6945f126414e36ffc28c99e1bf6e2ac7428
206b7c1c51e84f2e937748bc94191b23f95298e9
2022-04-05T09:23:28Z
java
2022-04-27T08:56:24Z
dolphinscheduler-datasource-plugin/dolphinscheduler-datasource-hive/src/main/java/org/apache/dolphinscheduler/plugin/datasource/hive/HiveDataSourceClient.java
} } protected Configuration createHadoopConf() { Configuration hadoopConf = new Configuration(); hadoopConf.setBoolean("ipc.client.fallback-to-simple-auth-allowed", true); return hadoopConf; } protected Configuration getHadoopConf() { return this.hadoopConf; } @Override public Connection getConnection() { try { return oneSessionDataSource.getConnection(); } catch (SQLException e) { logger.error("get oneSessionDataSource Connection fail SQLException: {}", e.getMessage(), e); return null; } } @Override public void close() { super.close(); logger.info("close {}.", this.getClass().getSimpleName()); kerberosRenewalService.shutdown(); this.ugi = null; this.oneSessionDataSource.close(); this.oneSessionDataSource = null; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,352
[Bug] [datasource-api] Hive datasource connection failed when kerberos renew ticket lifetime expire
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened in krb5.conf file, >>> ticket_lifetime = 24h renew_lifetime = 7d >>> when after renew lifetime expired, connecting to the hive data source failed ,because the cached hive data source client uses the old ticket ,it cannot get the new ticket error log ``` [WARN] 2022-04-01 16:59:53.260 org.apache.hive.jdbc.HiveConnection:[237] - Failed to connect to **.**.**.**:**** [ERROR] 2022-04-01 16:59:56.127 org.apache.thrift.transport.TSaslTransport:[315] - SASL negotiation failure javax.security.sasl.SaslException: GSS initiate failed at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:211) at org.apache.thrift.transport.TSaslClientTransport.handleSaslStartMessage(TSaslClientTransport.java:94) at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:271) at org.apache.thrift.transport.TSaslClientTransport.open(TSaslClientTransport.java:37) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:51) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:48) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport.open(TUGIAssumingTransport.java:48) at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:343) at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:228) at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:107) at com.zaxxer.hikari.util.DriverDataSource.getConnection(DriverDataSource.java:138) at com.zaxxer.hikari.pool.PoolBase.newConnection(PoolBase.java:364) at 
com.zaxxer.hikari.pool.PoolBase.newPoolEntry(PoolBase.java:206) at com.zaxxer.hikari.pool.HikariPool.createPoolEntry(HikariPool.java:476) at com.zaxxer.hikari.pool.HikariPool.access$100(HikariPool.java:71) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:726) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:712) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: org.ietf.jgss.GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt) at sun.security.jgss.krb5.Krb5InitCredential.getInstance(Krb5InitCredential.java:162) at sun.security.jgss.krb5.Krb5MechFactory.getCredentialElement(Krb5MechFactory.java:122) at sun.security.jgss.krb5.Krb5MechFactory.getMechanismContext(Krb5MechFactory.java:189) at sun.security.jgss.GSSManagerImpl.getMechanismContext(GSSManagerImpl.java:224) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:212) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179) at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:192) ... 23 common frames omitted ``` ### What you expected to happen I think the data source client cache should have an expiration time, for example, use the Guava Cache instead of CurrentHashMap. ### How to reproduce 1. in krb5.conf file, change config renew life to short time, e.g 1h, then restart KDC 2. restart api server 3. check hive connection, now it's a success 4. after renew life expired, check hive connection, now it's a failed ### Anything else _No response_ ### Version 2.0.5 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! 
### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9352
https://github.com/apache/dolphinscheduler/pull/9353
70dae6945f126414e36ffc28c99e1bf6e2ac7428
206b7c1c51e84f2e937748bc94191b23f95298e9
2022-04-05T09:23:28Z
java
2022-04-27T08:56:24Z
dolphinscheduler-task-plugin/dolphinscheduler-task-datax/src/main/java/org/apache/dolphinscheduler/plugin/task/datax/DataxTask.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS,
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,352
[Bug] [datasource-api] Hive datasource connection failed when kerberos renew ticket lifetime expire
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened in krb5.conf file, >>> ticket_lifetime = 24h renew_lifetime = 7d >>> when after renew lifetime expired, connecting to the hive data source failed ,because the cached hive data source client uses the old ticket ,it cannot get the new ticket error log ``` [WARN] 2022-04-01 16:59:53.260 org.apache.hive.jdbc.HiveConnection:[237] - Failed to connect to **.**.**.**:**** [ERROR] 2022-04-01 16:59:56.127 org.apache.thrift.transport.TSaslTransport:[315] - SASL negotiation failure javax.security.sasl.SaslException: GSS initiate failed at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:211) at org.apache.thrift.transport.TSaslClientTransport.handleSaslStartMessage(TSaslClientTransport.java:94) at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:271) at org.apache.thrift.transport.TSaslClientTransport.open(TSaslClientTransport.java:37) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:51) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:48) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport.open(TUGIAssumingTransport.java:48) at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:343) at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:228) at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:107) at com.zaxxer.hikari.util.DriverDataSource.getConnection(DriverDataSource.java:138) at com.zaxxer.hikari.pool.PoolBase.newConnection(PoolBase.java:364) at 
com.zaxxer.hikari.pool.PoolBase.newPoolEntry(PoolBase.java:206) at com.zaxxer.hikari.pool.HikariPool.createPoolEntry(HikariPool.java:476) at com.zaxxer.hikari.pool.HikariPool.access$100(HikariPool.java:71) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:726) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:712) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: org.ietf.jgss.GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt) at sun.security.jgss.krb5.Krb5InitCredential.getInstance(Krb5InitCredential.java:162) at sun.security.jgss.krb5.Krb5MechFactory.getCredentialElement(Krb5MechFactory.java:122) at sun.security.jgss.krb5.Krb5MechFactory.getMechanismContext(Krb5MechFactory.java:189) at sun.security.jgss.GSSManagerImpl.getMechanismContext(GSSManagerImpl.java:224) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:212) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179) at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:192) ... 23 common frames omitted ``` ### What you expected to happen I think the data source client cache should have an expiration time, for example, use the Guava Cache instead of CurrentHashMap. ### How to reproduce 1. in krb5.conf file, change config renew life to short time, e.g 1h, then restart KDC 2. restart api server 3. check hive connection, now it's a success 4. after renew life expired, check hive connection, now it's a failed ### Anything else _No response_ ### Version 2.0.5 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! 
### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9352
https://github.com/apache/dolphinscheduler/pull/9353
70dae6945f126414e36ffc28c99e1bf6e2ac7428
206b7c1c51e84f2e937748bc94191b23f95298e9
2022-04-05T09:23:28Z
java
2022-04-27T08:56:24Z
dolphinscheduler-task-plugin/dolphinscheduler-task-datax/src/main/java/org/apache/dolphinscheduler/plugin/task/datax/DataxTask.java
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.plugin.task.datax; import static org.apache.dolphinscheduler.plugin.datasource.api.utils.PasswordUtils.decodePassword; import static org.apache.dolphinscheduler.plugin.task.api.TaskConstants.EXIT_CODE_FAILURE; import static org.apache.dolphinscheduler.plugin.task.api.TaskConstants.RWXR_XR_X; import org.apache.dolphinscheduler.plugin.datasource.api.plugin.DataSourceClientProvider; import org.apache.dolphinscheduler.plugin.datasource.api.utils.DataSourceUtils; import org.apache.dolphinscheduler.plugin.task.api.AbstractTaskExecutor; import org.apache.dolphinscheduler.plugin.task.api.ShellCommandExecutor; import org.apache.dolphinscheduler.plugin.task.api.TaskExecutionContext; import org.apache.dolphinscheduler.plugin.task.api.model.Property; import org.apache.dolphinscheduler.plugin.task.api.model.TaskResponse; import org.apache.dolphinscheduler.plugin.task.api.parameters.AbstractParameters; import org.apache.dolphinscheduler.plugin.task.api.parser.ParamUtils; import org.apache.dolphinscheduler.plugin.task.api.parser.ParameterUtils; import org.apache.dolphinscheduler.plugin.task.api.utils.MapUtils; import org.apache.dolphinscheduler.plugin.task.api.utils.OSUtils; import org.apache.dolphinscheduler.spi.datasource.BaseConnectionParam; import org.apache.dolphinscheduler.spi.enums.DbType; import org.apache.dolphinscheduler.spi.enums.Flag; import org.apache.dolphinscheduler.spi.utils.JSONUtils; import org.apache.dolphinscheduler.spi.utils.StringUtils; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.io.FileUtils; import java.io.File; import java.nio.charset.StandardCharsets; import java.nio.file.Files;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,352
[Bug] [datasource-api] Hive datasource connection failed when kerberos renew ticket lifetime expire
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened in krb5.conf file, >>> ticket_lifetime = 24h renew_lifetime = 7d >>> when after renew lifetime expired, connecting to the hive data source failed ,because the cached hive data source client uses the old ticket ,it cannot get the new ticket error log ``` [WARN] 2022-04-01 16:59:53.260 org.apache.hive.jdbc.HiveConnection:[237] - Failed to connect to **.**.**.**:**** [ERROR] 2022-04-01 16:59:56.127 org.apache.thrift.transport.TSaslTransport:[315] - SASL negotiation failure javax.security.sasl.SaslException: GSS initiate failed at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:211) at org.apache.thrift.transport.TSaslClientTransport.handleSaslStartMessage(TSaslClientTransport.java:94) at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:271) at org.apache.thrift.transport.TSaslClientTransport.open(TSaslClientTransport.java:37) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:51) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:48) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport.open(TUGIAssumingTransport.java:48) at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:343) at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:228) at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:107) at com.zaxxer.hikari.util.DriverDataSource.getConnection(DriverDataSource.java:138) at com.zaxxer.hikari.pool.PoolBase.newConnection(PoolBase.java:364) at 
com.zaxxer.hikari.pool.PoolBase.newPoolEntry(PoolBase.java:206) at com.zaxxer.hikari.pool.HikariPool.createPoolEntry(HikariPool.java:476) at com.zaxxer.hikari.pool.HikariPool.access$100(HikariPool.java:71) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:726) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:712) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: org.ietf.jgss.GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt) at sun.security.jgss.krb5.Krb5InitCredential.getInstance(Krb5InitCredential.java:162) at sun.security.jgss.krb5.Krb5MechFactory.getCredentialElement(Krb5MechFactory.java:122) at sun.security.jgss.krb5.Krb5MechFactory.getMechanismContext(Krb5MechFactory.java:189) at sun.security.jgss.GSSManagerImpl.getMechanismContext(GSSManagerImpl.java:224) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:212) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179) at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:192) ... 23 common frames omitted ``` ### What you expected to happen I think the data source client cache should have an expiration time, for example, use the Guava Cache instead of CurrentHashMap. ### How to reproduce 1. in krb5.conf file, change config renew life to short time, e.g 1h, then restart KDC 2. restart api server 3. check hive connection, now it's a success 4. after renew life expired, check hive connection, now it's a failed ### Anything else _No response_ ### Version 2.0.5 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! 
### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9352
https://github.com/apache/dolphinscheduler/pull/9353
70dae6945f126414e36ffc28c99e1bf6e2ac7428
206b7c1c51e84f2e937748bc94191b23f95298e9
2022-04-05T09:23:28Z
java
2022-04-27T08:56:24Z
dolphinscheduler-task-plugin/dolphinscheduler-task-datax/src/main/java/org/apache/dolphinscheduler/plugin/task/datax/DataxTask.java
import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.StandardOpenOption; import java.nio.file.attribute.FileAttribute; import java.nio.file.attribute.PosixFilePermission; import java.nio.file.attribute.PosixFilePermissions; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; import com.alibaba.druid.sql.ast.SQLStatement; import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr; import com.alibaba.druid.sql.ast.expr.SQLPropertyExpr; import com.alibaba.druid.sql.ast.statement.SQLSelect; import com.alibaba.druid.sql.ast.statement.SQLSelectItem; import com.alibaba.druid.sql.ast.statement.SQLSelectQueryBlock; import com.alibaba.druid.sql.ast.statement.SQLSelectStatement; import com.alibaba.druid.sql.ast.statement.SQLUnionQuery; import com.alibaba.druid.sql.parser.SQLStatementParser; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; public class DataxTask extends AbstractTaskExecutor {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,352
[Bug] [datasource-api] Hive datasource connection failed when kerberos renew ticket lifetime expire
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened in krb5.conf file, >>> ticket_lifetime = 24h renew_lifetime = 7d >>> when after renew lifetime expired, connecting to the hive data source failed ,because the cached hive data source client uses the old ticket ,it cannot get the new ticket error log ``` [WARN] 2022-04-01 16:59:53.260 org.apache.hive.jdbc.HiveConnection:[237] - Failed to connect to **.**.**.**:**** [ERROR] 2022-04-01 16:59:56.127 org.apache.thrift.transport.TSaslTransport:[315] - SASL negotiation failure javax.security.sasl.SaslException: GSS initiate failed at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:211) at org.apache.thrift.transport.TSaslClientTransport.handleSaslStartMessage(TSaslClientTransport.java:94) at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:271) at org.apache.thrift.transport.TSaslClientTransport.open(TSaslClientTransport.java:37) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:51) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:48) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport.open(TUGIAssumingTransport.java:48) at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:343) at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:228) at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:107) at com.zaxxer.hikari.util.DriverDataSource.getConnection(DriverDataSource.java:138) at com.zaxxer.hikari.pool.PoolBase.newConnection(PoolBase.java:364) at 
com.zaxxer.hikari.pool.PoolBase.newPoolEntry(PoolBase.java:206) at com.zaxxer.hikari.pool.HikariPool.createPoolEntry(HikariPool.java:476) at com.zaxxer.hikari.pool.HikariPool.access$100(HikariPool.java:71) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:726) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:712) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: org.ietf.jgss.GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt) at sun.security.jgss.krb5.Krb5InitCredential.getInstance(Krb5InitCredential.java:162) at sun.security.jgss.krb5.Krb5MechFactory.getCredentialElement(Krb5MechFactory.java:122) at sun.security.jgss.krb5.Krb5MechFactory.getMechanismContext(Krb5MechFactory.java:189) at sun.security.jgss.GSSManagerImpl.getMechanismContext(GSSManagerImpl.java:224) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:212) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179) at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:192) ... 23 common frames omitted ``` ### What you expected to happen I think the data source client cache should have an expiration time, for example, use the Guava Cache instead of CurrentHashMap. ### How to reproduce 1. in krb5.conf file, change config renew life to short time, e.g 1h, then restart KDC 2. restart api server 3. check hive connection, now it's a success 4. after renew life expired, check hive connection, now it's a failed ### Anything else _No response_ ### Version 2.0.5 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! 
### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9352
https://github.com/apache/dolphinscheduler/pull/9353
70dae6945f126414e36ffc28c99e1bf6e2ac7428
206b7c1c51e84f2e937748bc94191b23f95298e9
2022-04-05T09:23:28Z
java
2022-04-27T08:56:24Z
dolphinscheduler-task-plugin/dolphinscheduler-task-datax/src/main/java/org/apache/dolphinscheduler/plugin/task/datax/DataxTask.java
/** * jvm parameters */ public static final String JVM_PARAM = " --jvm=\"-Xms%sG -Xmx%sG\" "; /** * python process(datax only supports version 2.7 by default) */ private static final String DATAX_PYTHON = "python2.7"; private static final Pattern PYTHON_PATH_PATTERN = Pattern.compile("/bin/python[\\d.]*$"); /** * datax path */ private static final String DATAX_PATH = "${DATAX_HOME}/bin/datax.py"; /** * datax channel count */ private static final int DATAX_CHANNEL_COUNT = 1; /** * datax parameters */
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,352
[Bug] [datasource-api] Hive datasource connection failed when kerberos renew ticket lifetime expire
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened in krb5.conf file, >>> ticket_lifetime = 24h renew_lifetime = 7d >>> when after renew lifetime expired, connecting to the hive data source failed ,because the cached hive data source client uses the old ticket ,it cannot get the new ticket error log ``` [WARN] 2022-04-01 16:59:53.260 org.apache.hive.jdbc.HiveConnection:[237] - Failed to connect to **.**.**.**:**** [ERROR] 2022-04-01 16:59:56.127 org.apache.thrift.transport.TSaslTransport:[315] - SASL negotiation failure javax.security.sasl.SaslException: GSS initiate failed at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:211) at org.apache.thrift.transport.TSaslClientTransport.handleSaslStartMessage(TSaslClientTransport.java:94) at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:271) at org.apache.thrift.transport.TSaslClientTransport.open(TSaslClientTransport.java:37) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:51) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:48) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport.open(TUGIAssumingTransport.java:48) at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:343) at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:228) at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:107) at com.zaxxer.hikari.util.DriverDataSource.getConnection(DriverDataSource.java:138) at com.zaxxer.hikari.pool.PoolBase.newConnection(PoolBase.java:364) at 
com.zaxxer.hikari.pool.PoolBase.newPoolEntry(PoolBase.java:206) at com.zaxxer.hikari.pool.HikariPool.createPoolEntry(HikariPool.java:476) at com.zaxxer.hikari.pool.HikariPool.access$100(HikariPool.java:71) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:726) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:712) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: org.ietf.jgss.GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt) at sun.security.jgss.krb5.Krb5InitCredential.getInstance(Krb5InitCredential.java:162) at sun.security.jgss.krb5.Krb5MechFactory.getCredentialElement(Krb5MechFactory.java:122) at sun.security.jgss.krb5.Krb5MechFactory.getMechanismContext(Krb5MechFactory.java:189) at sun.security.jgss.GSSManagerImpl.getMechanismContext(GSSManagerImpl.java:224) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:212) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179) at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:192) ... 23 common frames omitted ``` ### What you expected to happen I think the data source client cache should have an expiration time, for example, use the Guava Cache instead of CurrentHashMap. ### How to reproduce 1. in krb5.conf file, change config renew life to short time, e.g 1h, then restart KDC 2. restart api server 3. check hive connection, now it's a success 4. after renew life expired, check hive connection, now it's a failed ### Anything else _No response_ ### Version 2.0.5 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! 
### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9352
https://github.com/apache/dolphinscheduler/pull/9353
70dae6945f126414e36ffc28c99e1bf6e2ac7428
206b7c1c51e84f2e937748bc94191b23f95298e9
2022-04-05T09:23:28Z
java
2022-04-27T08:56:24Z
dolphinscheduler-task-plugin/dolphinscheduler-task-datax/src/main/java/org/apache/dolphinscheduler/plugin/task/datax/DataxTask.java
private DataxParameters dataXParameters; /** * shell command executor */ private ShellCommandExecutor shellCommandExecutor; /** * taskExecutionContext */ private TaskExecutionContext taskExecutionContext; private DataxTaskExecutionContext dataxTaskExecutionContext; /** * constructor * * @param taskExecutionContext taskExecutionContext */ public DataxTask(TaskExecutionContext taskExecutionContext) { super(taskExecutionContext); this.taskExecutionContext = taskExecutionContext; this.shellCommandExecutor = new ShellCommandExecutor(this::logHandle, taskExecutionContext, logger); } /** * init DataX config */ @Override public void init() { logger.info("datax task params {}", taskExecutionContext.getTaskParams()); dataXParameters = JSONUtils.parseObject(taskExecutionContext.getTaskParams(), DataxParameters.class); if (!dataXParameters.checkParameters()) { throw new RuntimeException("datax task params is not valid");
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
9,352
[Bug] [datasource-api] Hive datasource connection failed when kerberos renew ticket lifetime expire
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened in krb5.conf file, >>> ticket_lifetime = 24h renew_lifetime = 7d >>> when after renew lifetime expired, connecting to the hive data source failed ,because the cached hive data source client uses the old ticket ,it cannot get the new ticket error log ``` [WARN] 2022-04-01 16:59:53.260 org.apache.hive.jdbc.HiveConnection:[237] - Failed to connect to **.**.**.**:**** [ERROR] 2022-04-01 16:59:56.127 org.apache.thrift.transport.TSaslTransport:[315] - SASL negotiation failure javax.security.sasl.SaslException: GSS initiate failed at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:211) at org.apache.thrift.transport.TSaslClientTransport.handleSaslStartMessage(TSaslClientTransport.java:94) at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:271) at org.apache.thrift.transport.TSaslClientTransport.open(TSaslClientTransport.java:37) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:51) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport$1.run(TUGIAssumingTransport.java:48) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698) at org.apache.hadoop.hive.metastore.security.TUGIAssumingTransport.open(TUGIAssumingTransport.java:48) at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:343) at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:228) at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:107) at com.zaxxer.hikari.util.DriverDataSource.getConnection(DriverDataSource.java:138) at com.zaxxer.hikari.pool.PoolBase.newConnection(PoolBase.java:364) at 
com.zaxxer.hikari.pool.PoolBase.newPoolEntry(PoolBase.java:206) at com.zaxxer.hikari.pool.HikariPool.createPoolEntry(HikariPool.java:476) at com.zaxxer.hikari.pool.HikariPool.access$100(HikariPool.java:71) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:726) at com.zaxxer.hikari.pool.HikariPool$PoolEntryCreator.call(HikariPool.java:712) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: org.ietf.jgss.GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt) at sun.security.jgss.krb5.Krb5InitCredential.getInstance(Krb5InitCredential.java:162) at sun.security.jgss.krb5.Krb5MechFactory.getCredentialElement(Krb5MechFactory.java:122) at sun.security.jgss.krb5.Krb5MechFactory.getMechanismContext(Krb5MechFactory.java:189) at sun.security.jgss.GSSManagerImpl.getMechanismContext(GSSManagerImpl.java:224) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:212) at sun.security.jgss.GSSContextImpl.initSecContext(GSSContextImpl.java:179) at com.sun.security.sasl.gsskerb.GssKrb5Client.evaluateChallenge(GssKrb5Client.java:192) ... 23 common frames omitted ``` ### What you expected to happen I think the data source client cache should have an expiration time, for example, use the Guava Cache instead of CurrentHashMap. ### How to reproduce 1. in krb5.conf file, change config renew life to short time, e.g 1h, then restart KDC 2. restart api server 3. check hive connection, now it's a success 4. after renew life expired, check hive connection, now it's a failed ### Anything else _No response_ ### Version 2.0.5 ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! 
### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/9352
https://github.com/apache/dolphinscheduler/pull/9353
70dae6945f126414e36ffc28c99e1bf6e2ac7428
206b7c1c51e84f2e937748bc94191b23f95298e9
2022-04-05T09:23:28Z
java
2022-04-27T08:56:24Z
dolphinscheduler-task-plugin/dolphinscheduler-task-datax/src/main/java/org/apache/dolphinscheduler/plugin/task/datax/DataxTask.java
} dataxTaskExecutionContext = dataXParameters.generateExtendedContext(taskExecutionContext.getResourceParametersHelper()); } /** * run DataX process * * @throws Exception if error throws Exception */ @Override public void handle() throws Exception { try { Map<String, Property> paramsMap = ParamUtils.convert(taskExecutionContext, getParameters()); if (MapUtils.isEmpty(paramsMap)) { paramsMap = new HashMap<>(); } if (MapUtils.isNotEmpty(taskExecutionContext.getParamsMap())) { paramsMap.putAll(taskExecutionContext.getParamsMap()); } String jsonFilePath = buildDataxJsonFile(paramsMap); String shellCommandFilePath = buildShellCommandFile(jsonFilePath, paramsMap); TaskResponse commandExecuteResult = shellCommandExecutor.run(shellCommandFilePath); setExitStatusCode(commandExecuteResult.getExitStatusCode()); setAppIds(commandExecuteResult.getAppIds()); setProcessId(commandExecuteResult.getProcessId()); } catch (Exception e) { setExitStatusCode(EXIT_CODE_FAILURE); throw e; }