Dataset schema:

| column | dtype | values |
| --- | --- | --- |
| status | stringclasses | 1 value |
| repo_name | stringclasses | 31 values |
| repo_url | stringclasses | 31 values |
| issue_id | int64 | 1 to 104k |
| title | stringlengths | 4 to 233 |
| body | stringlengths | 0 to 186k |
| issue_url | stringlengths | 38 to 56 |
| pull_url | stringlengths | 37 to 54 |
| before_fix_sha | stringlengths | 40 to 40 |
| after_fix_sha | stringlengths | 40 to 40 |
| report_datetime | unknown | |
| language | stringclasses | 5 values |
| commit_datetime | unknown | |
| updated_file | stringlengths | 7 to 188 |
| chunk_content | stringlengths | 1 to 1.03M |
status: closed
repo_name: apache/dolphinscheduler
repo_url: https://github.com/apache/dolphinscheduler
issue_id: 3469
title: [Bug] The program type of spark node is selected as PYTHON, how should the main jar package be selected?
body: **Describe the question** ![image](https://user-images.githubusercontent.com/45786444/89966662-459d0900-dc82-11ea-9299-910c9b1745d0.png) **Which version of DolphinScheduler:** -[1.3.0] **Additional context** When the program type of a Spark node is set to PYTHON, how should the main jar package be selected? On a Linux machine, the command "spark-submit *.py" can use Spark to run Python files. **Requirement or improvement** no
issue_url: https://github.com/apache/dolphinscheduler/issues/3469
pull_url: https://github.com/apache/dolphinscheduler/pull/3498
before_fix_sha: e367f90bb73c9682739308a0a98887a1c0f407ef
after_fix_sha: 5f5c08402fdcecca8c35f4dc3021cc089949ef13
report_datetime: 2020-08-12T02:09:30Z
language: java
commit_datetime: 2020-08-14T08:47:01Z
updated_file: dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
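The reporter's premise is that `spark-submit` accepts a Python file directly as the application, so a PYTHON-type Spark task needs a main `.py` file rather than a main jar. A minimal sketch of that invocation from Java (not DolphinScheduler code), assuming `spark-submit` is on the PATH; the master URL and script path are illustrative:

```java
import java.io.IOException;

// Hedged sketch: launch spark-submit with a .py application file instead of
// a main jar. "--master local[2]" and "/tmp/etl.py" are assumptions.
public class SparkPySubmitSketch {
    public static void main(String[] args) throws IOException, InterruptedException {
        Process process = new ProcessBuilder(
                "spark-submit",
                "--master", "local[2]",
                "/tmp/etl.py")        // a Python main file; no jar involved
            .inheritIO()              // stream Spark's output to this console
            .start();
        System.exit(process.waitFor());
    }
}
```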
chunk_content:
```java
     * @param argTypes argument types
     * @param database data base
     * @param description description
     * @param resourceId resource id
     * @param className class name
     * @param udfFuncId udf function id
     * @return update result code
     */
    @ApiOperation(value = "updateUdfFunc", notes = "UPDATE_UDF_FUNCTION_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "type", value = "UDF_TYPE", required = true, dataType = "UdfType"),
        @ApiImplicitParam(name = "funcName", value = "FUNC_NAME", required = true, dataType = "String"),
        @ApiImplicitParam(name = "suffix", value = "CLASS_NAME", required = true, dataType = "String"),
        @ApiImplicitParam(name = "argTypes", value = "ARG_TYPES", dataType = "String"),
        @ApiImplicitParam(name = "database", value = "DATABASE_NAME", dataType = "String"),
        @ApiImplicitParam(name = "description", value = "UDF_DESC", dataType = "String"),
        @ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
    })
    @PostMapping(value = "/udf-func/update")
    @ApiException(UPDATE_UDF_FUNCTION_ERROR)
    public Result updateUdfFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                @RequestParam(value = "id") int udfFuncId,
                                @RequestParam(value = "type") UdfType type,
                                @RequestParam(value = "funcName") String funcName,
                                @RequestParam(value = "className") String className,
                                @RequestParam(value = "argTypes", required = false) String argTypes,
                                @RequestParam(value = "database", required = false) String database,
                                @RequestParam(value = "description", required = false) String description,
                                @RequestParam(value = "resourceId") int resourceId) {
        logger.info("login user {}, updateProcessInstance udf function id: {},type: {}, funcName: {},argTypes: {} ,database: {},desc: {},resourceId: {}",
                    loginUser.getUserName(), udfFuncId, type, funcName, argTypes, database, description, resourceId);
        Map<String, Object> result = udfFuncService.updateUdfFunc(udfFuncId, funcName, className, argTypes, database, description, type, resourceId);
        return returnDataList(result);
    }

    /**
     * query udf function list paging
     *
     * @param loginUser login user
     * @param searchVal search value
     * @param pageNo page number
     * @param pageSize page size
     * @return udf function list page
     */
    @ApiOperation(value = "queryUdfFuncListPaging", notes = "QUERY_UDF_FUNCTION_LIST_PAGING_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", dataType = "String"),
        @ApiImplicitParam(name = "pageNo", value = "PAGE_NO", dataType = "Int", example = "1"),
        @ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", dataType = "Int", example = "20")
    })
    @GetMapping(value = "/udf-func/list-paging")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(QUERY_UDF_FUNCTION_LIST_PAGING_ERROR)
    public Result<Object> queryUdfFuncListPaging(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                                 @RequestParam("pageNo") Integer pageNo,
                                                 @RequestParam(value = "searchVal", required = false) String searchVal,
                                                 @RequestParam("pageSize") Integer pageSize) {
        logger.info("query udf functions list, login user:{},search value:{}", loginUser.getUserName(), searchVal);
        Map<String, Object> result = checkPageParams(pageNo, pageSize);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return returnDataListPaging(result);
        }
        result = udfFuncService.queryUdfFuncListPaging(loginUser, searchVal, pageNo, pageSize);
        return returnDataListPaging(result);
    }

    /**
     * query udf func list by type
     *
     * @param loginUser login user
     * @param type resource type
     * @return resource list
     */
    @ApiOperation(value = "queryUdfFuncList", notes = "QUERY_UDF_FUNC_LIST_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "type", value = "UDF_TYPE", required = true, dataType = "UdfType")
    })
    @GetMapping(value = "/udf-func/list")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(QUERY_DATASOURCE_BY_TYPE_ERROR)
    public Result<Object> queryUdfFuncList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                           @RequestParam("type") UdfType type) {
        String userName = loginUser.getUserName();
        userName = userName.replaceAll("[\n|\r|\t]", "_");
        logger.info("query udf func list, user:{}, type:{}", userName, type);
        Map<String, Object> result = udfFuncService.queryUdfFuncList(loginUser, type.ordinal());
        return returnDataList(result);
    }

    /**
     * verify udf function name can use or not
     *
     * @param loginUser login user
     * @param name name
     * @return true if the name can user, otherwise return false
     */
    @ApiOperation(value = "verifyUdfFuncName", notes = "VERIFY_UDF_FUNCTION_NAME_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "name", value = "FUNC_NAME", required = true, dataType = "String")
    })
    @GetMapping(value = "/udf-func/verify-name")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(VERIFY_UDF_FUNCTION_NAME_ERROR)
    public Result verifyUdfFuncName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                    @RequestParam(value = "name") String name) {
        logger.info("login user {}, verfiy udf function name: {}", loginUser.getUserName(), name);
        return udfFuncService.verifyUdfFuncByName(name);
    }

    /**
     * delete udf function
     *
     * @param loginUser login user
     * @param udfFuncId udf function id
     * @return delete result code
     */
    @ApiOperation(value = "deleteUdfFunc", notes = "DELETE_UDF_FUNCTION_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
    })
@GetMapping(value = "/udf-func/delete") @ResponseStatus(HttpStatus.OK) @ApiException(DELETE_UDF_FUNCTION_ERROR) public Result deleteUdfFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "id") int udfFuncId ) { logger.info("login user {}, delete udf function id: {}", loginUser.getUserName(), udfFuncId); return udfFuncService.delete(udfFuncId); } /** * authorized file resource list * * @param loginUser login user * @param userId user id * @return authorized result */ @ApiOperation(value = "authorizedFile", notes = "AUTHORIZED_FILE_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100") }) @GetMapping(value = "/authed-file") @ResponseStatus(HttpStatus.CREATED) @ApiException(AUTHORIZED_FILE_RESOURCE_ERROR) public Result authorizedFile(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam("userId") Integer userId) { logger.info("authorized file resource, user: {}, user id:{}", loginUser.getUserName(), userId); Map<String, Object> result = resourceService.authorizedFile(loginUser, userId); return returnDataList(result); } /**
     * unauthorized file resource list
     *
     * @param loginUser login user
     * @param userId user id
     * @return unauthorized result code
     */
    @ApiOperation(value = "authorizeResourceTree", notes = "AUTHORIZE_RESOURCE_TREE_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100")
    })
    @GetMapping(value = "/authorize-resource-tree")
    @ResponseStatus(HttpStatus.CREATED)
    @ApiException(AUTHORIZE_RESOURCE_TREE)
    public Result authorizeResourceTree(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                        @RequestParam("userId") Integer userId) {
        logger.info("all resource file, user:{}, user id:{}", loginUser.getUserName(), userId);
        Map<String, Object> result = resourceService.authorizeResourceTree(loginUser, userId);
        return returnDataList(result);
    }

    /**
     * unauthorized udf function
     *
     * @param loginUser login user
     * @param userId user id
     * @return unauthorized result code
     */
    @ApiOperation(value = "unauthUDFFunc", notes = "UNAUTHORIZED_UDF_FUNC_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100")
    })
@GetMapping(value = "/unauth-udf-func") @ResponseStatus(HttpStatus.CREATED) @ApiException(UNAUTHORIZED_UDF_FUNCTION_ERROR) public Result unauthUDFFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam("userId") Integer userId) { logger.info("unauthorized udf function, login user:{}, unauthorized user id:{}", loginUser.getUserName(), userId); Map<String, Object> result = resourceService.unauthorizedUDFFunction(loginUser, userId); return returnDataList(result); } /** * authorized udf function * * @param loginUser login user * @param userId user id * @return authorized result code */ @ApiOperation(value = "authUDFFunc", notes = "AUTHORIZED_UDF_FUNC_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100") }) @GetMapping(value = "/authed-udf-func") @ResponseStatus(HttpStatus.CREATED) @ApiException(AUTHORIZED_UDF_FUNCTION_ERROR) public Result authorizedUDFFunction(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam("userId") Integer userId) { logger.info("auth udf function, login user:{}, auth user id:{}", loginUser.getUserName(), userId); Map<String, Object> result = resourceService.authorizedUDFFunction(loginUser, userId); return returnDataList(result); } }
updated_file: dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilter.java
chunk_content:
```java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.dolphinscheduler.api.dto.resources.filter;

import org.apache.dolphinscheduler.dao.entity.Resource;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * resource filter
 */
public class ResourceFilter implements IFilter {
    /**
     * resource suffix
     */
    private String suffix;
    /**
     * resource list
     */
    private List<Resource> resourceList;

    /**
     * parent list
     */

    /**
     * constructor
     * @param suffix resource suffix
     * @param resourceList resource list
     */
    public ResourceFilter(String suffix, List<Resource> resourceList) {
        this.suffix = suffix;
        this.resourceList = resourceList;
    }

    /**
     * file filter
     * @return file filtered by suffix
     */
    public Set<Resource> fileFilter(){
        Set<Resource> resources = resourceList.stream().filter(t -> {
            String alias = t.getAlias();
            return alias.endsWith(suffix);
        }).collect(Collectors.toSet());
        return resources;
    }

    /**
     * list all parent dir
     * @return parent resource dir set
     */
    Set<Resource> listAllParent(){
        Set<Resource> parentList = new HashSet<>();
        Set<Resource> filterFileList = fileFilter();
        for(Resource file:filterFileList){
            parentList.add(file);
            setAllParent(file,parentList);
        }
        return parentList;
    }

    /**
     * list all parent dir
     * @param resource resource
     * @return parent resource dir set
     */
    private void setAllParent(Resource resource,Set<Resource> parentList){
        for (Resource resourceTemp : resourceList) {
            if (resourceTemp.getId() == resource.getPid()) {
                parentList.add(resourceTemp);
                setAllParent(resourceTemp,parentList);
            }
        }
    }

    @Override
    public List<Resource> filter() {
        return new ArrayList<>(listAllParent());
    }
}
```
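ResourceFilter is the piece that lets the UI offer only matching main files: `fileFilter()` keeps resources whose alias ends with the suffix, and `filter()` adds every ancestor directory so the result still renders as a tree. A hypothetical usage sketch, reusing the eleven-argument `Resource` constructor that appears in `ResourcesService` below and assuming the dao entity's standard `setId` setter:

```java
import java.util.Arrays;
import java.util.Date;
import java.util.List;

import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.dao.entity.Resource;

// Hedged sketch: filter a listing down to .py files plus their parent
// directories, as a Spark task with program type PYTHON would need.
public class ResourceFilterSketch {
    public static void main(String[] args) {
        Date now = new Date();
        Resource dir = new Resource(-1, "scripts", "/scripts", true, "dir", "scripts", 1, ResourceType.FILE, 0, now, now);
        dir.setId(10); // setId assumed to exist on the entity
        Resource py = new Resource(10, "etl.py", "/scripts/etl.py", false, "main file", "etl.py", 1, ResourceType.FILE, 120, now, now);
        py.setId(11);
        Resource jar = new Resource(10, "job.jar", "/scripts/job.jar", false, "jar", "job.jar", 1, ResourceType.FILE, 2048, now, now);
        jar.setId(12);

        List<Resource> tree = new ResourceFilter(".py", Arrays.asList(dir, py, jar)).filter();
        tree.forEach(r -> System.out.println(r.getFullName())); // keeps /scripts and /scripts/etl.py; job.jar is dropped
    }
}
```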
updated_file: dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java
chunk_content:
```java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.dolphinscheduler.api.service;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.commons.collections.BeanMap;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter;
import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor;
import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;

import java.io.IOException;
import java.text.MessageFormat;
import java.util.*;
import java.util.regex.Matcher;
import java.util.stream.Collectors;

import static org.apache.dolphinscheduler.common.Constants.*;

/**
 * resources service
 */
@Service
public class ResourcesService extends BaseService {
    private static final Logger logger = LoggerFactory.getLogger(ResourcesService.class);

    @Autowired
    private ResourceMapper resourcesMapper;

    @Autowired
    private UdfFuncMapper udfFunctionMapper;

    @Autowired
    private TenantMapper tenantMapper;

    @Autowired
    private UserMapper userMapper;

    @Autowired
    private ResourceUserMapper resourceUserMapper;

    @Autowired
    private ProcessDefinitionMapper processDefinitionMapper;

    /**
     * create directory
     *
     * @param loginUser login user
     * @param name alias
     * @param description description
     * @param type type
     * @param pid parent id
     * @param currentDir current directory
     * @return create directory result
     */
    @Transactional(rollbackFor = Exception.class)
    public Result createDirectory(User loginUser,
                                  String name,
                                  String description,
                                  ResourceType type,
                                  int pid,
                                  String currentDir) {
        Result result = new Result();
        if (!PropertyUtils.getResUploadStartupState()){
            logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
            putMsg(result, Status.HDFS_NOT_STARTUP);
            return result;
        }
        String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name);

        if (pid != -1) {
            Resource parentResource = resourcesMapper.selectById(pid);

            if (parentResource == null) {
                putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
                return result;
            }

            if (!hasPerm(loginUser, parentResource.getUserId())) {
                putMsg(result, Status.USER_NO_OPERATION_PERM);
                return result;
            }
        }

        if (checkResourceExists(fullName, 0, type.ordinal())) {
            logger.error("resource directory {} has exist, can't recreate", fullName);
            putMsg(result, Status.RESOURCE_EXIST);
            return result;
        }

        Date now = new Date();

        Resource resource = new Resource(pid,name,fullName,true,description,name,loginUser.getId(),type,0,now,now);

        try {
            resourcesMapper.insert(resource);
            putMsg(result, Status.SUCCESS);
            Map<Object, Object> dataMap = new BeanMap(resource);
            Map<String, Object> resultMap = new HashMap<String, Object>();
            for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
                if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
                    resultMap.put(entry.getKey().toString(), entry.getValue());
                }
            }
            result.setData(resultMap);
        } catch (Exception e) {
            logger.error("resource already exists, can't recreate ", e);
            throw new RuntimeException("resource already exists, can't recreate");
        }

        createDirecotry(loginUser,fullName,type,result);
        return result;
    }

    /**
     * create resource
     *
     * @param loginUser login user
     * @param name alias
     * @param desc description
     * @param file file
     * @param type type
     * @param pid parent id
     * @param currentDir current directory
     * @return create result code
     */
    @Transactional(rollbackFor = Exception.class)
    public Result createResource(User loginUser,
                                 String name,
                                 String desc,
                                 ResourceType type,
                                 MultipartFile file,
                                 int pid,
                                 String currentDir) {
        Result result = new Result();
        if (!PropertyUtils.getResUploadStartupState()){
            logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
            putMsg(result, Status.HDFS_NOT_STARTUP);
            return result;
        }

        if (pid != -1) {
            Resource parentResource = resourcesMapper.selectById(pid);

            if (parentResource == null) {
                putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
                return result;
            }

            if (!hasPerm(loginUser, parentResource.getUserId())) {
                putMsg(result, Status.USER_NO_OPERATION_PERM);
                return result;
            }
        }

        if (file.isEmpty()) {
            logger.error("file is empty: {}", file.getOriginalFilename());
            putMsg(result, Status.RESOURCE_FILE_IS_EMPTY);
            return result;
        }
        String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
        String nameSuffix = FileUtils.suffix(name);
        if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
            /**
             * rename file suffix and original suffix must be consistent
             */
            logger.error("rename file suffix and original suffix must be consistent: {}", file.getOriginalFilename());
            putMsg(result, Status.RESOURCE_SUFFIX_FORBID_CHANGE);
            return result;
        }

        if (Constants.UDF.equals(type.name()) && !JAR.equalsIgnoreCase(fileSuffix)) {
            logger.error(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg());
            putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR);
            return result;
        }
        if (file.getSize() > Constants.MAX_FILE_SIZE) {
            logger.error("file size is too large: {}", file.getOriginalFilename());
            putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT);
            return result;
        }

        String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name);
        if (checkResourceExists(fullName, 0, type.ordinal())) {
            logger.error("resource {} has exist, can't recreate", name);
            putMsg(result, Status.RESOURCE_EXIST);
            return result;
        }
        Date now = new Date();
        Resource resource = new Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now);

        try {
            resourcesMapper.insert(resource);
            putMsg(result, Status.SUCCESS);
            Map<Object, Object> dataMap = new BeanMap(resource);
            Map<String, Object> resultMap = new HashMap<>();
            for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
                if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
                    resultMap.put(entry.getKey().toString(), entry.getValue());
                }
            }
            result.setData(resultMap);
        } catch (Exception e) {
            logger.error("resource already exists, can't recreate ", e);
            throw new RuntimeException("resource already exists, can't recreate");
        }

        if (!upload(loginUser, fullName, file, type)) {
            logger.error("upload resource: {} file: {} failed.", name, file.getOriginalFilename());
            putMsg(result, Status.HDFS_OPERATION_ERROR);
            throw new RuntimeException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
        }
        return result;
    }

    /**
     * check resource is exists
     *
     * @param fullName fullName
     * @param userId user id
     * @param type type
     * @return true if resource exists
     */
    private boolean checkResourceExists(String fullName, int userId, int type ){
        List<Resource> resources = resourcesMapper.queryResourceList(fullName, userId, type);
        if (resources != null && resources.size() > 0) {
            return true;
        }
        return false;
    }

    /**
     * update resource
     * @param loginUser login user
     * @param resourceId resource id
     * @param name name
     * @param desc description
     * @param type resource type
     * @param file resource file
     * @return update result code
     */
    @Transactional(rollbackFor = Exception.class)
    public Result updateResource(User loginUser,
                                 int resourceId,
                                 String name,
                                 String desc,
                                 ResourceType type,
                                 MultipartFile file) {
        Result result = new Result();
        if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState()); putMsg(result, Status.HDFS_NOT_STARTUP); return result; } Resource resource = resourcesMapper.selectById(resourceId); if (resource == null) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } if (!hasPerm(loginUser, resource.getUserId())) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } if (name.equals(resource.getAlias()) && desc.equals(resource.getDescription())) { putMsg(result, Status.SUCCESS); return result; } String originFullName = resource.getFullName(); String originResourceName = resource.getAlias(); String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/")+1),name); if (!originResourceName.equals(name) && checkResourceExists(fullName, 0, type.ordinal())) { logger.error("resource {} already exists, can't recreate", name); putMsg(result, Status.RESOURCE_EXIST); return result; } if (file != null) { if (file.isEmpty()) { logger.error("file is empty: {}", file.getOriginalFilename());
                putMsg(result, Status.RESOURCE_FILE_IS_EMPTY);
                return result;
            }

            String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
            String nameSuffix = FileUtils.suffix(name);
            if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
                /**
                 * rename file suffix and original suffix must be consistent
                 */
                logger.error("rename file suffix and original suffix must be consistent: {}", file.getOriginalFilename());
                putMsg(result, Status.RESOURCE_SUFFIX_FORBID_CHANGE);
                return result;
            }

            if (Constants.UDF.equals(type.name()) && !JAR.equalsIgnoreCase(FileUtils.suffix(originFullName))) {
                logger.error(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg());
                putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR);
                return result;
            }
            if (file.getSize() > Constants.MAX_FILE_SIZE) {
                logger.error("file size is too large: {}", file.getOriginalFilename());
                putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT);
                return result;
            }
        }

        String tenantCode = getTenantCode(resource.getUserId(),result);
        if (StringUtils.isEmpty(tenantCode)){
return result; } String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName); try { if (!HadoopUtils.getInstance().exists(originHdfsFileName)) { logger.error("{} not exist", originHdfsFileName); putMsg(result,Status.RESOURCE_NOT_EXIST); return result; } } catch (IOException e) { logger.error(e.getMessage(),e); throw new ServiceException(Status.HDFS_OPERATION_ERROR); } if (!resource.isDirectory()) { String originSuffix = FileUtils.suffix(originFullName); String suffix = FileUtils.suffix(fullName); boolean suffixIsChanged = false; if (StringUtils.isBlank(suffix) && StringUtils.isNotBlank(originSuffix)) { suffixIsChanged = true; } if (StringUtils.isNotBlank(suffix) && !suffix.equals(originSuffix)) { suffixIsChanged = true; } if (suffixIsChanged) { Map<String, Object> columnMap = new HashMap<>();
columnMap.put("resources_id", resourceId); List<ResourcesUser> resourcesUsers = resourceUserMapper.selectByMap(columnMap); if (CollectionUtils.isNotEmpty(resourcesUsers)) { List<Integer> userIds = resourcesUsers.stream().map(ResourcesUser::getUserId).collect(Collectors.toList()); List<User> users = userMapper.selectBatchIds(userIds); String userNames = users.stream().map(User::getUserName).collect(Collectors.toList()).toString(); logger.error("resource is authorized to user {},suffix not allowed to be modified", userNames); putMsg(result,Status.RESOURCE_IS_AUTHORIZED,userNames); return result; } } } Date now = new Date(); resource.setAlias(name); resource.setFullName(fullName); resource.setDescription(desc); resource.setUpdateTime(now); if (file != null) { resource.setFileName(file.getOriginalFilename()); resource.setSize(file.getSize()); } try { resourcesMapper.updateById(resource); if (resource.isDirectory()) { List<Integer> childrenResource = listAllChildren(resource,false); if (CollectionUtils.isNotEmpty(childrenResource)) { String matcherFullName = Matcher.quoteReplacement(fullName); List<Resource> childResourceList = new ArrayList<>(); Integer[] childResIdArray = childrenResource.toArray(new Integer[childrenResource.size()]);
                    List<Resource> resourceList = resourcesMapper.listResourceByIds(childResIdArray);
                    childResourceList = resourceList.stream().map(t -> {
                        t.setFullName(t.getFullName().replaceFirst(originFullName, matcherFullName));
                        t.setUpdateTime(now);
                        return t;
                    }).collect(Collectors.toList());
                    resourcesMapper.batchUpdateResource(childResourceList);

                    if (ResourceType.UDF.equals(resource.getType())) {
                        List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(childResIdArray);
                        if (CollectionUtils.isNotEmpty(udfFuncs)) {
                            udfFuncs = udfFuncs.stream().map(t -> {
                                t.setResourceName(t.getResourceName().replaceFirst(originFullName, matcherFullName));
                                t.setUpdateTime(now);
                                return t;
                            }).collect(Collectors.toList());
                            udfFunctionMapper.batchUpdateUdfFunc(udfFuncs);
                        }
                    }
                }
            } else if (ResourceType.UDF.equals(resource.getType())) {
                List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(new Integer[]{resourceId});
                if (CollectionUtils.isNotEmpty(udfFuncs)) {
                    udfFuncs = udfFuncs.stream().map(t -> {
                        t.setResourceName(fullName);
                        t.setUpdateTime(now);
                        return t;
                    }).collect(Collectors.toList());
                    udfFunctionMapper.batchUpdateUdfFunc(udfFuncs);
                }
            }
            putMsg(result, Status.SUCCESS);
            Map<Object, Object> dataMap = new BeanMap(resource);
            Map<String, Object> resultMap = new HashMap<>(5);
            for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
                if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
                    resultMap.put(entry.getKey().toString(), entry.getValue());
                }
            }
            result.setData(resultMap);
        } catch (Exception e) {
            logger.error(Status.UPDATE_RESOURCE_ERROR.getMsg(), e);
            throw new ServiceException(Status.UPDATE_RESOURCE_ERROR);
        }

        if (originResourceName.equals(name) && file == null) {
            return result;
        }

        if (file != null) {
            if (!upload(loginUser, fullName, file, type)) {
                logger.error("upload resource: {} file: {} failed.", name, file.getOriginalFilename());
                putMsg(result, Status.HDFS_OPERATION_ERROR);
                throw new RuntimeException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
            }
            if (!fullName.equals(originFullName)) {
                try {
                    HadoopUtils.getInstance().delete(originHdfsFileName,false);
                } catch (IOException e) {
                    logger.error(e.getMessage(),e);
                    throw new RuntimeException(String.format("delete resource: %s failed.", originFullName));
                }
            }
            return result;
        }

        String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName);
        try {
            logger.info("start hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName);
            HadoopUtils.getInstance().copy(originHdfsFileName, destHdfsFileName, true, true);
        } catch (Exception e) {
            logger.error(MessageFormat.format("hdfs copy {0} -> {1} fail", originHdfsFileName, destHdfsFileName), e);
            putMsg(result,Status.HDFS_COPY_FAIL);
            throw new ServiceException(Status.HDFS_COPY_FAIL);
        }

        return result;
    }

    /**
     * query resources list paging
     *
     * @param loginUser login user
     * @param type resource type
     * @param searchVal search value
     * @param pageNo page number
     * @param pageSize page size
     * @return resource list page
     */
    public Map<String, Object> queryResourceListPaging(User loginUser, int direcotryId, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) {
        HashMap<String, Object> result = new HashMap<>(5);
        Page<Resource> page = new Page(pageNo, pageSize);
        int userId = loginUser.getId();
        if (isAdmin(loginUser)) {
            userId= 0;
        }
        if (direcotryId != -1) {
            Resource directory = resourcesMapper.selectById(direcotryId);
            if (directory == null) {
                putMsg(result, Status.RESOURCE_NOT_EXIST);
                return result;
            }
        }

        IPage<Resource> resourceIPage = resourcesMapper.queryResourcePaging(page, userId,direcotryId, type.ordinal(), searchVal);
        PageInfo pageInfo = new PageInfo<Resource>(pageNo, pageSize);
        pageInfo.setTotalCount((int)resourceIPage.getTotal());
        pageInfo.setLists(resourceIPage.getRecords());
        result.put(Constants.DATA_LIST, pageInfo);
        putMsg(result,Status.SUCCESS);
        return result;
    }

    /**
     * create direcoty
     * @param loginUser login user
     * @param fullName full name
     * @param type resource type
     * @param result Result
     */
    private void createDirecotry(User loginUser,String fullName,ResourceType type,Result result){
        String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
        String directoryName = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
    String resourceRootPath = HadoopUtils.getHdfsDir(type, tenantCode);
    try {
        if (!HadoopUtils.getInstance().exists(resourceRootPath)) {
            createTenantDirIfNotExists(tenantCode);
        }

        if (!HadoopUtils.getInstance().mkdir(directoryName)) {
            logger.error("create resource directory {} of hdfs failed", directoryName);
            putMsg(result, Status.HDFS_OPERATION_ERROR);
            throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
        }
    } catch (Exception e) {
        logger.error("create resource directory {} of hdfs failed", directoryName);
        putMsg(result, Status.HDFS_OPERATION_ERROR);
        throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
    }
}

/**
 * upload file to hdfs
 *
 * @param loginUser login user
 * @param fullName full name
 * @param file file
 * @param type resource type
 * @return true if the upload succeeded
 */
private boolean upload(User loginUser, String fullName, MultipartFile file, ResourceType type) {
    // the suffix of the uploaded file must match the suffix in the requested full name
    String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
    String nameSuffix = FileUtils.suffix(fullName);
    if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
        return false;
    }

    String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();

    String localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
    String hdfsFilename = HadoopUtils.getHdfsFileName(type, tenantCode, fullName);
    String resourcePath = HadoopUtils.getHdfsDir(type, tenantCode);
    try {
        if (!HadoopUtils.getInstance().exists(resourcePath)) {
            createTenantDirIfNotExists(tenantCode);
        }
        org.apache.dolphinscheduler.api.utils.FileUtils.copyFile(file, localFilename);
        HadoopUtils.getInstance().copyLocalToHdfs(localFilename, hdfsFilename, true, true);
    } catch (Exception e) {
        logger.error(e.getMessage(), e);
        return false;
    }
    return true;
}

/**
 * query resource list
 *
 * @param loginUser login user
 * @param type resource type
 * @return resource list
 */
public Map<String, Object> queryResourceList(User loginUser, ResourceType type) {

    Map<String, Object> result = new HashMap<>(5);
    int userId = loginUser.getId();
    if (isAdmin(loginUser)) {
        userId = 0;
    }
    List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(), 0);
    Visitor resourceTreeVisitor = new ResourceTreeVisitor(allResourceList);
    result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
    putMsg(result, Status.SUCCESS);

    return result;
}

/**
 * query resource jar list
 *
 * @param loginUser login user
 * @param type resource type
 * @return resource list containing jar files only
 */
public Map<String, Object> queryResourceJarList(User loginUser, ResourceType type) {

    Map<String, Object> result = new HashMap<>(5);
    int userId = loginUser.getId();
    if (isAdmin(loginUser)) {
        userId = 0;
    }
    List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(), 0);
    // keep only resources whose full name ends with ".jar", i.e. main jar candidates
    List<Resource> resources = new ResourceFilter(".jar", new ArrayList<>(allResourceList)).filter();
    Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources);
    result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
    putMsg(result, Status.SUCCESS);

    return result;
}

/**
 * delete resource
 *
 * @param loginUser login user
 * @param resourceId resource id
 * @return delete result code
 * @throws Exception exception
 */
@Transactional(rollbackFor = Exception.class)
public Result delete(User loginUser, int resourceId) throws Exception {
    Result result = new Result();

    if (!PropertyUtils.getResUploadStartupState()) {
        logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
        putMsg(result, Status.HDFS_NOT_STARTUP);
        return result;
    }

    Resource resource = resourcesMapper.selectById(resourceId);
    if (resource == null) {
        logger.error("resource file not exist, resource id {}", resourceId);
        putMsg(result, Status.RESOURCE_NOT_EXIST);
        return result;
    }

    if (!hasPerm(loginUser, resource.getUserId())) {
        putMsg(result, Status.USER_NO_OPERATION_PERM);
        return result;
    }

    String tenantCode = getTenantCode(resource.getUserId(), result);
    if (StringUtils.isEmpty(tenantCode)) {
        return result;
    }

    // collect the resource ids that are referenced by process definitions
    List<Map<String, Object>> list = processDefinitionMapper.listResources();
    Map<Integer, Set<Integer>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list);
    Set<Integer> resourceIdSet = resourceProcessMap.keySet();
    // the resource to delete together with all of its descendants
    List<Integer> allChildren = listAllChildren(resource, true);
    Integer[] needDeleteResourceIdArray = allChildren.toArray(new Integer[allChildren.size()]);

    if (resource.getType() == (ResourceType.UDF)) {
        List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(needDeleteResourceIdArray);
        if (CollectionUtils.isNotEmpty(udfFuncs)) {
            logger.error("can't be deleted, because it is bound by UDF functions: {}", udfFuncs);
            putMsg(result, Status.UDF_RESOURCE_IS_BOUND, udfFuncs.get(0).getFuncName());
            return result;
        }
    }

    if (resourceIdSet.contains(resource.getPid())) {
        logger.error("can't be deleted, because it is used by process definitions");
        putMsg(result, Status.RESOURCE_IS_USED);
        return result;
    }
    // keep only the ids that are both about to be deleted and referenced somewhere
    resourceIdSet.retainAll(allChildren);
    if (CollectionUtils.isNotEmpty(resourceIdSet)) {
        logger.error("can't be deleted, because it is used by process definitions");
        for (Integer resId : resourceIdSet) {
            logger.error("resource id:{} is used by process definition {}", resId, resourceProcessMap.get(resId));
        }
        putMsg(result, Status.RESOURCE_IS_USED);
        return result;
    }

    String hdfsFilename = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());

    // delete the database records first, then the files in HDFS
    resourcesMapper.deleteIds(needDeleteResourceIdArray);
    resourceUserMapper.deleteResourceUserArray(0, needDeleteResourceIdArray);

    HadoopUtils.getInstance().delete(hdfsFilename, true);

    putMsg(result, Status.SUCCESS);
    return result;
}

/**
 * verify resource by name and type
 * @param loginUser login user
 * @param fullName resource full name
 * @param type resource type
 * @return true if the resource name does not exist, otherwise return false
 */
public Result verifyResourceName(String fullName, ResourceType type, User loginUser) {
    Result result = new Result();
    putMsg(result, Status.SUCCESS);
    if (checkResourceExists(fullName, 0, type.ordinal())) {
        logger.error("resource type:{} name:{} already exists, can't create again.", type, fullName);
        putMsg(result, Status.RESOURCE_EXIST);
    } else {
        Tenant tenant = tenantMapper.queryById(loginUser.getTenantId());
        if (tenant != null) {
            String tenantCode = tenant.getTenantCode();
            try {
                String hdfsFilename = HadoopUtils.getHdfsFileName(type, tenantCode, fullName);
                if (HadoopUtils.getInstance().exists(hdfsFilename)) {
                    logger.error("resource type:{} name:{} already exists in hdfs {}, can't create again.", type, fullName, hdfsFilename);
                    putMsg(result, Status.RESOURCE_FILE_EXIST, hdfsFilename);
                }
            } catch (Exception e) {
                logger.error(e.getMessage(), e);
                putMsg(result, Status.HDFS_OPERATION_ERROR);
            }
        } else {
            putMsg(result, Status.TENANT_NOT_EXIST);
        }
    }

    return result;
}

/**
 * verify resource by full name or pid and type
 * @param fullName resource full name
 * @param id resource id
 * @param type resource type
 * @return true if the resource full name or pid does not exist, otherwise return false
 */
public Result queryResource(String fullName, Integer id, ResourceType type) {
    Result result = new Result();
    if (StringUtils.isBlank(fullName) && id == null) {
        logger.error("You must input one of fullName and pid");
        putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
        return result;
    }

    if (StringUtils.isNotBlank(fullName)) {
        List<Resource> resourceList = resourcesMapper.queryResource(fullName, type.ordinal());
        if (CollectionUtils.isEmpty(resourceList)) {
            logger.error("resource file not exist, resource full name {} ", fullName);
            putMsg(result, Status.RESOURCE_NOT_EXIST);
            return result;
        }
        putMsg(result, Status.SUCCESS);
        result.setData(resourceList.get(0));
    } else {
        Resource resource = resourcesMapper.selectById(id);
        if (resource == null) {
            logger.error("resource file not exist, resource id {}", id);
            putMsg(result, Status.RESOURCE_NOT_EXIST);
            return result;
        }
        Resource parentResource = resourcesMapper.selectById(resource.getPid());
        if (parentResource == null) {
            logger.error("parent resource file not exist, resource id {}", id);
            putMsg(result, Status.RESOURCE_NOT_EXIST);
            return result;
        }
        putMsg(result, Status.SUCCESS);
        result.setData(parentResource);
    }
    return result;
}

/**
 * view resource file online
 *
 * @param resourceId resource id
 * @param skipLineNum skip line number
 * @param limit limit
 * @return resource content
 */
public Result readResource(int resourceId, int skipLineNum, int limit) {
    Result result = new Result();

    if (!PropertyUtils.getResUploadStartupState()) {
        logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
        putMsg(result, Status.HDFS_NOT_STARTUP);
        return result;
    }

    Resource resource = resourcesMapper.selectById(resourceId);
    if (resource == null) {
        logger.error("resource file not exist, resource id {}", resourceId);
        putMsg(result, Status.RESOURCE_NOT_EXIST);
        return result;
    }

    String nameSuffix = FileUtils.suffix(resource.getAlias());
    String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
    if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
        List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
        if (!strList.contains(nameSuffix)) {
            logger.error("resource suffix {} not support view, resource id {}", nameSuffix, resourceId);
            putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
            return result;
        }
    }

    String tenantCode = getTenantCode(resource.getUserId(), result);
    if (StringUtils.isEmpty(tenantCode)) {
        return result;
    }

    String hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resource.getFullName());
    logger.info("resource hdfs path is {} ", hdfsFileName);
    try {
        if (HadoopUtils.getInstance().exists(hdfsFileName)) {
            List<String> content = HadoopUtils.getInstance().catFile(hdfsFileName, skipLineNum, limit);

            putMsg(result, Status.SUCCESS);
            Map<String, Object> map = new HashMap<>();
            map.put(ALIAS, resource.getAlias());
            map.put(CONTENT, String.join("\n", content));
            result.setData(map);
        } else {
            logger.error("read file {} not exist in hdfs", hdfsFileName);
            putMsg(result, Status.RESOURCE_FILE_NOT_EXIST, hdfsFileName);
        }
    } catch (Exception e) {
        logger.error("Resource {} read failed", hdfsFileName, e);
        putMsg(result, Status.HDFS_OPERATION_ERROR);
    }

    return result;
}

/**
 * create resource file online
 *
 * @param loginUser login user
 * @param type resource type
 * @param fileName file name
 * @param fileSuffix file suffix
 * @param desc description
 * @param content content
 * @param pid parent resource id
 * @param currentDirectory current directory
 * @return create result code
 */
@Transactional(rollbackFor = Exception.class)
public Result onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content, int pid, String currentDirectory) {
    Result result = new Result();
    if (!PropertyUtils.getResUploadStartupState()) {
        logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
        putMsg(result, Status.HDFS_NOT_STARTUP);
        return result;
    }

    String nameSuffix = fileSuffix.trim();
    String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
    if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
        List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
        if (!strList.contains(nameSuffix)) {
            logger.error("resource suffix {} not support create", nameSuffix);
            putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
            return result;
        }
    }

    String name = fileName.trim() + "." + nameSuffix;
    String fullName = currentDirectory.equals("/") ? String.format("%s%s", currentDirectory, name) : String.format("%s/%s", currentDirectory, name);
    result = verifyResourceName(fullName, type, loginUser);
    if (!result.getCode().equals(Status.SUCCESS.getCode())) {
        return result;
    }

    Date now = new Date();
    Resource resource = new Resource(pid, name, fullName, false, desc, name, loginUser.getId(), type, content.getBytes().length, now, now);

    resourcesMapper.insert(resource);

    putMsg(result, Status.SUCCESS);
    Map<Object, Object> dataMap = new BeanMap(resource);
    Map<String, Object> resultMap = new HashMap<>();
    for (Map.Entry<Object, Object> entry : dataMap.entrySet()) {
        if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
            resultMap.put(entry.getKey().toString(), entry.getValue());
        }
    }
    result.setData(resultMap);

    String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();

    result = uploadContentToHdfs(fullName, tenantCode, content);
    if (!result.getCode().equals(Status.SUCCESS.getCode())) {
        throw new RuntimeException(result.getMsg());
    }
    return result;
}

/**
 * update resource content
 *
 * @param resourceId resource id
 * @param content content
 * @return update result code
 */
@Transactional(rollbackFor = Exception.class)
public Result updateResourceContent(int resourceId, String content) {
    Result result = new Result();
    if (!PropertyUtils.getResUploadStartupState()) {
        logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
        putMsg(result, Status.HDFS_NOT_STARTUP);
        return result;
    }
    Resource resource = resourcesMapper.selectById(resourceId);
    if (resource == null) {
        logger.error("read file not exist, resource id {}", resourceId);
        putMsg(result, Status.RESOURCE_NOT_EXIST);
        return result;
    }

    String nameSuffix = FileUtils.suffix(resource.getAlias());
    String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
    if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
        List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
        if (!strList.contains(nameSuffix)) {
            logger.error("resource suffix {} not support update, resource id {}", nameSuffix, resourceId);
            putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
            return result;
        }
    }

    String tenantCode = getTenantCode(resource.getUserId(), result);
    if (StringUtils.isEmpty(tenantCode)) {
        return result;
    }

    resource.setSize(content.getBytes().length);
    resource.setUpdateTime(new Date());
    resourcesMapper.updateById(resource);

    result = uploadContentToHdfs(resource.getFullName(), tenantCode, content);
    if (!result.getCode().equals(Status.SUCCESS.getCode())) {
        throw new RuntimeException(result.getMsg());
    }
    return result;
}

/**
 * upload content to hdfs
 *
 * @param resourceName resource name
 * @param tenantCode tenant code
 * @param content content
 * @return result
 */
private Result uploadContentToHdfs(String resourceName, String tenantCode, String content) {
    Result result = new Result();
    String localFilename = "";
    String hdfsFileName = "";
    try {
        localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());

        // write the new content to a temporary local file first
        if (!FileUtils.writeContent2File(content, localFilename)) {
            logger.error("write file {} failed, content is {}", localFilename, content);
            putMsg(result, Status.RESOURCE_NOT_EXIST);
            return result;
        }

        hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resourceName);
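        // The rest of this method gives the upload its overwrite semantics: ensure the
        // tenant's resource directory exists, delete any stale copy of the file already
        // in HDFS, then copy the freshly written local file up.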
        String resourcePath = HadoopUtils.getHdfsResDir(tenantCode);
        logger.info("resource hdfs path is {} ", hdfsFileName);

        HadoopUtils hadoopUtils = HadoopUtils.getInstance();
        if (!hadoopUtils.exists(resourcePath)) {
            createTenantDirIfNotExists(tenantCode);
        }
        if (hadoopUtils.exists(hdfsFileName)) {
            hadoopUtils.delete(hdfsFileName, false);
        }

        hadoopUtils.copyLocalToHdfs(localFilename, hdfsFileName, true, true);
    } catch (Exception e) {
        logger.error(e.getMessage(), e);
        result.setCode(Status.HDFS_OPERATION_ERROR.getCode());
        result.setMsg(String.format("copy %s to hdfs %s fail", localFilename, hdfsFileName));
        return result;
    }
    putMsg(result, Status.SUCCESS);
    return result;
}

/**
 * download file
 *
 * @param resourceId resource id
 * @return resource content
 * @throws Exception exception
 */
public org.springframework.core.io.Resource downloadResource(int resourceId) throws Exception {
    if (!PropertyUtils.getResUploadStartupState()) {
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState()); throw new RuntimeException("hdfs not startup"); } Resource resource = resourcesMapper.selectById(resourceId); if (resource == null) { logger.error("download file not exist, resource id {}", resourceId); return null; } if (resource.isDirectory()) { logger.error("resource id {} is directory,can't download it", resourceId); throw new RuntimeException("cant't download directory"); } int userId = resource.getUserId(); User user = userMapper.selectById(userId); if(user == null){ logger.error("user id {} not exists", userId); throw new RuntimeException(String.format("resource owner id %d not exist",userId)); } Tenant tenant = tenantMapper.queryById(user.getTenantId()); if(tenant == null){ logger.error("tenant id {} not exists", user.getTenantId()); throw new RuntimeException(String.format("The tenant id %d of resource owner not exist",user.getTenantId())); } String tenantCode = tenant.getTenantCode(); String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName()); String localFileName = FileUtils.getDownloadFilename(resource.getAlias()); logger.info("resource hdfs path is {} ", hdfsFileName); HadoopUtils.getInstance().copyHdfsToLocal(hdfsFileName, localFileName, false, true); return org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(localFileName); }
/**
 * authorize resource tree
 *
 * @param loginUser login user
 * @param userId user id
 * @return resource tree that can be authorized to the user
 */
public Map<String, Object> authorizeResourceTree(User loginUser, Integer userId) {
    Map<String, Object> result = new HashMap<>();
    if (checkAdmin(loginUser, result)) {
        return result;
    }
    List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
    List<ResourceComponent> list;
    if (CollectionUtils.isNotEmpty(resourceList)) {
        Visitor visitor = new ResourceTreeVisitor(resourceList);
        list = visitor.visit().getChildren();
    } else {
        list = new ArrayList<>(0);
    }
    result.put(Constants.DATA_LIST, list);
    putMsg(result, Status.SUCCESS);
    return result;
}

/**
 * unauthorized file
 *
 * @param loginUser login user
 * @param userId user id
 * @return unauthorized result code
 */
public Map<String, Object> unauthorizedFile(User loginUser, Integer userId) {
    Map<String, Object> result = new HashMap<>();
    if (checkAdmin(loginUser, result)) {
        return result;
    }
    List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
    List<Resource> list;
    if (CollectionUtils.isNotEmpty(resourceList)) {
        Set<Resource> resourceSet = new HashSet<>(resourceList);
        List<Resource> authedResourceList = resourcesMapper.queryAuthorizedResourceList(userId);

        getAuthorizedResourceList(resourceSet, authedResourceList);
        list = new ArrayList<>(resourceSet);
    } else {
        list = new ArrayList<>(0);
    }
    Visitor visitor = new ResourceTreeVisitor(list);
    result.put(Constants.DATA_LIST, visitor.visit().getChildren());
    putMsg(result, Status.SUCCESS);
    return result;
}

/**
 * unauthorized udf function
 *
 * @param loginUser login user
 * @param userId user id
 * @return unauthorized result code
 */
public Map<String, Object> unauthorizedUDFFunction(User loginUser, Integer userId) {
    Map<String, Object> result = new HashMap<>(5);
    if (checkAdmin(loginUser, result)) {
        return result;
    }

    List<UdfFunc> udfFuncList = udfFunctionMapper.queryUdfFuncExceptUserId(userId);
    List<UdfFunc> resultList = new ArrayList<>();
    Set<UdfFunc> udfFuncSet = null;
    if (CollectionUtils.isNotEmpty(udfFuncList)) {
        udfFuncSet = new HashSet<>(udfFuncList);

        List<UdfFunc> authedUDFFuncList = udfFunctionMapper.queryAuthedUdfFunc(userId);

        getAuthorizedResourceList(udfFuncSet, authedUDFFuncList);
        resultList = new ArrayList<>(udfFuncSet);
    }
    result.put(Constants.DATA_LIST, resultList);
    putMsg(result, Status.SUCCESS);
    return result;
}

/**
 * authorized udf function
 *
 * @param loginUser login user
 * @param userId user id
 * @return authorized result code
 */
public Map<String, Object> authorizedUDFFunction(User loginUser, Integer userId) {
    Map<String, Object> result = new HashMap<>();
    if (checkAdmin(loginUser, result)) {
        return result;
    }
    List<UdfFunc> udfFuncs = udfFunctionMapper.queryAuthedUdfFunc(userId);
    result.put(Constants.DATA_LIST, udfFuncs);
    putMsg(result, Status.SUCCESS);
    return result;
}

/**
 * authorized file
 *
 * @param loginUser login user
 * @param userId user id
 * @return authorized result
 */
public Map<String, Object> authorizedFile(User loginUser, Integer userId) {
    Map<String, Object> result = new HashMap<>(5);
    if (checkAdmin(loginUser, result)) {
        return result;
    }
    List<Resource> authedResources = resourcesMapper.queryAuthorizedResourceList(userId);
    Visitor visitor = new ResourceTreeVisitor(authedResources);
    // visit the tree once and reuse the result for both logging and the response
    List<ResourceComponent> children = visitor.visit().getChildren();
    logger.info(JSON.toJSONString(children, SerializerFeature.SortField));
    result.put(Constants.DATA_LIST, children);
    putMsg(result, Status.SUCCESS);
    return result;
}

/**
 * get authorized resource list
 *
 * @param resourceSet resource set
 * @param authedResourceList authorized resource list
 */
private void getAuthorizedResourceList(Set<?> resourceSet, List<?> authedResourceList) {
    Set<?> authedResourceSet = null;
    if (CollectionUtils.isNotEmpty(authedResourceList)) {
        authedResourceSet = new HashSet<>(authedResourceList);
        // drop everything that is already authorized, leaving the unauthorized remainder
        resourceSet.removeAll(authedResourceSet);
    }
}

/**
 * get tenantCode by UserId
 *
 * @param userId user id
 * @param result return result
 * @return tenant code, or null if the user or tenant does not exist
 */
private String getTenantCode(int userId, Result result) {

    User user = userMapper.selectById(userId);
    if (user == null) {
        logger.error("user {} not exists", userId);
        putMsg(result, Status.USER_NOT_EXIST, userId);
        return null;
    }

    Tenant tenant = tenantMapper.queryById(user.getTenantId());
    if (tenant == null) {
        logger.error("tenant not exists");
        putMsg(result, Status.TENANT_NOT_EXIST);
        return null;
    }
    return tenant.getTenantCode();
}
/**
 * list all children id
 * @param resource resource
 * @param containSelf whether to add self to the children list
 * @return all children id
 */
List<Integer> listAllChildren(Resource resource, boolean containSelf) {
    List<Integer> childList = new ArrayList<>();
    if (resource.getId() != -1 && containSelf) {
        childList.add(resource.getId());
    }

    if (resource.isDirectory()) {
        listAllChildren(resource.getId(), childList);
    }
    return childList;
}

/**
 * list all children id
 * @param resourceId resource id
 * @param childList child list
 */
void listAllChildren(int resourceId, List<Integer> childList) {
    // depth-first walk: add each direct child, then recurse into it
    List<Integer> children = resourcesMapper.listChildren(resourceId);
    for (int childId : children) {
        childList.add(childId);
        listAllChildren(childId, childList);
    }
}
}
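The queryResourceJarList method above hard-codes a ".jar" suffix filter, which is exactly the gap issue 3469 describes: a Spark task whose program type is PYTHON needs a .py main file rather than a main jar (on the command line, spark-submit app.py runs a Python application directly). Below is a minimal, self-contained sketch of how a suffix filter keyed on program type might look; ProgramType, suffixFor and the simplified Res class are hypothetical stand-ins for illustration, not DolphinScheduler APIs.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Hypothetical sketch: choose the expected main-resource suffix from the task's
// program type and keep only the resources that match it.
public class MainResourceFilterSketch {

    enum ProgramType { JAVA, SCALA, PYTHON }

    // Simplified stand-in for the Resource entity: just a full name.
    static class Res {
        final String fullName;
        Res(String fullName) { this.fullName = fullName; }
        @Override
        public String toString() { return fullName; }
    }

    // JAVA/SCALA programs are packaged as jars; PYTHON programs are .py files.
    static String suffixFor(ProgramType type) {
        return type == ProgramType.PYTHON ? ".py" : ".jar";
    }

    static List<Res> filterBySuffix(List<Res> all, String suffix) {
        List<Res> matched = new ArrayList<>();
        for (Res r : all) {
            if (r.fullName.endsWith(suffix)) {
                matched.add(r);
            }
        }
        return matched;
    }

    public static void main(String[] args) {
        List<Res> all = Arrays.asList(new Res("/spark/wordcount.jar"),
                new Res("/spark/etl_job.py"));
        // For a PYTHON Spark task, only etl_job.py survives the filter.
        System.out.println(filterBySuffix(all, suffixFor(ProgramType.PYTHON)));
    }
}

The same idea generalizes the hard-coded new ResourceFilter(".jar", ...) call, so the UI can offer .py resources as the main program file when the program type is PYTHON.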
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/ResourcesControllerTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.dolphinscheduler.api.controller;

import com.alibaba.fastjson.JSON;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.UdfType;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import com.alibaba.fastjson.JSONObject;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.http.MediaType;
import org.springframework.test.web.servlet.MvcResult;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;

import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;

/**
 * resources controller test
 */
public class ResourcesControllerTest extends AbstractControllerTest {

    private static Logger logger = LoggerFactory.getLogger(ResourcesControllerTest.class);

    @Test
    public void testQuerytResourceList() throws Exception {

        MvcResult mvcResult = mockMvc.perform(get("/resources/list")
                .header(SESSION_ID, sessionId)
                .param("type", ResourceType.FILE.name()))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
                .andReturn();

        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testQueryResourceListPaging() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("type", String.valueOf(ResourceType.FILE));
        paramsMap.add("pageNo", "1");
        paramsMap.add("searchVal", "test");
        paramsMap.add("pageSize", "1");

        MvcResult mvcResult = mockMvc.perform(get("/resources/list-paging")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
                .andReturn();

        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testVerifyResourceName() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("name", "list_resources_1.sh");
        paramsMap.add("type", "FILE");
        MvcResult mvcResult = mockMvc.perform(get("/resources/verify-name")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testViewResource() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("id", "5");
        paramsMap.add("skipLineNum", "2");
        paramsMap.add("limit", "100");

        MvcResult mvcResult = mockMvc.perform(get("/resources/view")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
                .andReturn();

        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testOnlineCreateResource() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("type", String.valueOf(ResourceType.FILE));
        paramsMap.add("fileName", "test_file_1");
        paramsMap.add("suffix", "sh");
        paramsMap.add("description", "test");
        paramsMap.add("content", "echo 1111");
MvcResult mvcResult = mockMvc.perform(post("/resources/online-create") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(),result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); } @Test public void testUpdateResourceContent() throws Exception { MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("id", "1"); paramsMap.add("content","echo test_1111"); MvcResult mvcResult = mockMvc.perform(post("/resources/update-content") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(),result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); } @Test public void testDownloadResource() throws Exception { MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("id", "5"); MvcResult mvcResult = mockMvc.perform(get("/resources/download")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
                .andReturn();

        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testCreateUdfFunc() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("type", String.valueOf(UdfType.HIVE));
        paramsMap.add("funcName", "test_udf");
        paramsMap.add("className", "com.test.word.contWord");
        paramsMap.add("argTypes", "argTypes");
        paramsMap.add("database", "database");
        paramsMap.add("description", "description");
        paramsMap.add("resourceId", "1");

        MvcResult mvcResult = mockMvc.perform(post("/resources/udf-func/create")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isCreated())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
                .andReturn();

        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testViewUIUdfFunction() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("id", "1");
        MvcResult mvcResult = mockMvc.perform(get("/resources/udf-func/update-ui")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testUpdateUdfFunc() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("id", "1");
        paramsMap.add("type", String.valueOf(UdfType.HIVE));
        paramsMap.add("funcName", "update_duf");
        paramsMap.add("className", "com.test.word.contWord");
        paramsMap.add("argTypes", "argTypes");
        paramsMap.add("database", "database");
        paramsMap.add("description", "description");
        paramsMap.add("resourceId", "1");
        MvcResult mvcResult = mockMvc.perform(post("/resources/udf-func/update")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testQueryUdfFuncList() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("pageNo", "1");
        paramsMap.add("searchVal", "udf");
        paramsMap.add("pageSize", "1");
        MvcResult mvcResult = mockMvc.perform(get("/resources/udf-func/list-paging")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        result.getCode().equals(Status.SUCCESS.getCode());
        JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testQueryResourceList() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("type", String.valueOf(UdfType.HIVE));
        MvcResult mvcResult = mockMvc.perform(get("/resources/udf-func/list")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        result.getCode().equals(Status.SUCCESS.getCode());
        JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testVerifyUdfFuncName() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("name", "test");
        MvcResult mvcResult = mockMvc.perform(get("/resources/udf-func/verify-name")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        result.getCode().equals(Status.SUCCESS.getCode());
        JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testAuthorizedFile() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("userId", "2");
        MvcResult mvcResult = mockMvc.perform(get("/resources/authed-file")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isCreated())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        result.getCode().equals(Status.SUCCESS.getCode());
        JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testUnauthorizedFile() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("userId", "2");
        MvcResult mvcResult = mockMvc.perform(get("/resources/unauth-file")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isCreated())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        result.getCode().equals(Status.SUCCESS.getCode());
        JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testAuthorizedUDFFunction() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("userId", "2");
        MvcResult mvcResult = mockMvc.perform(get("/resources/authed-udf-func")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isCreated())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        result.getCode().equals(Status.SUCCESS.getCode());
        JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testUnauthUDFFunc() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("userId", "2");
        MvcResult mvcResult = mockMvc.perform(get("/resources/unauth-udf-func")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isCreated())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        result.getCode().equals(Status.SUCCESS.getCode());
        JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testDeleteUdfFunc() throws Exception {
        MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
        paramsMap.add("id", "1");
        MvcResult mvcResult = mockMvc.perform(get("/resources/udf-func/delete")
                .header(SESSION_ID, sessionId)
                .params(paramsMap))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        result.getCode().equals(Status.SUCCESS.getCode());
        JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }

    @Test
    public void testDeleteResource() throws Exception {
        MvcResult mvcResult = mockMvc.perform(get("/resources/delete")
                .header(SESSION_ID, sessionId)
                .param("id", "2"))
                .andExpect(status().isOk())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
                .andReturn();
        Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
        result.getCode().equals(Status.SUCCESS.getCode());
        JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
        Assert.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
        logger.info(mvcResult.getResponse().getContentAsString());
    }
}
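Editorial note: every test above repeats the same parse-and-assert sequence. A small helper along the following lines could collapse that duplication; this is only a sketch, and the helper name assertResultCode and its placement are hypothetical, not part of the project:

import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.junit.Assert;
import org.springframework.test.web.servlet.MvcResult;

import java.io.UnsupportedEncodingException;

public final class ResultAssertions {

    private ResultAssertions() {
    }

    /**
     * Parse the JSON body of an MvcResult into a Result and assert its code.
     * Hypothetical helper mirroring the repeated pattern in ResourcesControllerTest.
     */
    public static Result assertResultCode(MvcResult mvcResult, Status expected)
            throws UnsupportedEncodingException {
        Result result = JSONUtils.parseObject(
                mvcResult.getResponse().getContentAsString(), Result.class);
        Assert.assertEquals(expected.getCode(), result.getCode().intValue());
        return result;
    }
}

With such a helper, each test body would shrink to a single call, e.g. assertResultCode(mvcResult, Status.TENANT_NOT_EXIST), in place of the three-line parse/assert/log block.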
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,469
[Bug] The program type of spark node is selected as PYTHON, how should the main jar package be selected?
**Describe the question** ![image](https://user-images.githubusercontent.com/45786444/89966662-459d0900-dc82-11ea-9299-910c9b1745d0.png) **Which version of DolphinScheduler:** - [1.3.0] **Additional context** When the program type of a Spark node is set to PYTHON, how should the main jar package be selected? On a Linux machine, the command "spark-submit *.py" can use Spark to run Python files (a hedged command-line sketch follows this record's metadata). **Requirement or improvement** no
https://github.com/apache/dolphinscheduler/issues/3469
https://github.com/apache/dolphinscheduler/pull/3498
e367f90bb73c9682739308a0a98887a1c0f407ef
5f5c08402fdcecca8c35f4dc3021cc089949ef13
"2020-08-12T02:09:30Z"
java
"2020-08-14T08:47:01Z"
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilterTest.java
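On the question quoted in the record above: outside DolphinScheduler, spark-submit accepts a Python file directly as the application resource, so no main jar is involved for PYTHON programs. A minimal Java sketch of assembling such a command line follows; the method buildSparkSubmitCommand, its arguments, and the file name wordcount.py are hypothetical illustrations, not the project's actual SparkTask code:

import java.util.ArrayList;
import java.util.List;

public class SparkSubmitCommandSketch {

    /**
     * Assemble a spark-submit invocation for a Python main file.
     * For the PYTHON program type the application resource is the .py file
     * itself, so no --class option or main jar is required.
     */
    public static String buildSparkSubmitCommand(String deployMode, String mainPyFile) {
        List<String> args = new ArrayList<>();
        args.add("spark-submit");
        args.add("--master");
        args.add("yarn");
        args.add("--deploy-mode");
        args.add(deployMode);
        args.add(mainPyFile); // e.g. a .py file uploaded via the resource center
        return String.join(" ", args);
    }

    public static void main(String[] args) {
        // prints: spark-submit --master yarn --deploy-mode cluster wordcount.py
        System.out.println(buildSparkSubmitCommand("cluster", "wordcount.py"));
    }
}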
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.dolphinscheduler.api.dto.resources.filter;

import org.apache.dolphinscheduler.dao.entity.Resource;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.List;

/**
 * resource filter test
 */
public class ResourceFilterTest {
    private static Logger logger = LoggerFactory.getLogger(ResourceFilterTest.class);

    @Test
    public void filterTest() {
        List<Resource> allList = new ArrayList<>();
        Resource resource1 = new Resource(3, -1, "b", "/b", true);
        Resource resource2 = new Resource(4, 2, "a1.txt", "/a/a1.txt", false);
        Resource resource3 = new Resource(5, 3, "b1.txt", "/b/b1.txt", false);
        Resource resource4 = new Resource(6, 3, "b2.jar", "/b/b2.jar", false);
        Resource resource5 = new Resource(7, -1, "b2", "/b2", true);
        Resource resource6 = new Resource(8, -1, "b2", "/b/b2", true);
        Resource resource7 = new Resource(9, 8, "c2.jar", "/b/b2/c2.jar", false);
        allList.add(resource1);
        allList.add(resource2);
        allList.add(resource3);
        allList.add(resource4);
        allList.add(resource5);
        allList.add(resource6);
        allList.add(resource7);

        ResourceFilter resourceFilter = new ResourceFilter(".jar", allList);
        List<Resource> resourceList = resourceFilter.filter();
        Assert.assertNotNull(resourceList);
        resourceList.stream().forEach(t -> logger.info(t.toString()));
    }
}
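The test above only asserts that the filtered list is non-null. To make the intent concrete, here is a simplified, self-contained sketch of suffix filtering over a flat resource list; it is an illustration of the idea only, using an invented Res record, and is not the project's ResourceFilter implementation (which also walks parent directories):

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class SuffixFilterSketch {

    /** Minimal stand-in for a resource entry: full name plus directory flag. */
    static class Res {
        final String fullName;
        final boolean directory;

        Res(String fullName, boolean directory) {
            this.fullName = fullName;
            this.directory = directory;
        }
    }

    /** Keep files whose full name ends with the given suffix, e.g. ".jar". */
    static List<Res> filterBySuffix(List<Res> all, String suffix) {
        return all.stream()
                .filter(r -> !r.directory && r.fullName.endsWith(suffix))
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<Res> all = Arrays.asList(
                new Res("/b", true),
                new Res("/b/b1.txt", false),
                new Res("/b/b2.jar", false),
                new Res("/b/b2/c2.jar", false));
        // prints /b/b2.jar and /b/b2/c2.jar
        filterBySuffix(all, ".jar").forEach(r -> System.out.println(r.fullName));
    }
}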
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,469
[Bug] The program type of spark node is selected as PYTHON, how should the main jar package be selected?
**Describe the question** ![image](https://user-images.githubusercontent.com/45786444/89966662-459d0900-dc82-11ea-9299-910c9b1745d0.png) **Which version of DolphinScheduler:** - [1.3.0] **Additional context** When the program type of a Spark node is set to PYTHON, how should the main jar package be selected? On a Linux machine, the command "spark-submit *.py" can use Spark to run Python files. **Requirement or improvement** no
https://github.com/apache/dolphinscheduler/issues/3469
https://github.com/apache/dolphinscheduler/pull/3498
e367f90bb73c9682739308a0a98887a1c0f407ef
5f5c08402fdcecca8c35f4dc3021cc089949ef13
"2020-08-12T02:09:30Z"
java
"2020-08-14T08:47:01Z"
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/upgrade/UpgradeDao.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.dolphinscheduler.dao.upgrade;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.AbstractBaseDao;
import org.apache.dolphinscheduler.dao.datasource.ConnectionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.sql.DataSource;
import java.io.*;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.text.MessageFormat;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public abstract class UpgradeDao extends AbstractBaseDao {
    public static final Logger logger = LoggerFactory.getLogger(UpgradeDao.class);
    private static final String T_VERSION_NAME = "t_escheduler_version";
    private static final String T_NEW_VERSION_NAME = "t_ds_version";
    private static final String rootDir = System.getProperty("user.dir");
    protected static final DataSource dataSource = getDataSource();
    private static final DbType dbType = getCurrentDbType();

    @Override
    protected void init() {
    }

    /**
     * get datasource
     * @return DruidDataSource
     */
    public static DataSource getDataSource() {
        return ConnectionFactory.getInstance().getDataSource();
    }

    /**
     * get db type
     * @return dbType
     */
    public static DbType getDbType() {
        return dbType;
    }

    /**
     * get current dbType
     * @return
     */
    private static DbType getCurrentDbType() {
        Connection conn = null;
        try {
            conn = dataSource.getConnection();
            String name = conn.getMetaData().getDatabaseProductName().toUpperCase();
            return DbType.valueOf(name);
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
            return null;
        } finally {
            ConnectionUtils.releaseResource(conn);
        }
    }

    /**
     * init schema
     */
    public void initSchema() {
        DbType dbType = getDbType();
        String initSqlPath = "";
        if (dbType != null) {
            switch (dbType) {
                case MYSQL:
                    initSqlPath = "/sql/create/release-1.0.0_schema/mysql/";
                    initSchema(initSqlPath);
                    break;
                case POSTGRESQL:
                    initSqlPath = "/sql/create/release-1.2.0_schema/postgresql/";
                    initSchema(initSqlPath);
                    break;
                default:
                    logger.error("not support sql type: {},can't upgrade", dbType);
                    throw new IllegalArgumentException("not support sql type,can't upgrade");
            }
        }
    }

    /**
     * init schema
     * @param initSqlPath initSqlPath
     */
    public void initSchema(String initSqlPath) {
        runInitDDL(initSqlPath);
        runInitDML(initSqlPath);
    }

    /**
     * run DML
     * @param initSqlPath initSqlPath
     */
    private void runInitDML(String initSqlPath) {
        Connection conn = null;
        if (StringUtils.isEmpty(rootDir)) {
            throw new RuntimeException("Environment variable user.dir not found");
        }
        String mysqlSQLFilePath = rootDir + initSqlPath + "dolphinscheduler_dml.sql";
        try {
            conn = dataSource.getConnection();
            conn.setAutoCommit(false);
            ScriptRunner initScriptRunner = new ScriptRunner(conn, false, true);
            Reader initSqlReader = new FileReader(new File(mysqlSQLFilePath));
            initScriptRunner.runScript(initSqlReader);
            conn.commit();
        } catch (IOException e) {
            try {
                conn.rollback();
            } catch (SQLException e1) {
                logger.error(e1.getMessage(), e1);
            }
            logger.error(e.getMessage(), e);
            throw new RuntimeException(e.getMessage(), e);
        } catch (Exception e) {
            try {
                if (null != conn) {
                    conn.rollback();
                }
            } catch (SQLException e1) {
                logger.error(e1.getMessage(), e1);
            }
            logger.error(e.getMessage(), e);
            throw new RuntimeException(e.getMessage(), e);
        } finally {
            ConnectionUtils.releaseResource(conn);
        }
    }

    /**
     * run DDL
     * @param initSqlPath initSqlPath
     */
    private void runInitDDL(String initSqlPath) {
        Connection conn = null;
        if (StringUtils.isEmpty(rootDir)) {
            throw new RuntimeException("Environment variable user.dir not found");
        }
        String mysqlSQLFilePath = rootDir + initSqlPath + "dolphinscheduler_ddl.sql";
        try {
            conn = dataSource.getConnection();
            ScriptRunner initScriptRunner = new ScriptRunner(conn, true, true);
            Reader initSqlReader = new FileReader(new File(mysqlSQLFilePath));
            initScriptRunner.runScript(initSqlReader);
        } catch (IOException e) {
            logger.error(e.getMessage(), e);
            throw new RuntimeException(e.getMessage(), e);
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
            throw new RuntimeException(e.getMessage(), e);
        } finally {
            ConnectionUtils.releaseResource(conn);
        }
    }

    /**
     * determines whether a table exists
     * @param tableName tableName
     * @return if table exist return true,else return false
     */
    public abstract boolean isExistsTable(String tableName);

    /**
     * determines whether a field exists in the specified table
     * @param tableName tableName
     * @param columnName columnName
     * @return if column name exist return true,else return false
     */
    public abstract boolean isExistsColumn(String tableName, String columnName);

    /**
     * get current version
     * @param versionName versionName
     * @return version
     */
    public String getCurrentVersion(String versionName) {
        String sql = String.format("select version from %s", versionName);
        Connection conn = null;
        ResultSet rs = null;
        PreparedStatement pstmt = null;
        String version = null;
        try {
            conn = dataSource.getConnection();
            pstmt = conn.prepareStatement(sql);
            rs = pstmt.executeQuery();
            if (rs.next()) {
                version = rs.getString(1);
            }
            return version;
        } catch (SQLException e) {
            logger.error(e.getMessage(), e);
            throw new RuntimeException("sql: " + sql, e);
        } finally {
            ConnectionUtils.releaseResource(rs, pstmt, conn);
        }
    }

    /**
     * upgrade DolphinScheduler
     * @param schemaDir schema dir
     */
    public void upgradeDolphinScheduler(String schemaDir) {
        upgradeDolphinSchedulerDDL(schemaDir);
        upgradeDolphinSchedulerDML(schemaDir);
    }

    /**
     * upgrade DolphinScheduler worker group
     * ds-1.3.0 modify the worker group for process definition json
     */
    public void upgradeDolphinSchedulerWorkerGroup() {
        updateProcessDefinitionJsonWorkerGroup();
    }

    /**
     * upgrade DolphinScheduler resource list
     * ds-1.3.2 modify the resource list for process definition json
     */
    public void upgradeDolphinSchedulerResourceList() {
        updateProcessDefinitionJsonResourceList();
    }

    /**
     * updateProcessDefinitionJsonWorkerGroup
     */
    protected void updateProcessDefinitionJsonWorkerGroup() {
        WorkerGroupDao workerGroupDao = new WorkerGroupDao();
        ProcessDefinitionDao processDefinitionDao = new ProcessDefinitionDao();
        Map<Integer, String> replaceProcessDefinitionMap = new HashMap<>();
        try {
            Map<Integer, String> oldWorkerGroupMap = workerGroupDao.queryAllOldWorkerGroup(dataSource.getConnection());
            Map<Integer, String> processDefinitionJsonMap = processDefinitionDao.queryAllProcessDefinition(dataSource.getConnection());

            for (Map.Entry<Integer, String> entry : processDefinitionJsonMap.entrySet()) {
                JSONObject jsonObject = JSONObject.parseObject(entry.getValue());
                JSONArray tasks = JSONArray.parseArray(jsonObject.getString("tasks"));

                for (int i = 0; i < tasks.size(); i++) {
                    JSONObject task = tasks.getJSONObject(i);
                    Integer workerGroupId = task.getInteger("workerGroupId");
                    if (workerGroupId != null) {
                        if (workerGroupId == -1) {
                            task.put("workerGroup", "default");
                        } else {
                            task.put("workerGroup", oldWorkerGroupMap.get(workerGroupId));
                        }
                    }
                }
                jsonObject.remove(jsonObject.getString("tasks"));
                jsonObject.put("tasks", tasks);
                replaceProcessDefinitionMap.put(entry.getKey(), jsonObject.toJSONString());
            }
            if (replaceProcessDefinitionMap.size() > 0) {
                processDefinitionDao.updateProcessDefinitionJson(dataSource.getConnection(), replaceProcessDefinitionMap);
            }
        } catch (Exception e) {
            logger.error("update process definition json workergroup error", e);
        }
    }

    /**
     * updateProcessDefinitionJsonResourceList
     */
    protected void updateProcessDefinitionJsonResourceList() {
        ResourceDao resourceDao = new ResourceDao();
        ProcessDefinitionDao processDefinitionDao = new ProcessDefinitionDao();
        Map<Integer, String> replaceProcessDefinitionMap = new HashMap<>();
        try {
            Map<String, Integer> resourcesMap = resourceDao.listAllResources(dataSource.getConnection());
            Map<Integer, String> processDefinitionJsonMap = processDefinitionDao.queryAllProcessDefinition(dataSource.getConnection());

            for (Map.Entry<Integer, String> entry : processDefinitionJsonMap.entrySet()) {
                JSONObject jsonObject = JSONObject.parseObject(entry.getValue());
                JSONArray tasks = JSONArray.parseArray(jsonObject.getString("tasks"));

                for (int i = 0; i < tasks.size(); i++) {
                    JSONObject task = tasks.getJSONObject(i);
                    JSONObject param = (JSONObject) task.get("params");
                    if (param != null) {
                        List<ResourceInfo> resourceList = JSONUtils.toList(param.getString("resourceList"), ResourceInfo.class);
                        if (CollectionUtils.isNotEmpty(resourceList)) {
                            List<ResourceInfo> newResourceList = resourceList.stream().map(resInfo -> {
                                String fullName = resInfo.getRes().startsWith("/") ? resInfo.getRes() : String.format("/%s", resInfo.getRes());
                                if (resInfo.getId() == 0 && resourcesMap.containsKey(fullName)) {
                                    resInfo.setId(resourcesMap.get(fullName));
                                }
                                return resInfo;
                            }).collect(Collectors.toList());
                            param.put("resourceList", JSONArray.parse(JSONObject.toJSONString(newResourceList)));
                        }
                    }
task.put("params",param); } jsonObject.remove(jsonObject.getString("tasks")); jsonObject.put("tasks",tasks); replaceProcessDefinitionMap.put(entry.getKey(),jsonObject.toJSONString()); } if (replaceProcessDefinitionMap.size() > 0){ processDefinitionDao.updateProcessDefinitionJson(dataSource.getConnection(),replaceProcessDefinitionMap); } }catch (Exception e){ logger.error("update process definition json resource list error",e); } } /** * upgradeDolphinScheduler DML * @param schemaDir schemaDir */ private void upgradeDolphinSchedulerDML(String schemaDir) { String schemaVersion = schemaDir.split("_")[0]; if (StringUtils.isEmpty(rootDir)) { throw new RuntimeException("Environment variable user.dir not found"); } String sqlFilePath = MessageFormat.format("{0}/sql/upgrade/{1}/{2}/dolphinscheduler_dml.sql",rootDir,schemaDir,getDbType().name().toLowerCase()); logger.info("sqlSQLFilePath"+sqlFilePath); Connection conn = null; PreparedStatement pstmt = null; try { conn = dataSource.getConnection(); conn.setAutoCommit(false); // E
            ScriptRunner scriptRunner = new ScriptRunner(conn, false, true);
            Reader sqlReader = new FileReader(new File(sqlFilePath));
            scriptRunner.runScript(sqlReader);
            if (isExistsTable(T_VERSION_NAME)) {
                // C
                String upgradeSQL = String.format("update %s set version = ?", T_VERSION_NAME);
                pstmt = conn.prepareStatement(upgradeSQL);
                pstmt.setString(1, schemaVersion);
                pstmt.executeUpdate();
            } else if (isExistsTable(T_NEW_VERSION_NAME)) {
                // C
                String upgradeSQL = String.format("update %s set version = ?", T_NEW_VERSION_NAME);
                pstmt = conn.prepareStatement(upgradeSQL);
                pstmt.setString(1, schemaVersion);
                pstmt.executeUpdate();
            }
            conn.commit();
        } catch (FileNotFoundException e) {
            try {
                conn.rollback();
            } catch (SQLException e1) {
                logger.error(e1.getMessage(), e1);
            }
            logger.error(e.getMessage(), e);
            throw new RuntimeException("sql file not found ", e);
        } catch (IOException e) {
            try {
                conn.rollback();
            } catch (SQLException e1) {
                logger.error(e1.getMessage(), e1);
            }
            logger.error(e.getMessage(), e);
            throw new RuntimeException(e.getMessage(), e);
        } catch (SQLException e) {
            try {
                if (null != conn) {
                    conn.rollback();
                }
            } catch (SQLException e1) {
                logger.error(e1.getMessage(), e1);
            }
            logger.error(e.getMessage(), e);
            throw new RuntimeException(e.getMessage(), e);
        } catch (Exception e) {
            try {
                if (null != conn) {
                    conn.rollback();
                }
            } catch (SQLException e1) {
                logger.error(e1.getMessage(), e1);
            }
            logger.error(e.getMessage(), e);
            throw new RuntimeException(e.getMessage(), e);
        } finally {
            ConnectionUtils.releaseResource(pstmt, conn);
        }
    }

    /**
     * upgradeDolphinScheduler DDL
     * @param schemaDir schemaDir
     */
    private void upgradeDolphinSchedulerDDL(String schemaDir) {
        if (StringUtils.isEmpty(rootDir)) {
            throw new RuntimeException("Environment variable user.dir not found");
        }
        String sqlFilePath = MessageFormat.format("{0}/sql/upgrade/{1}/{2}/dolphinscheduler_ddl.sql", rootDir, schemaDir, getDbType().name().toLowerCase());
        Connection conn = null;
        PreparedStatement pstmt = null;
        try {
            conn = dataSource.getConnection();
            String dbName = conn.getCatalog();
            logger.info(dbName);
            conn.setAutoCommit(true);
            // E
            ScriptRunner scriptRunner = new ScriptRunner(conn, true, true);
            Reader sqlReader = new FileReader(new File(sqlFilePath));
            scriptRunner.runScript(sqlReader);
        } catch (FileNotFoundException e) {
            logger.error(e.getMessage(), e);
            throw new RuntimeException("sql file not found ", e);
        } catch (IOException e) {
            logger.error(e.getMessage(), e);
            throw new RuntimeException(e.getMessage(), e);
        } catch (SQLException e) {
            logger.error(e.getMessage(), e);
            throw new RuntimeException(e.getMessage(), e);
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
            throw new RuntimeException(e.getMessage(), e);
        } finally {
            ConnectionUtils.releaseResource(pstmt, conn);
        }
    }

    /**
     * update version
     * @param version version
     */
    public void updateVersion(String version) {
        // C
        String versionName = T_VERSION_NAME;
        if (!SchemaUtils.isAGreatVersion("1.2.0", version)) {
            versionName = "t_ds_version";
        }
        String upgradeSQL = String.format("update %s set version = ?", versionName);
        PreparedStatement pstmt = null;
        Connection conn = null;
        try {
            conn = dataSource.getConnection();
            pstmt = conn.prepareStatement(upgradeSQL);
            pstmt.setString(1, version);
            pstmt.executeUpdate();
        } catch (SQLException e) {
            logger.error(e.getMessage(), e);
            throw new RuntimeException("sql: " + upgradeSQL, e);
        } finally {
            ConnectionUtils.releaseResource(pstmt, conn);
        }
    }
}
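To make the resource-list migration above concrete, here is a self-contained fastjson sketch of the transformation that updateProcessDefinitionJsonResourceList performs on one task's params: entries whose id is still 0 are resolved against a map of full resource names to ids, normalizing names to a leading slash first. The sample JSON and the name-to-id map are invented for illustration:

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;

import java.util.HashMap;
import java.util.Map;

public class ResourceListMigrationDemo {

    public static void main(String[] args) {
        // Invented sample: a task param whose resourceList entry still has id 0
        JSONObject param = JSONObject.parseObject(
                "{\"resourceList\":[{\"id\":0,\"res\":\"b/b2.jar\"}]}");

        // Invented stand-in for ResourceDao.listAllResources: full name -> id
        Map<String, Integer> resourcesMap = new HashMap<>();
        resourcesMap.put("/b/b2.jar", 6);

        JSONArray resourceList = param.getJSONArray("resourceList");
        for (int i = 0; i < resourceList.size(); i++) {
            JSONObject resInfo = resourceList.getJSONObject(i);
            String res = resInfo.getString("res");
            // normalize to a full name with a leading slash, as the upgrade code does
            String fullName = res.startsWith("/") ? res : String.format("/%s", res);
            if (resInfo.getIntValue("id") == 0 && resourcesMap.containsKey(fullName)) {
                resInfo.put("id", resourcesMap.get(fullName));
            }
        }
        // prints the param with the id resolved, e.g.
        // {"resourceList":[{"res":"b/b2.jar","id":6}]}
        System.out.println(param.toJSONString());
    }
}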
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,431
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
1. Upgrade from 1.2.0 to 1.3.2, then re-upload the file in the resource center ![image](https://user-images.githubusercontent.com/55787491/89607356-eb2c3300-d8a4-11ea-9f1d-e1c41e09ba75.png) 2. View the task resources of the workflow definition; multiple deleted resource directories are displayed ![image](https://user-images.githubusercontent.com/55787491/89607195-7eb13400-d8a4-11ea-9667-d46b51cb2678.png) **Which version of Dolphin Scheduler:** - [1.3.2-release]
https://github.com/apache/dolphinscheduler/issues/3431
https://github.com/apache/dolphinscheduler/pull/3498
e367f90bb73c9682739308a0a98887a1c0f407ef
5f5c08402fdcecca8c35f4dc3021cc089949ef13
"2020-08-07T03:58:50Z"
java
"2020-08-14T08:47:01Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.dolphinscheduler.api.controller;

import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
import org.apache.commons.lang.StringUtils;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ApiException;
import org.apache.dolphinscheduler.api.service.ResourcesService;
import org.apache.dolphinscheduler.api.service.UdfFuncService;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.UdfType;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.entity.User;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.core.io.Resource;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;
import springfox.documentation.annotations.ApiIgnore;

import java.util.Map;

import static org.apache.dolphinscheduler.api.enums.Status.*;

/**
 * resources controller
 */
@Api(tags = "RESOURCES_TAG", position = 1)
@RestController
@RequestMapping("resources")
public class ResourcesController extends BaseController {

    private static final Logger logger = LoggerFactory.getLogger(ResourcesController.class);

    @Autowired
    private ResourcesService resourceService;

    @Autowired
    private UdfFuncService udfFuncService;

    /**
     * create directory
     *
     * @param loginUser login user
     * @param type type
     * @param alias alias
     * @param description description
     * @param pid parent id
     * @param currentDir current directory
     * @return create result code
     */
    @ApiOperation(value = "createDirctory", notes = "CREATE_RESOURCE_NOTES")
    @ApiImplicitParams({
            @ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
            @ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType = "String"),
            @ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType = "String"),
            @ApiImplicitParam(name = "file", value = "RESOURCE_FILE", required = true, dataType = "MultipartFile")
    })
    @PostMapping(value = "/directory/create")
    @ApiException(CREATE_RESOURCE_ERROR)
    public Result createDirectory(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                  @RequestParam(value = "type") ResourceType type,
                                  @RequestParam(value = "name") String alias,
                                  @RequestParam(value = "description", required = false) String description,
                                  @RequestParam(value = "pid") int pid,
                                  @RequestParam(value = "currentDir") String currentDir) {
        logger.info("login user {}, create resource, type: {}, resource alias: {}, desc: {}, file: {},{}",
                loginUser.getUserName(), type, alias, description, pid, currentDir);
        return resourceService.createDirectory(loginUser, alias, description, type, pid, currentDir);
    }

    /**
     * create resource
     *
     * @param loginUser login user
     * @param alias alias
     * @param description description
     * @param type type
     * @param file file
     * @return create result code
     */
    @ApiOperation(value = "createResource", notes = "CREATE_RESOURCE_NOTES")
    @ApiImplicitParams({
            @ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
            @ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType = "String"),
            @ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType = "String"),
            @ApiImplicitParam(name = "file", value = "RESOURCE_FILE", required = true, dataType = "MultipartFile")
    })
    @PostMapping(value = "/create")
    @ApiException(CREATE_RESOURCE_ERROR)
    public Result createResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                 @RequestParam(value = "type") ResourceType type,
                                 @RequestParam(value = "name") String alias,
                                 @RequestParam(value = "description", required = false) String description,
                                 @RequestParam("file") MultipartFile file,
                                 @RequestParam(value = "pid") int pid,
                                 @RequestParam(value = "currentDir") String currentDir) {
        logger.info("login user {}, create resource, type: {}, resource alias: {}, desc: {}, file: {},{}",
                loginUser.getUserName(), type, alias, description, file.getName(), file.getOriginalFilename());
        return resourceService.createResource(loginUser, alias, description, type, file, pid, currentDir);
    }

    /**
     * update resource
     *
     * @param loginUser login user
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,431
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
1. Upgrade from 1.2.0 to 1.3.2, then re-upload the file in the Resource Center ![image](https://user-images.githubusercontent.com/55787491/89607356-eb2c3300-d8a4-11ea-9f1d-e1c41e09ba75.png) 2. View the task resources of the workflow definition; multiple deleted resource directories are displayed ![image](https://user-images.githubusercontent.com/55787491/89607195-7eb13400-d8a4-11ea-9667-d46b51cb2678.png) **Which version of Dolphin Scheduler:** -[1.3.2-release]
https://github.com/apache/dolphinscheduler/issues/3431
https://github.com/apache/dolphinscheduler/pull/3498
e367f90bb73c9682739308a0a98887a1c0f407ef
5f5c08402fdcecca8c35f4dc3021cc089949ef13
"2020-08-07T03:58:50Z"
java
"2020-08-14T08:47:01Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
 * @param alias alias
 * @param resourceId resource id
 * @param type resource type
 * @param description description
 * @param file resource file
 * @return update result code
 */
@ApiOperation(value = "updateResource", notes = "UPDATE_RESOURCE_NOTES")
@ApiImplicitParams({
        @ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
        @ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType = "String"),
        @ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType = "String"),
        @ApiImplicitParam(name = "file", value = "RESOURCE_FILE", required = true, dataType = "MultipartFile")
})
@PostMapping(value = "/update")
@ApiException(UPDATE_RESOURCE_ERROR)
public Result updateResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                             @RequestParam(value = "id") int resourceId,
                             @RequestParam(value = "type") ResourceType type,
                             @RequestParam(value = "name") String alias,
                             @RequestParam(value = "description", required = false) String description,
                             @RequestParam(value = "file", required = false) MultipartFile file) {
    logger.info("login user {}, update resource, type: {}, resource alias: {}, desc: {}, file: {}",
            loginUser.getUserName(), type, alias, description, file);
    return resourceService.updateResource(loginUser, resourceId, alias, description, type, file);
}

/**
 * query resources list
 *
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,431
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
1. Upgrade from 1.2.0 to 1.3.2, then re-upload the file in the Resource Center ![image](https://user-images.githubusercontent.com/55787491/89607356-eb2c3300-d8a4-11ea-9f1d-e1c41e09ba75.png) 2. View the task resources of the workflow definition; multiple deleted resource directories are displayed ![image](https://user-images.githubusercontent.com/55787491/89607195-7eb13400-d8a4-11ea-9667-d46b51cb2678.png) **Which version of Dolphin Scheduler:** -[1.3.2-release]
https://github.com/apache/dolphinscheduler/issues/3431
https://github.com/apache/dolphinscheduler/pull/3498
e367f90bb73c9682739308a0a98887a1c0f407ef
5f5c08402fdcecca8c35f4dc3021cc089949ef13
"2020-08-07T03:58:50Z"
java
"2020-08-14T08:47:01Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
 * @param loginUser login user
 * @param type resource type
 * @return resource list
 */
@ApiOperation(value = "queryResourceList", notes = "QUERY_RESOURCE_LIST_NOTES")
@ApiImplicitParams({
        @ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType")
})
@GetMapping(value = "/list")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_RESOURCES_LIST_ERROR)
public Result queryResourceList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                @RequestParam(value = "type") ResourceType type
) {
    logger.info("query resource list, login user:{}, resource type:{}", loginUser.getUserName(), type);
    Map<String, Object> result = resourceService.queryResourceList(loginUser, type);
    return returnDataList(result);
}

/**
 * query resources list paging
 *
 * @param loginUser login user
 * @param type resource type
 * @param searchVal search value
 * @param pageNo page number
 * @param pageSize page size
 * @return resource list page
 */
@ApiOperation(value = "queryResourceListPaging", notes = "QUERY_RESOURCE_LIST_PAGING_NOTES")
@ApiImplicitParams({
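The bug in this issue surfaces in exactly the kind of list queryResourceList returns: after a re-upload, stale entries for the same full name can show up more than once. A minimal, hypothetical sketch of de-duplicating such a list by full name (assuming a Resource-like type with a full-name accessor; this is not the project's actual fix):

```java
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ResourceDedupeSketch {

    // Stand-in for the real Resource entity; only the full name matters here.
    record Resource(int id, String fullName) { }

    // Keep the first occurrence of each full name, preserving list order.
    static List<Resource> dedupeByFullName(List<Resource> resources) {
        Map<String, Resource> byFullName = new LinkedHashMap<>();
        for (Resource r : resources) {
            byFullName.putIfAbsent(r.fullName(), r);
        }
        return new ArrayList<>(byFullName.values());
    }

    public static void main(String[] args) {
        List<Resource> raw = List.of(
                new Resource(1, "/udf/func.jar"),
                new Resource(2, "/udf/func.jar"),   // stale duplicate after re-upload
                new Resource(3, "/scripts/etl.sh"));
        System.out.println(dedupeByFullName(raw)); // two entries remain
    }
}
```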
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,431
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
1. Upgrade from 1.2.0 to 1.3.2, then re-upload the file in the Resource Center ![image](https://user-images.githubusercontent.com/55787491/89607356-eb2c3300-d8a4-11ea-9f1d-e1c41e09ba75.png) 2. View the task resources of the workflow definition; multiple deleted resource directories are displayed ![image](https://user-images.githubusercontent.com/55787491/89607195-7eb13400-d8a4-11ea-9667-d46b51cb2678.png) **Which version of Dolphin Scheduler:** -[1.3.2-release]
https://github.com/apache/dolphinscheduler/issues/3431
https://github.com/apache/dolphinscheduler/pull/3498
e367f90bb73c9682739308a0a98887a1c0f407ef
5f5c08402fdcecca8c35f4dc3021cc089949ef13
"2020-08-07T03:58:50Z"
java
"2020-08-14T08:47:01Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"), @ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "int"), @ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", dataType = "String"), @ApiImplicitParam(name = "pageNo", value = "PAGE_NO", dataType = "Int", example = "1"), @ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", dataType = "Int", example = "20") }) @GetMapping(value = "/list-paging") @ResponseStatus(HttpStatus.OK) @ApiException(QUERY_RESOURCES_LIST_PAGING) public Result queryResourceListPaging(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "type") ResourceType type, @RequestParam(value = "id") int id, @RequestParam("pageNo") Integer pageNo, @RequestParam(value = "searchVal", required = false) String searchVal, @RequestParam("pageSize") Integer pageSize ) { logger.info("query resource list, login user:{}, resource type:{}, search value:{}", loginUser.getUserName(), type, searchVal); Map<String, Object> result = checkPageParams(pageNo, pageSize); if (result.get(Constants.STATUS) != Status.SUCCESS) { return returnDataListPaging(result); } searchVal = ParameterUtils.handleEscapes(searchVal); result = resourceService.queryResourceListPaging(loginUser, id, type, searchVal, pageNo, pageSize); return returnDataListPaging(result); } /** * delete resource * * @param loginUser login user
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,431
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
1. Upgrade from 1.2.0 to 1.3.2, then re-upload the file in the Resource Center ![image](https://user-images.githubusercontent.com/55787491/89607356-eb2c3300-d8a4-11ea-9f1d-e1c41e09ba75.png) 2. View the task resources of the workflow definition; multiple deleted resource directories are displayed ![image](https://user-images.githubusercontent.com/55787491/89607195-7eb13400-d8a4-11ea-9667-d46b51cb2678.png) **Which version of Dolphin Scheduler:** -[1.3.2-release]
https://github.com/apache/dolphinscheduler/issues/3431
https://github.com/apache/dolphinscheduler/pull/3498
e367f90bb73c9682739308a0a98887a1c0f407ef
5f5c08402fdcecca8c35f4dc3021cc089949ef13
"2020-08-07T03:58:50Z"
java
"2020-08-14T08:47:01Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
 * @param resourceId resource id
 * @return delete result code
 */
@ApiOperation(value = "deleteResource", notes = "DELETE_RESOURCE_BY_ID_NOTES")
@ApiImplicitParams({
        @ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/delete")
@ResponseStatus(HttpStatus.OK)
@ApiException(DELETE_RESOURCE_ERROR)
public Result deleteResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                             @RequestParam(value = "id") int resourceId
) throws Exception {
    logger.info("login user {}, delete resource id: {}", loginUser.getUserName(), resourceId);
    return resourceService.delete(loginUser, resourceId);
}

/**
 * verify resource by alias and type
 *
 * @param loginUser login user
 * @param fullName resource full name
 * @param type resource type
 * @return true if the resource name does not exist, otherwise false
 */
@ApiOperation(value = "verifyResourceName", notes = "VERIFY_RESOURCE_NAME_NOTES")
@ApiImplicitParams({
        @ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
        @ApiImplicitParam(name = "fullName", value = "RESOURCE_FULL_NAME", required = true, dataType = "String")
})
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,431
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
1. Upgrade from 1.2.0 to 1.3.2, then re-upload the file in the Resource Center ![image](https://user-images.githubusercontent.com/55787491/89607356-eb2c3300-d8a4-11ea-9f1d-e1c41e09ba75.png) 2. View the task resources of the workflow definition; multiple deleted resource directories are displayed ![image](https://user-images.githubusercontent.com/55787491/89607195-7eb13400-d8a4-11ea-9667-d46b51cb2678.png) **Which version of Dolphin Scheduler:** -[1.3.2-release]
https://github.com/apache/dolphinscheduler/issues/3431
https://github.com/apache/dolphinscheduler/pull/3498
e367f90bb73c9682739308a0a98887a1c0f407ef
5f5c08402fdcecca8c35f4dc3021cc089949ef13
"2020-08-07T03:58:50Z"
java
"2020-08-14T08:47:01Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
@GetMapping(value = "/verify-name") @ResponseStatus(HttpStatus.OK) @ApiException(VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR) public Result verifyResourceName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "fullName") String fullName, @RequestParam(value = "type") ResourceType type ) { logger.info("login user {}, verfiy resource alias: {},resource type: {}", loginUser.getUserName(), fullName, type); return resourceService.verifyResourceName(fullName, type, loginUser); } /** * query resources jar list * * @param loginUser login user * @param type resource type * @return resource list */ @ApiOperation(value = "queryResourceJarList", notes = "QUERY_RESOURCE_LIST_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType") }) @GetMapping(value = "/list/jar") @ResponseStatus(HttpStatus.OK) @ApiException(QUERY_RESOURCES_LIST_ERROR) public Result queryResourceJarList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "type") ResourceType type ) { logger.info("query resource list, login user:{}, resource type:{}", loginUser.getUserName(), type.toString()); Map<String, Object> result = resourceService.queryResourceJarList(loginUser, type);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,431
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
1. Upgrade from 1.2.0 to 1.3.2, then re-upload the file in the Resource Center ![image](https://user-images.githubusercontent.com/55787491/89607356-eb2c3300-d8a4-11ea-9f1d-e1c41e09ba75.png) 2. View the task resources of the workflow definition; multiple deleted resource directories are displayed ![image](https://user-images.githubusercontent.com/55787491/89607195-7eb13400-d8a4-11ea-9667-d46b51cb2678.png) **Which version of Dolphin Scheduler:** -[1.3.2-release]
https://github.com/apache/dolphinscheduler/issues/3431
https://github.com/apache/dolphinscheduler/pull/3498
e367f90bb73c9682739308a0a98887a1c0f407ef
5f5c08402fdcecca8c35f4dc3021cc089949ef13
"2020-08-07T03:58:50Z"
java
"2020-08-14T08:47:01Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
    return returnDataList(result);
}

/**
 * query resource by full name and type
 *
 * @param loginUser login user
 * @param fullName resource full name
 * @param type resource type
 * @return the resource if it exists, otherwise an error result
 */
@ApiOperation(value = "queryResource", notes = "QUERY_BY_RESOURCE_NAME")
@ApiImplicitParams({
        @ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
        @ApiImplicitParam(name = "fullName", value = "RESOURCE_FULL_NAME", required = true, dataType = "String")
})
@GetMapping(value = "/queryResource")
@ResponseStatus(HttpStatus.OK)
@ApiException(RESOURCE_NOT_EXIST)
public Result queryResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                            @RequestParam(value = "fullName", required = false) String fullName,
                            @RequestParam(value = "id", required = false) Integer id,
                            @RequestParam(value = "type") ResourceType type
) {
    logger.info("login user {}, query resource by full name: {} or id: {}, resource type: {}",
            loginUser.getUserName(), fullName, id, type);
    return resourceService.queryResource(fullName, id, type);
}

/**
 * view resource file online
 *
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,431
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
1. Upgrade from 1.2.0 to 1.3.2, then re-upload the file in the Resource Center ![image](https://user-images.githubusercontent.com/55787491/89607356-eb2c3300-d8a4-11ea-9f1d-e1c41e09ba75.png) 2. View the task resources of the workflow definition; multiple deleted resource directories are displayed ![image](https://user-images.githubusercontent.com/55787491/89607195-7eb13400-d8a4-11ea-9667-d46b51cb2678.png) **Which version of Dolphin Scheduler:** -[1.3.2-release]
https://github.com/apache/dolphinscheduler/issues/3431
https://github.com/apache/dolphinscheduler/pull/3498
e367f90bb73c9682739308a0a98887a1c0f407ef
5f5c08402fdcecca8c35f4dc3021cc089949ef13
"2020-08-07T03:58:50Z"
java
"2020-08-14T08:47:01Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
 * @param loginUser login user
 * @param resourceId resource id
 * @param skipLineNum skip line number
 * @param limit limit
 * @return resource content
 */
@ApiOperation(value = "viewResource", notes = "VIEW_RESOURCE_BY_ID_NOTES")
@ApiImplicitParams({
        @ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "skipLineNum", value = "SKIP_LINE_NUM", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "limit", value = "LIMIT", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/view")
@ApiException(VIEW_RESOURCE_FILE_ON_LINE_ERROR)
public Result viewResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                           @RequestParam(value = "id") int resourceId,
                           @RequestParam(value = "skipLineNum") int skipLineNum,
                           @RequestParam(value = "limit") int limit
) {
    logger.info("login user {}, view resource : {}, skipLineNum {} , limit {}",
            loginUser.getUserName(), resourceId, skipLineNum, limit);
    return resourceService.readResource(resourceId, skipLineNum, limit);
}

/**
 * create resource file online
 *
 * @param loginUser login user
 * @param type resource type
 * @param fileName file name
 * @param fileSuffix file suffix
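The skipLineNum/limit pair is a windowed read over the file. A self-contained sketch of that semantics with java.nio (again a sketch, not the service's actual readResource):

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class WindowedReadSketch {

    // Return at most `limit` lines, starting after the first `skipLineNum` lines.
    static List<String> readWindow(Path file, int skipLineNum, int limit) throws IOException {
        try (Stream<String> lines = Files.lines(file)) {
            return lines.skip(skipLineNum).limit(limit).collect(Collectors.toList());
        }
    }

    public static void main(String[] args) throws IOException {
        Path tmp = Files.createTempFile("demo", ".txt");
        Files.write(tmp, List.of("line1", "line2", "line3", "line4"));
        System.out.println(readWindow(tmp, 1, 2)); // [line2, line3]
    }
}
```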
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,431
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
1. Upgrade from 1.2.0 to 1.3.2, then re-upload the file in the Resource Center ![image](https://user-images.githubusercontent.com/55787491/89607356-eb2c3300-d8a4-11ea-9f1d-e1c41e09ba75.png) 2. View the task resources of the workflow definition; multiple deleted resource directories are displayed ![image](https://user-images.githubusercontent.com/55787491/89607195-7eb13400-d8a4-11ea-9667-d46b51cb2678.png) **Which version of Dolphin Scheduler:** -[1.3.2-release]
https://github.com/apache/dolphinscheduler/issues/3431
https://github.com/apache/dolphinscheduler/pull/3498
e367f90bb73c9682739308a0a98887a1c0f407ef
5f5c08402fdcecca8c35f4dc3021cc089949ef13
"2020-08-07T03:58:50Z"
java
"2020-08-14T08:47:01Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
 * @param description description
 * @param content content
 * @return create result code
 */
@ApiOperation(value = "onlineCreateResource", notes = "ONLINE_CREATE_RESOURCE_NOTES")
@ApiImplicitParams({
        @ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
        @ApiImplicitParam(name = "fileName", value = "RESOURCE_NAME", required = true, dataType = "String"),
        @ApiImplicitParam(name = "suffix", value = "SUFFIX", required = true, dataType = "String"),
        @ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType = "String"),
        @ApiImplicitParam(name = "content", value = "CONTENT", required = true, dataType = "String")
})
@PostMapping(value = "/online-create")
@ApiException(CREATE_RESOURCE_FILE_ON_LINE_ERROR)
public Result onlineCreateResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                   @RequestParam(value = "type") ResourceType type,
                                   @RequestParam(value = "fileName") String fileName,
                                   @RequestParam(value = "suffix") String fileSuffix,
                                   @RequestParam(value = "description", required = false) String description,
                                   @RequestParam(value = "content") String content,
                                   @RequestParam(value = "pid") int pid,
                                   @RequestParam(value = "currentDir") String currentDir
) {
    // the original log statement passed pid and currentDir without matching placeholders
    logger.info("login user {}, online create resource! fileName : {}, type : {}, suffix : {}, desc : {}, content : {}, pid : {}, currentDir : {}",
            loginUser.getUserName(), fileName, type, fileSuffix, description, content, pid, currentDir);
    if (StringUtils.isEmpty(content)) {
        logger.error("resource file contents are not allowed to be empty");
        return error(Status.RESOURCE_FILE_IS_EMPTY.getCode(), RESOURCE_FILE_IS_EMPTY.getMsg());
    }
    return resourceService.onlineCreateResource(loginUser, type, fileName, fileSuffix, description, content, pid, currentDir);
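The full name stored for such a file is essentially the parent directory plus the file name plus the suffix; stale variants of exactly that string are what issue 3431 is about. A small hypothetical helper showing the composition (the real logic lives in the service layer):

```java
public class FullNameSketch {

    // Compose a resource full name: parent directory + file name + suffix,
    // normalizing slashes and the leading dot of the suffix.
    static String fullName(String currentDir, String fileName, String suffix) {
        String dir = currentDir.endsWith("/") ? currentDir : currentDir + "/";
        String dot = suffix.startsWith(".") ? "" : ".";
        return dir + fileName + dot + suffix;
    }

    public static void main(String[] args) {
        System.out.println(fullName("/", "etl", "sh"));          // /etl.sh
        System.out.println(fullName("/scripts/", "etl", ".sh")); // /scripts/etl.sh
    }
}
```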
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,431
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
1. Upgrade from 1.2.0 to 1.3.2, then re-upload the file in the Resource Center ![image](https://user-images.githubusercontent.com/55787491/89607356-eb2c3300-d8a4-11ea-9f1d-e1c41e09ba75.png) 2. View the task resources of the workflow definition; multiple deleted resource directories are displayed ![image](https://user-images.githubusercontent.com/55787491/89607195-7eb13400-d8a4-11ea-9667-d46b51cb2678.png) **Which version of Dolphin Scheduler:** -[1.3.2-release]
https://github.com/apache/dolphinscheduler/issues/3431
https://github.com/apache/dolphinscheduler/pull/3498
e367f90bb73c9682739308a0a98887a1c0f407ef
5f5c08402fdcecca8c35f4dc3021cc089949ef13
"2020-08-07T03:58:50Z"
java
"2020-08-14T08:47:01Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
}

/**
 * edit resource file online
 *
 * @param loginUser login user
 * @param resourceId resource id
 * @param content content
 * @return update result code
 */
@ApiOperation(value = "updateResourceContent", notes = "UPDATE_RESOURCE_NOTES")
@ApiImplicitParams({
        @ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "content", value = "CONTENT", required = true, dataType = "String")
})
@PostMapping(value = "/update-content")
@ApiException(EDIT_RESOURCE_FILE_ON_LINE_ERROR)
public Result updateResourceContent(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                    @RequestParam(value = "id") int resourceId,
                                    @RequestParam(value = "content") String content
) {
    // the original message said "updateProcessInstance resource", a copy-paste artifact
    logger.info("login user {}, update resource content, resource id : {}",
            loginUser.getUserName(), resourceId);
    if (StringUtils.isEmpty(content)) {
        logger.error("The resource file contents are not allowed to be empty");
        return error(Status.RESOURCE_FILE_IS_EMPTY.getCode(), RESOURCE_FILE_IS_EMPTY.getMsg());
    }
    return resourceService.updateResourceContent(resourceId, content);
}

/**
 * download resource file
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,431
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
1. Upgrade from 1.2.0 to 1.3.2, then re-upload the file in the Resource Center ![image](https://user-images.githubusercontent.com/55787491/89607356-eb2c3300-d8a4-11ea-9f1d-e1c41e09ba75.png) 2. View the task resources of the workflow definition; multiple deleted resource directories are displayed ![image](https://user-images.githubusercontent.com/55787491/89607195-7eb13400-d8a4-11ea-9667-d46b51cb2678.png) **Which version of Dolphin Scheduler:** -[1.3.2-release]
https://github.com/apache/dolphinscheduler/issues/3431
https://github.com/apache/dolphinscheduler/pull/3498
e367f90bb73c9682739308a0a98887a1c0f407ef
5f5c08402fdcecca8c35f4dc3021cc089949ef13
"2020-08-07T03:58:50Z"
java
"2020-08-14T08:47:01Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
 *
 * @param loginUser login user
 * @param resourceId resource id
 * @return resource content
 */
@ApiOperation(value = "downloadResource", notes = "DOWNLOAD_RESOURCE_NOTES")
@ApiImplicitParams({
        @ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/download")
@ResponseBody
@ApiException(DOWNLOAD_RESOURCE_FILE_ERROR)
public ResponseEntity downloadResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                       @RequestParam(value = "id") int resourceId) throws Exception {
    logger.info("login user {}, download resource : {}",
            loginUser.getUserName(), resourceId);
    Resource file = resourceService.downloadResource(resourceId);
    if (file == null) {
        return ResponseEntity.status(HttpStatus.BAD_REQUEST).body(Status.RESOURCE_NOT_EXIST.getMsg());
    }
    return ResponseEntity
            .ok()
            .header(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"" + file.getFilename() + "\"")
            .body(file);
}

/**
 * create udf function
 *
 * @param loginUser login user
 * @param type udf type
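On the wire this is an ordinary attachment download. A hypothetical client-side counterpart with java.net.http, streaming the body straight to disk; the endpoint path, query parameter, and session handling are assumptions for illustration:

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.file.Path;

public class DownloadResourceExample {
    public static void main(String[] args) throws Exception {
        String base = "http://localhost:12345/dolphinscheduler";
        String sessionId = "<session-id-from-login>";

        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(base + "/resources/download?id=100"))
                .header("Cookie", "sessionId=" + sessionId)
                .GET()
                .build();

        // Stream the attachment body directly into a local file.
        HttpResponse<Path> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofFile(Path.of("resource.bin")));
        System.out.println("saved to " + response.body());
    }
}
```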
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,431
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
1. Upgrade from 1.2.0 to 1.3.2, then re-upload the file in the Resource Center ![image](https://user-images.githubusercontent.com/55787491/89607356-eb2c3300-d8a4-11ea-9f1d-e1c41e09ba75.png) 2. View the task resources of the workflow definition; multiple deleted resource directories are displayed ![image](https://user-images.githubusercontent.com/55787491/89607195-7eb13400-d8a4-11ea-9667-d46b51cb2678.png) **Which version of Dolphin Scheduler:** -[1.3.2-release]
https://github.com/apache/dolphinscheduler/issues/3431
https://github.com/apache/dolphinscheduler/pull/3498
e367f90bb73c9682739308a0a98887a1c0f407ef
5f5c08402fdcecca8c35f4dc3021cc089949ef13
"2020-08-07T03:58:50Z"
java
"2020-08-14T08:47:01Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
 * @param funcName function name
 * @param argTypes argument types
 * @param database database
 * @param description description
 * @param className class name
 * @param resourceId resource id
 * @return create result code
 */
@ApiOperation(value = "createUdfFunc", notes = "CREATE_UDF_FUNCTION_NOTES")
@ApiImplicitParams({
        @ApiImplicitParam(name = "type", value = "UDF_TYPE", required = true, dataType = "UdfType"),
        @ApiImplicitParam(name = "funcName", value = "FUNC_NAME", required = true, dataType = "String"),
        @ApiImplicitParam(name = "className", value = "CLASS_NAME", required = true, dataType = "String"),
        @ApiImplicitParam(name = "argTypes", value = "ARG_TYPES", dataType = "String"),
        @ApiImplicitParam(name = "database", value = "DATABASE_NAME", dataType = "String"),
        @ApiImplicitParam(name = "description", value = "UDF_DESC", dataType = "String"),
        @ApiImplicitParam(name = "resourceId", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@PostMapping(value = "/udf-func/create")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(CREATE_UDF_FUNCTION_ERROR)
public Result createUdfFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                            @RequestParam(value = "type") UdfType type,
                            @RequestParam(value = "funcName") String funcName,
                            @RequestParam(value = "className") String className,
                            @RequestParam(value = "argTypes", required = false) String argTypes,
                            @RequestParam(value = "database", required = false) String database,
                            @RequestParam(value = "description", required = false) String description,
                            @RequestParam(value = "resourceId") int resourceId) {
    // the Swagger param documenting className was named "suffix" by copy-paste; renamed here
    logger.info("login user {}, create udf function, type: {}, funcName: {}, argTypes: {}, database: {}, desc: {}, resourceId: {}",
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,431
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
1. Upgrade from 1.2.0 to 1.3.2, then re-upload the file in the Resource Center ![image](https://user-images.githubusercontent.com/55787491/89607356-eb2c3300-d8a4-11ea-9f1d-e1c41e09ba75.png) 2. View the task resources of the workflow definition; multiple deleted resource directories are displayed ![image](https://user-images.githubusercontent.com/55787491/89607195-7eb13400-d8a4-11ea-9667-d46b51cb2678.png) **Which version of Dolphin Scheduler:** -[1.3.2-release]
https://github.com/apache/dolphinscheduler/issues/3431
https://github.com/apache/dolphinscheduler/pull/3498
e367f90bb73c9682739308a0a98887a1c0f407ef
5f5c08402fdcecca8c35f4dc3021cc089949ef13
"2020-08-07T03:58:50Z"
java
"2020-08-14T08:47:01Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
            loginUser.getUserName(), type, funcName, argTypes, database, description, resourceId);
    return udfFuncService.createUdfFunction(loginUser, funcName, className, argTypes, database, description, type, resourceId);
}

/**
 * view udf function
 *
 * @param loginUser login user
 * @param id udf function id
 * @return udf function detail
 */
@ApiOperation(value = "viewUIUdfFunction", notes = "VIEW_UDF_FUNCTION_NOTES")
@ApiImplicitParams({
        @ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/udf-func/update-ui")
@ResponseStatus(HttpStatus.OK)
@ApiException(VIEW_UDF_FUNCTION_ERROR)
public Result viewUIUdfFunction(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                @RequestParam("id") int id) {
    logger.info("login user {}, query udf function detail, id: {}", loginUser.getUserName(), id);
    Map<String, Object> map = udfFuncService.queryUdfFuncDetail(id);
    return returnDataList(map);
}

/**
 * update udf function
 *
 * @param loginUser login user
 * @param type resource type
 * @param funcName function name
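Registering a UDF through createUdfFunc conceptually maps to a Hive `CREATE FUNCTION ... USING JAR` statement built from these fields. A hedged sketch of composing that statement; the exact SQL DolphinScheduler emits lives in the service layer, and the jar path here is a made-up example:

```java
public class UdfSqlSketch {

    // Build a Hive-style registration statement from the controller's fields.
    // Assumes the resourceId has already been resolved to an HDFS jar path.
    static String createFunctionSql(String database, String funcName, String className, String jarHdfsPath) {
        String qualified = (database == null || database.isEmpty()) ? funcName : database + "." + funcName;
        return String.format("CREATE FUNCTION %s AS '%s' USING JAR '%s'", qualified, className, jarHdfsPath);
    }

    public static void main(String[] args) {
        System.out.println(createFunctionSql("etl", "str_len", "com.example.udf.StrLen",
                "hdfs://nn:8020/dolphinscheduler/udfs/strlen.jar"));
    }
}
```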
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,431
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
1. Upgrade from 1.2.0 to 1.3.2, then re-upload the file in the Resource Center ![image](https://user-images.githubusercontent.com/55787491/89607356-eb2c3300-d8a4-11ea-9f1d-e1c41e09ba75.png) 2. View the task resources of the workflow definition; multiple deleted resource directories are displayed ![image](https://user-images.githubusercontent.com/55787491/89607195-7eb13400-d8a4-11ea-9667-d46b51cb2678.png) **Which version of Dolphin Scheduler:** -[1.3.2-release]
https://github.com/apache/dolphinscheduler/issues/3431
https://github.com/apache/dolphinscheduler/pull/3498
e367f90bb73c9682739308a0a98887a1c0f407ef
5f5c08402fdcecca8c35f4dc3021cc089949ef13
"2020-08-07T03:58:50Z"
java
"2020-08-14T08:47:01Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
 * @param argTypes argument types
 * @param database database
 * @param description description
 * @param resourceId resource id
 * @param className class name
 * @param udfFuncId udf function id
 * @return update result code
 */
@ApiOperation(value = "updateUdfFunc", notes = "UPDATE_UDF_FUNCTION_NOTES")
@ApiImplicitParams({
        @ApiImplicitParam(name = "type", value = "UDF_TYPE", required = true, dataType = "UdfType"),
        @ApiImplicitParam(name = "funcName", value = "FUNC_NAME", required = true, dataType = "String"),
        @ApiImplicitParam(name = "className", value = "CLASS_NAME", required = true, dataType = "String"),
        @ApiImplicitParam(name = "argTypes", value = "ARG_TYPES", dataType = "String"),
        @ApiImplicitParam(name = "database", value = "DATABASE_NAME", dataType = "String"),
        @ApiImplicitParam(name = "description", value = "UDF_DESC", dataType = "String"),
        @ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@PostMapping(value = "/udf-func/update")
@ApiException(UPDATE_UDF_FUNCTION_ERROR)
public Result updateUdfFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                            @RequestParam(value = "id") int udfFuncId,
                            @RequestParam(value = "type") UdfType type,
                            @RequestParam(value = "funcName") String funcName,
                            @RequestParam(value = "className") String className,
                            @RequestParam(value = "argTypes", required = false) String argTypes,
                            @RequestParam(value = "database", required = false) String database,
                            @RequestParam(value = "description", required = false) String description,
                            @RequestParam(value = "resourceId") int resourceId) {
    logger.info("login user {}, update udf function, id: {}, type: {}, funcName: {}, argTypes: {}, database: {}, desc: {}, resourceId: {}",
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,431
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
1. Upgrade from 1.2.0 to 1.3.2, then re-upload the file in the Resource Center ![image](https://user-images.githubusercontent.com/55787491/89607356-eb2c3300-d8a4-11ea-9f1d-e1c41e09ba75.png) 2. View the task resources of the workflow definition; multiple deleted resource directories are displayed ![image](https://user-images.githubusercontent.com/55787491/89607195-7eb13400-d8a4-11ea-9667-d46b51cb2678.png) **Which version of Dolphin Scheduler:** -[1.3.2-release]
https://github.com/apache/dolphinscheduler/issues/3431
https://github.com/apache/dolphinscheduler/pull/3498
e367f90bb73c9682739308a0a98887a1c0f407ef
5f5c08402fdcecca8c35f4dc3021cc089949ef13
"2020-08-07T03:58:50Z"
java
"2020-08-14T08:47:01Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
            loginUser.getUserName(), udfFuncId, type, funcName, argTypes, database, description, resourceId);
    Map<String, Object> result = udfFuncService.updateUdfFunc(udfFuncId, funcName, className, argTypes,
            database, description, type, resourceId);
    return returnDataList(result);
}

/**
 * query udf function list paging
 *
 * @param loginUser login user
 * @param searchVal search value
 * @param pageNo page number
 * @param pageSize page size
 * @return udf function list page
 */
@ApiOperation(value = "queryUdfFuncListPaging", notes = "QUERY_UDF_FUNCTION_LIST_PAGING_NOTES")
@ApiImplicitParams({
        @ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", dataType = "String"),
        @ApiImplicitParam(name = "pageNo", value = "PAGE_NO", dataType = "Int", example = "1"),
        @ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", dataType = "Int", example = "20")
})
@GetMapping(value = "/udf-func/list-paging")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_UDF_FUNCTION_LIST_PAGING_ERROR)
public Result<Object> queryUdfFuncListPaging(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                             @RequestParam("pageNo") Integer pageNo,
                                             @RequestParam(value = "searchVal", required = false) String searchVal,
                                             @RequestParam("pageSize") Integer pageSize
) {
    logger.info("query udf functions list, login user:{}, search value:{}", loginUser.getUserName(), searchVal);
    Map<String, Object> result = checkPageParams(pageNo, pageSize);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,431
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
1. Upgrade from 1.2.0 to 1.3.2, then re-upload the file in the Resource Center ![image](https://user-images.githubusercontent.com/55787491/89607356-eb2c3300-d8a4-11ea-9f1d-e1c41e09ba75.png) 2. View the task resources of the workflow definition; multiple deleted resource directories are displayed ![image](https://user-images.githubusercontent.com/55787491/89607195-7eb13400-d8a4-11ea-9667-d46b51cb2678.png) **Which version of Dolphin Scheduler:** -[1.3.2-release]
https://github.com/apache/dolphinscheduler/issues/3431
https://github.com/apache/dolphinscheduler/pull/3498
e367f90bb73c9682739308a0a98887a1c0f407ef
5f5c08402fdcecca8c35f4dc3021cc089949ef13
"2020-08-07T03:58:50Z"
java
"2020-08-14T08:47:01Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
    if (result.get(Constants.STATUS) != Status.SUCCESS) {
        return returnDataListPaging(result);
    }
    result = udfFuncService.queryUdfFuncListPaging(loginUser, searchVal, pageNo, pageSize);
    return returnDataListPaging(result);
}

/**
 * query udf func list by type
 *
 * @param loginUser login user
 * @param type resource type
 * @return resource list
 */
@ApiOperation(value = "queryUdfFuncList", notes = "QUERY_UDF_FUNC_LIST_NOTES")
@ApiImplicitParams({
        @ApiImplicitParam(name = "type", value = "UDF_TYPE", required = true, dataType = "UdfType")
})
@GetMapping(value = "/udf-func/list")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_DATASOURCE_BY_TYPE_ERROR)
public Result<Object> queryUdfFuncList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                       @RequestParam("type") UdfType type) {
    String userName = loginUser.getUserName();
    // strip CR/LF/TAB so a user-controlled name cannot forge extra log lines;
    // inside a character class '|' is literal, so the original "[\n|\r|\t]" also replaced pipes
    userName = userName.replaceAll("[\n\r\t]", "_");
    logger.info("query udf func list, user:{}, type:{}", userName, type);
    Map<String, Object> result = udfFuncService.queryUdfFuncList(loginUser, type.ordinal());
    return returnDataList(result);
}

/**
 * verify udf function name can be used or not
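The replaceAll above is a log-injection defense: nothing the user controls should be able to start a new log line. A reusable, hypothetical helper for the same pattern:

```java
public class LogSanitizerSketch {

    // Replace characters that could break log-line integrity with underscores.
    static String sanitizeForLog(String userControlled) {
        if (userControlled == null) {
            return null;
        }
        return userControlled.replaceAll("[\n\r\t]", "_");
    }

    public static void main(String[] args) {
        String attack = "alice\n2020-08-07 FAKE ERROR injected line";
        System.out.println(sanitizeForLog(attack)); // one line, newline replaced
    }
}
```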
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
3,431
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
1. Upgrade from 1.2.0 to 1.3.2, then re-upload the file in the Resource Center ![image](https://user-images.githubusercontent.com/55787491/89607356-eb2c3300-d8a4-11ea-9f1d-e1c41e09ba75.png) 2. View the task resources of the workflow definition; multiple deleted resource directories are displayed ![image](https://user-images.githubusercontent.com/55787491/89607195-7eb13400-d8a4-11ea-9667-d46b51cb2678.png) **Which version of Dolphin Scheduler:** -[1.3.2-release]
https://github.com/apache/dolphinscheduler/issues/3431
https://github.com/apache/dolphinscheduler/pull/3498
e367f90bb73c9682739308a0a98887a1c0f407ef
5f5c08402fdcecca8c35f4dc3021cc089949ef13
"2020-08-07T03:58:50Z"
java
"2020-08-14T08:47:01Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
 *
 * @param loginUser login user
 * @param name name
 * @return true if the name can be used, otherwise false
 */
@ApiOperation(value = "verifyUdfFuncName", notes = "VERIFY_UDF_FUNCTION_NAME_NOTES")
@ApiImplicitParams({
        @ApiImplicitParam(name = "name", value = "FUNC_NAME", required = true, dataType = "String")
})
@GetMapping(value = "/udf-func/verify-name")
@ResponseStatus(HttpStatus.OK)
@ApiException(VERIFY_UDF_FUNCTION_NAME_ERROR)
public Result verifyUdfFuncName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                @RequestParam(value = "name") String name
) {
    logger.info("login user {}, verify udf function name: {}", loginUser.getUserName(), name);
    return udfFuncService.verifyUdfFuncByName(name);
}

/**
 * delete udf function
 *
 * @param loginUser login user
 * @param udfFuncId udf function id
 * @return delete result code
 */
@ApiOperation(value = "deleteUdfFunc", notes = "DELETE_UDF_FUNCTION_NOTES")
@ApiImplicitParams({
        @ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})