output
stringlengths
64
73.2k
input
stringlengths
208
73.3k
instruction
stringclasses
1 value
#fixed code @Override public void onControllerChange(NotificationContext changeContext) { logger.info("START: GenericClusterController.onControllerChange() for cluster " + _clusterName); _cache.requireFullRefresh(); _taskCache.requireFullRefresh(); if (changeContext != null && changeContext.getType() == Type.FINALIZE) { logger.info("GenericClusterController.onControllerChange() FINALIZE for cluster " + _clusterName); return; } HelixDataAccessor accessor = changeContext.getManager().getHelixDataAccessor(); // double check if this controller is the leader Builder keyBuilder = accessor.keyBuilder(); LiveInstance leader = accessor.getProperty(keyBuilder.controllerLeader()); if (leader == null) { logger .warn("No controller exists for cluster:" + changeContext.getManager().getClusterName()); return; } else { String leaderName = leader.getInstanceName(); String instanceName = changeContext.getManager().getInstanceName(); if (leaderName == null || !leaderName.equals(instanceName)) { logger.warn("leader name does NOT match, my name: " + instanceName + ", leader: " + leader); return; } } PauseSignal pauseSignal = accessor.getProperty(keyBuilder.pause()); if (pauseSignal != null) { if (!_paused) { _paused = true; logger.info("controller is now paused"); } } else { if (_paused) { _paused = false; logger.info("controller is now resumed"); ClusterEvent event = new ClusterEvent(_clusterName, ClusterEventType.Resume); event.addAttribute(AttributeName.changeContext.name(), changeContext); event.addAttribute(AttributeName.helixmanager.name(), changeContext.getManager()); event.addAttribute(AttributeName.eventData.name(), pauseSignal); _eventQueue.put(event); _taskEventQueue.put(event.clone()); } } if (_clusterStatusMonitor == null) { _clusterStatusMonitor = new ClusterStatusMonitor(changeContext.getManager().getClusterName()); } _clusterStatusMonitor.setEnabled(!_paused); logger.info("END: GenericClusterController.onControllerChange() for cluster " + _clusterName); }
#vulnerable code @Override public void onControllerChange(NotificationContext changeContext) { logger.info("START: GenericClusterController.onControllerChange() for cluster " + _clusterName); _cache.requireFullRefresh(); if (changeContext != null && changeContext.getType() == Type.FINALIZE) { logger.info("GenericClusterController.onControllerChange() FINALIZE for cluster " + _clusterName); return; } HelixDataAccessor accessor = changeContext.getManager().getHelixDataAccessor(); // double check if this controller is the leader Builder keyBuilder = accessor.keyBuilder(); LiveInstance leader = accessor.getProperty(keyBuilder.controllerLeader()); if (leader == null) { logger .warn("No controller exists for cluster:" + changeContext.getManager().getClusterName()); return; } else { String leaderName = leader.getInstanceName(); String instanceName = changeContext.getManager().getInstanceName(); if (leaderName == null || !leaderName.equals(instanceName)) { logger.warn("leader name does NOT match, my name: " + instanceName + ", leader: " + leader); return; } } PauseSignal pauseSignal = accessor.getProperty(keyBuilder.pause()); if (pauseSignal != null) { if (!_paused) { _paused = true; logger.info("controller is now paused"); } } else { if (_paused) { _paused = false; logger.info("controller is now resumed"); ClusterEvent event = new ClusterEvent(_clusterName, ClusterEventType.Resume); event.addAttribute(AttributeName.changeContext.name(), changeContext); event.addAttribute(AttributeName.helixmanager.name(), changeContext.getManager()); event.addAttribute(AttributeName.eventData.name(), pauseSignal); _eventQueue.put(event); } } if (_clusterStatusMonitor == null) { _clusterStatusMonitor = new ClusterStatusMonitor(changeContext.getManager().getClusterName()); } _clusterStatusMonitor.setEnabled(!_paused); logger.info("END: GenericClusterController.onControllerChange() for cluster " + _clusterName); } #location 46 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testInvocation() throws Exception { System.out.println("TestCMTaskHandler.testInvocation()"); Message message = new Message(MessageType.STATE_TRANSITION); message.setSrcName("cm-instance-0"); message.setTgtSessionId("1234"); message.setFromState("Offline"); message.setToState("Slave"); message.setStateUnitKey("Teststateunitkey"); message.setId("Some unique id"); message.setMsgId("Some unique message id"); message.setStateUnitGroup("TeststateunitGroup"); MockStateModel stateModel = new MockStateModel(); NotificationContext context; CMStateTransitionHandler stHandler = new CMStateTransitionHandler(stateModel); context = new NotificationContext(new MockManager()); CMTaskHandler handler; handler = new CMTaskHandler(message, context, stHandler, null); handler.call(); AssertJUnit.assertTrue(stateModel.stateModelInvoked); }
#vulnerable code @Test public void testInvocation() throws Exception { System.out.println("TestCMTaskHandler.testInvocation()"); Message message = new Message(MessageType.STATE_TRANSITION); message.setSrcName("cm-instance-0"); message.setTgtSessionId("1234"); message.setFromState("Offline"); message.setToState("Slave"); message.setStateUnitKey("Teststateunitkey"); message.setId("Some unique id"); message.setMsgId("Some unique message id"); message.setStateUnitGroup("TeststateunitGroup"); MockStateModel stateModel = new MockStateModel(); NotificationContext context; context = new NotificationContext(new MockManager()); CMTaskHandler handler; handler = new CMTaskHandler(message, context, null, null); handler.call(); AssertJUnit.assertTrue(stateModel.stateModelInvoked); } #location 20 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code void startRebalancingTimer(long period, HelixManager manager) { if (period != _timerPeriod) { logger.info("Controller starting timer at period " + period); if (_rebalanceTimer != null) { _rebalanceTimer.cancel(); } _rebalanceTimer = new Timer(true); _timerPeriod = period; _rebalanceTimer.scheduleAtFixedRate(new RebalanceTask(manager), _timerPeriod, _timerPeriod); } else { logger.info("Controller already has timer at period " + _timerPeriod); } }
#vulnerable code void checkRebalancingTimer(HelixManager manager, List<IdealState> idealStates) { if (manager.getConfigAccessor() == null) { logger.warn(manager.getInstanceName() + " config accessor doesn't exist. should be in file-based mode."); return; } for (IdealState idealState : idealStates) { int period = idealState.getRebalanceTimerPeriod(); if (period > 0) { startRebalancingTimer(period, manager); } } } #location 11 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void execute(final ClusterEvent event) throws Exception { _eventId = event.getEventId(); HelixManager manager = event.getAttribute(AttributeName.helixmanager.name()); Map<String, Resource> resourceMap = event.getAttribute(AttributeName.RESOURCES.name()); ClusterDataCache cache = event.getAttribute(AttributeName.ClusterDataCache.name()); if (manager == null || resourceMap == null || cache == null) { throw new StageException("Missing attributes in event:" + event + ". Requires ClusterManager|RESOURCES|DataCache"); } HelixDataAccessor dataAccessor = manager.getHelixDataAccessor(); PropertyKey.Builder keyBuilder = dataAccessor.keyBuilder(); CurrentStateOutput currentStateOutput = event.getAttribute(AttributeName.CURRENT_STATE.name()); ClusterStatusMonitor clusterStatusMonitor = event.getAttribute(AttributeName.clusterStatusMonitor.name()); List<ExternalView> newExtViews = new ArrayList<>(); Set<String> monitoringResources = new HashSet<>(); Map<String, ExternalView> curExtViews = cache.getExternalViews(); for (String resourceName : resourceMap.keySet()) { ExternalView view = new ExternalView(resourceName); // view.setBucketSize(currentStateOutput.getBucketSize(resourceName)); // if resource ideal state has bucket size, set it // otherwise resource has been dropped, use bucket size from current state instead Resource resource = resourceMap.get(resourceName); if (resource.getBucketSize() > 0) { view.setBucketSize(resource.getBucketSize()); } else { view.setBucketSize(currentStateOutput.getBucketSize(resourceName)); } int totalPendingMessageCount = 0; for (Partition partition : resource.getPartitions()) { Map<String, String> currentStateMap = currentStateOutput.getCurrentStateMap(resourceName, partition); if (currentStateMap != null && currentStateMap.size() > 0) { // Set<String> disabledInstances // = cache.getDisabledInstancesForResource(resource.toString()); for (String instance : currentStateMap.keySet()) { // if 
(!disabledInstances.contains(instance)) // { view.setState(partition.getPartitionName(), instance, currentStateMap.get(instance)); // } } } totalPendingMessageCount += currentStateOutput.getPendingMessageMap(resource.getResourceName(), partition).size(); } // Update cluster status monitor mbean IdealState idealState = cache.getIdealState(resourceName); if (!cache.isTaskCache()) { ResourceConfig resourceConfig = cache.getResourceConfig(resourceName); if (clusterStatusMonitor != null) { if (idealState != null // has ideal state && (resourceConfig == null || !resourceConfig.isMonitoringDisabled()) // monitoring not disabled && !idealState.getStateModelDefRef() // and not a job resource .equalsIgnoreCase(DefaultSchedulerMessageHandlerFactory.SCHEDULER_TASK_QUEUE)) { StateModelDefinition stateModelDef = cache.getStateModelDef(idealState.getStateModelDefRef()); clusterStatusMonitor .setResourceStatus(view, cache.getIdealState(view.getResourceName()), stateModelDef); clusterStatusMonitor .updatePendingMessages(resource.getResourceName(), totalPendingMessageCount); monitoringResources.add(resourceName); } } } ExternalView curExtView = curExtViews.get(resourceName); // copy simplefields from IS, in cases where IS is deleted copy it from existing ExternalView if (idealState != null) { view.getRecord().getSimpleFields().putAll(idealState.getRecord().getSimpleFields()); } else if (curExtView != null) { view.getRecord().getSimpleFields().putAll(curExtView.getRecord().getSimpleFields()); } // compare the new external view with current one, set only on different if (curExtView == null || !curExtView.getRecord().equals(view.getRecord())) { // Add external view to the list which will be written to ZK later. 
newExtViews.add(view); // For SCHEDULER_TASK_RESOURCE resource group (helix task queue), we need to find out which // task partitions are finished (COMPLETED or ERROR), update the status update of the original // scheduler message, and then remove the partitions from the ideal state if (idealState != null && idealState.getStateModelDefRef().equalsIgnoreCase( DefaultSchedulerMessageHandlerFactory.SCHEDULER_TASK_QUEUE)) { updateScheduledTaskStatus(view, manager, idealState); } } } // Keep MBeans for existing resources and unregister MBeans for dropped resources if (clusterStatusMonitor != null) { clusterStatusMonitor.retainResourceMonitor(monitoringResources); } List<String> externalViewsToRemove = new ArrayList<>(); // TODO: consider not setting the externalview of SCHEDULER_TASK_QUEUE at all. // Are there any entity that will be interested in its change? // For the resource with DisableExternalView option turned on in IdealState // We will not actually create or write the externalView to ZooKeeper. 
List<PropertyKey> keys = new ArrayList<>(); for(Iterator<ExternalView> it = newExtViews.iterator(); it.hasNext(); ) { ExternalView view = it.next(); String resourceName = view.getResourceName(); IdealState idealState = cache.getIdealState(resourceName); if (idealState != null && idealState.isExternalViewDisabled()) { it.remove(); // remove the external view if the external view exists if (curExtViews.containsKey(resourceName)) { LogUtil .logInfo(LOG, _eventId, "Remove externalView for resource: " + resourceName); dataAccessor.removeProperty(keyBuilder.externalView(resourceName)); externalViewsToRemove.add(resourceName); } } else { keys.add(keyBuilder.externalView(resourceName)); } } // add/update external-views if (newExtViews.size() > 0) { dataAccessor.setChildren(keys, newExtViews); cache.updateExternalViews(newExtViews); } // remove dead external-views for (String resourceName : curExtViews.keySet()) { if (!resourceMap.keySet().contains(resourceName)) { LogUtil.logInfo(LOG, _eventId, "Remove externalView for resource: " + resourceName); dataAccessor.removeProperty(keyBuilder.externalView(resourceName)); externalViewsToRemove.add(resourceName); } } cache.removeExternalViews(externalViewsToRemove); }
#vulnerable code @Override public void execute(final ClusterEvent event) throws Exception { _eventId = event.getEventId(); HelixManager manager = event.getAttribute(AttributeName.helixmanager.name()); Map<String, Resource> resourceMap = event.getAttribute(AttributeName.RESOURCES.name()); ClusterDataCache cache = event.getAttribute(AttributeName.ClusterDataCache.name()); if (manager == null || resourceMap == null || cache == null) { throw new StageException("Missing attributes in event:" + event + ". Requires ClusterManager|RESOURCES|DataCache"); } HelixDataAccessor dataAccessor = manager.getHelixDataAccessor(); PropertyKey.Builder keyBuilder = dataAccessor.keyBuilder(); CurrentStateOutput currentStateOutput = event.getAttribute(AttributeName.CURRENT_STATE.name()); List<ExternalView> newExtViews = new ArrayList<>(); Map<String, ExternalView> curExtViews = cache.getExternalViews(); for (String resourceName : resourceMap.keySet()) { ExternalView view = new ExternalView(resourceName); // view.setBucketSize(currentStateOutput.getBucketSize(resourceName)); // if resource ideal state has bucket size, set it // otherwise resource has been dropped, use bucket size from current state instead Resource resource = resourceMap.get(resourceName); if (resource.getBucketSize() > 0) { view.setBucketSize(resource.getBucketSize()); } else { view.setBucketSize(currentStateOutput.getBucketSize(resourceName)); } int totalPendingMessageCount = 0; for (Partition partition : resource.getPartitions()) { Map<String, String> currentStateMap = currentStateOutput.getCurrentStateMap(resourceName, partition); if (currentStateMap != null && currentStateMap.size() > 0) { // Set<String> disabledInstances // = cache.getDisabledInstancesForResource(resource.toString()); for (String instance : currentStateMap.keySet()) { // if (!disabledInstances.contains(instance)) // { view.setState(partition.getPartitionName(), instance, currentStateMap.get(instance)); // } } } totalPendingMessageCount += 
currentStateOutput.getPendingMessageMap(resource.getResourceName(), partition).size(); } // Update cluster status monitor mbean IdealState idealState = cache.getIdealState(resourceName); if (!cache.isTaskCache()) { ResourceConfig resourceConfig = cache.getResourceConfig(resourceName); ClusterStatusMonitor clusterStatusMonitor = event.getAttribute(AttributeName.clusterStatusMonitor.name()); if (clusterStatusMonitor != null) { if (idealState != null && (resourceConfig == null || !resourceConfig .isMonitoringDisabled())) { if (!idealState.getStateModelDefRef() .equalsIgnoreCase(DefaultSchedulerMessageHandlerFactory.SCHEDULER_TASK_QUEUE)) { StateModelDefinition stateModelDef = cache.getStateModelDef(idealState.getStateModelDefRef()); clusterStatusMonitor .setResourceStatus(view, cache.getIdealState(view.getResourceName()), stateModelDef); clusterStatusMonitor .updatePendingMessages(resource.getResourceName(), totalPendingMessageCount); } } else { // Drop the metrics if the resource is dropped, or the MonitorDisabled is changed to true. clusterStatusMonitor.unregisterResource(view.getResourceName()); } } } ExternalView curExtView = curExtViews.get(resourceName); // copy simplefields from IS, in cases where IS is deleted copy it from existing ExternalView if (idealState != null) { view.getRecord().getSimpleFields().putAll(idealState.getRecord().getSimpleFields()); } else if (curExtView != null) { view.getRecord().getSimpleFields().putAll(curExtView.getRecord().getSimpleFields()); } // compare the new external view with current one, set only on different if (curExtView == null || !curExtView.getRecord().equals(view.getRecord())) { // Add external view to the list which will be written to ZK later. 
newExtViews.add(view); // For SCHEDULER_TASK_RESOURCE resource group (helix task queue), we need to find out which // task partitions are finished (COMPLETED or ERROR), update the status update of the original // scheduler message, and then remove the partitions from the ideal state if (idealState != null && idealState.getStateModelDefRef().equalsIgnoreCase( DefaultSchedulerMessageHandlerFactory.SCHEDULER_TASK_QUEUE)) { updateScheduledTaskStatus(view, manager, idealState); } } } List<String> externalviewsToRemove = new ArrayList<>(); // TODO: consider not setting the externalview of SCHEDULER_TASK_QUEUE at all. // Are there any entity that will be interested in its change? // For the resource with DisableExternalView option turned on in IdealState // We will not actually create or write the externalView to ZooKeeper. List<PropertyKey> keys = new ArrayList<>(); for(Iterator<ExternalView> it = newExtViews.iterator(); it.hasNext(); ) { ExternalView view = it.next(); String resourceName = view.getResourceName(); IdealState idealState = cache.getIdealState(resourceName); if (idealState != null && idealState.isExternalViewDisabled()) { it.remove(); // remove the external view if the external view exists if (curExtViews.containsKey(resourceName)) { LogUtil .logInfo(LOG, _eventId, "Remove externalView for resource: " + resourceName); dataAccessor.removeProperty(keyBuilder.externalView(resourceName)); externalviewsToRemove.add(resourceName); } } else { keys.add(keyBuilder.externalView(resourceName)); } } // add/update external-views if (newExtViews.size() > 0) { dataAccessor.setChildren(keys, newExtViews); cache.updateExternalViews(newExtViews); } // remove dead external-views for (String resourceName : curExtViews.keySet()) { if (!resourceMap.keySet().contains(resourceName)) { LogUtil.logInfo(LOG, _eventId, "Remove externalView for resource: " + resourceName); dataAccessor.removeProperty(keyBuilder.externalView(resourceName)); externalviewsToRemove.add(resourceName); } } 
cache.removeExternalViews(externalviewsToRemove); } #location 32 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test(groups = { "unitTest" }) public void testStaticFileCM() { final String clusterName = "TestSTaticFileCM"; final String controllerName = "controller_0"; ClusterView view; String[] illegalNodesInfo = {"localhost_8900", "localhost_8901"}; List<DBParam> dbParams = new ArrayList<DBParam>(); dbParams.add(new DBParam("TestDB0", 10)); dbParams.add(new DBParam("TestDB1", 10)); boolean exceptionCaught = false; try { view = FileBasedClusterManager.generateStaticConfigClusterView(illegalNodesInfo, dbParams, 3); } catch (IllegalArgumentException e) { exceptionCaught = true; } Assert.assertTrue(exceptionCaught); String[] nodesInfo = {"localhost:8900", "localhost:8901", "localhost:8902"}; view = FileBasedClusterManager.generateStaticConfigClusterView(nodesInfo, dbParams, 2); String configFile = "/tmp/" + clusterName; ClusterViewSerializer.serialize(view, new File(configFile)); ClusterView restoredView = ClusterViewSerializer.deserialize(new File(configFile)); // System.out.println(restoredView); // byte[] bytes = ClusterViewSerializer.serialize(restoredView); // System.out.println(new String(bytes)); FileBasedClusterManager.verifyFileBasedClusterStates("localhost_8900", configFile, configFile); FileBasedClusterManager controller = new FileBasedClusterManager(clusterName, controllerName, InstanceType.CONTROLLER, configFile); controller.disconnect(); Assert.assertFalse(controller.isConnected()); controller.connect(); Assert.assertTrue(controller.isConnected()); String sessionId = controller.getSessionId(); Assert.assertEquals(DynamicFileClusterManager._sessionId, sessionId); Assert.assertEquals(clusterName, controller.getClusterName()); Assert.assertEquals(0, controller.getLastNotificationTime()); Assert.assertEquals(InstanceType.CONTROLLER, controller.getInstanceType()); Assert.assertNull(controller.getPropertyStore()); Assert.assertNull(controller.getHealthReportCollector()); Assert.assertEquals(controllerName, controller.getInstanceName()); 
Assert.assertNull(controller.getClusterManagmentTool()); Assert.assertNull(controller.getMessagingService()); MockListener controllerListener = new MockListener(); Assert.assertFalse(controller.removeListener(controllerListener)); controllerListener.reset(); controller.addIdealStateChangeListener(controllerListener); Assert.assertTrue(controllerListener.isIdealStateChangeListenerInvoked); controller.addMessageListener(controllerListener, "localhost_8900"); Assert.assertTrue(controllerListener.isMessageListenerInvoked); exceptionCaught = false; try { controller.addLiveInstanceChangeListener(controllerListener); } catch (UnsupportedOperationException e) { exceptionCaught = true; } Assert.assertTrue(exceptionCaught); exceptionCaught = false; try { controller.addCurrentStateChangeListener(controllerListener, "localhost_8900", sessionId); } catch (UnsupportedOperationException e) { exceptionCaught = true; } Assert.assertTrue(exceptionCaught); exceptionCaught = false; try { controller.addConfigChangeListener(controllerListener); } catch (UnsupportedOperationException e) { exceptionCaught = true; } Assert.assertTrue(exceptionCaught); exceptionCaught = false; try { controller.addExternalViewChangeListener(controllerListener); } catch (UnsupportedOperationException e) { exceptionCaught = true; } Assert.assertTrue(exceptionCaught); exceptionCaught = false; try { controller.addControllerListener(controllerListener); } catch (UnsupportedOperationException e) { exceptionCaught = true; } Assert.assertTrue(exceptionCaught); }
#vulnerable code @Test(groups = { "unitTest" }) public void testStaticFileCM() { final String clusterName = "TestSTaticFileCM"; final String controllerName = "controller_0"; ClusterView view; String[] illegalNodesInfo = {"localhost_8900", "localhost_8901"}; List<DBParam> dbParams = new ArrayList<DBParam>(); dbParams.add(new DBParam("TestDB0", 10)); dbParams.add(new DBParam("TestDB1", 10)); boolean exceptionCaught = false; try { view = FileBasedClusterManager.generateStaticConfigClusterView(illegalNodesInfo, dbParams, 3); } catch (IllegalArgumentException e) { exceptionCaught = true; } Assert.assertTrue(exceptionCaught); String[] nodesInfo = {"localhost:8900", "localhost:8901", "localhost:8902"}; view = FileBasedClusterManager.generateStaticConfigClusterView(nodesInfo, dbParams, 2); String configFile = "/tmp/" + clusterName; ClusterViewSerializer.serialize(view, new File(configFile)); ClusterView restoredView = ClusterViewSerializer.deserialize(new File(configFile)); // System.out.println(restoredView); // byte[] bytes = ClusterViewSerializer.serialize(restoredView); // System.out.println(new String(bytes)); FileBasedClusterManager controller = new FileBasedClusterManager(clusterName, controllerName, InstanceType.CONTROLLER, configFile); controller.disconnect(); Assert.assertFalse(controller.isConnected()); controller.connect(); Assert.assertTrue(controller.isConnected()); String sessionId = controller.getSessionId(); Assert.assertEquals(DynamicFileClusterManager._sessionId, sessionId); Assert.assertEquals(clusterName, controller.getClusterName()); Assert.assertEquals(0, controller.getLastNotificationTime()); Assert.assertEquals(InstanceType.CONTROLLER, controller.getInstanceType()); Assert.assertNull(controller.getPropertyStore()); Assert.assertNull(controller.getHealthReportCollector()); Assert.assertEquals(controllerName, controller.getInstanceName()); Assert.assertNull(controller.getClusterManagmentTool()); Assert.assertNull(controller.getMessagingService()); 
MockListener controllerListener = new MockListener(); Assert.assertFalse(controller.removeListener(controllerListener)); controllerListener.reset(); controller.addIdealStateChangeListener(controllerListener); Assert.assertTrue(controllerListener.isIdealStateChangeListenerInvoked); controller.addMessageListener(controllerListener, "localhost_8900"); Assert.assertTrue(controllerListener.isMessageListenerInvoked); exceptionCaught = false; try { controller.addLiveInstanceChangeListener(controllerListener); } catch (UnsupportedOperationException e) { exceptionCaught = true; } Assert.assertTrue(exceptionCaught); exceptionCaught = false; try { controller.addCurrentStateChangeListener(controllerListener, "localhost_8900", sessionId); } catch (UnsupportedOperationException e) { exceptionCaught = true; } Assert.assertTrue(exceptionCaught); exceptionCaught = false; try { controller.addConfigChangeListener(controllerListener); } catch (UnsupportedOperationException e) { exceptionCaught = true; } Assert.assertTrue(exceptionCaught); exceptionCaught = false; try { controller.addExternalViewChangeListener(controllerListener); } catch (UnsupportedOperationException e) { exceptionCaught = true; } Assert.assertTrue(exceptionCaught); exceptionCaught = false; try { controller.addControllerListener(controllerListener); } catch (UnsupportedOperationException e) { exceptionCaught = true; } Assert.assertTrue(exceptionCaught); } #location 54 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void process(ClusterEvent event) throws Exception { ClusterDataCache cache = event.getAttribute("ClusterDataCache"); if (cache == null) { throw new StageException("Missing attributes in event:" + event + ". Requires DataCache"); } Map<String, IdealState> idealStates = cache.getIdealStates(); Map<String, ResourceGroup> resourceGroupMap = new LinkedHashMap<String, ResourceGroup>(); if (idealStates != null && idealStates.size() > 0) { for (IdealState idealState : idealStates.values()) { Set<String> resourceSet = idealState.getResourceKeySet(); String resourceGroupName = idealState.getResourceGroup(); for (String resourceKey : resourceSet) { addResource(resourceKey, resourceGroupName, resourceGroupMap); ResourceGroup resourceGroup = resourceGroupMap.get(resourceGroupName); resourceGroup.setStateModelDefRef(idealState.getStateModelDefRef()); } } } // It's important to get resourceKeys from CurrentState as well since the // idealState might be removed. Map<String, LiveInstance> availableInstances = cache.getLiveInstances(); if (availableInstances != null && availableInstances.size() > 0) { for (LiveInstance instance : availableInstances.values()) { String instanceName = instance.getInstanceName(); String clientSessionId = instance.getSessionId(); Map<String, CurrentState> currentStateMap = cache.getCurrentState(instanceName, clientSessionId); if (currentStateMap == null || currentStateMap.size() == 0) { continue; } for (CurrentState currentState : currentStateMap.values()) { String resourceGroupName = currentState.getResourceGroupName(); Map<String, String> resourceStateMap = currentState.getResourceKeyStateMap(); for (String resourceKey : resourceStateMap.keySet()) { addResource(resourceKey, resourceGroupName, resourceGroupMap); ResourceGroup resourceGroup = resourceGroupMap.get(resourceGroupName); resourceGroup.setStateModelDefRef(currentState.getStateModelDefRef()); } } } } event.addAttribute(AttributeName.RESOURCE_GROUPS.toString(), 
resourceGroupMap); }
#vulnerable code @Override public void process(ClusterEvent event) throws Exception { ClusterDataCache cache = event.getAttribute("ClusterDataCache"); Map<String, IdealState> idealStates = cache.getIdealStates(); Map<String, ResourceGroup> resourceGroupMap = new LinkedHashMap<String, ResourceGroup>(); if (idealStates != null && idealStates.size() > 0) { for (IdealState idealState : idealStates.values()) { Set<String> resourceSet = idealState.getResourceKeySet(); String resourceGroupName = idealState.getResourceGroup(); for (String resourceKey : resourceSet) { addResource(resourceKey, resourceGroupName, resourceGroupMap); ResourceGroup resourceGroup = resourceGroupMap .get(resourceGroupName); resourceGroup.setStateModelDefRef(idealState.getStateModelDefRef()); } } } // It's important to get resourceKeys from CurrentState as well since the // idealState might be removed. Map<String, LiveInstance> availableInstances = cache.getLiveInstances(); if (availableInstances != null && availableInstances.size() > 0) { for (LiveInstance instance : availableInstances.values()) { String instanceName = instance.getInstanceName(); String clientSessionId = instance.getSessionId(); Map<String, CurrentState> currentStateMap = cache.getCurrentState(instanceName, clientSessionId); if (currentStateMap == null || currentStateMap.size() == 0) { continue; } for (CurrentState currentState : currentStateMap.values()) { String resourceGroupName = currentState.getResourceGroupName(); Map<String, String> resourceStateMap = currentState.getResourceKeyStateMap(); for (String resourceKey : resourceStateMap.keySet()) { addResource(resourceKey, resourceGroupName, resourceGroupMap); ResourceGroup resourceGroup = resourceGroupMap .get(resourceGroupName); resourceGroup.setStateModelDefRef(currentState.getStateModelDefRef()); } } } } event.addAttribute(AttributeName.RESOURCE_GROUPS.toString(), resourceGroupMap); } #location 5 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public void onExternalViewChange(ExternalView externalView, IdealState idealState) { try { String resourceName = externalView.getId(); if (!_resourceMbeanMap.containsKey(resourceName)) { synchronized (this) { if (!_resourceMbeanMap.containsKey(resourceName)) { ResourceMonitor bean = new ResourceMonitor(_clusterName, resourceName); bean.updateExternalView(externalView, idealState); registerResources(Arrays.asList(bean)); } } } ResourceMonitor bean = _resourceMbeanMap.get(resourceName); String oldSensorName = bean.getSensorName(); bean.updateExternalView(externalView, idealState); String newSensorName = bean.getSensorName(); if (!oldSensorName.equals(newSensorName)) { unregisterResources(Arrays.asList(resourceName)); registerResources(Arrays.asList(bean)); } } catch (Exception e) { LOG.warn(e); } }
#vulnerable code public void onExternalViewChange(ExternalView externalView, IdealState idealState) { try { String resourceName = externalView.getId(); if (!_resourceMbeanMap.containsKey(resourceName)) { synchronized (this) { if (!_resourceMbeanMap.containsKey(resourceName)) { ResourceMonitor bean = new ResourceMonitor(_clusterName, resourceName); String beanName = CLUSTER_DN_KEY + "=" + _clusterName + "," + RESOURCE_DN_KEY + "=" + resourceName; register(bean, getObjectName(beanName)); _resourceMbeanMap.put(resourceName, bean); } } } _resourceMbeanMap.get(resourceName).updateExternalView(externalView, idealState); } catch (Exception e) { LOG.warn(e); } } #location 15 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void process(ClusterEvent event) throws Exception { ClusterDataCache cache = event.getAttribute("ClusterDataCache"); if (cache == null) { throw new StageException("Missing attributes in event:" + event + ". Requires DataCache"); } Map<String, LiveInstance> liveInstances = cache.getLiveInstances(); CurrentStateOutput currentStateOutput = new CurrentStateOutput(); Map<String, ResourceGroup> resourceGroupMap = event .getAttribute(AttributeName.RESOURCE_GROUPS.toString()); for (LiveInstance instance : liveInstances.values()) { String instanceName = instance.getInstanceName(); List<Message> instanceMessages; instanceMessages = cache.getMessages(instanceName); for (Message message : instanceMessages) { if (!MessageType.STATE_TRANSITION.toString().equalsIgnoreCase( message.getMsgType())) { continue; } if (!instance.getSessionId().equals(message.getTgtSessionId())) { continue; } String resourceGroupName = message.getResourceGroupName(); ResourceGroup resourceGroup = resourceGroupMap.get(resourceGroupName); if (resourceGroup == null) { continue; } ResourceKey resourceKey = resourceGroup.getResourceKey(message .getResourceKey()); if (resourceKey != null) { currentStateOutput.setPendingState(resourceGroupName, resourceKey, instanceName, message.getToState()); } else { // log } } } for (LiveInstance instance : liveInstances.values()) { String instanceName = instance.getInstanceName(); String clientSessionId = instance.getSessionId(); Map<String, CurrentState> currentStateMap = cache.getCurrentState(instanceName, clientSessionId); for (CurrentState currentState : currentStateMap.values()) { if (!instance.getSessionId().equals(currentState.getSessionId())) { continue; } String resourceGroupName = currentState.getResourceGroupName(); String stateModelDefName = currentState.getStateModelDefRef(); ResourceGroup resourceGroup = resourceGroupMap.get(resourceGroupName); if (resourceGroup == null) { continue; } if (stateModelDefName != null) { 
currentStateOutput.setResourceGroupStateModelDef(resourceGroupName, stateModelDefName); } Map<String, String> resourceKeyStateMap = currentState .getResourceKeyStateMap(); for (String resourceKeyStr : resourceKeyStateMap.keySet()) { ResourceKey resourceKey = resourceGroup .getResourceKey(resourceKeyStr); if (resourceKey != null) { currentStateOutput.setCurrentState(resourceGroupName, resourceKey, instanceName, currentState.getState(resourceKeyStr)); } else { // log } } } } event.addAttribute(AttributeName.CURRENT_STATE.toString(), currentStateOutput); }
#vulnerable code @Override public void process(ClusterEvent event) throws Exception { ClusterDataCache cache = event.getAttribute("ClusterDataCache"); Map<String, LiveInstance> liveInstances = cache.getLiveInstances(); CurrentStateOutput currentStateOutput = new CurrentStateOutput(); Map<String, ResourceGroup> resourceGroupMap = event .getAttribute(AttributeName.RESOURCE_GROUPS.toString()); for (LiveInstance instance : liveInstances.values()) { String instanceName = instance.getInstanceName(); List<Message> instanceMessages; instanceMessages = cache.getMessages(instanceName); for (Message message : instanceMessages) { if (!MessageType.STATE_TRANSITION.toString().equalsIgnoreCase( message.getMsgType())) { continue; } if (!instance.getSessionId().equals(message.getTgtSessionId())) { continue; } String resourceGroupName = message.getResourceGroupName(); ResourceGroup resourceGroup = resourceGroupMap.get(resourceGroupName); if (resourceGroup == null) { continue; } ResourceKey resourceKey = resourceGroup.getResourceKey(message .getResourceKey()); if (resourceKey != null) { currentStateOutput.setPendingState(resourceGroupName, resourceKey, instanceName, message.getToState()); } else { // log } } } for (LiveInstance instance : liveInstances.values()) { String instanceName = instance.getInstanceName(); String clientSessionId = instance.getSessionId(); Map<String, CurrentState> currentStateMap = cache.getCurrentState(instanceName, clientSessionId); for (CurrentState currentState : currentStateMap.values()) { if (!instance.getSessionId().equals(currentState.getSessionId())) { continue; } String resourceGroupName = currentState.getResourceGroupName(); String stateModelDefName = currentState.getStateModelDefRef(); ResourceGroup resourceGroup = resourceGroupMap.get(resourceGroupName); if (resourceGroup == null) { continue; } if (stateModelDefName != null) { currentStateOutput.setResourceGroupStateModelDef(resourceGroupName, stateModelDefName); } Map<String, String> 
resourceKeyStateMap = currentState .getResourceKeyStateMap(); for (String resourceKeyStr : resourceKeyStateMap.keySet()) { ResourceKey resourceKey = resourceGroup .getResourceKey(resourceKeyStr); if (resourceKey != null) { currentStateOutput.setCurrentState(resourceGroupName, resourceKey, instanceName, currentState.getState(resourceKeyStr)); } else { // log } } } } event.addAttribute(AttributeName.CURRENT_STATE.toString(), currentStateOutput); } #location 6 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code private void scheduleSingleJob(String jobResource, JobConfig jobConfig) { HelixAdmin admin = _manager.getClusterManagmentTool(); IdealState jobIS = admin.getResourceIdealState(_manager.getClusterName(), jobResource); if (jobIS != null) { LOG.info("Job " + jobResource + " idealstate already exists!"); return; } // Set up job resource based on partitions from target resource int numIndependentTasks = jobConfig.getTaskConfigMap().size(); int numPartitions = numIndependentTasks; if (numPartitions == 0) { IdealState targetIs = admin.getResourceIdealState(_manager.getClusterName(), jobConfig.getTargetResource()); if (targetIs == null) { LOG.warn("Target resource does not exist for job " + jobResource); // do not need to fail here, the job will be marked as failure immediately when job starts running. } else { numPartitions = targetIs.getPartitionSet().size(); } } admin.addResource(_manager.getClusterName(), jobResource, numPartitions, TaskConstants.STATE_MODEL_NAME); HelixDataAccessor accessor = _manager.getHelixDataAccessor(); // Set the job configuration PropertyKey.Builder keyBuilder = accessor.keyBuilder(); HelixProperty resourceConfig = new HelixProperty(jobResource); resourceConfig.getRecord().getSimpleFields().putAll(jobConfig.getResourceConfigMap()); Map<String, TaskConfig> taskConfigMap = jobConfig.getTaskConfigMap(); if (taskConfigMap != null) { for (TaskConfig taskConfig : taskConfigMap.values()) { resourceConfig.getRecord().setMapField(taskConfig.getId(), taskConfig.getConfigMap()); } } accessor.setProperty(keyBuilder.resourceConfig(jobResource), resourceConfig); // Push out new ideal state based on number of target partitions CustomModeISBuilder builder = new CustomModeISBuilder(jobResource); builder.setRebalancerMode(IdealState.RebalanceMode.TASK); builder.setNumReplica(1); builder.setNumPartitions(numPartitions); builder.setStateModel(TaskConstants.STATE_MODEL_NAME); if (jobConfig.isDisableExternalView()) { builder.setDisableExternalView(true); 
} jobIS = builder.build(); for (int i = 0; i < numPartitions; i++) { jobIS.getRecord().setListField(jobResource + "_" + i, new ArrayList<String>()); jobIS.getRecord().setMapField(jobResource + "_" + i, new HashMap<String, String>()); } jobIS.setRebalancerClassName(JobRebalancer.class.getName()); admin.setResourceIdealState(_manager.getClusterName(), jobResource, jobIS); }
#vulnerable code private void scheduleSingleJob(String jobResource, JobConfig jobConfig) { HelixAdmin admin = _manager.getClusterManagmentTool(); IdealState jobIS = admin.getResourceIdealState(_manager.getClusterName(), jobResource); if (jobIS != null) { LOG.info("Job " + jobResource + " idealstate already exists!"); return; } // Set up job resource based on partitions from target resource int numIndependentTasks = jobConfig.getTaskConfigMap().size(); int numPartitions = (numIndependentTasks > 0) ? numIndependentTasks : admin.getResourceIdealState(_manager.getClusterName(), jobConfig.getTargetResource()) .getPartitionSet().size(); admin.addResource(_manager.getClusterName(), jobResource, numPartitions, TaskConstants.STATE_MODEL_NAME); HelixDataAccessor accessor = _manager.getHelixDataAccessor(); // Set the job configuration PropertyKey.Builder keyBuilder = accessor.keyBuilder(); HelixProperty resourceConfig = new HelixProperty(jobResource); resourceConfig.getRecord().getSimpleFields().putAll(jobConfig.getResourceConfigMap()); Map<String, TaskConfig> taskConfigMap = jobConfig.getTaskConfigMap(); if (taskConfigMap != null) { for (TaskConfig taskConfig : taskConfigMap.values()) { resourceConfig.getRecord().setMapField(taskConfig.getId(), taskConfig.getConfigMap()); } } accessor.setProperty(keyBuilder.resourceConfig(jobResource), resourceConfig); // Push out new ideal state based on number of target partitions CustomModeISBuilder builder = new CustomModeISBuilder(jobResource); builder.setRebalancerMode(IdealState.RebalanceMode.TASK); builder.setNumReplica(1); builder.setNumPartitions(numPartitions); builder.setStateModel(TaskConstants.STATE_MODEL_NAME); if (jobConfig.isDisableExternalView()) { builder.setDisableExternalView(true); } jobIS = builder.build(); for (int i = 0; i < numPartitions; i++) { jobIS.getRecord().setListField(jobResource + "_" + i, new ArrayList<String>()); jobIS.getRecord().setMapField(jobResource + "_" + i, new HashMap<String, String>()); } 
jobIS.setRebalancerClassName(JobRebalancer.class.getName()); admin.setResourceIdealState(_manager.getClusterName(), jobResource, jobIS); } #location 15 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code public static int numberOfListeners(String zkAddr, String path) throws Exception { Map<String, Set<String>> listenerMap = getListenersByZkPath(zkAddr); if (listenerMap.containsKey(path)) { return listenerMap.get(path).size(); } return 0; }
#vulnerable code public static int numberOfListeners(String zkAddr, String path) throws Exception { int count = 0; String splits[] = zkAddr.split(":"); Socket sock = new Socket(splits[0], Integer.parseInt(splits[1])); PrintWriter out = new PrintWriter(sock.getOutputStream(), true); BufferedReader in = new BufferedReader(new InputStreamReader(sock.getInputStream())); out.println("wchp"); String line = in.readLine(); while (line != null) { // System.out.println(line); if (line.equals(path)) { // System.out.println("match: " + line); String nextLine = in.readLine(); if (nextLine == null) { break; } // System.out.println(nextLine); while (nextLine.startsWith("\t0x")) { count++; nextLine = in.readLine(); if (nextLine == null) { break; } } } line = in.readLine(); } sock.close(); return count; } #location 38 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Override public void disconnect() { if (!isConnected()) { logger.error("ClusterManager " + _instanceName + " already disconnected"); return; } disconnectInternal(); }
#vulnerable code @Override public void disconnect() { if (!isConnected()) { logger.warn("ClusterManager " + _instanceName + " already disconnected"); return; } logger.info("disconnect " + _instanceName + "(" + _instanceType + ") from " + _clusterName); /** * shutdown thread pool first to avoid reset() being invoked in the middle of state * transition */ _messagingService.getExecutor().shutDown(); resetHandlers(); _helixAccessor.shutdown(); if (_leaderElectionHandler != null) { _leaderElectionHandler.reset(); } if (_participantHealthCheckInfoCollector != null) { _participantHealthCheckInfoCollector.stop(); } if (_timer != null) { _timer.cancel(); _timer = null; } if (_instanceType == InstanceType.CONTROLLER) { stopTimerTasks(); } // unsubscribe accessor from controllerChange _zkClient.unsubscribeAll(); _zkClient.close(); // HACK seems that zkClient is not sending DISCONNECT event _zkStateChangeListener.disconnect(); logger.info("Cluster manager: " + _instanceName + " disconnected"); } #location 30 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Override public void handleChildChange(String parentPath, List<String> currentChilds) { if (_zkClientForRoutingDataListener == null || _zkClientForRoutingDataListener.isClosed()) { return; } // Resubscribe _zkClientForRoutingDataListener.unsubscribeAll(); _zkClientForRoutingDataListener.subscribeRoutingDataChanges(this, this); resetZkResources(); }
#vulnerable code @Override public void handleChildChange(String parentPath, List<String> currentChilds) { if (_zkClientForListener == null || _zkClientForListener.isClosed()) { return; } // Resubscribe _zkClientForListener.unsubscribeAll(); _zkClientForListener.subscribeRoutingDataChanges(this, this); resetZkResources(); } #location 8 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code protected static Set<String> getExpiredJobs(HelixDataAccessor dataAccessor, HelixPropertyStore propertyStore, WorkflowConfig workflowConfig, WorkflowContext workflowContext) { Set<String> expiredJobs = new HashSet<String>(); if (workflowContext != null) { Map<String, TaskState> jobStates = workflowContext.getJobStates(); for (String job : workflowConfig.getJobDag().getAllNodes()) { JobConfig jobConfig = TaskUtil.getJobConfig(dataAccessor, job); JobContext jobContext = TaskUtil.getJobContext(propertyStore, job); if (jobConfig == null) { LOG.error(String.format("Job %s exists in JobDAG but JobConfig is missing!", job)); continue; } long expiry = jobConfig.getExpiry(); if (expiry == workflowConfig.DEFAULT_EXPIRY || expiry < 0) { expiry = workflowConfig.getExpiry(); } if (jobContext != null && jobStates.get(job) == TaskState.COMPLETED) { if (System.currentTimeMillis() >= jobContext.getFinishTime() + expiry) { expiredJobs.add(job); } } } } return expiredJobs; }
#vulnerable code protected static Set<String> getExpiredJobs(HelixDataAccessor dataAccessor, HelixPropertyStore propertyStore, WorkflowConfig workflowConfig, WorkflowContext workflowContext) { Set<String> expiredJobs = new HashSet<String>(); if (workflowContext != null) { Map<String, TaskState> jobStates = workflowContext.getJobStates(); for (String job : workflowConfig.getJobDag().getAllNodes()) { JobConfig jobConfig = TaskUtil.getJobConfig(dataAccessor, job); JobContext jobContext = TaskUtil.getJobContext(propertyStore, job); long expiry = jobConfig.getExpiry(); if (expiry == workflowConfig.DEFAULT_EXPIRY || expiry < 0) { expiry = workflowConfig.getExpiry(); } if (jobContext != null && jobStates.get(job) == TaskState.COMPLETED) { if (System.currentTimeMillis() >= jobContext.getFinishTime() + expiry) { expiredJobs.add(job); } } } } return expiredJobs; } #location 11 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Override @PreFetch(enabled = false) public void onIdealStateChange(List<IdealState> idealStates, NotificationContext changeContext) { logger.info( "START: Generic GenericClusterController.onIdealStateChange() for cluster " + _clusterName); if (changeContext == null || changeContext.getType() != Type.CALLBACK) { _cache.requireFullRefresh(); _taskCache.requireFullRefresh(); } else { _cache.updateDataChange(ChangeType.IDEAL_STATE); _taskCache.updateDataChange(ChangeType.IDEAL_STATE); } ClusterEvent event = new ClusterEvent(_clusterName, ClusterEventType.IdealStateChange); event.addAttribute(AttributeName.helixmanager.name(), changeContext.getManager()); event.addAttribute(AttributeName.changeContext.name(), changeContext); _eventQueue.put(event); _taskEventQueue.put(event.clone()); if (changeContext.getType() != Type.FINALIZE) { checkRebalancingTimer(changeContext.getManager(), idealStates, _cache.getClusterConfig()); } logger.info("END: GenericClusterController.onIdealStateChange() for cluster " + _clusterName); }
#vulnerable code @Override @PreFetch(enabled = false) public void onIdealStateChange(List<IdealState> idealStates, NotificationContext changeContext) { logger.info( "START: Generic GenericClusterController.onIdealStateChange() for cluster " + _clusterName); if (changeContext == null || changeContext.getType() != Type.CALLBACK) { _cache.requireFullRefresh(); } else { _cache.updateDataChange(ChangeType.IDEAL_STATE); } ClusterEvent event = new ClusterEvent(_clusterName, ClusterEventType.IdealStateChange); event.addAttribute(AttributeName.helixmanager.name(), changeContext.getManager()); event.addAttribute(AttributeName.changeContext.name(), changeContext); _eventQueue.put(event); if (changeContext.getType() != Type.FINALIZE) { checkRebalancingTimer(changeContext.getManager(), idealStates, _cache.getClusterConfig()); } logger.info("END: GenericClusterController.onIdealStateChange() for cluster " + _clusterName); } #location 19 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Override public void onControllerChange(NotificationContext changeContext) { logger.info("START: GenericClusterController.onControllerChange() for cluster " + _clusterName); _cache.requireFullRefresh(); if (changeContext != null && changeContext.getType() == Type.FINALIZE) { logger.info("GenericClusterController.onControllerChange() FINALIZE for cluster " + _clusterName); return; } HelixDataAccessor accessor = changeContext.getManager().getHelixDataAccessor(); // double check if this controller is the leader Builder keyBuilder = accessor.keyBuilder(); LiveInstance leader = accessor.getProperty(keyBuilder.controllerLeader()); if (leader == null) { logger .warn("No controller exists for cluster:" + changeContext.getManager().getClusterName()); return; } else { String leaderName = leader.getInstanceName(); String instanceName = changeContext.getManager().getInstanceName(); if (leaderName == null || !leaderName.equals(instanceName)) { logger.warn("leader name does NOT match, my name: " + instanceName + ", leader: " + leader); return; } } PauseSignal pauseSignal = accessor.getProperty(keyBuilder.pause()); if (pauseSignal != null) { if (!_paused) { _paused = true; logger.info("controller is now paused"); } } else { if (_paused) { _paused = false; logger.info("controller is now resumed"); ClusterEvent event = new ClusterEvent("resume"); event.addAttribute("changeContext", changeContext); event.addAttribute("helixmanager", changeContext.getManager()); event.addAttribute("eventData", pauseSignal); _eventQueue.put(event); } } if (_clusterStatusMonitor == null) { _clusterStatusMonitor = new ClusterStatusMonitor(changeContext.getManager().getClusterName()); } _clusterStatusMonitor.setEnabled(!_paused); logger.info("END: GenericClusterController.onControllerChange() for cluster " + _clusterName); }
#vulnerable code @Override public void onControllerChange(NotificationContext changeContext) { logger.info("START: GenericClusterController.onControllerChange() for cluster " + _clusterName); _cache.requireFullRefresh(); if (changeContext != null && changeContext.getType() == Type.FINALIZE) { logger.info("GenericClusterController.onControllerChange() FINALIZE for cluster " + _clusterName); return; } HelixDataAccessor accessor = changeContext.getManager().getHelixDataAccessor(); // double check if this controller is the leader Builder keyBuilder = accessor.keyBuilder(); LiveInstance leader = accessor.getProperty(keyBuilder.controllerLeader()); if (leader == null) { logger .warn("No controller exists for cluster:" + changeContext.getManager().getClusterName()); return; } else { String leaderName = leader.getInstanceName(); String instanceName = changeContext.getManager().getInstanceName(); if (leaderName == null || !leaderName.equals(instanceName)) { logger.warn("leader name does NOT match, my name: " + instanceName + ", leader: " + leader); return; } } PauseSignal pauseSignal = accessor.getProperty(keyBuilder.pause()); if (pauseSignal != null) { if (!_paused) { _paused = true; logger.info("controller is now paused"); } } else { if (_paused) { _paused = false; logger.info("controller is now resumed"); ClusterEvent event = new ClusterEvent("resume"); event.addAttribute("changeContext", changeContext); event.addAttribute("helixmanager", changeContext.getManager()); event.addAttribute("eventData", pauseSignal); _eventQueue.put(event); } } if (_clusterStatusMonitor == null) { _clusterStatusMonitor = new ClusterStatusMonitor(changeContext.getManager().getClusterName()); } List<IdealState> idealStates = Collections.emptyList(); if (_cache.getIdealStates() != null) { idealStates = new ArrayList<>(_cache.getIdealStates().values()); } checkRebalancingTimer(changeContext.getManager(), idealStates, _cache.getClusterConfig()); _clusterStatusMonitor.setEnabled(!_paused); 
logger.info("END: GenericClusterController.onControllerChange() for cluster " + _clusterName); } #location 52 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Override public void process(ClusterEvent event) throws Exception { ClusterDataCache cache = event.getAttribute("ClusterDataCache"); Map<String, ResourceGroup> resourceGroupMap = event .getAttribute(AttributeName.RESOURCE_GROUPS.toString()); MessageGenerationOutput messageGenOutput = event .getAttribute(AttributeName.MESSAGES_ALL.toString()); if (cache == null || resourceGroupMap == null || messageGenOutput == null) { throw new StageException("Missing attributes in event:" + event + ". Requires DataCache|RESOURCE_GROUPS|MESSAGES_ALL"); } MessageSelectionStageOutput output = new MessageSelectionStageOutput(); for (String resourceGroupName : resourceGroupMap.keySet()) { ResourceGroup resourceGroup = resourceGroupMap.get(resourceGroupName); StateModelDefinition stateModelDef = cache.getStateModelDef(resourceGroup.getStateModelDefRef()); for (ResourceKey resource : resourceGroup.getResourceKeys()) { List<Message> messages = messageGenOutput.getMessages( resourceGroupName, resource); List<Message> selectedMessages = selectMessages(messages, stateModelDef); output.addMessages(resourceGroupName, resource, selectedMessages); } } event.addAttribute(AttributeName.MESSAGES_SELECTED.toString(), output); }
#vulnerable code @Override public void process(ClusterEvent event) throws Exception { // ClusterManager manager = event.getAttribute("clustermanager"); // if (manager == null) // { // throw new StageException("ClusterManager attribute value is null"); // } ClusterDataCache cache = event.getAttribute("ClusterDataCache"); Map<String, ResourceGroup> resourceGroupMap = event .getAttribute(AttributeName.RESOURCE_GROUPS.toString()); MessageGenerationOutput messageGenOutput = event .getAttribute(AttributeName.MESSAGES_ALL.toString()); MessageSelectionStageOutput output = new MessageSelectionStageOutput(); for (String resourceGroupName : resourceGroupMap.keySet()) { ResourceGroup resourceGroup = resourceGroupMap.get(resourceGroupName); StateModelDefinition stateModelDef = cache.getStateModelDef(resourceGroup.getStateModelDefRef()); for (ResourceKey resource : resourceGroup.getResourceKeys()) { List<Message> messages = messageGenOutput.getMessages( resourceGroupName, resource); List<Message> selectedMessages = selectMessages(messages, stateModelDef); output.addMessages(resourceGroupName, resource, selectedMessages); } } event.addAttribute(AttributeName.MESSAGES_SELECTED.toString(), output); } #location 16 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Override public int send(final Criteria recipientCriteria, final Message message, AsyncCallback callbackOnReply) { Map<InstanceType, List<Message>> generateMessage = generateMessage( recipientCriteria, message); int totalMessageCount = 0; String correlationId = null; if (callbackOnReply != null) { correlationId = UUID.randomUUID().toString(); for (List<Message> messages : generateMessage.values()) { totalMessageCount += messages.size(); callbackOnReply.setMessagesSent(messages); } _asyncCallbackService.registerAsyncCallback(correlationId, callbackOnReply); } for (InstanceType receiverType : generateMessage.keySet()) { List<Message> list = generateMessage.get(receiverType); for (Message tempMessage : list) { if(correlationId != null) { tempMessage.setCorrelationId(correlationId); } if (receiverType == InstanceType.CONTROLLER) { _manager.getDataAccessor().setControllerProperty(ControllerPropertyType.MESSAGES, tempMessage.getRecord(), CreateMode.PERSISTENT); } if (receiverType == InstanceType.PARTICIPANT) { _manager.getDataAccessor().setInstanceProperty(message.getTgtName(), InstancePropertyType.MESSAGES, tempMessage.getId(), tempMessage.getRecord()); } } } return totalMessageCount; }
#vulnerable code @Override public int send(final Criteria recipientCriteria, final Message message, AsyncCallback callbackOnReply) { Map<InstanceType, List<Message>> generateMessage = generateMessage( recipientCriteria, message); int totalMessageCount = 0; String correlationId = null; if (callbackOnReply != null) { correlationId = UUID.randomUUID().toString(); for (List<Message> messages : generateMessage.values()) { totalMessageCount += messages.size(); callbackOnReply.setMessagesSent(messages); } _asyncCallbackService.registerAsyncCallback(correlationId, callbackOnReply); } for (InstanceType receiverType : generateMessage.keySet()) { List<Message> list = generateMessage.get(receiverType); for (Message tempMessage : list) { tempMessage.setId(UUID.randomUUID().toString()); if(correlationId != null) { tempMessage.setCorrelationId(correlationId); } if (receiverType == InstanceType.CONTROLLER) { _dataAccessor.setControllerProperty(ControllerPropertyType.MESSAGES, tempMessage.getRecord(), CreateMode.PERSISTENT); } if (receiverType == InstanceType.PARTICIPANT) { _dataAccessor.setInstanceProperty(message.getTgtName(), InstancePropertyType.MESSAGES, tempMessage.getId(), tempMessage.getRecord()); } } } return totalMessageCount; } #location 26 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Test() public void TestSchedulerZeroMsg() throws Exception { TestMessagingHandlerFactory factory = new TestMessagingHandlerFactory(); HelixManager manager = null; for (int i = 0; i < NODE_NR; i++) { String hostDest = "localhost_" + (START_PORT + i); _startCMResultMap.get(hostDest)._manager.getMessagingService() .registerMessageHandlerFactory(factory.getMessageType(), factory); manager = _startCMResultMap.get(hostDest)._manager; } Message schedulerMessage = new Message(MessageType.SCHEDULER_MSG + "", UUID .randomUUID().toString()); schedulerMessage.setTgtSessionId("*"); schedulerMessage.setTgtName("CONTROLLER"); // TODO: change it to "ADMIN" ? schedulerMessage.setSrcName("CONTROLLER"); // Template for the individual message sent to each participant Message msg = new Message(factory.getMessageType(), "Template"); msg.setTgtSessionId("*"); msg.setMsgState(MessageState.NEW); // Criteria to send individual messages Criteria cr = new Criteria(); cr.setInstanceName("localhost_DOESNOTEXIST"); cr.setRecipientInstanceType(InstanceType.PARTICIPANT); cr.setSessionSpecific(false); cr.setResource("%"); cr.setPartition("%"); ObjectMapper mapper = new ObjectMapper(); SerializationConfig serializationConfig = mapper.getSerializationConfig(); serializationConfig.set(SerializationConfig.Feature.INDENT_OUTPUT, true); StringWriter sw = new StringWriter(); mapper.writeValue(sw, cr); String crString = sw.toString(); schedulerMessage.getRecord().setSimpleField("Criteria", crString); schedulerMessage.getRecord().setMapField("MessageTemplate", msg.getRecord().getSimpleFields()); schedulerMessage.getRecord().setSimpleField("TIMEOUT", "-1"); HelixDataAccessor helixDataAccessor = manager.getHelixDataAccessor(); Builder keyBuilder = helixDataAccessor.keyBuilder(); PropertyKey controllerMessageKey = keyBuilder .controllerMessage(schedulerMessage.getMsgId()); helixDataAccessor.setProperty(controllerMessageKey, schedulerMessage); Thread.sleep(3000); Assert.assertEquals(0, 
factory._results.size()); PropertyKey controllerTaskStatus = keyBuilder.controllerTaskStatus( MessageType.SCHEDULER_MSG.toString(), schedulerMessage.getMsgId()); for(int i = 0; i< 10; i++) { StatusUpdate update = helixDataAccessor.getProperty(controllerTaskStatus); if(update == null || update.getRecord().getMapField("SentMessageCount") == null) { Thread.sleep(1000); } } ZNRecord statusUpdate = helixDataAccessor.getProperty(controllerTaskStatus) .getRecord(); Assert.assertTrue(statusUpdate.getMapField("SentMessageCount") .get("MessageCount").equals("0")); int count = 0; for (Set<String> val : factory._results.values()) { count += val.size(); } Assert.assertEquals(count, 0); }
#vulnerable code @Test() public void TestSchedulerZeroMsg() throws Exception { TestMessagingHandlerFactory factory = new TestMessagingHandlerFactory(); HelixManager manager = null; for (int i = 0; i < NODE_NR; i++) { String hostDest = "localhost_" + (START_PORT + i); _startCMResultMap.get(hostDest)._manager.getMessagingService() .registerMessageHandlerFactory(factory.getMessageType(), factory); manager = _startCMResultMap.get(hostDest)._manager; } Message schedulerMessage = new Message(MessageType.SCHEDULER_MSG + "", UUID .randomUUID().toString()); schedulerMessage.setTgtSessionId("*"); schedulerMessage.setTgtName("CONTROLLER"); // TODO: change it to "ADMIN" ? schedulerMessage.setSrcName("CONTROLLER"); // Template for the individual message sent to each participant Message msg = new Message(factory.getMessageType(), "Template"); msg.setTgtSessionId("*"); msg.setMsgState(MessageState.NEW); // Criteria to send individual messages Criteria cr = new Criteria(); cr.setInstanceName("localhost_DOESNOTEXIST"); cr.setRecipientInstanceType(InstanceType.PARTICIPANT); cr.setSessionSpecific(false); cr.setResource("%"); cr.setPartition("%"); ObjectMapper mapper = new ObjectMapper(); SerializationConfig serializationConfig = mapper.getSerializationConfig(); serializationConfig.set(SerializationConfig.Feature.INDENT_OUTPUT, true); StringWriter sw = new StringWriter(); mapper.writeValue(sw, cr); String crString = sw.toString(); schedulerMessage.getRecord().setSimpleField("Criteria", crString); schedulerMessage.getRecord().setMapField("MessageTemplate", msg.getRecord().getSimpleFields()); schedulerMessage.getRecord().setSimpleField("TIMEOUT", "-1"); HelixDataAccessor helixDataAccessor = manager.getHelixDataAccessor(); Builder keyBuilder = helixDataAccessor.keyBuilder(); PropertyKey controllerMessageKey = keyBuilder .controllerMessage(schedulerMessage.getMsgId()); helixDataAccessor.setProperty(controllerMessageKey, schedulerMessage); Thread.sleep(3000); Assert.assertEquals(0, 
factory._results.size()); PropertyKey controllerTaskStatus = keyBuilder.controllerTaskStatus( MessageType.SCHEDULER_MSG.toString(), schedulerMessage.getMsgId()); for(int i = 0; i< 10; i++) { if(helixDataAccessor.getProperty(controllerTaskStatus) == null) { Thread.sleep(1000); } } ZNRecord statusUpdate = helixDataAccessor.getProperty(controllerTaskStatus) .getRecord(); Assert.assertTrue(statusUpdate.getMapField("SentMessageCount") .get("MessageCount").equals("0")); int count = 0; for (Set<String> val : factory._results.values()) { count += val.size(); } Assert.assertEquals(count, 0); } #location 67 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code public void shutdown() throws InterruptedException { stopRebalancingTimer(); terminateEventThread(_eventThread); terminateEventThread(_taskEventThread); _asyncTasksThreadPool.shutdown(); }
#vulnerable code public void shutdown() throws InterruptedException { stopRebalancingTimer(); while (_eventThread.isAlive()) { _eventThread.interrupt(); _eventThread.join(EVENT_THREAD_JOIN_TIMEOUT); } _asyncTasksThreadPool.shutdown(); } #location 2 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the following information.
#fixed code private void scheduleSingleJob(String jobResource, JobConfig jobConfig) { HelixAdmin admin = _manager.getClusterManagmentTool(); IdealState jobIS = admin.getResourceIdealState(_manager.getClusterName(), jobResource); if (jobIS != null) { LOG.info("Job " + jobResource + " idealstate already exists!"); return; } // Set up job resource based on partitions from target resource int numIndependentTasks = jobConfig.getTaskConfigMap().size(); int numPartitions = numIndependentTasks; if (numPartitions == 0) { IdealState targetIs = admin.getResourceIdealState(_manager.getClusterName(), jobConfig.getTargetResource()); if (targetIs == null) { LOG.warn("Target resource does not exist for job " + jobResource); // do not need to fail here, the job will be marked as failure immediately when job starts running. } else { numPartitions = targetIs.getPartitionSet().size(); } } admin.addResource(_manager.getClusterName(), jobResource, numPartitions, TaskConstants.STATE_MODEL_NAME); HelixDataAccessor accessor = _manager.getHelixDataAccessor(); // Set the job configuration PropertyKey.Builder keyBuilder = accessor.keyBuilder(); HelixProperty resourceConfig = new HelixProperty(jobResource); resourceConfig.getRecord().getSimpleFields().putAll(jobConfig.getResourceConfigMap()); Map<String, TaskConfig> taskConfigMap = jobConfig.getTaskConfigMap(); if (taskConfigMap != null) { for (TaskConfig taskConfig : taskConfigMap.values()) { resourceConfig.getRecord().setMapField(taskConfig.getId(), taskConfig.getConfigMap()); } } accessor.setProperty(keyBuilder.resourceConfig(jobResource), resourceConfig); // Push out new ideal state based on number of target partitions CustomModeISBuilder builder = new CustomModeISBuilder(jobResource); builder.setRebalancerMode(IdealState.RebalanceMode.TASK); builder.setNumReplica(1); builder.setNumPartitions(numPartitions); builder.setStateModel(TaskConstants.STATE_MODEL_NAME); if (jobConfig.isDisableExternalView()) { builder.disableExternalView(); } 
jobIS = builder.build(); for (int i = 0; i < numPartitions; i++) { jobIS.getRecord().setListField(jobResource + "_" + i, new ArrayList<String>()); jobIS.getRecord().setMapField(jobResource + "_" + i, new HashMap<String, String>()); } jobIS.setRebalancerClassName(JobRebalancer.class.getName()); admin.setResourceIdealState(_manager.getClusterName(), jobResource, jobIS); }
#vulnerable code private void scheduleSingleJob(String jobResource, JobConfig jobConfig) { HelixAdmin admin = _manager.getClusterManagmentTool(); IdealState jobIS = admin.getResourceIdealState(_manager.getClusterName(), jobResource); if (jobIS != null) { LOG.info("Job " + jobResource + " idealstate already exists!"); return; } // Set up job resource based on partitions from target resource int numIndependentTasks = jobConfig.getTaskConfigMap().size(); int numPartitions = (numIndependentTasks > 0) ? numIndependentTasks : admin.getResourceIdealState(_manager.getClusterName(), jobConfig.getTargetResource()) .getPartitionSet().size(); admin.addResource(_manager.getClusterName(), jobResource, numPartitions, TaskConstants.STATE_MODEL_NAME); HelixDataAccessor accessor = _manager.getHelixDataAccessor(); // Set the job configuration PropertyKey.Builder keyBuilder = accessor.keyBuilder(); HelixProperty resourceConfig = new HelixProperty(jobResource); resourceConfig.getRecord().getSimpleFields().putAll(jobConfig.getResourceConfigMap()); Map<String, TaskConfig> taskConfigMap = jobConfig.getTaskConfigMap(); if (taskConfigMap != null) { for (TaskConfig taskConfig : taskConfigMap.values()) { resourceConfig.getRecord().setMapField(taskConfig.getId(), taskConfig.getConfigMap()); } } accessor.setProperty(keyBuilder.resourceConfig(jobResource), resourceConfig); // Push out new ideal state based on number of target partitions CustomModeISBuilder builder = new CustomModeISBuilder(jobResource); builder.setRebalancerMode(IdealState.RebalanceMode.TASK); builder.setNumReplica(1); builder.setNumPartitions(numPartitions); builder.setStateModel(TaskConstants.STATE_MODEL_NAME); if (jobConfig.isDisableExternalView()) { builder.disableExternalView(); } jobIS = builder.build(); for (int i = 0; i < numPartitions; i++) { jobIS.getRecord().setListField(jobResource + "_" + i, new ArrayList<String>()); jobIS.getRecord().setMapField(jobResource + "_" + i, new HashMap<String, String>()); } 
jobIS.setRebalancerClassName(JobRebalancer.class.getName()); admin.setResourceIdealState(_manager.getClusterName(), jobResource, jobIS); } #location 15 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code; please generate the patch based on the following information.
#fixed code void sendData() { ConcurrentHashMap<String, ZNRecordUpdate> updateCache = null; synchronized(_dataBufferRef) { updateCache = _dataBufferRef.getAndSet(new ConcurrentHashMap<String, ZNRecordUpdate>()); } if(updateCache != null) { List<String> paths = new ArrayList<String>(); List<DataUpdater<ZNRecord>> updaters = new ArrayList<DataUpdater<ZNRecord>>(); List<ZNRecord> vals = new ArrayList<ZNRecord>(); for(ZNRecordUpdate holder : updateCache.values()) { paths.add(holder.getPath()); updaters.add(holder.getZNRecordUpdater()); vals.add(holder.getRecord()); } // Batch write the accumulated updates into zookeeper if(paths.size() > 0) { _accessor.updateChildren(paths, updaters, BaseDataAccessor.Option.PERSISTENT); } LOG.info("Updating " + vals.size() + " records"); } else { LOG.warn("null _dataQueueRef. Should be in the beginning only"); } }
#vulnerable code void enqueueData(ZNRecordUpdate e) { if(!_initialized || _shutdownFlag) { LOG.error("inited:" + _initialized + " shutdownFlag:"+_shutdownFlag+" , return"); return; } // Do local merge if receive multiple update on the same path synchronized(_dataBufferRef) { if(_dataBufferRef.get().containsKey(e.getPath())) { ZNRecord oldVal = _dataBufferRef.get().get(e.getPath()).getRecord(); oldVal = e.getZNRecordUpdater().update(oldVal); _dataBufferRef.get().get(e.getPath())._record = oldVal; } else { _dataBufferRef.get().put(e.getPath(), e); } } if(_dataBufferRef.get().size() > MAX_UPDATE_LIMIT) { sendData(); } } #location 24 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the following information.
#fixed code @Test public void testDeletingRecurrentQueueWithHistory() throws Exception { final String queueName = TestHelper.getTestMethodName(); // Create a queue LOG.info("Starting job-queue: " + queueName); JobQueue.Builder queueBuild = TaskTestUtil.buildRecurrentJobQueue(queueName, 0, 60, TargetState.STOP); createAndEnqueueJob(queueBuild, 2); _driver.createQueue(queueBuild.build()); WorkflowConfig workflowConfig = _driver.getWorkflowConfig(queueName); Assert.assertEquals(workflowConfig.getTargetState(), TargetState.STOP); _driver.resume(queueName); WorkflowContext wCtx; // wait until at least 2 workflows are scheduled based on template queue do { Thread.sleep(60000); wCtx = TaskTestUtil.pollForWorkflowContext(_driver, queueName); } while (wCtx.getScheduledWorkflows().size() < 2); // Stop recurring workflow _driver.stop(queueName); _driver.pollForWorkflowState(queueName, TaskState.STOPPED); // Record all scheduled workflows wCtx = TaskTestUtil.pollForWorkflowContext(_driver, queueName); List<String> scheduledWorkflows = new ArrayList<String>(wCtx.getScheduledWorkflows()); final String lastScheduledWorkflow = wCtx.getLastScheduledSingleWorkflow(); // Delete recurrent workflow _driver.delete(queueName); // Wait until recurrent workflow and the last scheduled workflow are cleaned up boolean result = TestHelper.verify(new TestHelper.Verifier() { @Override public boolean verify() throws Exception { WorkflowContext wCtx = _driver.getWorkflowContext(queueName); WorkflowContext lastWfCtx = _driver.getWorkflowContext(lastScheduledWorkflow); return (wCtx == null && lastWfCtx == null); } }, 5 * 1000); Assert.assertTrue(result); for (String scheduledWorkflow : scheduledWorkflows) { WorkflowContext scheduledWorkflowCtx = _driver.getWorkflowContext(scheduledWorkflow); WorkflowConfig scheduledWorkflowCfg = _driver.getWorkflowConfig(scheduledWorkflow); Assert.assertNull(scheduledWorkflowCtx); Assert.assertNull(scheduledWorkflowCfg); } }
#vulnerable code @Test public void testDeletingRecurrentQueueWithHistory() throws Exception { final String queueName = TestHelper.getTestMethodName(); int intervalSeconds = 3; // Create a queue LOG.info("Starting job-queue: " + queueName); JobQueue.Builder queueBuild = TaskTestUtil.buildRecurrentJobQueue(queueName, 0, 60, TargetState.STOP); createAndEnqueueJob(queueBuild, 2); _driver.createQueue(queueBuild.build()); WorkflowConfig workflowConfig = _driver.getWorkflowConfig(queueName); Assert.assertEquals(workflowConfig.getTargetState(), TargetState.STOP); // reset interval to a smaller number so as to accelerate test workflowConfig.putSimpleConfig(WorkflowConfig.WorkflowConfigProperty.RecurrenceInterval.name(), "" + intervalSeconds); _driver.updateWorkflow(queueName, workflowConfig); _driver.resume(queueName); WorkflowContext wCtx; // wait until at least 2 workflows are scheduled based on template queue do { Thread.sleep(intervalSeconds); wCtx = TaskTestUtil.pollForWorkflowContext(_driver, queueName); } while (wCtx.getScheduledWorkflows().size() < 2); // Stop recurring workflow _driver.stop(queueName); _driver.pollForWorkflowState(queueName, TaskState.STOPPED); // Record all scheduled workflows wCtx = TaskTestUtil.pollForWorkflowContext(_driver, queueName); List<String> scheduledWorkflows = new ArrayList<String>(wCtx.getScheduledWorkflows()); final String lastScheduledWorkflow = wCtx.getLastScheduledSingleWorkflow(); // Delete recurrent workflow _driver.delete(queueName); // Wait until recurrent workflow and the last scheduled workflow are cleaned up boolean result = TestHelper.verify(new TestHelper.Verifier() { @Override public boolean verify() throws Exception { WorkflowContext wCtx = _driver.getWorkflowContext(queueName); WorkflowContext lastWfCtx = _driver.getWorkflowContext(lastScheduledWorkflow); return (wCtx == null && lastWfCtx == null); } }, 5 * 1000); Assert.assertTrue(result); for (String scheduledWorkflow : scheduledWorkflows) { WorkflowContext 
scheduledWorkflowCtx = _driver.getWorkflowContext(scheduledWorkflow); WorkflowConfig scheduledWorkflowCfg = _driver.getWorkflowConfig(scheduledWorkflow); Assert.assertNull(scheduledWorkflowCtx); Assert.assertNull(scheduledWorkflowCfg); } } #location 14 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code; please generate the patch based on the following information.
#fixed code @Override public void disconnect() { if (_zkclient == null || _zkclient.isClosed()) { LOG.info("instanceName: " + _instanceName + " already disconnected"); return; } LOG.info("disconnect " + _instanceName + "(" + _instanceType + ") from " + _clusterName); try { /** * stop all timer tasks */ stopTimerTasks(); /** * shutdown thread pool first to avoid reset() being invoked in the middle of state * transition */ _messagingService.getExecutor().shutdown(); if (!cleanupCallbackHandlers()) { LOG.warn( "The callback handler cleanup has been cleanly done. " + "Some callback handlers might not be reset properly. " + "Continue to finish the other Helix Mananger disconnect tasks."); } } finally { GenericHelixController controller = _controller; if (controller != null) { try { controller.shutdown(); } catch (InterruptedException e) { LOG.info("Interrupted shutting down GenericHelixController", e); } } for (HelixCallbackMonitor callbackMonitor : _callbackMonitors.values()) { callbackMonitor.unregister(); } _helixPropertyStore = null; synchronized (this) { if (_controller != null) { _controller = null; _leaderElectionHandler = null; } if (_participantManager != null) { _participantManager = null; } if (_zkclient != null) { _zkclient.close(); } } _sessionStartTime = null; LOG.info("Cluster manager: " + _instanceName + " disconnected"); } }
#vulnerable code @Override public void disconnect() { if (_zkclient == null || _zkclient.isClosed()) { LOG.info("instanceName: " + _instanceName + " already disconnected"); return; } LOG.info("disconnect " + _instanceName + "(" + _instanceType + ") from " + _clusterName); try { /** * stop all timer tasks */ stopTimerTasks(); /** * shutdown thread pool first to avoid reset() being invoked in the middle of state * transition */ _messagingService.getExecutor().shutdown(); // TODO reset user defined handlers only // TODO Fix the issue that when connection disconnected, reset handlers will be blocked. -- JJ // This is because reset logic contains ZK operations. resetHandlers(true); if (_leaderElectionHandler != null) { _leaderElectionHandler.reset(true); } } finally { GenericHelixController controller = _controller; if (controller != null) { try { controller.shutdown(); } catch (InterruptedException e) { LOG.info("Interrupted shutting down GenericHelixController", e); } } ParticipantManager participantManager = _participantManager; if (participantManager != null) { participantManager.disconnect(); } for (HelixCallbackMonitor callbackMonitor : _callbackMonitors.values()) { callbackMonitor.unregister(); } _helixPropertyStore = null; synchronized (this) { if (_controller != null) { _controller = null; _leaderElectionHandler = null; } if (_participantManager != null) { _participantManager = null; } if (_zkclient != null) { _zkclient.close(); } } _sessionStartTime = null; LOG.info("Cluster manager: " + _instanceName + " disconnected"); } } #location 28 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the following information.
#fixed code @Test public void testZkClientMonitor() throws Exception { final String TEST_TAG = "test_monitor"; final String TEST_KEY = "test_key"; final String TEST_DATA = "testData"; final String TEST_ROOT = "/my_cluster/IDEALSTATES"; final String TEST_NODE = "/test_zkclient_monitor"; final String TEST_PATH = TEST_ROOT + TEST_NODE; ZkClient.Builder builder = new ZkClient.Builder(); builder.setZkServer(ZK_ADDR).setMonitorKey(TEST_KEY).setMonitorType(TEST_TAG) .setMonitorRootPathOnly(false); ZkClient zkClient = builder.build(); final long TEST_DATA_SIZE = zkClient.serialize(TEST_DATA, TEST_PATH).length; if (_zkClient.exists(TEST_PATH)) { _zkClient.delete(TEST_PATH); } if (!_zkClient.exists(TEST_ROOT)) { _zkClient.createPersistent(TEST_ROOT, true); } MBeanServer beanServer = ManagementFactory.getPlatformMBeanServer(); ObjectName name = MBeanRegistrar .buildObjectName(MonitorDomainNames.HelixZkClient.name(), ZkClientMonitor.MONITOR_TYPE, TEST_TAG, ZkClientMonitor.MONITOR_KEY, TEST_KEY); ObjectName rootname = MBeanRegistrar .buildObjectName(MonitorDomainNames.HelixZkClient.name(), ZkClientMonitor.MONITOR_TYPE, TEST_TAG, ZkClientMonitor.MONITOR_KEY, TEST_KEY, ZkClientPathMonitor.MONITOR_PATH, "Root"); ObjectName idealStatename = MBeanRegistrar .buildObjectName(MonitorDomainNames.HelixZkClient.name(), ZkClientMonitor.MONITOR_TYPE, TEST_TAG, ZkClientMonitor.MONITOR_KEY, TEST_KEY, ZkClientPathMonitor.MONITOR_PATH, "IdealStates"); Assert.assertTrue(beanServer.isRegistered(rootname)); Assert.assertTrue(beanServer.isRegistered(idealStatename)); // Test exists Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadTotalLatencyCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadLatencyGauge.Max"), 0); zkClient.exists(TEST_ROOT); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 1); Assert.assertTrue((long) 
beanServer.getAttribute(rootname, "ReadTotalLatencyCounter") >= 0); Assert.assertTrue((long) beanServer.getAttribute(rootname, "ReadLatencyGauge.Max") >= 0); // Test create Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteBytesCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteBytesCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteTotalLatencyCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteLatencyGauge.Max"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteTotalLatencyCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteLatencyGauge.Max"), 0); zkClient.create(TEST_PATH, TEST_DATA, CreateMode.PERSISTENT); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteCounter"), 1); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteBytesCounter"), TEST_DATA_SIZE); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteCounter"), 1); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteBytesCounter"), TEST_DATA_SIZE); long origWriteTotalLatencyCounter = (long) beanServer.getAttribute(rootname, "WriteTotalLatencyCounter"); Assert.assertTrue(origWriteTotalLatencyCounter >= 0); Assert.assertTrue((long) beanServer.getAttribute(rootname, "WriteLatencyGauge.Max") >= 0); long origIdealStatesWriteTotalLatencyCounter = (long) beanServer.getAttribute(idealStatename, "WriteTotalLatencyCounter"); Assert.assertTrue(origIdealStatesWriteTotalLatencyCounter >= 0); Assert.assertTrue((long) beanServer.getAttribute(idealStatename, "WriteLatencyGauge.Max") >= 0); // Test read Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 1); Assert.assertEquals((long) 
beanServer.getAttribute(rootname, "ReadBytesCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadBytesCounter"), 0); long origReadTotalLatencyCounter = (long) beanServer.getAttribute(rootname, "ReadTotalLatencyCounter"); long origIdealStatesReadTotalLatencyCounter = (long) beanServer.getAttribute(idealStatename, "ReadTotalLatencyCounter"); Assert.assertEquals(origIdealStatesReadTotalLatencyCounter, 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadLatencyGauge.Max"), 0); zkClient.readData(TEST_PATH, new Stat()); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 2); Assert .assertEquals((long) beanServer.getAttribute(rootname, "ReadBytesCounter"), TEST_DATA_SIZE); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadCounter"), 1); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadBytesCounter"), TEST_DATA_SIZE); Assert.assertTrue((long) beanServer.getAttribute(rootname, "ReadTotalLatencyCounter") >= origReadTotalLatencyCounter); Assert.assertTrue((long) beanServer.getAttribute(idealStatename, "ReadTotalLatencyCounter") >= origIdealStatesReadTotalLatencyCounter); Assert.assertTrue((long) beanServer.getAttribute(idealStatename, "ReadLatencyGauge.Max") >= 0); zkClient.getChildren(TEST_PATH); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 3); Assert .assertEquals((long) beanServer.getAttribute(rootname, "ReadBytesCounter"), TEST_DATA_SIZE); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadCounter"), 2); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadBytesCounter"), TEST_DATA_SIZE); zkClient.getStat(TEST_PATH); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 4); Assert .assertEquals((long) beanServer.getAttribute(rootname, "ReadBytesCounter"), 
TEST_DATA_SIZE); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadCounter"), 3); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadBytesCounter"), TEST_DATA_SIZE); zkClient.readDataAndStat(TEST_PATH, new Stat(), true); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 5); ZkAsyncCallbacks.ExistsCallbackHandler callbackHandler = new ZkAsyncCallbacks.ExistsCallbackHandler(); zkClient.asyncExists(TEST_PATH, callbackHandler); callbackHandler.waitForSuccess(); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 6); // Test write zkClient.writeData(TEST_PATH, TEST_DATA); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteCounter"), 2); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteBytesCounter"), TEST_DATA_SIZE * 2); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteCounter"), 2); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteBytesCounter"), TEST_DATA_SIZE * 2); Assert.assertTrue((long) beanServer.getAttribute(rootname, "WriteTotalLatencyCounter") >= origWriteTotalLatencyCounter); Assert.assertTrue((long) beanServer.getAttribute(idealStatename, "WriteTotalLatencyCounter") >= origIdealStatesWriteTotalLatencyCounter); // Test data change count final Lock lock = new ReentrantLock(); final Condition callbackFinish = lock.newCondition(); zkClient.subscribeDataChanges(TEST_PATH, new IZkDataListener() { @Override public void handleDataChange(String dataPath, Object data) throws Exception { } @Override public void handleDataDeleted(String dataPath) throws Exception { lock.lock(); try { callbackFinish.signal(); } finally { lock.unlock(); } } }); lock.lock(); _zkClient.delete(TEST_PATH); Assert.assertTrue(callbackFinish.await(10, TimeUnit.SECONDS)); Assert.assertEquals((long) beanServer.getAttribute(name, "DataChangeEventCounter"), 1); }
#vulnerable code @Test public void testZkClientMonitor() throws Exception { final String TEST_TAG = "test_monitor"; final String TEST_KEY = "test_key"; final String TEST_DATA = "testData"; final String TEST_ROOT = "/my_cluster/IDEALSTATES"; final String TEST_NODE = "/test_zkclient_monitor"; final String TEST_PATH = TEST_ROOT + TEST_NODE; ZkClient zkClient = new ZkClient(ZK_ADDR, TEST_TAG, TEST_KEY); final long TEST_DATA_SIZE = zkClient.serialize(TEST_DATA, TEST_PATH).length; if (_zkClient.exists(TEST_PATH)) { _zkClient.delete(TEST_PATH); } if (!_zkClient.exists(TEST_ROOT)) { _zkClient.createPersistent(TEST_ROOT, true); } MBeanServer beanServer = ManagementFactory.getPlatformMBeanServer(); ObjectName name = MBeanRegistrar .buildObjectName(MonitorDomainNames.HelixZkClient.name(), ZkClientMonitor.MONITOR_TYPE, TEST_TAG, ZkClientMonitor.MONITOR_KEY, TEST_KEY); ObjectName rootname = MBeanRegistrar .buildObjectName(MonitorDomainNames.HelixZkClient.name(), ZkClientMonitor.MONITOR_TYPE, TEST_TAG, ZkClientMonitor.MONITOR_KEY, TEST_KEY, ZkClientPathMonitor.MONITOR_PATH, "Root"); ObjectName idealStatename = MBeanRegistrar .buildObjectName(MonitorDomainNames.HelixZkClient.name(), ZkClientMonitor.MONITOR_TYPE, TEST_TAG, ZkClientMonitor.MONITOR_KEY, TEST_KEY, ZkClientPathMonitor.MONITOR_PATH, "IdealStates"); Assert.assertTrue(beanServer.isRegistered(rootname)); Assert.assertTrue(beanServer.isRegistered(idealStatename)); // Test exists Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadTotalLatencyCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadMaxLatencyGauge"), 0); zkClient.exists(TEST_ROOT); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 1); Assert.assertTrue((long) beanServer.getAttribute(rootname, "ReadTotalLatencyCounter") >= 0); Assert.assertTrue((long) beanServer.getAttribute(rootname, "ReadMaxLatencyGauge") >= 0); 
// Test create Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteBytesCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteBytesCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteTotalLatencyCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteMaxLatencyGauge"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteTotalLatencyCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteMaxLatencyGauge"), 0); zkClient.create(TEST_PATH, TEST_DATA, CreateMode.PERSISTENT); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteCounter"), 1); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteBytesCounter"), TEST_DATA_SIZE); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteCounter"), 1); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteBytesCounter"), TEST_DATA_SIZE); long origWriteTotalLatencyCounter = (long) beanServer.getAttribute(rootname, "WriteTotalLatencyCounter"); Assert.assertTrue(origWriteTotalLatencyCounter >= 0); Assert.assertTrue((long) beanServer.getAttribute(rootname, "WriteMaxLatencyGauge") >= 0); long origIdealStatesWriteTotalLatencyCounter = (long) beanServer.getAttribute(idealStatename, "WriteTotalLatencyCounter"); Assert.assertTrue(origIdealStatesWriteTotalLatencyCounter >= 0); Assert.assertTrue((long) beanServer.getAttribute(idealStatename, "WriteMaxLatencyGauge") >= 0); // Test read Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 1); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadBytesCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadCounter"), 0); 
Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadBytesCounter"), 0); long origReadTotalLatencyCounter = (long) beanServer.getAttribute(rootname, "ReadTotalLatencyCounter"); long origIdealStatesReadTotalLatencyCounter = (long) beanServer.getAttribute(idealStatename, "ReadTotalLatencyCounter"); Assert.assertEquals(origIdealStatesReadTotalLatencyCounter, 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadMaxLatencyGauge"), 0); zkClient.readData(TEST_PATH, new Stat()); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 2); Assert .assertEquals((long) beanServer.getAttribute(rootname, "ReadBytesCounter"), TEST_DATA_SIZE); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadCounter"), 1); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadBytesCounter"), TEST_DATA_SIZE); Assert.assertTrue((long) beanServer.getAttribute(rootname, "ReadTotalLatencyCounter") >= origReadTotalLatencyCounter); Assert.assertTrue((long) beanServer.getAttribute(idealStatename, "ReadTotalLatencyCounter") >= origIdealStatesReadTotalLatencyCounter); Assert.assertTrue((long) beanServer.getAttribute(idealStatename, "ReadMaxLatencyGauge") >= 0); zkClient.getChildren(TEST_PATH); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 3); Assert .assertEquals((long) beanServer.getAttribute(rootname, "ReadBytesCounter"), TEST_DATA_SIZE); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadCounter"), 2); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadBytesCounter"), TEST_DATA_SIZE); zkClient.getStat(TEST_PATH); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 4); Assert .assertEquals((long) beanServer.getAttribute(rootname, "ReadBytesCounter"), TEST_DATA_SIZE); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadCounter"), 3); Assert.assertEquals((long) 
beanServer.getAttribute(idealStatename, "ReadBytesCounter"), TEST_DATA_SIZE); zkClient.readDataAndStat(TEST_PATH, new Stat(), true); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 5); ZkAsyncCallbacks.ExistsCallbackHandler callbackHandler = new ZkAsyncCallbacks.ExistsCallbackHandler(); zkClient.asyncExists(TEST_PATH, callbackHandler); callbackHandler.waitForSuccess(); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 6); // Test write zkClient.writeData(TEST_PATH, TEST_DATA); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteCounter"), 2); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteBytesCounter"), TEST_DATA_SIZE * 2); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteCounter"), 2); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteBytesCounter"), TEST_DATA_SIZE * 2); Assert.assertTrue((long) beanServer.getAttribute(rootname, "WriteTotalLatencyCounter") >= origWriteTotalLatencyCounter); Assert.assertTrue((long) beanServer.getAttribute(idealStatename, "WriteTotalLatencyCounter") >= origIdealStatesWriteTotalLatencyCounter); // Test data change count final Lock lock = new ReentrantLock(); final Condition callbackFinish = lock.newCondition(); zkClient.subscribeDataChanges(TEST_PATH, new IZkDataListener() { @Override public void handleDataChange(String dataPath, Object data) throws Exception { } @Override public void handleDataDeleted(String dataPath) throws Exception { lock.lock(); try { callbackFinish.signal(); } finally { lock.unlock(); } } }); lock.lock(); _zkClient.delete(TEST_PATH); Assert.assertTrue(callbackFinish.await(10, TimeUnit.SECONDS)); Assert.assertEquals((long) beanServer.getAttribute(name, "DataChangeEventCounter"), 1); } #location 56 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code; please generate the patch based on the following information.
#fixed code @Override public void process(ClusterEvent event) throws Exception { ClusterManager manager = event.getAttribute("clustermanager"); ClusterDataCache cache = event.getAttribute("ClusterDataCache"); Map<String, ResourceGroup> resourceGroupMap = event .getAttribute(AttributeName.RESOURCE_GROUPS.toString()); CurrentStateOutput currentStateOutput = event .getAttribute(AttributeName.CURRENT_STATE.toString()); BestPossibleStateOutput bestPossibleStateOutput = event .getAttribute(AttributeName.BEST_POSSIBLE_STATE.toString()); if (manager == null || cache == null || resourceGroupMap == null || currentStateOutput == null || bestPossibleStateOutput == null) { throw new StageException("Missing attributes in event:" + event + ". Requires ClusterManager|DataCache|RESOURCE_GROUPS|CURRENT_STATE|BEST_POSSIBLE_STATE"); } Map<String, LiveInstance> liveInstances = cache.getLiveInstances(); Map<String, String> sessionIdMap = new HashMap<String, String>(); for (LiveInstance liveInstance : liveInstances.values()) { sessionIdMap.put(liveInstance.getInstanceName(), liveInstance.getSessionId()); } MessageGenerationOutput output = new MessageGenerationOutput(); for (String resourceGroupName : resourceGroupMap.keySet()) { ResourceGroup resourceGroup = resourceGroupMap.get(resourceGroupName); StateModelDefinition stateModelDef = cache.getStateModelDef(resourceGroup.getStateModelDefRef()); for (ResourceKey resource : resourceGroup.getResourceKeys()) { Map<String, String> instanceStateMap = bestPossibleStateOutput .getInstanceStateMap(resourceGroupName, resource); for (String instanceName : instanceStateMap.keySet()) { String desiredState = instanceStateMap.get(instanceName); String currentState = currentStateOutput.getCurrentState( resourceGroupName, resource, instanceName); if (currentState == null) { currentState = stateModelDef.getInitialState(); } String pendingState = currentStateOutput.getPendingState( resourceGroupName, resource, instanceName); String nextState; nextState = 
stateModelDef.getNextStateForTransition(currentState, desiredState); if (!desiredState.equalsIgnoreCase(currentState)) { if (nextState != null) { if (pendingState != null && nextState.equalsIgnoreCase(pendingState)) { if (logger.isDebugEnabled()) { logger.debug("Message already exists at" + instanceName + " to transition"+ resource.getResourceKeyName() +" from " + currentState + " to " + nextState ); } } else { Message message = createMessage(manager,resourceGroupName, resource.getResourceKeyName(), instanceName, currentState, nextState, sessionIdMap.get(instanceName), stateModelDef.getId()); output.addMessage(resourceGroupName, resource, message); } } else { logger .warn("Unable to find a next state from stateModelDefinition" + stateModelDef.getClass() + " from:" + currentState + " to:" + desiredState); } } } } } event.addAttribute(AttributeName.MESSAGES_ALL.toString(), output); }
#vulnerable code @Override public void process(ClusterEvent event) throws Exception { ClusterManager manager = event.getAttribute("clustermanager"); if (manager == null) { throw new StageException("ClusterManager attribute value is null"); } ClusterDataCache cache = event.getAttribute("ClusterDataCache"); Map<String, ResourceGroup> resourceGroupMap = event .getAttribute(AttributeName.RESOURCE_GROUPS.toString()); CurrentStateOutput currentStateOutput = event .getAttribute(AttributeName.CURRENT_STATE.toString()); BestPossibleStateOutput bestPossibleStateOutput = event .getAttribute(AttributeName.BEST_POSSIBLE_STATE.toString()); Map<String, LiveInstance> liveInstances = cache.getLiveInstances(); Map<String, String> sessionIdMap = new HashMap<String, String>(); for (LiveInstance liveInstance : liveInstances.values()) { sessionIdMap.put(liveInstance.getInstanceName(), liveInstance.getSessionId()); } MessageGenerationOutput output = new MessageGenerationOutput(); for (String resourceGroupName : resourceGroupMap.keySet()) { ResourceGroup resourceGroup = resourceGroupMap.get(resourceGroupName); StateModelDefinition stateModelDef = cache.getStateModelDef(resourceGroup.getStateModelDefRef()); for (ResourceKey resource : resourceGroup.getResourceKeys()) { Map<String, String> instanceStateMap = bestPossibleStateOutput .getInstanceStateMap(resourceGroupName, resource); for (String instanceName : instanceStateMap.keySet()) { String desiredState = instanceStateMap.get(instanceName); String currentState = currentStateOutput.getCurrentState( resourceGroupName, resource, instanceName); if (currentState == null) { currentState = stateModelDef.getInitialState(); } String pendingState = currentStateOutput.getPendingState( resourceGroupName, resource, instanceName); String nextState; nextState = stateModelDef.getNextStateForTransition(currentState, desiredState); if (!desiredState.equalsIgnoreCase(currentState)) { if (nextState != null) { if (pendingState != null && 
nextState.equalsIgnoreCase(pendingState)) { if (logger.isDebugEnabled()) { logger.debug("Message already exists at" + instanceName + " to transition"+ resource.getResourceKeyName() +" from " + currentState + " to " + nextState ); } } else { Message message = createMessage(manager,resourceGroupName, resource.getResourceKeyName(), instanceName, currentState, nextState, sessionIdMap.get(instanceName), stateModelDef.getId()); output.addMessage(resourceGroupName, resource, message); } } else { logger .warn("Unable to find a next state from stateModelDefinition" + stateModelDef.getClass() + " from:" + currentState + " to:" + desiredState); } } } } } event.addAttribute(AttributeName.MESSAGES_ALL.toString(), output); } #location 19 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code void addControllerMessageListener(MessageListener listener) { addListener(listener, new Builder(_clusterName).controllerMessages(), ChangeType.MESSAGES_CONTROLLER, new EventType[] { EventType.NodeChildrenChanged, EventType.NodeDeleted, EventType.NodeCreated }); }
#vulnerable code void disconnectInternal() { // This function can be called when the connection are in bad state(e.g. flapping), // in which isConnected() could be false and we want to disconnect from cluster. logger.info("disconnect " + _instanceName + "(" + _instanceType + ") from " + _clusterName); /** * shutdown thread pool first to avoid reset() being invoked in the middle of state * transition */ _messagingService.getExecutor().shutdown(); resetHandlers(); _helixAccessor.shutdown(); if (_leaderElectionHandler != null) { _leaderElectionHandler.reset(); } if (_participantHealthCheckInfoCollector != null) { _participantHealthCheckInfoCollector.stop(); } if (_timer != null) { _timer.cancel(); _timer = null; } if (_instanceType == InstanceType.CONTROLLER) { stopTimerTasks(); } // unsubscribe accessor from controllerChange _zkClient.unsubscribeAll(); _zkClient.close(); // HACK seems that zkClient is not sending DISCONNECT event _zkStateChangeListener.disconnect(); logger.info("Cluster manager: " + _instanceName + " disconnected"); } #location 13 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testCreateFailZkCacheBaseDataAccessor() { String className = TestHelper.getTestClassName(); String methodName = TestHelper.getTestMethodName(); String clusterName = className + "_" + methodName; System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis())); // init zkCacheDataAccessor String curStatePath = PropertyPathBuilder.getPath(PropertyType.CURRENTSTATES, clusterName, "localhost_8901"); ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<ZNRecord>(_gZkClient); ZkCacheBaseDataAccessor<ZNRecord> accessor = new ZkCacheBaseDataAccessor<ZNRecord>(baseAccessor, null, Arrays.asList(curStatePath), null); // create 10 current states for (int i = 0; i < 10; i++) { String path = PropertyPathBuilder.getPath(PropertyType.CURRENTSTATES, clusterName, "localhost_8901", "session_1", "TestDB" + i); boolean success = accessor.create(path, new ZNRecord("TestDB" + i), AccessOption.PERSISTENT); Assert.assertTrue(success, "Should succeed in create: " + path); } // create same 10 current states again, should fail for (int i = 0; i < 10; i++) { String path = PropertyPathBuilder.getPath(PropertyType.CURRENTSTATES, clusterName, "localhost_8901", "session_1", "TestDB" + i); boolean success = accessor.create(path, new ZNRecord("TestDB" + i), AccessOption.PERSISTENT); Assert.assertFalse(success, "Should fail in create due to NodeExists: " + path); } System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis())); }
#vulnerable code @Test public void testCreateFailZkCacheBaseDataAccessor() { String className = TestHelper.getTestClassName(); String methodName = TestHelper.getTestMethodName(); String clusterName = className + "_" + methodName; System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis())); // init zkCacheDataAccessor String curStatePath = PropertyPathConfig.getPath(PropertyType.CURRENTSTATES, clusterName, "localhost_8901"); ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<ZNRecord>(_gZkClient); ZkCacheBaseDataAccessor<ZNRecord> accessor = new ZkCacheBaseDataAccessor<ZNRecord>(baseAccessor, null, Arrays.asList(curStatePath), null); // create 10 current states for (int i = 0; i < 10; i++) { String path = PropertyPathConfig.getPath(PropertyType.CURRENTSTATES, clusterName, "localhost_8901", "session_1", "TestDB" + i); boolean success = accessor.create(path, new ZNRecord("TestDB" + i), AccessOption.PERSISTENT); Assert.assertTrue(success, "Should succeed in create: " + path); } // create same 10 current states again, should fail for (int i = 0; i < 10; i++) { String path = PropertyPathConfig.getPath(PropertyType.CURRENTSTATES, clusterName, "localhost_8901", "session_1", "TestDB" + i); boolean success = accessor.create(path, new ZNRecord("TestDB" + i), AccessOption.PERSISTENT); Assert.assertFalse(success, "Should fail in create due to NodeExists: " + path); } System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis())); } #location 22 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void handleNewSession() throws Exception { LOG.info( "Handle new session, instance: " + _instanceName + ", type: " + _instanceType); waitUntilConnected(); /** * stop all timer tasks, reset all handlers, make sure cleanup completed for previous session * disconnect if fail to cleanup */ stopTimerTasks(); if (_leaderElectionHandler != null) { _leaderElectionHandler.reset(false); } resetHandlers(false); /** * clean up write-through cache */ _baseDataAccessor.reset(); /** * from here on, we are dealing with new session */ if (!ZKUtil.isClusterSetup(_clusterName, _zkclient)) { throw new HelixException("Cluster structure is not set up for cluster: " + _clusterName); } _sessionStartTime = System.currentTimeMillis(); switch (_instanceType) { case PARTICIPANT: handleNewSessionAsParticipant(); break; case CONTROLLER: handleNewSessionAsController(); break; case CONTROLLER_PARTICIPANT: handleNewSessionAsParticipant(); handleNewSessionAsController(); break; case ADMINISTRATOR: case SPECTATOR: default: break; } startTimerTasks(); /** * init handlers * ok to init message handler and data-accessor twice * the second init will be skipped (see CallbackHandler) */ initHandlers(_handlers); if (_stateListener != null) { try { _stateListener.onConnected(this); } catch (Exception e) { LOG.warn("stateListener.onConnected callback fails", e); } } }
#vulnerable code @Override public void handleNewSession() throws Exception { LOG.info( "Handle new session, instance: " + _instanceName + ", type: " + _instanceType); waitUntilConnected(); /** * stop all timer tasks, reset all handlers, make sure cleanup completed for previous session * disconnect if fail to cleanup */ stopTimerTasks(); if (_leaderElectionHandler != null) { _leaderElectionHandler.reset(); } resetHandlers(); /** * clean up write-through cache */ _baseDataAccessor.reset(); /** * from here on, we are dealing with new session */ if (!ZKUtil.isClusterSetup(_clusterName, _zkclient)) { throw new HelixException("Cluster structure is not set up for cluster: " + _clusterName); } _sessionStartTime = System.currentTimeMillis(); switch (_instanceType) { case PARTICIPANT: handleNewSessionAsParticipant(); break; case CONTROLLER: handleNewSessionAsController(); break; case CONTROLLER_PARTICIPANT: handleNewSessionAsParticipant(); handleNewSessionAsController(); break; case ADMINISTRATOR: case SPECTATOR: default: break; } startTimerTasks(); /** * init handlers * ok to init message handler and data-accessor twice * the second init will be skipped (see CallbackHandler) */ initHandlers(_handlers); if (_stateListener != null) { try { _stateListener.onConnected(this); } catch (Exception e) { LOG.warn("stateListener.onConnected callback fails", e); } } } #location 13 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private BestPossibleStateOutput compute(ClusterEvent event, Map<String, Resource> resourceMap, CurrentStateOutput currentStateOutput) { ClusterDataCache cache = event.getAttribute(AttributeName.ClusterDataCache.name()); BestPossibleStateOutput output = new BestPossibleStateOutput(); HelixManager helixManager = event.getAttribute(AttributeName.helixmanager.name()); ClusterStatusMonitor clusterStatusMonitor = event.getAttribute(AttributeName.clusterStatusMonitor.name()); // Check whether the offline/disabled instance count in the cluster reaches the set limit, // if yes, pause the rebalancer. boolean isValid = validateOfflineInstancesLimit(cache, (HelixManager) event.getAttribute(AttributeName.helixmanager.name())); final List<String> failureResources = new ArrayList<>(); Iterator<Resource> itr = resourceMap.values().iterator(); while (itr.hasNext()) { Resource resource = itr.next(); boolean result = false; try { result = computeResourceBestPossibleState(event, cache, currentStateOutput, resource, output); } catch (HelixException ex) { LogUtil.logError(logger, _eventId, "Exception when calculating best possible states for " + resource.getResourceName(), ex); } if (!result) { failureResources.add(resource.getResourceName()); LogUtil.logWarn(logger, _eventId, "Failed to calculate best possible states for " + resource.getResourceName()); } } // Check and report if resource rebalance has failure updateRebalanceStatus(!isValid || !failureResources.isEmpty(), failureResources, helixManager, cache, clusterStatusMonitor, "Failed to calculate best possible states for " + failureResources.size() + " resources."); return output; }
#vulnerable code private BestPossibleStateOutput compute(ClusterEvent event, Map<String, Resource> resourceMap, CurrentStateOutput currentStateOutput) { ClusterDataCache cache = event.getAttribute(AttributeName.ClusterDataCache.name()); BestPossibleStateOutput output = new BestPossibleStateOutput(); HelixManager helixManager = event.getAttribute(AttributeName.helixmanager.name()); final List<String> failureResources = new ArrayList<>(); Iterator<Resource> itr = resourceMap.values().iterator(); while (itr.hasNext()) { Resource resource = itr.next(); boolean result = false; try { result = computeResourceBestPossibleState(event, cache, currentStateOutput, resource, output); } catch (HelixException ex) { LogUtil.logError(logger, _eventId, "Exception when calculating best possible states for " + resource.getResourceName(), ex); } if (!result) { failureResources.add(resource.getResourceName()); LogUtil.logWarn(logger, _eventId, "Failed to calculate best possible states for " + resource.getResourceName()); } } // Check and report if resource rebalance has failure ClusterStatusMonitor clusterStatusMonitor = event.getAttribute(AttributeName.clusterStatusMonitor.name()); updateRebalanceStatus(!failureResources.isEmpty(), helixManager, cache, clusterStatusMonitor, "Failed to calculate best possible states for " + failureResources.size() + " resources."); return output; } #location 31 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private BestPossibleStateOutput compute(ClusterEvent event, Map<String, Resource> resourceMap, CurrentStateOutput currentStateOutput) { ClusterDataCache cache = event.getAttribute(AttributeName.ClusterDataCache.name()); // After compute all workflows and jobs, there are still task resources need to be DROPPED Map<String, Resource> restOfResources = new HashMap<>(resourceMap); BestPossibleStateOutput output = new BestPossibleStateOutput(); final List<String> failureResources = new ArrayList<>(); // Queues only for Workflows scheduleWorkflows(resourceMap, cache, restOfResources, failureResources, currentStateOutput, output); for (String jobName : cache.getDispatchedJobs()) { updateResourceMap(jobName, resourceMap, output.getPartitionStateMap(jobName).partitionSet()); restOfResources.remove(jobName); } // Current rest of resources including: only current state left over ones // Original resource map contains workflows + jobs + other invalid resources // After removing workflows + jobs, only leftover ones will go over old rebalance pipeline. for (Resource resource : restOfResources.values()) { if (!computeResourceBestPossibleState(event, cache, currentStateOutput, resource, output)) { failureResources.add(resource.getResourceName()); LogUtil.logWarn(logger, _eventId, "Failed to calculate best possible states for " + resource.getResourceName()); } } return output; }
#vulnerable code private BestPossibleStateOutput compute(ClusterEvent event, Map<String, Resource> resourceMap, CurrentStateOutput currentStateOutput) { ClusterDataCache cache = event.getAttribute(AttributeName.ClusterDataCache.name()); // After compute all workflows and jobs, there are still task resources need to be DROPPED Map<String, Resource> restOfResources = new HashMap<>(resourceMap); BestPossibleStateOutput output = new BestPossibleStateOutput(); final List<String> failureResources = new ArrayList<>(); // Queues only for Workflows scheduleWorkflows(resourceMap, cache, restOfResources, failureResources, currentStateOutput, output); for (String jobName : cache.getDispatchedJobs()) { updateResourceMap(jobName, resourceMap, output.getPartitionStateMap(jobName).partitionSet()); restOfResources.remove(jobName); } // Current rest of resources including: only current state left over ones for (Resource resource : restOfResources.values()) { if (!computeResourceBestPossibleState(event, cache, currentStateOutput, resource, output)) { failureResources.add(resource.getResourceName()); LogUtil.logWarn(logger, _eventId, "Failed to calculate best possible states for " + resource.getResourceName()); } } return output; } #location 10 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code void startRebalancingTimer(long period, HelixManager manager) { if (period != _timerPeriod) { logger.info("Controller starting timer at period " + period); if (_rebalanceTimer != null) { _rebalanceTimer.cancel(); } _rebalanceTimer = new Timer(true); _timerPeriod = period; _rebalanceTimer .scheduleAtFixedRate(new RebalanceTask(manager), _timerPeriod, _timerPeriod); } else { logger.info("Controller already has timer at period " + _timerPeriod); } }
#vulnerable code void stopRebalancingTimer() { if (_rebalanceTimer != null) { _rebalanceTimer.cancel(); _rebalanceTimer = null; } _timerPeriod = Integer.MAX_VALUE; } #location 3 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void onControllerChange(NotificationContext changeContext) { logger.info("START: GenericClusterController.onControllerChange() for cluster " + _clusterName); _cache.requireFullRefresh(); _taskCache.requireFullRefresh(); if (changeContext != null && changeContext.getType() == Type.FINALIZE) { logger.info("GenericClusterController.onControllerChange() FINALIZE for cluster " + _clusterName); return; } HelixDataAccessor accessor = changeContext.getManager().getHelixDataAccessor(); // double check if this controller is the leader Builder keyBuilder = accessor.keyBuilder(); LiveInstance leader = accessor.getProperty(keyBuilder.controllerLeader()); if (leader == null) { logger .warn("No controller exists for cluster:" + changeContext.getManager().getClusterName()); return; } else { String leaderName = leader.getInstanceName(); String instanceName = changeContext.getManager().getInstanceName(); if (leaderName == null || !leaderName.equals(instanceName)) { logger.warn("leader name does NOT match, my name: " + instanceName + ", leader: " + leader); return; } } PauseSignal pauseSignal = accessor.getProperty(keyBuilder.pause()); if (pauseSignal != null) { if (!_paused) { _paused = true; logger.info("controller is now paused"); } } else { if (_paused) { _paused = false; logger.info("controller is now resumed"); ClusterEvent event = new ClusterEvent(_clusterName, ClusterEventType.Resume); event.addAttribute(AttributeName.changeContext.name(), changeContext); event.addAttribute(AttributeName.helixmanager.name(), changeContext.getManager()); event.addAttribute(AttributeName.eventData.name(), pauseSignal); _eventQueue.put(event); _taskEventQueue.put(event.clone()); } } if (_clusterStatusMonitor == null) { _clusterStatusMonitor = new ClusterStatusMonitor(changeContext.getManager().getClusterName()); } _clusterStatusMonitor.setEnabled(!_paused); logger.info("END: GenericClusterController.onControllerChange() for cluster " + _clusterName); }
#vulnerable code @Override public void onControllerChange(NotificationContext changeContext) { logger.info("START: GenericClusterController.onControllerChange() for cluster " + _clusterName); _cache.requireFullRefresh(); if (changeContext != null && changeContext.getType() == Type.FINALIZE) { logger.info("GenericClusterController.onControllerChange() FINALIZE for cluster " + _clusterName); return; } HelixDataAccessor accessor = changeContext.getManager().getHelixDataAccessor(); // double check if this controller is the leader Builder keyBuilder = accessor.keyBuilder(); LiveInstance leader = accessor.getProperty(keyBuilder.controllerLeader()); if (leader == null) { logger .warn("No controller exists for cluster:" + changeContext.getManager().getClusterName()); return; } else { String leaderName = leader.getInstanceName(); String instanceName = changeContext.getManager().getInstanceName(); if (leaderName == null || !leaderName.equals(instanceName)) { logger.warn("leader name does NOT match, my name: " + instanceName + ", leader: " + leader); return; } } PauseSignal pauseSignal = accessor.getProperty(keyBuilder.pause()); if (pauseSignal != null) { if (!_paused) { _paused = true; logger.info("controller is now paused"); } } else { if (_paused) { _paused = false; logger.info("controller is now resumed"); ClusterEvent event = new ClusterEvent(_clusterName, ClusterEventType.Resume); event.addAttribute(AttributeName.changeContext.name(), changeContext); event.addAttribute(AttributeName.helixmanager.name(), changeContext.getManager()); event.addAttribute(AttributeName.eventData.name(), pauseSignal); _eventQueue.put(event); } } if (_clusterStatusMonitor == null) { _clusterStatusMonitor = new ClusterStatusMonitor(changeContext.getManager().getClusterName()); } _clusterStatusMonitor.setEnabled(!_paused); logger.info("END: GenericClusterController.onControllerChange() for cluster " + _clusterName); } #location 48 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public boolean getExistsLiveInstanceOrCurrentStateChange() { return _existsLiveInstanceOrCurrentStateChange; }
#vulnerable code public boolean getExistsLiveInstanceOrCurrentStateChange() { boolean change = _existsLiveInstanceOrCurrentStateChange; _existsLiveInstanceOrCurrentStateChange = false; return change; } #location 3 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void process(ClusterEvent event) throws Exception { ClusterDataCache cache = event.getAttribute("ClusterDataCache"); Map<String, ResourceGroup> resourceGroupMap = event .getAttribute(AttributeName.RESOURCE_GROUPS.toString()); MessageGenerationOutput messageGenOutput = event .getAttribute(AttributeName.MESSAGES_ALL.toString()); if (cache == null || resourceGroupMap == null || messageGenOutput == null) { throw new StageException("Missing attributes in event:" + event + ". Requires DataCache|RESOURCE_GROUPS|MESSAGES_ALL"); } MessageSelectionStageOutput output = new MessageSelectionStageOutput(); for (String resourceGroupName : resourceGroupMap.keySet()) { ResourceGroup resourceGroup = resourceGroupMap.get(resourceGroupName); StateModelDefinition stateModelDef = cache.getStateModelDef(resourceGroup.getStateModelDefRef()); for (ResourceKey resource : resourceGroup.getResourceKeys()) { List<Message> messages = messageGenOutput.getMessages( resourceGroupName, resource); List<Message> selectedMessages = selectMessages(messages, stateModelDef); output.addMessages(resourceGroupName, resource, selectedMessages); } } event.addAttribute(AttributeName.MESSAGES_SELECTED.toString(), output); }
#vulnerable code @Override public void process(ClusterEvent event) throws Exception { // ClusterManager manager = event.getAttribute("clustermanager"); // if (manager == null) // { // throw new StageException("ClusterManager attribute value is null"); // } ClusterDataCache cache = event.getAttribute("ClusterDataCache"); Map<String, ResourceGroup> resourceGroupMap = event .getAttribute(AttributeName.RESOURCE_GROUPS.toString()); MessageGenerationOutput messageGenOutput = event .getAttribute(AttributeName.MESSAGES_ALL.toString()); MessageSelectionStageOutput output = new MessageSelectionStageOutput(); for (String resourceGroupName : resourceGroupMap.keySet()) { ResourceGroup resourceGroup = resourceGroupMap.get(resourceGroupName); StateModelDefinition stateModelDef = cache.getStateModelDef(resourceGroup.getStateModelDefRef()); for (ResourceKey resource : resourceGroup.getResourceKeys()) { List<Message> messages = messageGenOutput.getMessages( resourceGroupName, resource); List<Message> selectedMessages = selectMessages(messages, stateModelDef); output.addMessages(resourceGroupName, resource, selectedMessages); } } event.addAttribute(AttributeName.MESSAGES_SELECTED.toString(), output); } #location 19 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private boolean tryUpdateController(ClusterManager manager) { try { String instanceName = manager.getInstanceName(); String clusterName = manager.getClusterName(); final ZNRecord leaderRecord = new ZNRecord(ControllerPropertyType.LEADER.toString()); leaderRecord.setSimpleField(ControllerPropertyType.LEADER.toString(), manager.getInstanceName()); ClusterDataAccessor dataAccessor = manager.getDataAccessor(); ZNRecord currentleader = dataAccessor .getControllerProperty(ControllerPropertyType.LEADER); if (currentleader == null) { dataAccessor.createControllerProperty(ControllerPropertyType.LEADER, leaderRecord, CreateMode.EPHEMERAL); // set controller history ZNRecord histRecord = dataAccessor .getControllerProperty(ControllerPropertyType.HISTORY, "HISTORY"); List<String> list = histRecord.getListField(clusterName); list.add(instanceName); dataAccessor.setControllerProperty(ControllerPropertyType.HISTORY, histRecord, CreateMode.PERSISTENT); return true; } else { logger.info("Leader exists for cluster:" + clusterName + " currentLeader:" + currentleader.getId()); } } catch (ZkNodeExistsException e) { logger.warn("Ignorable exception. Found that leader already exists, " + e.getMessage()); } return false; }
#vulnerable code private boolean tryUpdateController(ClusterManager manager) { try { String instanceName = manager.getInstanceName(); String clusterName = manager.getClusterName(); final ZNRecord leaderRecord = new ZNRecord(ControllerPropertyType.LEADER.toString()); leaderRecord.setSimpleField(ControllerPropertyType.LEADER.toString(), manager.getInstanceName()); ClusterDataAccessor dataAccessor = manager.getDataAccessor(); ZNRecord currentleader = dataAccessor .getControllerProperty(ControllerPropertyType.LEADER); if (currentleader == null) { dataAccessor.createControllerProperty(ControllerPropertyType.LEADER, leaderRecord, CreateMode.EPHEMERAL); // set controller history ZNRecord histRecord = dataAccessor .getControllerProperty(ControllerPropertyType.HISTORY); List<String> list = histRecord.getListField(clusterName); list.add(instanceName); dataAccessor.setControllerProperty(ControllerPropertyType.HISTORY, histRecord, CreateMode.PERSISTENT); return true; } else { logger.info("Leader exists for cluster:" + clusterName + " currentLeader:" + currentleader.getId()); } } catch (ZkNodeExistsException e) { logger.warn("Ignorable exception. Found that leader already exists, " + e.getMessage()); } return false; } #location 21 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private Map<String, Resource> computeResourceBestPossibleStateWithWagedRebalancer( ResourceControllerDataProvider cache, CurrentStateOutput currentStateOutput, HelixManager helixManager, Map<String, Resource> resourceMap, BestPossibleStateOutput output, List<String> failureResources) { if (cache.isMaintenanceModeEnabled()) { // The WAGED rebalancer won't be used while maintenance mode is enabled. return Collections.emptyMap(); } // Find the compatible resources: 1. FULL_AUTO 2. Configured to use the WAGED rebalancer Map<String, Resource> wagedRebalancedResourceMap = resourceMap.entrySet().stream().filter(resourceEntry -> { IdealState is = cache.getIdealState(resourceEntry.getKey()); return is != null && is.getRebalanceMode().equals(IdealState.RebalanceMode.FULL_AUTO) && WagedRebalancer.class.getName().equals(is.getRebalancerClassName()); }).collect(Collectors.toMap(resourceEntry -> resourceEntry.getKey(), resourceEntry -> resourceEntry.getValue())); Map<String, IdealState> newIdealStates = new HashMap<>(); // Init rebalancer with the rebalance preferences. Map<ClusterConfig.GlobalRebalancePreferenceKey, Integer> preferences = cache.getClusterConfig().getGlobalRebalancePreference(); // Create MetricCollector ThreadLocal if it hasn't been already initialized if (METRIC_COLLECTOR_THREAD_LOCAL.get() == null) { try { // If HelixManager is null, we just pass in null for MetricCollector so that a // non-functioning WagedRebalancerMetricCollector would be created in WagedRebalancer's // constructor. This is to handle two cases: 1. HelixManager is null for non-testing cases - // in this case, WagedRebalancer will not read/write to metadata store and just use // CurrentState-based rebalancing. 2. Tests that require instrumenting the rebalancer for // verifying whether the cluster has converged. METRIC_COLLECTOR_THREAD_LOCAL.set(helixManager == null ? 
null : new WagedRebalancerMetricCollector(helixManager.getClusterName())); } catch (JMException e) { LogUtil.logWarn(logger, _eventId, String.format( "MetricCollector instantiation failed! WagedRebalancer will not emit metrics due to JMException %s", e)); } } // Create MetricCollector ThreadLocal if it hasn't been already initialized if (WAGED_REBALANCER_THREAD_LOCAL.get() == null) { WAGED_REBALANCER_THREAD_LOCAL .set(new WagedRebalancer(helixManager, preferences, METRIC_COLLECTOR_THREAD_LOCAL.get())); } WagedRebalancer wagedRebalancer = WAGED_REBALANCER_THREAD_LOCAL.get(); try { newIdealStates.putAll(wagedRebalancer.computeNewIdealStates(cache, wagedRebalancedResourceMap, currentStateOutput)); } catch (HelixRebalanceException ex) { // Note that unlike the legacy rebalancer, the WAGED rebalance won't return partial result. // Since it calculates for all the eligible resources globally, a partial result is invalid. // TODO propagate the rebalancer failure information to updateRebalanceStatus for monitoring. LogUtil.logError(logger, _eventId, String.format( "Failed to calculate the new Ideal States using the rebalancer %s due to %s", wagedRebalancer.getClass().getSimpleName(), ex.getFailureType()), ex); } Iterator<Resource> itr = wagedRebalancedResourceMap.values().iterator(); while (itr.hasNext()) { Resource resource = itr.next(); IdealState is = newIdealStates.get(resource.getResourceName()); // Check if the WAGED rebalancer has calculated the result for this resource or not. if (is != null && checkBestPossibleStateCalculation(is)) { // The WAGED rebalancer calculates a valid result, record in the output updateBestPossibleStateOutput(output, resource, is); } else { failureResources.add(resource.getResourceName()); LogUtil.logWarn(logger, _eventId, String .format("Failed to calculate best possible states for %s.", resource.getResourceName())); } } return wagedRebalancedResourceMap; }
#vulnerable code private Map<String, Resource> computeResourceBestPossibleStateWithWagedRebalancer( ResourceControllerDataProvider cache, CurrentStateOutput currentStateOutput, HelixManager helixManager, Map<String, Resource> resourceMap, BestPossibleStateOutput output, List<String> failureResources) { if (cache.isMaintenanceModeEnabled()) { // The WAGED rebalancer won't be used while maintenance mode is enabled. return Collections.emptyMap(); } // Find the compatible resources: 1. FULL_AUTO 2. Configured to use the WAGED rebalancer Map<String, Resource> wagedRebalancedResourceMap = resourceMap.entrySet().stream().filter(resourceEntry -> { IdealState is = cache.getIdealState(resourceEntry.getKey()); return is != null && is.getRebalanceMode().equals(IdealState.RebalanceMode.FULL_AUTO) && WagedRebalancer.class.getName().equals(is.getRebalancerClassName()); }).collect(Collectors.toMap(resourceEntry -> resourceEntry.getKey(), resourceEntry -> resourceEntry.getValue())); Map<String, IdealState> newIdealStates = new HashMap<>(); // Init rebalancer with the rebalance preferences. Map<ClusterConfig.GlobalRebalancePreferenceKey, Integer> preferences = cache.getClusterConfig().getGlobalRebalancePreference(); if (METRIC_COLLECTOR_THREAD_LOCAL.get() == null) { try { // If HelixManager is null, we just pass in null for MetricCollector so that a // non-functioning WagedRebalancerMetricCollector would be created in WagedRebalancer's // constructor. This is to handle two cases: 1. HelixManager is null for non-testing cases - // in this case, WagedRebalancer will not read/write to metadata store and just use // CurrentState-based rebalancing. 2. Tests that require instrumenting the rebalancer for // verifying whether the cluster has converged. METRIC_COLLECTOR_THREAD_LOCAL.set(helixManager == null ? 
null : new WagedRebalancerMetricCollector(helixManager.getClusterName())); } catch (JMException e) { LogUtil.logWarn(logger, _eventId, String.format( "MetricCollector instantiation failed! WagedRebalancer will not emit metrics due to JMException %s", e)); } } // TODO avoid creating the rebalancer on every rebalance call for performance enhancement WagedRebalancer wagedRebalancer = new WagedRebalancer(helixManager, preferences, METRIC_COLLECTOR_THREAD_LOCAL.get()); try { newIdealStates.putAll(wagedRebalancer.computeNewIdealStates(cache, wagedRebalancedResourceMap, currentStateOutput)); } catch (HelixRebalanceException ex) { // Note that unlike the legacy rebalancer, the WAGED rebalance won't return partial result. // Since it calculates for all the eligible resources globally, a partial result is invalid. // TODO propagate the rebalancer failure information to updateRebalanceStatus for monitoring. LogUtil.logError(logger, _eventId, String.format( "Failed to calculate the new Ideal States using the rebalancer %s due to %s", wagedRebalancer.getClass().getSimpleName(), ex.getFailureType()), ex); } finally { wagedRebalancer.close(); } Iterator<Resource> itr = wagedRebalancedResourceMap.values().iterator(); while (itr.hasNext()) { Resource resource = itr.next(); IdealState is = newIdealStates.get(resource.getResourceName()); // Check if the WAGED rebalancer has calculated the result for this resource or not. if (is != null && checkBestPossibleStateCalculation(is)) { // The WAGED rebalancer calculates a valid result, record in the output updateBestPossibleStateOutput(output, resource, is); } else { failureResources.add(resource.getResourceName()); LogUtil.logWarn(logger, _eventId, String .format("Failed to calculate best possible states for %s.", resource.getResourceName())); } } return wagedRebalancedResourceMap; } #location 58 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code. Please generate the patch based on the preceding information.
#fixed code private BestPossibleStateOutput compute(ClusterEvent event, Map<String, Resource> resourceMap, CurrentStateOutput currentStateOutput) { ClusterDataCache cache = event.getAttribute(AttributeName.ClusterDataCache.name()); BestPossibleStateOutput output = new BestPossibleStateOutput(); HelixManager helixManager = event.getAttribute(AttributeName.helixmanager.name()); final List<String> failureResources = new ArrayList<>(); Iterator<Resource> itr = resourceMap.values().iterator(); while (itr.hasNext()) { Resource resource = itr.next(); if (!computeResourceBestPossibleState(event, cache, currentStateOutput, resource, output)) { failureResources.add(resource.getResourceName()); LogUtil.logWarn(logger, _eventId, "Failed to calculate best possible states for " + resource.getResourceName()); } } // Check and report if resource rebalance has failure ClusterStatusMonitor clusterStatusMonitor = event.getAttribute(AttributeName.clusterStatusMonitor.name()); updateRebalanceStatus(!failureResources.isEmpty(), helixManager, cache, clusterStatusMonitor, "Failed to calculate best possible states for " + failureResources.size() + " resources."); return output; }
#vulnerable code private BestPossibleStateOutput compute(ClusterEvent event, Map<String, Resource> resourceMap, CurrentStateOutput currentStateOutput) { ClusterDataCache cache = event.getAttribute(AttributeName.ClusterDataCache.name()); BestPossibleStateOutput output = new BestPossibleStateOutput(); PriorityQueue<ResourcePriority> resourcePriorityQueue = new PriorityQueue<>(); TaskDriver taskDriver = null; HelixManager helixManager = event.getAttribute(AttributeName.helixmanager.name()); if (helixManager != null) { taskDriver = new TaskDriver(helixManager); } for (Resource resource : resourceMap.values()) { resourcePriorityQueue.add(new ResourcePriority(resource, cache.getIdealState(resource.getResourceName()), taskDriver)); } final List<String> failureResources = new ArrayList<>(); Iterator<ResourcePriority> itr = resourcePriorityQueue.iterator(); while (itr.hasNext()) { Resource resource = itr.next().getResource(); if (!computeResourceBestPossibleState(event, cache, currentStateOutput, resource, output)) { failureResources.add(resource.getResourceName()); LogUtil.logWarn(logger, _eventId, "Failed to calculate best possible states for " + resource.getResourceName()); } } // Check and report if resource rebalance has failure if (!cache.isTaskCache()) { ClusterStatusMonitor clusterStatusMonitor = event.getAttribute(AttributeName.clusterStatusMonitor.name()); updateRebalanceStatus(!failureResources.isEmpty(), helixManager, cache, clusterStatusMonitor, "Failed to calculate best possible states for " + failureResources.size() + " resources."); } return output; } #location 28 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the preceding information.
#fixed code protected void handleNewSession() { boolean isConnected = _zkClient.waitUntilConnected(CONNECTIONTIMEOUT, TimeUnit.MILLISECONDS); while (!isConnected) { logger.error("Could NOT connect to zk server in " + CONNECTIONTIMEOUT + "ms. zkServer: " + _zkConnectString + ", expiredSessionId: " + _sessionId + ", clusterName: " + _clusterName); isConnected = _zkClient.waitUntilConnected(CONNECTIONTIMEOUT, TimeUnit.MILLISECONDS); } ZkConnection zkConnection = ((ZkConnection) _zkClient.getConnection()); synchronized (this) { _sessionId = Long.toHexString(zkConnection.getZookeeper().getSessionId()); } _baseDataAccessor.reset(); // reset all handlers so they have a chance to unsubscribe zk changes from zkclient // abandon all callback-handlers added in expired session resetHandlers(); logger.info("Handling new session, session id:" + _sessionId + ", instance:" + _instanceName + ", instanceTye: " + _instanceType + ", cluster: " + _clusterName); logger.info(zkConnection.getZookeeper()); if (!ZKUtil.isClusterSetup(_clusterName, _zkClient)) { throw new HelixException("Initial cluster structure is not set up for cluster:" + _clusterName); } // Read cluster config and see if instance can auto join the cluster boolean autoJoin = false; try { ConfigScope scope = new ConfigScopeBuilder().forCluster(getClusterName()) .build(); autoJoin = Boolean.parseBoolean(getConfigAccessor().get(scope, ALLOW_PARTICIPANT_AUTO_JOIN)); logger.info("Auto joining " + _clusterName +" is true"); } catch(Exception e) { } if (!ZKUtil.isInstanceSetup(_zkClient, _clusterName, _instanceName, _instanceType)) { if(!autoJoin) { throw new HelixException("Initial cluster structure is not set up for instance:" + _instanceName + " instanceType:" + _instanceType); } else { logger.info("Auto joining instance " + _instanceName); InstanceConfig instanceConfig = new InstanceConfig(_instanceName); String hostName = _instanceName; String port = ""; int lastPos = _instanceName.lastIndexOf("_"); if (lastPos > 0) { 
hostName = _instanceName.substring(0, lastPos); port = _instanceName.substring(lastPos + 1); } instanceConfig.setHostName(hostName); instanceConfig.setPort(port); instanceConfig.setInstanceEnabled(true); getClusterManagmentTool().addInstance(_clusterName, instanceConfig); } } if (_instanceType == InstanceType.PARTICIPANT || _instanceType == InstanceType.CONTROLLER_PARTICIPANT) { handleNewSessionAsParticipant(); } if (_instanceType == InstanceType.CONTROLLER || _instanceType == InstanceType.CONTROLLER_PARTICIPANT) { addControllerMessageListener(_messagingService.getExecutor()); MessageHandlerFactory defaultControllerMsgHandlerFactory = new DefaultControllerMessageHandlerFactory(); _messagingService.getExecutor() .registerMessageHandlerFactory(defaultControllerMsgHandlerFactory.getMessageType(), defaultControllerMsgHandlerFactory); MessageHandlerFactory defaultSchedulerMsgHandlerFactory = new DefaultSchedulerMessageHandlerFactory(this); _messagingService.getExecutor() .registerMessageHandlerFactory(defaultSchedulerMsgHandlerFactory.getMessageType(), defaultSchedulerMsgHandlerFactory); MessageHandlerFactory defaultParticipantErrorMessageHandlerFactory = new DefaultParticipantErrorMessageHandlerFactory(this); _messagingService.getExecutor() .registerMessageHandlerFactory(defaultParticipantErrorMessageHandlerFactory.getMessageType(), defaultParticipantErrorMessageHandlerFactory); if (_leaderElectionHandler != null) { _leaderElectionHandler.reset(); _leaderElectionHandler.init(); } else { _leaderElectionHandler = createCallBackHandler(new Builder(_clusterName).controller(), new DistClusterControllerElection(_zkConnectString), new EventType[] { EventType.NodeChildrenChanged, EventType.NodeDeleted, EventType.NodeCreated }, ChangeType.CONTROLLER); } } if (_instanceType == InstanceType.PARTICIPANT || _instanceType == InstanceType.CONTROLLER_PARTICIPANT || (_instanceType == InstanceType.CONTROLLER && isLeader())) { initHandlers(); } }
#vulnerable code protected void handleNewSession() { boolean isConnected = _zkClient.waitUntilConnected(CONNECTIONTIMEOUT, TimeUnit.MILLISECONDS); while (!isConnected) { logger.error("Could NOT connect to zk server in " + CONNECTIONTIMEOUT + "ms. zkServer: " + _zkConnectString + ", expiredSessionId: " + _sessionId + ", clusterName: " + _clusterName); isConnected = _zkClient.waitUntilConnected(CONNECTIONTIMEOUT, TimeUnit.MILLISECONDS); } ZkConnection zkConnection = ((ZkConnection) _zkClient.getConnection()); synchronized (this) { _sessionId = Long.toHexString(zkConnection.getZookeeper().getSessionId()); } _baseDataAccessor.reset(); // reset all handlers so they have a chance to unsubscribe zk changes from zkclient // abandon all callback-handlers added in expired session resetHandlers(); _handlers = new ArrayList<CallbackHandler>(); logger.info("Handling new session, session id:" + _sessionId + ", instance:" + _instanceName + ", instanceTye: " + _instanceType + ", cluster: " + _clusterName); logger.info(zkConnection.getZookeeper()); if (!ZKUtil.isClusterSetup(_clusterName, _zkClient)) { throw new HelixException("Initial cluster structure is not set up for cluster:" + _clusterName); } // Read cluster config and see if instance can auto join the cluster boolean autoJoin = false; try { ConfigScope scope = new ConfigScopeBuilder().forCluster(getClusterName()) .build(); autoJoin = Boolean.parseBoolean(getConfigAccessor().get(scope, ALLOW_PARTICIPANT_AUTO_JOIN)); logger.info("Auto joining " + _clusterName +" is true"); } catch(Exception e) { } if (!ZKUtil.isInstanceSetup(_zkClient, _clusterName, _instanceName, _instanceType)) { if(!autoJoin) { throw new HelixException("Initial cluster structure is not set up for instance:" + _instanceName + " instanceType:" + _instanceType); } else { logger.info("Auto joining instance " + _instanceName); InstanceConfig instanceConfig = new InstanceConfig(_instanceName); String hostName = _instanceName; String port = ""; int lastPos = 
_instanceName.lastIndexOf("_"); if (lastPos > 0) { hostName = _instanceName.substring(0, lastPos); port = _instanceName.substring(lastPos + 1); } instanceConfig.setHostName(hostName); instanceConfig.setPort(port); instanceConfig.setInstanceEnabled(true); getClusterManagmentTool().addInstance(_clusterName, instanceConfig); } } if (_instanceType == InstanceType.PARTICIPANT || _instanceType == InstanceType.CONTROLLER_PARTICIPANT) { handleNewSessionAsParticipant(); } if (_instanceType == InstanceType.CONTROLLER || _instanceType == InstanceType.CONTROLLER_PARTICIPANT) { addControllerMessageListener(_messagingService.getExecutor()); MessageHandlerFactory defaultControllerMsgHandlerFactory = new DefaultControllerMessageHandlerFactory(); _messagingService.getExecutor() .registerMessageHandlerFactory(defaultControllerMsgHandlerFactory.getMessageType(), defaultControllerMsgHandlerFactory); MessageHandlerFactory defaultSchedulerMsgHandlerFactory = new DefaultSchedulerMessageHandlerFactory(this); _messagingService.getExecutor() .registerMessageHandlerFactory(defaultSchedulerMsgHandlerFactory.getMessageType(), defaultSchedulerMsgHandlerFactory); MessageHandlerFactory defaultParticipantErrorMessageHandlerFactory = new DefaultParticipantErrorMessageHandlerFactory(this); _messagingService.getExecutor() .registerMessageHandlerFactory(defaultParticipantErrorMessageHandlerFactory.getMessageType(), defaultParticipantErrorMessageHandlerFactory); // create a new leader-election handler for a new session if (_leaderElectionHandler != null) { _leaderElectionHandler.reset(); } _leaderElectionHandler = createCallBackHandler(new Builder(_clusterName).controller(), new DistClusterControllerElection(_zkConnectString), new EventType[] { EventType.NodeChildrenChanged, EventType.NodeDeleted, EventType.NodeCreated }, ChangeType.CONTROLLER); } if (_instanceType == InstanceType.PARTICIPANT || _instanceType == InstanceType.CONTROLLER_PARTICIPANT || (_instanceType == InstanceType.CONTROLLER && 
isLeader())) { initHandlers(); } } #location 22 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code. Please generate the patch based on the preceding information.
#fixed code @Test public void testStateTransitionTimeOut() throws Exception { _setupTool.addResourceToCluster(CLUSTER_NAME, TEST_DB, _PARTITIONS, STATE_MODEL); _setupTool.getClusterManagementTool().enableResource(CLUSTER_NAME, TEST_DB, false); _setupTool.rebalanceStorageCluster(CLUSTER_NAME, TEST_DB, 3); // Set the timeout values StateTransitionTimeoutConfig stateTransitionTimeoutConfig = new StateTransitionTimeoutConfig(new ZNRecord(TEST_DB)); stateTransitionTimeoutConfig.setStateTransitionTimeout("SLAVE", "MASTER", 300); ResourceConfig resourceConfig = new ResourceConfig.Builder(TEST_DB) .setStateTransitionTimeoutConfig(stateTransitionTimeoutConfig) .setRebalanceConfig(new RebalanceConfig(new ZNRecord(TEST_DB))) .setNumPartitions(_PARTITIONS).setHelixEnabled(false).build(); _configAccessor.setResourceConfig(CLUSTER_NAME, TEST_DB, resourceConfig); setParticipants(TEST_DB); _setupTool.getClusterManagementTool().enableResource(CLUSTER_NAME, TEST_DB, true); boolean result = ClusterStateVerifier .verifyByZkCallback(new MasterNbInExtViewVerifier(ZK_ADDR, CLUSTER_NAME)); Assert.assertTrue(result); verify(TEST_DB); }
#vulnerable code @Test public void testStateTransitionTimeOut() throws Exception { Map<String, SleepStateModelFactory> factories = new HashMap<String, SleepStateModelFactory>(); IdealState idealState = _setupTool.getClusterManagementTool().getResourceIdealState(CLUSTER_NAME, TEST_DB); for (int i = 0; i < NODE_NR; i++) { String instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + i); SleepStateModelFactory factory = new SleepStateModelFactory(1000); factories.put(instanceName, factory); for (String p : idealState.getPartitionSet()) { if (idealState.getPreferenceList(p).get(0).equals(instanceName)) { factory.addPartition(p); } } _participants[i] = new MockParticipantManager(ZK_ADDR, CLUSTER_NAME, instanceName); _participants[i].getStateMachineEngine().registerStateModelFactory("MasterSlave", factory); _participants[i].syncStart(); } String controllerName = CONTROLLER_PREFIX + "_0"; _controller = new ClusterControllerManager(ZK_ADDR, CLUSTER_NAME, controllerName); _controller.syncStart(); boolean result = ClusterStateVerifier .verifyByZkCallback(new MasterNbInExtViewVerifier(ZK_ADDR, CLUSTER_NAME)); Assert.assertTrue(result); HelixDataAccessor accessor = _participants[0].getHelixDataAccessor(); Builder kb = accessor.keyBuilder(); ExternalView ev = accessor.getProperty(kb.externalView(TEST_DB)); for (String p : idealState.getPartitionSet()) { String idealMaster = idealState.getPreferenceList(p).get(0); Assert.assertTrue(ev.getStateMap(p).get(idealMaster).equals("ERROR")); TimeOutStateModel model = factories.get(idealMaster).getStateModel(TEST_DB, p); Assert.assertEquals(model._errorCallcount, 1); Assert.assertEquals(model._error.getCode(), ErrorCode.TIMEOUT); } } #location 17 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the preceding information.
#fixed code @Override public Object deserialize(byte[] bytes) throws ZkMarshallingError { if (bytes == null || bytes.length == 0) { LOG.error("ZNode is empty."); return null; } ByteArrayInputStream bais = new ByteArrayInputStream(bytes); ZNRecord record = null; String id = null; Map<String, String> simpleFields = Maps.newHashMap(); Map<String, List<String>> listFields = Maps.newHashMap(); Map<String, Map<String, String>> mapFields = Maps.newHashMap(); byte[] rawPayload = null; try { JsonFactory f = new JsonFactory(); JsonParser jp = f.createJsonParser(bais); jp.nextToken(); // will return JsonToken.START_OBJECT (verify?) while (jp.nextToken() != JsonToken.END_OBJECT) { String fieldname = jp.getCurrentName(); jp.nextToken(); // move to value, or START_OBJECT/START_ARRAY if ("id".equals(fieldname)) { // contains an object id = jp.getText(); } else if ("simpleFields".equals(fieldname)) { while (jp.nextToken() != JsonToken.END_OBJECT) { String key = jp.getCurrentName(); jp.nextToken(); // move to value simpleFields.put(key, jp.getText()); } } else if ("mapFields".equals(fieldname)) { // user.setVerified(jp.getCurrentToken() == JsonToken.VALUE_TRUE); while (jp.nextToken() != JsonToken.END_OBJECT) { String key = jp.getCurrentName(); mapFields.put(key, new TreeMap<String, String>()); jp.nextToken(); // move to value while (jp.nextToken() != JsonToken.END_OBJECT) { String mapKey = jp.getCurrentName(); jp.nextToken(); // move to value mapFields.get(key).put(mapKey, jp.getText()); } } } else if ("listFields".equals(fieldname)) { // user.setUserImage(jp.getBinaryValue()); while (jp.nextToken() != JsonToken.END_OBJECT) { String key = jp.getCurrentName(); listFields.put(key, new ArrayList<String>()); jp.nextToken(); // move to value while (jp.nextToken() != JsonToken.END_ARRAY) { listFields.get(key).add(jp.getText()); } } } else if ("rawPayload".equals(fieldname)) { rawPayload = Base64.decode(jp.getText()); } else { throw new IllegalStateException("Unrecognized field '" + 
fieldname + "'!"); } } jp.close(); // ensure resources get cleaned up timely and properly if (id == null) { throw new IllegalStateException("ZNRecord id field is required!"); } record = new ZNRecord(id); record.setSimpleFields(simpleFields); record.setListFields(listFields); record.setMapFields(mapFields); record.setRawPayload(rawPayload); } catch (Exception e) { LOG.error("Exception during deserialization of bytes: " + new String(bytes), e); } return record; }
#vulnerable code @Override public Object deserialize(byte[] bytes) throws ZkMarshallingError { if (bytes == null || bytes.length == 0) { LOG.error("ZNode is empty."); return null; } ByteArrayInputStream bais = new ByteArrayInputStream(bytes); ZNRecord record = null; try { JsonFactory f = new JsonFactory(); JsonParser jp = f.createJsonParser(bais); jp.nextToken(); // will return JsonToken.START_OBJECT (verify?) while (jp.nextToken() != JsonToken.END_OBJECT) { String fieldname = jp.getCurrentName(); jp.nextToken(); // move to value, or START_OBJECT/START_ARRAY if ("id".equals(fieldname)) { // contains an object record = new ZNRecord(jp.getText()); } else if ("simpleFields".equals(fieldname)) { while (jp.nextToken() != JsonToken.END_OBJECT) { String key = jp.getCurrentName(); jp.nextToken(); // move to value record.setSimpleField(key, jp.getText()); } } else if ("mapFields".equals(fieldname)) { // user.setVerified(jp.getCurrentToken() == JsonToken.VALUE_TRUE); while (jp.nextToken() != JsonToken.END_OBJECT) { String key = jp.getCurrentName(); record.setMapField(key, new TreeMap<String, String>()); jp.nextToken(); // move to value while (jp.nextToken() != JsonToken.END_OBJECT) { String mapKey = jp.getCurrentName(); jp.nextToken(); // move to value record.getMapField(key).put(mapKey, jp.getText()); } } } else if ("listFields".equals(fieldname)) { // user.setUserImage(jp.getBinaryValue()); while (jp.nextToken() != JsonToken.END_OBJECT) { String key = jp.getCurrentName(); record.setListField(key, new ArrayList<String>()); jp.nextToken(); // move to value while (jp.nextToken() != JsonToken.END_ARRAY) { record.getListField(key).add(jp.getText()); } } } else if ("rawPayload".equals(fieldname)) { record.setRawPayload(Base64.decode(jp.getText())); } else { throw new IllegalStateException("Unrecognized field '" + fieldname + "'!"); } } jp.close(); // ensure resources get cleaned up timely and properly } catch (Exception e) { LOG.error("Exception during deserialization of bytes: 
" + new String(bytes), e); } return record; } #location 46 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the preceding information.
#fixed code public static void main(String[] args) throws Exception { ExecutorService pool = Executors.newFixedThreadPool(MAX_PARALLEL_TASKS); Future<CMTaskResult> future; // pool.shutdown(); // pool.awaitTermination(5, TimeUnit.SECONDS); future = pool.submit(new Callable<CMTaskResult>() { @Override public CMTaskResult call() throws Exception { System.out .println("CMTaskExecutor.main(...).new Callable() {...}.call()"); return null; } }); future = pool.submit(new CMTask(null, null, null, null)); Thread.currentThread().join(); System.out.println(future.isDone()); }
#vulnerable code public static void main(String[] args) throws Exception { ExecutorService pool = Executors.newFixedThreadPool(MAX_PARALLEL_TASKS); Future<CMTaskResult> future; // pool.shutdown(); // pool.awaitTermination(5, TimeUnit.SECONDS); future = pool.submit(new Callable<CMTaskResult>() { @Override public CMTaskResult call() throws Exception { System.out .println("CMTaskExecutor.main(...).new Callable() {...}.call()"); return null; } }); future = pool.submit(new CMTaskHandler(null, null, null, null)); Thread.currentThread().join(); System.out.println(future.isDone()); } #location 19 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the preceding information.
#fixed code @Override public void handleStateChanged(Watcher.Event.KeeperState state) { if (_zkClientForRoutingDataListener == null || _zkClientForRoutingDataListener.isClosed()) { return; } // Resubscribe _zkClientForRoutingDataListener.unsubscribeAll(); _zkClientForRoutingDataListener.subscribeRoutingDataChanges(this, this); resetZkResources(); }
#vulnerable code @Override public void handleStateChanged(Watcher.Event.KeeperState state) { if (_zkClientForListener == null || _zkClientForListener.isClosed()) { return; } // Resubscribe _zkClientForListener.unsubscribeAll(); _zkClientForListener.subscribeRoutingDataChanges(this, this); resetZkResources(); } #location 8 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code. Please generate the patch based on the preceding information.
#fixed code void sendData() { ConcurrentHashMap<String, ZNRecordUpdate> updateCache = null; synchronized(_dataBufferRef) { updateCache = _dataBufferRef.getAndSet(new ConcurrentHashMap<String, ZNRecordUpdate>()); } if(updateCache != null) { List<String> paths = new ArrayList<String>(); List<DataUpdater<ZNRecord>> updaters = new ArrayList<DataUpdater<ZNRecord>>(); List<ZNRecord> vals = new ArrayList<ZNRecord>(); for(ZNRecordUpdate holder : updateCache.values()) { paths.add(holder.getPath()); updaters.add(holder.getZNRecordUpdater()); vals.add(holder.getRecord()); } // Batch write the accumulated updates into zookeeper if(paths.size() > 0) { _accessor.updateChildren(paths, updaters, BaseDataAccessor.Option.PERSISTENT); } LOG.info("Updating " + vals.size() + " records"); } else { LOG.warn("null _dataQueueRef. Should be in the beginning only"); } }
#vulnerable code void sendData() { ConcurrentHashMap<String, ZNRecordUpdate> updateCache = null; synchronized(_dataBufferRef) { updateCache = _dataBufferRef.getAndSet(new ConcurrentHashMap<String, ZNRecordUpdate>()); } if(updateCache != null) { List<String> paths = new ArrayList<String>(); List<DataUpdater<ZNRecord>> updaters = new ArrayList<DataUpdater<ZNRecord>>(); List<ZNRecord> vals = new ArrayList<ZNRecord>(); for(ZNRecordUpdate holder : updateCache.values()) { paths.add(holder.getPath()); updaters.add(holder.getZNRecordUpdater()); vals.add(holder.getRecord()); } // Batch write the accumulated updates into zookeeper if(paths.size() > 0) { _manager.getHelixDataAccessor().updateChildren(paths, updaters, BaseDataAccessor.Option.PERSISTENT); } LOG.info("Updating " + vals.size() + " records"); } else { LOG.warn("null _dataQueueRef. Should be in the beginning only"); } } #location 24 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code. Please generate the patch based on the preceding information.
#fixed code @Override public void process(ClusterEvent event) throws Exception { ClusterManager manager = event.getAttribute("clustermanager"); ClusterDataCache cache = event.getAttribute("ClusterDataCache"); Map<String, ResourceGroup> resourceGroupMap = event .getAttribute(AttributeName.RESOURCE_GROUPS.toString()); CurrentStateOutput currentStateOutput = event .getAttribute(AttributeName.CURRENT_STATE.toString()); BestPossibleStateOutput bestPossibleStateOutput = event .getAttribute(AttributeName.BEST_POSSIBLE_STATE.toString()); if (manager == null || cache == null || resourceGroupMap == null || currentStateOutput == null || bestPossibleStateOutput == null) { throw new StageException("Missing attributes in event:" + event + ". Requires ClusterManager|DataCache|RESOURCE_GROUPS|CURRENT_STATE|BEST_POSSIBLE_STATE"); } Map<String, LiveInstance> liveInstances = cache.getLiveInstances(); Map<String, String> sessionIdMap = new HashMap<String, String>(); for (LiveInstance liveInstance : liveInstances.values()) { sessionIdMap.put(liveInstance.getInstanceName(), liveInstance.getSessionId()); } MessageGenerationOutput output = new MessageGenerationOutput(); for (String resourceGroupName : resourceGroupMap.keySet()) { ResourceGroup resourceGroup = resourceGroupMap.get(resourceGroupName); StateModelDefinition stateModelDef = cache.getStateModelDef(resourceGroup.getStateModelDefRef()); for (ResourceKey resource : resourceGroup.getResourceKeys()) { Map<String, String> instanceStateMap = bestPossibleStateOutput .getInstanceStateMap(resourceGroupName, resource); for (String instanceName : instanceStateMap.keySet()) { String desiredState = instanceStateMap.get(instanceName); String currentState = currentStateOutput.getCurrentState( resourceGroupName, resource, instanceName); if (currentState == null) { currentState = stateModelDef.getInitialState(); } String pendingState = currentStateOutput.getPendingState( resourceGroupName, resource, instanceName); String nextState; nextState = 
stateModelDef.getNextStateForTransition(currentState, desiredState); if (!desiredState.equalsIgnoreCase(currentState)) { if (nextState != null) { if (pendingState != null && nextState.equalsIgnoreCase(pendingState)) { if (logger.isDebugEnabled()) { logger.debug("Message already exists at" + instanceName + " to transition"+ resource.getResourceKeyName() +" from " + currentState + " to " + nextState ); } } else { Message message = createMessage(manager,resourceGroupName, resource.getResourceKeyName(), instanceName, currentState, nextState, sessionIdMap.get(instanceName), stateModelDef.getId()); output.addMessage(resourceGroupName, resource, message); } } else { logger .warn("Unable to find a next state from stateModelDefinition" + stateModelDef.getClass() + " from:" + currentState + " to:" + desiredState); } } } } } event.addAttribute(AttributeName.MESSAGES_ALL.toString(), output); }
#vulnerable code @Override public void process(ClusterEvent event) throws Exception { ClusterManager manager = event.getAttribute("clustermanager"); if (manager == null) { throw new StageException("ClusterManager attribute value is null"); } ClusterDataCache cache = event.getAttribute("ClusterDataCache"); Map<String, ResourceGroup> resourceGroupMap = event .getAttribute(AttributeName.RESOURCE_GROUPS.toString()); CurrentStateOutput currentStateOutput = event .getAttribute(AttributeName.CURRENT_STATE.toString()); BestPossibleStateOutput bestPossibleStateOutput = event .getAttribute(AttributeName.BEST_POSSIBLE_STATE.toString()); Map<String, LiveInstance> liveInstances = cache.getLiveInstances(); Map<String, String> sessionIdMap = new HashMap<String, String>(); for (LiveInstance liveInstance : liveInstances.values()) { sessionIdMap.put(liveInstance.getInstanceName(), liveInstance.getSessionId()); } MessageGenerationOutput output = new MessageGenerationOutput(); for (String resourceGroupName : resourceGroupMap.keySet()) { ResourceGroup resourceGroup = resourceGroupMap.get(resourceGroupName); StateModelDefinition stateModelDef = cache.getStateModelDef(resourceGroup.getStateModelDefRef()); for (ResourceKey resource : resourceGroup.getResourceKeys()) { Map<String, String> instanceStateMap = bestPossibleStateOutput .getInstanceStateMap(resourceGroupName, resource); for (String instanceName : instanceStateMap.keySet()) { String desiredState = instanceStateMap.get(instanceName); String currentState = currentStateOutput.getCurrentState( resourceGroupName, resource, instanceName); if (currentState == null) { currentState = stateModelDef.getInitialState(); } String pendingState = currentStateOutput.getPendingState( resourceGroupName, resource, instanceName); String nextState; nextState = stateModelDef.getNextStateForTransition(currentState, desiredState); if (!desiredState.equalsIgnoreCase(currentState)) { if (nextState != null) { if (pendingState != null && 
nextState.equalsIgnoreCase(pendingState)) { if (logger.isDebugEnabled()) { logger.debug("Message already exists at" + instanceName + " to transition"+ resource.getResourceKeyName() +" from " + currentState + " to " + nextState ); } } else { Message message = createMessage(manager,resourceGroupName, resource.getResourceKeyName(), instanceName, currentState, nextState, sessionIdMap.get(instanceName), stateModelDef.getId()); output.addMessage(resourceGroupName, resource, message); } } else { logger .warn("Unable to find a next state from stateModelDefinition" + stateModelDef.getClass() + " from:" + currentState + " to:" + desiredState); } } } } } event.addAttribute(AttributeName.MESSAGES_ALL.toString(), output); } #location 30 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the preceding information.
#fixed code private BestPossibleStateOutput compute(ClusterEvent event, Map<String, Resource> resourceMap, CurrentStateOutput currentStateOutput) { ClusterDataCache cache = event.getAttribute(AttributeName.ClusterDataCache.name()); Map<String, Resource> restOfResources = new HashMap<>(resourceMap); BestPossibleStateOutput output = new BestPossibleStateOutput(); final List<String> failureResources = new ArrayList<>(); // Queues only for Workflows scheduleWorkflows(resourceMap, cache, restOfResources, failureResources); // Current rest of resources including: jobs + only current state left over ones Iterator<Resource> itr = restOfResources.values().iterator(); while (itr.hasNext()) { Resource resource = itr.next(); if (!computeResourceBestPossibleState(event, cache, currentStateOutput, resource, output)) { failureResources.add(resource.getResourceName()); LogUtil.logWarn(logger, _eventId, "Failed to calculate best possible states for " + resource.getResourceName()); } } return output; }
#vulnerable code private BestPossibleStateOutput compute(ClusterEvent event, Map<String, Resource> resourceMap, CurrentStateOutput currentStateOutput) { ClusterDataCache cache = event.getAttribute(AttributeName.ClusterDataCache.name()); BestPossibleStateOutput output = new BestPossibleStateOutput(); PriorityQueue<TaskSchedulingStage.ResourcePriority> resourcePriorityQueue = new PriorityQueue<>(); TaskDriver taskDriver = null; HelixManager helixManager = event.getAttribute(AttributeName.helixmanager.name()); if (helixManager != null) { taskDriver = new TaskDriver(helixManager); } for (Resource resource : resourceMap.values()) { resourcePriorityQueue.add(new TaskSchedulingStage.ResourcePriority(resource, cache.getIdealState(resource.getResourceName()), taskDriver)); } // TODO: Replace this looping available resources with Workflow Queues for (Iterator<TaskSchedulingStage.ResourcePriority> itr = resourcePriorityQueue.iterator(); itr.hasNext(); ) { Resource resource = itr.next().getResource(); if (!computeResourceBestPossibleState(event, cache, currentStateOutput, resource, output)) { LogUtil .logWarn(logger, _eventId, "Failed to assign tasks for " + resource.getResourceName()); } } return output; } #location 15 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Override public int send(final Criteria recipientCriteria, final Message message, AsyncCallback callbackOnReply) { Map<InstanceType, List<Message>> generateMessage = generateMessage( recipientCriteria, message); int totalMessageCount = 0; String correlationId = null; if (callbackOnReply != null) { correlationId = UUID.randomUUID().toString(); for (List<Message> messages : generateMessage.values()) { totalMessageCount += messages.size(); callbackOnReply.setMessagesSent(messages); } _asyncCallbackService.registerAsyncCallback(correlationId, callbackOnReply); } for (InstanceType receiverType : generateMessage.keySet()) { List<Message> list = generateMessage.get(receiverType); for (Message tempMessage : list) { if(correlationId != null) { tempMessage.setCorrelationId(correlationId); } if (receiverType == InstanceType.CONTROLLER) { _manager.getDataAccessor().setControllerProperty(ControllerPropertyType.MESSAGES, tempMessage.getRecord(), CreateMode.PERSISTENT); } if (receiverType == InstanceType.PARTICIPANT) { _manager.getDataAccessor().setInstanceProperty(message.getTgtName(), InstancePropertyType.MESSAGES, tempMessage.getId(), tempMessage.getRecord()); } } } return totalMessageCount; }
#vulnerable code @Override public int send(final Criteria recipientCriteria, final Message message, AsyncCallback callbackOnReply) { Map<InstanceType, List<Message>> generateMessage = generateMessage( recipientCriteria, message); int totalMessageCount = 0; String correlationId = null; if (callbackOnReply != null) { correlationId = UUID.randomUUID().toString(); for (List<Message> messages : generateMessage.values()) { totalMessageCount += messages.size(); callbackOnReply.setMessagesSent(messages); } _asyncCallbackService.registerAsyncCallback(correlationId, callbackOnReply); } for (InstanceType receiverType : generateMessage.keySet()) { List<Message> list = generateMessage.get(receiverType); for (Message tempMessage : list) { tempMessage.setId(UUID.randomUUID().toString()); if(correlationId != null) { tempMessage.setCorrelationId(correlationId); } if (receiverType == InstanceType.CONTROLLER) { _dataAccessor.setControllerProperty(ControllerPropertyType.MESSAGES, tempMessage.getRecord(), CreateMode.PERSISTENT); } if (receiverType == InstanceType.PARTICIPANT) { _dataAccessor.setInstanceProperty(message.getTgtName(), InstancePropertyType.MESSAGES, tempMessage.getId(), tempMessage.getRecord()); } } } return totalMessageCount; } #location 39 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Test public void testCreateFailZkCacheBaseDataAccessor() { String className = TestHelper.getTestClassName(); String methodName = TestHelper.getTestMethodName(); String clusterName = className + "_" + methodName; System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis())); // init zkCacheDataAccessor String curStatePath = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901"); ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<ZNRecord>(_gZkClient); ZkCacheBaseDataAccessor<ZNRecord> accessor = new ZkCacheBaseDataAccessor<ZNRecord>(baseAccessor, null, Arrays.asList(curStatePath), null); // create 10 current states for (int i = 0; i < 10; i++) { String path = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901", "session_1", "TestDB" + i); boolean success = accessor.create(path, new ZNRecord("TestDB" + i), AccessOption.PERSISTENT); Assert.assertTrue(success, "Should succeed in create: " + path); } // create same 10 current states again, should fail for (int i = 0; i < 10; i++) { String path = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901", "session_1", "TestDB" + i); boolean success = accessor.create(path, new ZNRecord("TestDB" + i), AccessOption.PERSISTENT); Assert.assertFalse(success, "Should fail in create due to NodeExists: " + path); } System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis())); }
#vulnerable code @Test public void testCreateFailZkCacheBaseDataAccessor() { String className = TestHelper.getTestClassName(); String methodName = TestHelper.getTestMethodName(); String clusterName = className + "_" + methodName; System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis())); // init zkCacheDataAccessor String curStatePath = PropertyPathBuilder.getPath(PropertyType.CURRENTSTATES, clusterName, "localhost_8901"); ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<ZNRecord>(_gZkClient); ZkCacheBaseDataAccessor<ZNRecord> accessor = new ZkCacheBaseDataAccessor<ZNRecord>(baseAccessor, null, Arrays.asList(curStatePath), null); // create 10 current states for (int i = 0; i < 10; i++) { String path = PropertyPathBuilder .getPath(PropertyType.CURRENTSTATES, clusterName, "localhost_8901", "session_1", "TestDB" + i); boolean success = accessor.create(path, new ZNRecord("TestDB" + i), AccessOption.PERSISTENT); Assert.assertTrue(success, "Should succeed in create: " + path); } // create same 10 current states again, should fail for (int i = 0; i < 10; i++) { String path = PropertyPathBuilder .getPath(PropertyType.CURRENTSTATES, clusterName, "localhost_8901", "session_1", "TestDB" + i); boolean success = accessor.create(path, new ZNRecord("TestDB" + i), AccessOption.PERSISTENT); Assert.assertFalse(success, "Should fail in create due to NodeExists: " + path); } System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis())); } #location 22 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Override public void process(ClusterEvent event) throws Exception { LOG.info("START PersistAssignmentStage.process()"); long startTime = System.currentTimeMillis(); ClusterDataCache cache = event.getAttribute("ClusterDataCache"); ClusterConfig clusterConfig = cache.getClusterConfig(); if (clusterConfig.isPersistBestPossibleAssignment()) { HelixManager helixManager = event.getAttribute("helixmanager"); HelixDataAccessor accessor = helixManager.getHelixDataAccessor(); PropertyKey.Builder keyBuilder = accessor.keyBuilder(); BestPossibleStateOutput bestPossibleAssignments = event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.toString()); Map<String, Resource> resourceMap = event.getAttribute(AttributeName.RESOURCES.toString()); for (String resourceId : bestPossibleAssignments.resourceSet()) { Resource resource = resourceMap.get(resourceId); if (resource != null) { boolean changed = false; Map<Partition, Map<String, String>> bestPossibleAssignment = bestPossibleAssignments.getResourceMap(resourceId); IdealState idealState = cache.getIdealState(resourceId); if (idealState == null) { LOG.warn("IdealState not found for resource " + resourceId); continue; } IdealState.RebalanceMode mode = idealState.getRebalanceMode(); if (!mode.equals(IdealState.RebalanceMode.SEMI_AUTO) && !mode .equals(IdealState.RebalanceMode.FULL_AUTO)) { // do not persist assignment for resource in neither semi or full auto. continue; } //TODO: temporary solution for Espresso/Dbus backcompatible, should remove this. 
Map<Partition, Map<String, String>> assignmentToPersist = convertAssignmentPersisted(resource, idealState, bestPossibleAssignment); for (Partition partition : resource.getPartitions()) { Map<String, String> instanceMap = assignmentToPersist.get(partition); Map<String, String> existInstanceMap = idealState.getInstanceStateMap(partition.getPartitionName()); if (instanceMap == null && existInstanceMap == null) { continue; } if (instanceMap == null || existInstanceMap == null || !instanceMap .equals(existInstanceMap)) { changed = true; break; } } if (changed) { for (Partition partition : assignmentToPersist.keySet()) { Map<String, String> instanceMap = assignmentToPersist.get(partition); idealState.setInstanceStateMap(partition.getPartitionName(), instanceMap); } accessor.setProperty(keyBuilder.idealStates(resourceId), idealState); } } } } long endTime = System.currentTimeMillis(); LOG.info("END PersistAssignmentStage.process(), took " + (endTime - startTime) + " ms"); }
#vulnerable code @Override public void process(ClusterEvent event) throws Exception { LOG.info("START PersistAssignmentStage.process()"); long startTime = System.currentTimeMillis(); ClusterDataCache cache = event.getAttribute("ClusterDataCache"); ClusterConfig clusterConfig = cache.getClusterConfig(); if (clusterConfig.isPersistBestPossibleAssignment()) { HelixManager helixManager = event.getAttribute("helixmanager"); HelixDataAccessor accessor = helixManager.getHelixDataAccessor(); PropertyKey.Builder keyBuilder = accessor.keyBuilder(); BestPossibleStateOutput assignments = event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.toString()); Map<String, Resource> resourceMap = event.getAttribute(AttributeName.RESOURCES.toString()); for (String resourceId : assignments.resourceSet()) { Resource resource = resourceMap.get(resourceId); if (resource != null) { boolean changed = false; Map<Partition, Map<String, String>> assignment = assignments.getResourceMap(resourceId); IdealState idealState = cache.getIdealState(resourceId); if (idealState == null) { LOG.warn("IdealState not found for resource " + resourceId); continue; } IdealState.RebalanceMode mode = idealState.getRebalanceMode(); if (!mode.equals(IdealState.RebalanceMode.SEMI_AUTO) && !mode .equals(IdealState.RebalanceMode.FULL_AUTO)) { // do not persist assignment for resource in neither semi or full auto. 
continue; } for (Partition partition : resource.getPartitions()) { Map<String, String> instanceMap = assignment.get(partition); Map<String, String> existInstanceMap = idealState.getInstanceStateMap(partition.getPartitionName()); if (instanceMap == null && existInstanceMap == null) { continue; } if (instanceMap == null || existInstanceMap == null || !instanceMap .equals(existInstanceMap)) { changed = true; break; } } if (changed) { for (Partition partition : assignment.keySet()) { Map<String, String> instanceMap = assignment.get(partition); idealState.setInstanceStateMap(partition.getPartitionName(), instanceMap); } accessor.setProperty(keyBuilder.idealStates(resourceId), idealState); } } } } long endTime = System.currentTimeMillis(); LOG.info("END PersistAssignmentStage.process(), took " + (endTime - startTime) + " ms"); } #location 16 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Test public void testZkClientMonitor() throws Exception { final String TEST_TAG = "test_monitor"; final String TEST_KEY = "test_key"; final String TEST_DATA = "testData"; final String TEST_ROOT = "/my_cluster/IDEALSTATES"; final String TEST_NODE = "/test_zkclient_monitor"; final String TEST_PATH = TEST_ROOT + TEST_NODE; ZkClient.Builder builder = new ZkClient.Builder(); builder.setZkServer(ZK_ADDR).setMonitorKey(TEST_KEY).setMonitorType(TEST_TAG) .setMonitorRootPathOnly(false); ZkClient zkClient = builder.build(); final long TEST_DATA_SIZE = zkClient.serialize(TEST_DATA, TEST_PATH).length; if (_zkClient.exists(TEST_PATH)) { _zkClient.delete(TEST_PATH); } if (!_zkClient.exists(TEST_ROOT)) { _zkClient.createPersistent(TEST_ROOT, true); } MBeanServer beanServer = ManagementFactory.getPlatformMBeanServer(); ObjectName name = MBeanRegistrar .buildObjectName(MonitorDomainNames.HelixZkClient.name(), ZkClientMonitor.MONITOR_TYPE, TEST_TAG, ZkClientMonitor.MONITOR_KEY, TEST_KEY); ObjectName rootname = MBeanRegistrar .buildObjectName(MonitorDomainNames.HelixZkClient.name(), ZkClientMonitor.MONITOR_TYPE, TEST_TAG, ZkClientMonitor.MONITOR_KEY, TEST_KEY, ZkClientPathMonitor.MONITOR_PATH, "Root"); ObjectName idealStatename = MBeanRegistrar .buildObjectName(MonitorDomainNames.HelixZkClient.name(), ZkClientMonitor.MONITOR_TYPE, TEST_TAG, ZkClientMonitor.MONITOR_KEY, TEST_KEY, ZkClientPathMonitor.MONITOR_PATH, "IdealStates"); Assert.assertTrue(beanServer.isRegistered(rootname)); Assert.assertTrue(beanServer.isRegistered(idealStatename)); // Test exists Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadTotalLatencyCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadLatencyGauge.Max"), 0); zkClient.exists(TEST_ROOT); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 1); Assert.assertTrue((long) 
beanServer.getAttribute(rootname, "ReadTotalLatencyCounter") >= 0); Assert.assertTrue((long) beanServer.getAttribute(rootname, "ReadLatencyGauge.Max") >= 0); // Test create Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteBytesCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteBytesCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteTotalLatencyCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteLatencyGauge.Max"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteTotalLatencyCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteLatencyGauge.Max"), 0); zkClient.create(TEST_PATH, TEST_DATA, CreateMode.PERSISTENT); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteCounter"), 1); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteBytesCounter"), TEST_DATA_SIZE); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteCounter"), 1); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteBytesCounter"), TEST_DATA_SIZE); long origWriteTotalLatencyCounter = (long) beanServer.getAttribute(rootname, "WriteTotalLatencyCounter"); Assert.assertTrue(origWriteTotalLatencyCounter >= 0); Assert.assertTrue((long) beanServer.getAttribute(rootname, "WriteLatencyGauge.Max") >= 0); long origIdealStatesWriteTotalLatencyCounter = (long) beanServer.getAttribute(idealStatename, "WriteTotalLatencyCounter"); Assert.assertTrue(origIdealStatesWriteTotalLatencyCounter >= 0); Assert.assertTrue((long) beanServer.getAttribute(idealStatename, "WriteLatencyGauge.Max") >= 0); // Test read Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 1); Assert.assertEquals((long) 
beanServer.getAttribute(rootname, "ReadBytesCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadBytesCounter"), 0); long origReadTotalLatencyCounter = (long) beanServer.getAttribute(rootname, "ReadTotalLatencyCounter"); long origIdealStatesReadTotalLatencyCounter = (long) beanServer.getAttribute(idealStatename, "ReadTotalLatencyCounter"); Assert.assertEquals(origIdealStatesReadTotalLatencyCounter, 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadLatencyGauge.Max"), 0); zkClient.readData(TEST_PATH, new Stat()); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 2); Assert .assertEquals((long) beanServer.getAttribute(rootname, "ReadBytesCounter"), TEST_DATA_SIZE); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadCounter"), 1); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadBytesCounter"), TEST_DATA_SIZE); Assert.assertTrue((long) beanServer.getAttribute(rootname, "ReadTotalLatencyCounter") >= origReadTotalLatencyCounter); Assert.assertTrue((long) beanServer.getAttribute(idealStatename, "ReadTotalLatencyCounter") >= origIdealStatesReadTotalLatencyCounter); Assert.assertTrue((long) beanServer.getAttribute(idealStatename, "ReadLatencyGauge.Max") >= 0); zkClient.getChildren(TEST_PATH); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 3); Assert .assertEquals((long) beanServer.getAttribute(rootname, "ReadBytesCounter"), TEST_DATA_SIZE); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadCounter"), 2); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadBytesCounter"), TEST_DATA_SIZE); zkClient.getStat(TEST_PATH); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 4); Assert .assertEquals((long) beanServer.getAttribute(rootname, "ReadBytesCounter"), 
TEST_DATA_SIZE); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadCounter"), 3); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadBytesCounter"), TEST_DATA_SIZE); zkClient.readDataAndStat(TEST_PATH, new Stat(), true); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 5); ZkAsyncCallbacks.ExistsCallbackHandler callbackHandler = new ZkAsyncCallbacks.ExistsCallbackHandler(); zkClient.asyncExists(TEST_PATH, callbackHandler); callbackHandler.waitForSuccess(); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 6); // Test write zkClient.writeData(TEST_PATH, TEST_DATA); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteCounter"), 2); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteBytesCounter"), TEST_DATA_SIZE * 2); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteCounter"), 2); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteBytesCounter"), TEST_DATA_SIZE * 2); Assert.assertTrue((long) beanServer.getAttribute(rootname, "WriteTotalLatencyCounter") >= origWriteTotalLatencyCounter); Assert.assertTrue((long) beanServer.getAttribute(idealStatename, "WriteTotalLatencyCounter") >= origIdealStatesWriteTotalLatencyCounter); // Test data change count final Lock lock = new ReentrantLock(); final Condition callbackFinish = lock.newCondition(); zkClient.subscribeDataChanges(TEST_PATH, new IZkDataListener() { @Override public void handleDataChange(String dataPath, Object data) throws Exception { } @Override public void handleDataDeleted(String dataPath) throws Exception { lock.lock(); try { callbackFinish.signal(); } finally { lock.unlock(); } } }); lock.lock(); _zkClient.delete(TEST_PATH); Assert.assertTrue(callbackFinish.await(10, TimeUnit.SECONDS)); Assert.assertEquals((long) beanServer.getAttribute(name, "DataChangeEventCounter"), 1); }
#vulnerable code @Test public void testZkClientMonitor() throws Exception { final String TEST_TAG = "test_monitor"; final String TEST_KEY = "test_key"; final String TEST_DATA = "testData"; final String TEST_ROOT = "/my_cluster/IDEALSTATES"; final String TEST_NODE = "/test_zkclient_monitor"; final String TEST_PATH = TEST_ROOT + TEST_NODE; ZkClient zkClient = new ZkClient(ZK_ADDR, TEST_TAG, TEST_KEY); final long TEST_DATA_SIZE = zkClient.serialize(TEST_DATA, TEST_PATH).length; if (_zkClient.exists(TEST_PATH)) { _zkClient.delete(TEST_PATH); } if (!_zkClient.exists(TEST_ROOT)) { _zkClient.createPersistent(TEST_ROOT, true); } MBeanServer beanServer = ManagementFactory.getPlatformMBeanServer(); ObjectName name = MBeanRegistrar .buildObjectName(MonitorDomainNames.HelixZkClient.name(), ZkClientMonitor.MONITOR_TYPE, TEST_TAG, ZkClientMonitor.MONITOR_KEY, TEST_KEY); ObjectName rootname = MBeanRegistrar .buildObjectName(MonitorDomainNames.HelixZkClient.name(), ZkClientMonitor.MONITOR_TYPE, TEST_TAG, ZkClientMonitor.MONITOR_KEY, TEST_KEY, ZkClientPathMonitor.MONITOR_PATH, "Root"); ObjectName idealStatename = MBeanRegistrar .buildObjectName(MonitorDomainNames.HelixZkClient.name(), ZkClientMonitor.MONITOR_TYPE, TEST_TAG, ZkClientMonitor.MONITOR_KEY, TEST_KEY, ZkClientPathMonitor.MONITOR_PATH, "IdealStates"); Assert.assertTrue(beanServer.isRegistered(rootname)); Assert.assertTrue(beanServer.isRegistered(idealStatename)); // Test exists Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadTotalLatencyCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadMaxLatencyGauge"), 0); zkClient.exists(TEST_ROOT); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 1); Assert.assertTrue((long) beanServer.getAttribute(rootname, "ReadTotalLatencyCounter") >= 0); Assert.assertTrue((long) beanServer.getAttribute(rootname, "ReadMaxLatencyGauge") >= 0); 
// Test create Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteBytesCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteBytesCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteTotalLatencyCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteMaxLatencyGauge"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteTotalLatencyCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteMaxLatencyGauge"), 0); zkClient.create(TEST_PATH, TEST_DATA, CreateMode.PERSISTENT); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteCounter"), 1); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteBytesCounter"), TEST_DATA_SIZE); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteCounter"), 1); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteBytesCounter"), TEST_DATA_SIZE); long origWriteTotalLatencyCounter = (long) beanServer.getAttribute(rootname, "WriteTotalLatencyCounter"); Assert.assertTrue(origWriteTotalLatencyCounter >= 0); Assert.assertTrue((long) beanServer.getAttribute(rootname, "WriteMaxLatencyGauge") >= 0); long origIdealStatesWriteTotalLatencyCounter = (long) beanServer.getAttribute(idealStatename, "WriteTotalLatencyCounter"); Assert.assertTrue(origIdealStatesWriteTotalLatencyCounter >= 0); Assert.assertTrue((long) beanServer.getAttribute(idealStatename, "WriteMaxLatencyGauge") >= 0); // Test read Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 1); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadBytesCounter"), 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadCounter"), 0); 
Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadBytesCounter"), 0); long origReadTotalLatencyCounter = (long) beanServer.getAttribute(rootname, "ReadTotalLatencyCounter"); long origIdealStatesReadTotalLatencyCounter = (long) beanServer.getAttribute(idealStatename, "ReadTotalLatencyCounter"); Assert.assertEquals(origIdealStatesReadTotalLatencyCounter, 0); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadMaxLatencyGauge"), 0); zkClient.readData(TEST_PATH, new Stat()); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 2); Assert .assertEquals((long) beanServer.getAttribute(rootname, "ReadBytesCounter"), TEST_DATA_SIZE); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadCounter"), 1); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadBytesCounter"), TEST_DATA_SIZE); Assert.assertTrue((long) beanServer.getAttribute(rootname, "ReadTotalLatencyCounter") >= origReadTotalLatencyCounter); Assert.assertTrue((long) beanServer.getAttribute(idealStatename, "ReadTotalLatencyCounter") >= origIdealStatesReadTotalLatencyCounter); Assert.assertTrue((long) beanServer.getAttribute(idealStatename, "ReadMaxLatencyGauge") >= 0); zkClient.getChildren(TEST_PATH); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 3); Assert .assertEquals((long) beanServer.getAttribute(rootname, "ReadBytesCounter"), TEST_DATA_SIZE); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadCounter"), 2); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadBytesCounter"), TEST_DATA_SIZE); zkClient.getStat(TEST_PATH); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 4); Assert .assertEquals((long) beanServer.getAttribute(rootname, "ReadBytesCounter"), TEST_DATA_SIZE); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "ReadCounter"), 3); Assert.assertEquals((long) 
beanServer.getAttribute(idealStatename, "ReadBytesCounter"), TEST_DATA_SIZE); zkClient.readDataAndStat(TEST_PATH, new Stat(), true); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 5); ZkAsyncCallbacks.ExistsCallbackHandler callbackHandler = new ZkAsyncCallbacks.ExistsCallbackHandler(); zkClient.asyncExists(TEST_PATH, callbackHandler); callbackHandler.waitForSuccess(); Assert.assertEquals((long) beanServer.getAttribute(rootname, "ReadCounter"), 6); // Test write zkClient.writeData(TEST_PATH, TEST_DATA); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteCounter"), 2); Assert.assertEquals((long) beanServer.getAttribute(rootname, "WriteBytesCounter"), TEST_DATA_SIZE * 2); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteCounter"), 2); Assert.assertEquals((long) beanServer.getAttribute(idealStatename, "WriteBytesCounter"), TEST_DATA_SIZE * 2); Assert.assertTrue((long) beanServer.getAttribute(rootname, "WriteTotalLatencyCounter") >= origWriteTotalLatencyCounter); Assert.assertTrue((long) beanServer.getAttribute(idealStatename, "WriteTotalLatencyCounter") >= origIdealStatesWriteTotalLatencyCounter); // Test data change count final Lock lock = new ReentrantLock(); final Condition callbackFinish = lock.newCondition(); zkClient.subscribeDataChanges(TEST_PATH, new IZkDataListener() { @Override public void handleDataChange(String dataPath, Object data) throws Exception { } @Override public void handleDataDeleted(String dataPath) throws Exception { lock.lock(); try { callbackFinish.signal(); } finally { lock.unlock(); } } }); lock.lock(); _zkClient.delete(TEST_PATH); Assert.assertTrue(callbackFinish.await(10, TimeUnit.SECONDS)); Assert.assertEquals((long) beanServer.getAttribute(name, "DataChangeEventCounter"), 1); } #location 102 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Override public void close() { if (_zkClient != null) { _zkClient.close(); } }
#vulnerable code @Override public void close() { if (_zkclient != null) { _zkclient.close(); } } #location 4 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code public void setState(String partitionName, String state) { setProperty(partitionName, CurrentStateProperty.CURRENT_STATE, state); }
#vulnerable code public void setState(String partitionName, String state) { Map<String, Map<String, String>> mapFields = _record.getMapFields(); if (mapFields.get(partitionName) == null) { mapFields.put(partitionName, new TreeMap<String, String>()); } mapFields.get(partitionName).put(CurrentStateProperty.CURRENT_STATE.toString(), state); } #location 6 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Override public void handleSessionEstablishmentError(Throwable error) { if (_zkClientForRoutingDataListener == null || _zkClientForRoutingDataListener.isClosed()) { return; } // Resubscribe _zkClientForRoutingDataListener.unsubscribeAll(); _zkClientForRoutingDataListener.subscribeRoutingDataChanges(this, this); resetZkResources(); }
#vulnerable code @Override public void handleSessionEstablishmentError(Throwable error) { if (_zkClientForListener == null || _zkClientForListener.isClosed()) { return; } // Resubscribe _zkClientForListener.unsubscribeAll(); _zkClientForListener.subscribeRoutingDataChanges(this, this); resetZkResources(); } #location 8 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Override public Object deserialize(byte[] bytes) throws ZkMarshallingError { if (bytes == null || bytes.length == 0) { LOG.error("ZNode is empty."); return null; } ByteArrayInputStream bais = new ByteArrayInputStream(bytes); ZNRecord record = null; String id = null; Map<String, String> simpleFields = Maps.newHashMap(); Map<String, List<String>> listFields = Maps.newHashMap(); Map<String, Map<String, String>> mapFields = Maps.newHashMap(); byte[] rawPayload = null; try { JsonFactory f = new JsonFactory(); JsonParser jp = f.createJsonParser(bais); jp.nextToken(); // will return JsonToken.START_OBJECT (verify?) while (jp.nextToken() != JsonToken.END_OBJECT) { String fieldname = jp.getCurrentName(); jp.nextToken(); // move to value, or START_OBJECT/START_ARRAY if ("id".equals(fieldname)) { // contains an object id = jp.getText(); } else if ("simpleFields".equals(fieldname)) { while (jp.nextToken() != JsonToken.END_OBJECT) { String key = jp.getCurrentName(); jp.nextToken(); // move to value simpleFields.put(key, jp.getText()); } } else if ("mapFields".equals(fieldname)) { // user.setVerified(jp.getCurrentToken() == JsonToken.VALUE_TRUE); while (jp.nextToken() != JsonToken.END_OBJECT) { String key = jp.getCurrentName(); mapFields.put(key, new TreeMap<String, String>()); jp.nextToken(); // move to value while (jp.nextToken() != JsonToken.END_OBJECT) { String mapKey = jp.getCurrentName(); jp.nextToken(); // move to value mapFields.get(key).put(mapKey, jp.getText()); } } } else if ("listFields".equals(fieldname)) { // user.setUserImage(jp.getBinaryValue()); while (jp.nextToken() != JsonToken.END_OBJECT) { String key = jp.getCurrentName(); listFields.put(key, new ArrayList<String>()); jp.nextToken(); // move to value while (jp.nextToken() != JsonToken.END_ARRAY) { listFields.get(key).add(jp.getText()); } } } else if ("rawPayload".equals(fieldname)) { rawPayload = Base64.decode(jp.getText()); } else { throw new IllegalStateException("Unrecognized field '" + 
fieldname + "'!"); } } jp.close(); // ensure resources get cleaned up timely and properly if (id == null) { throw new IllegalStateException("ZNRecord id field is required!"); } record = new ZNRecord(id); record.setSimpleFields(simpleFields); record.setListFields(listFields); record.setMapFields(mapFields); record.setRawPayload(rawPayload); } catch (Exception e) { LOG.error("Exception during deserialization of bytes: " + new String(bytes), e); } return record; }
#vulnerable code @Override public Object deserialize(byte[] bytes) throws ZkMarshallingError { if (bytes == null || bytes.length == 0) { LOG.error("ZNode is empty."); return null; } ByteArrayInputStream bais = new ByteArrayInputStream(bytes); ZNRecord record = null; try { JsonFactory f = new JsonFactory(); JsonParser jp = f.createJsonParser(bais); jp.nextToken(); // will return JsonToken.START_OBJECT (verify?) while (jp.nextToken() != JsonToken.END_OBJECT) { String fieldname = jp.getCurrentName(); jp.nextToken(); // move to value, or START_OBJECT/START_ARRAY if ("id".equals(fieldname)) { // contains an object record = new ZNRecord(jp.getText()); } else if ("simpleFields".equals(fieldname)) { while (jp.nextToken() != JsonToken.END_OBJECT) { String key = jp.getCurrentName(); jp.nextToken(); // move to value record.setSimpleField(key, jp.getText()); } } else if ("mapFields".equals(fieldname)) { // user.setVerified(jp.getCurrentToken() == JsonToken.VALUE_TRUE); while (jp.nextToken() != JsonToken.END_OBJECT) { String key = jp.getCurrentName(); record.setMapField(key, new TreeMap<String, String>()); jp.nextToken(); // move to value while (jp.nextToken() != JsonToken.END_OBJECT) { String mapKey = jp.getCurrentName(); jp.nextToken(); // move to value record.getMapField(key).put(mapKey, jp.getText()); } } } else if ("listFields".equals(fieldname)) { // user.setUserImage(jp.getBinaryValue()); while (jp.nextToken() != JsonToken.END_OBJECT) { String key = jp.getCurrentName(); record.setListField(key, new ArrayList<String>()); jp.nextToken(); // move to value while (jp.nextToken() != JsonToken.END_ARRAY) { record.getListField(key).add(jp.getText()); } } } else if ("rawPayload".equals(fieldname)) { record.setRawPayload(Base64.decode(jp.getText())); } else { throw new IllegalStateException("Unrecognized field '" + fieldname + "'!"); } } jp.close(); // ensure resources get cleaned up timely and properly } catch (Exception e) { LOG.error("Exception during deserialization of bytes: 
" + new String(bytes), e); } return record; } #location 26 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void process(ClusterEvent event) throws Exception { LOG.info("START PersistAssignmentStage.process()"); long startTime = System.currentTimeMillis(); ClusterDataCache cache = event.getAttribute("ClusterDataCache"); ClusterConfig clusterConfig = cache.getClusterConfig(); if (!clusterConfig.isPersistBestPossibleAssignment()) { return; } BestPossibleStateOutput bestPossibleAssignment = event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.name()); HelixManager helixManager = event.getAttribute("helixmanager"); HelixDataAccessor accessor = helixManager.getHelixDataAccessor(); PropertyKey.Builder keyBuilder = accessor.keyBuilder(); Map<String, Resource> resourceMap = event.getAttribute(AttributeName.RESOURCES.name()); for (String resourceId : bestPossibleAssignment.resourceSet()) { Resource resource = resourceMap.get(resourceId); if (resource != null) { final IdealState idealState = cache.getIdealState(resourceId); if (idealState == null) { LOG.warn("IdealState not found for resource " + resourceId); continue; } IdealState.RebalanceMode mode = idealState.getRebalanceMode(); if (!mode.equals(IdealState.RebalanceMode.SEMI_AUTO) && !mode .equals(IdealState.RebalanceMode.FULL_AUTO)) { // do not persist assignment for resource in neither semi or full auto. continue; } boolean needPersist = false; if (mode.equals(IdealState.RebalanceMode.FULL_AUTO)) { // persist preference list in ful-auto mode. 
Map<String, List<String>> newLists = bestPossibleAssignment.getPreferenceLists(resourceId); if (newLists != null && hasPreferenceListChanged(newLists, idealState)) { idealState.setPreferenceLists(newLists); needPersist = true; } } Map<Partition, Map<String, String>> bestPossibleAssignements = bestPossibleAssignment.getResourceMap(resourceId); if (bestPossibleAssignements != null && hasInstanceMapChanged(bestPossibleAssignements, idealState)) { for (Partition partition : bestPossibleAssignements.keySet()) { Map<String, String> instanceMap = bestPossibleAssignements.get(partition); idealState.setInstanceStateMap(partition.getPartitionName(), instanceMap); } needPersist = true; } if (needPersist) { accessor.setProperty(keyBuilder.idealStates(resourceId), idealState); } } } long endTime = System.currentTimeMillis(); LOG.info("END PersistAssignmentStage.process(), took " + (endTime - startTime) + " ms"); }
#vulnerable code @Override public void process(ClusterEvent event) throws Exception { LOG.info("START PersistAssignmentStage.process()"); long startTime = System.currentTimeMillis(); ClusterDataCache cache = event.getAttribute("ClusterDataCache"); ClusterConfig clusterConfig = cache.getClusterConfig(); if (clusterConfig.isPersistBestPossibleAssignment()) { HelixManager helixManager = event.getAttribute("helixmanager"); HelixDataAccessor accessor = helixManager.getHelixDataAccessor(); PropertyKey.Builder keyBuilder = accessor.keyBuilder(); BestPossibleStateOutput bestPossibleAssignments = event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.toString()); Map<String, Resource> resourceMap = event.getAttribute(AttributeName.RESOURCES.toString()); for (String resourceId : bestPossibleAssignments.resourceSet()) { Resource resource = resourceMap.get(resourceId); if (resource != null) { boolean changed = false; Map<Partition, Map<String, String>> bestPossibleAssignment = bestPossibleAssignments.getResourceMap(resourceId); IdealState idealState = cache.getIdealState(resourceId); if (idealState == null) { LOG.warn("IdealState not found for resource " + resourceId); continue; } IdealState.RebalanceMode mode = idealState.getRebalanceMode(); if (!mode.equals(IdealState.RebalanceMode.SEMI_AUTO) && !mode .equals(IdealState.RebalanceMode.FULL_AUTO)) { // do not persist assignment for resource in neither semi or full auto. continue; } //TODO: temporary solution for Espresso/Dbus backcompatible, should remove this. 
Map<Partition, Map<String, String>> assignmentToPersist = convertAssignmentPersisted(resource, idealState, bestPossibleAssignment); for (Partition partition : resource.getPartitions()) { Map<String, String> instanceMap = assignmentToPersist.get(partition); Map<String, String> existInstanceMap = idealState.getInstanceStateMap(partition.getPartitionName()); if (instanceMap == null && existInstanceMap == null) { continue; } if (instanceMap == null || existInstanceMap == null || !instanceMap .equals(existInstanceMap)) { changed = true; break; } } if (changed) { for (Partition partition : assignmentToPersist.keySet()) { Map<String, String> instanceMap = assignmentToPersist.get(partition); idealState.setInstanceStateMap(partition.getPartitionName(), instanceMap); } accessor.setProperty(keyBuilder.idealStates(resourceId), idealState); } } } } long endTime = System.currentTimeMillis(); LOG.info("END PersistAssignmentStage.process(), took " + (endTime - startTime) + " ms"); } #location 16 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public void close() { if (_zkClient != null) { _zkClient.close(); } if (_zkMetadataStoreDirectory != null) { _zkMetadataStoreDirectory.close(); } if (_zkClientForRoutingDataListener != null) { _zkClientForRoutingDataListener.close(); } }
#vulnerable code public void close() { if (_zkClient != null) { _zkClient.close(); } if (_zkMetadataStoreDirectory != null) { _zkMetadataStoreDirectory.close(); } if (_zkClientForListener != null) { _zkClientForListener.close(); } } #location 9 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void process(ClusterEvent event) throws Exception { long startTime = System.currentTimeMillis(); LOG.info("START ExternalViewComputeStage.process()"); HelixManager manager = event.getAttribute(AttributeName.helixmanager.name()); Map<String, Resource> resourceMap = event.getAttribute(AttributeName.RESOURCES.name()); ClusterDataCache cache = event.getAttribute(AttributeName.ClusterDataCache.name()); if (manager == null || resourceMap == null || cache == null) { throw new StageException("Missing attributes in event:" + event + ". Requires ClusterManager|RESOURCES|DataCache"); } HelixDataAccessor dataAccessor = manager.getHelixDataAccessor(); PropertyKey.Builder keyBuilder = dataAccessor.keyBuilder(); CurrentStateOutput currentStateOutput = event.getAttribute(AttributeName.CURRENT_STATE.name()); List<ExternalView> newExtViews = new ArrayList<ExternalView>(); Map<String, ExternalView> curExtViews = dataAccessor.getChildValuesMap(keyBuilder.externalViews()); for (String resourceName : resourceMap.keySet()) { ExternalView view = new ExternalView(resourceName); // view.setBucketSize(currentStateOutput.getBucketSize(resourceName)); // if resource ideal state has bucket size, set it // otherwise resource has been dropped, use bucket size from current state instead Resource resource = resourceMap.get(resourceName); if (resource.getBucketSize() > 0) { view.setBucketSize(resource.getBucketSize()); } else { view.setBucketSize(currentStateOutput.getBucketSize(resourceName)); } for (Partition partition : resource.getPartitions()) { Map<String, String> currentStateMap = currentStateOutput.getCurrentStateMap(resourceName, partition); if (currentStateMap != null && currentStateMap.size() > 0) { // Set<String> disabledInstances // = cache.getDisabledInstancesForResource(resource.toString()); for (String instance : currentStateMap.keySet()) { // if (!disabledInstances.contains(instance)) // { view.setState(partition.getPartitionName(), instance, 
currentStateMap.get(instance)); // } } } } // Update cluster status monitor mbean IdealState idealState = cache.getIdealState(resourceName); if (!cache.isTaskCache()) { ClusterStatusMonitor clusterStatusMonitor = event.getAttribute(AttributeName.clusterStatusMonitor.name()); ResourceConfig resourceConfig = cache.getResourceConfig(resourceName); if (idealState != null && (resourceConfig == null || !resourceConfig .isMonitoringDisabled())) { if (clusterStatusMonitor != null && !idealState.getStateModelDefRef() .equalsIgnoreCase(DefaultSchedulerMessageHandlerFactory.SCHEDULER_TASK_QUEUE)) { StateModelDefinition stateModelDef = cache.getStateModelDef(idealState.getStateModelDefRef()); clusterStatusMonitor .setResourceStatus(view, cache.getIdealState(view.getResourceName()), stateModelDef); } } else { // Drop the metrics if the resource is dropped, or the MonitorDisabled is changed to true. clusterStatusMonitor.unregisterResource(view.getResourceName()); } } ExternalView curExtView = curExtViews.get(resourceName); // copy simplefields from IS, in cases where IS is deleted copy it from existing ExternalView if (idealState != null) { view.getRecord().getSimpleFields().putAll(idealState.getRecord().getSimpleFields()); } else if (curExtView != null) { view.getRecord().getSimpleFields().putAll(curExtView.getRecord().getSimpleFields()); } // compare the new external view with current one, set only on different if (curExtView == null || !curExtView.getRecord().equals(view.getRecord())) { // Add external view to the list which will be written to ZK later. 
newExtViews.add(view); // For SCHEDULER_TASK_RESOURCE resource group (helix task queue), we need to find out which // task partitions are finished (COMPLETED or ERROR), update the status update of the original // scheduler message, and then remove the partitions from the ideal state if (idealState != null && idealState.getStateModelDefRef().equalsIgnoreCase( DefaultSchedulerMessageHandlerFactory.SCHEDULER_TASK_QUEUE)) { updateScheduledTaskStatus(view, manager, idealState); } } } // TODO: consider not setting the externalview of SCHEDULER_TASK_QUEUE at all. // Are there any entity that will be interested in its change? // For the resource with DisableExternalView option turned on in IdealState // We will not actually create or write the externalView to ZooKeeper. List<PropertyKey> keys = new ArrayList<PropertyKey>(); for(Iterator<ExternalView> it = newExtViews.iterator(); it.hasNext(); ) { ExternalView view = it.next(); String resourceName = view.getResourceName(); IdealState idealState = cache.getIdealState(resourceName); if (idealState != null && idealState.isExternalViewDisabled()) { it.remove(); // remove the external view if the external view exists if (curExtViews.containsKey(resourceName)) { LOG.info("Remove externalView for resource: " + resourceName); dataAccessor.removeProperty(keyBuilder.externalView(resourceName)); } } else { keys.add(keyBuilder.externalView(resourceName)); } } // add/update external-views if (newExtViews.size() > 0) { dataAccessor.setChildren(keys, newExtViews); } // remove dead external-views for (String resourceName : curExtViews.keySet()) { if (!resourceMap.keySet().contains(resourceName)) { LOG.info("Remove externalView for resource: " + resourceName); dataAccessor.removeProperty(keyBuilder.externalView(resourceName)); } } long endTime = System.currentTimeMillis(); LOG.info("END " + GenericHelixController.getPipelineType(cache.isTaskCache()) + " ExternalViewComputeStage.process() for cluster " + cache.getClusterName() + ". 
took: " + (endTime - startTime) + " ms"); }
#vulnerable code @Override public void process(ClusterEvent event) throws Exception { long startTime = System.currentTimeMillis(); LOG.info("START ExternalViewComputeStage.process()"); HelixManager manager = event.getAttribute(AttributeName.helixmanager.name()); Map<String, Resource> resourceMap = event.getAttribute(AttributeName.RESOURCES.name()); ClusterDataCache cache = event.getAttribute(AttributeName.ClusterDataCache.name()); if (manager == null || resourceMap == null || cache == null) { throw new StageException("Missing attributes in event:" + event + ". Requires ClusterManager|RESOURCES|DataCache"); } HelixDataAccessor dataAccessor = manager.getHelixDataAccessor(); PropertyKey.Builder keyBuilder = dataAccessor.keyBuilder(); CurrentStateOutput currentStateOutput = event.getAttribute(AttributeName.CURRENT_STATE.name()); List<ExternalView> newExtViews = new ArrayList<ExternalView>(); Map<String, ExternalView> curExtViews = dataAccessor.getChildValuesMap(keyBuilder.externalViews()); for (String resourceName : resourceMap.keySet()) { ExternalView view = new ExternalView(resourceName); // view.setBucketSize(currentStateOutput.getBucketSize(resourceName)); // if resource ideal state has bucket size, set it // otherwise resource has been dropped, use bucket size from current state instead Resource resource = resourceMap.get(resourceName); if (resource.getBucketSize() > 0) { view.setBucketSize(resource.getBucketSize()); } else { view.setBucketSize(currentStateOutput.getBucketSize(resourceName)); } for (Partition partition : resource.getPartitions()) { Map<String, String> currentStateMap = currentStateOutput.getCurrentStateMap(resourceName, partition); if (currentStateMap != null && currentStateMap.size() > 0) { // Set<String> disabledInstances // = cache.getDisabledInstancesForResource(resource.toString()); for (String instance : currentStateMap.keySet()) { // if (!disabledInstances.contains(instance)) // { view.setState(partition.getPartitionName(), instance, 
currentStateMap.get(instance)); // } } } } // Update cluster status monitor mbean IdealState idealState = cache.getIdealState(resourceName); if (!cache.isTaskCache()) { ClusterStatusMonitor clusterStatusMonitor = event.getAttribute(AttributeName.clusterStatusMonitor.name()); ResourceConfig resourceConfig = cache.getResourceConfig(resourceName); if (idealState != null && (resourceConfig == null || !resourceConfig .isMonitoringDisabled())) { if (clusterStatusMonitor != null && !idealState.getStateModelDefRef() .equalsIgnoreCase(DefaultSchedulerMessageHandlerFactory.SCHEDULER_TASK_QUEUE)) { StateModelDefinition stateModelDef = cache.getStateModelDef(idealState.getStateModelDefRef()); clusterStatusMonitor .setResourceStatus(view, cache.getIdealState(view.getResourceName()), stateModelDef); } } else { // Drop the metrics if the resource is dropped, or the MonitorDisabled is changed to true. clusterStatusMonitor.unregisterResource(view.getResourceName()); } } ExternalView curExtView = curExtViews.get(resourceName); // copy simplefields from IS, in cases where IS is deleted copy it from existing ExternalView if (idealState != null) { view.getRecord().getSimpleFields().putAll(idealState.getRecord().getSimpleFields()); } else if (curExtView != null) { view.getRecord().getSimpleFields().putAll(curExtView.getRecord().getSimpleFields()); } // compare the new external view with current one, set only on different if (curExtView == null || !curExtView.getRecord().equals(view.getRecord())) { // Add external view to the list which will be written to ZK later. 
newExtViews.add(view); // For SCHEDULER_TASK_RESOURCE resource group (helix task queue), we need to find out which // task partitions are finished (COMPLETED or ERROR), update the status update of the original // scheduler message, and then remove the partitions from the ideal state if (idealState != null && idealState.getStateModelDefRef().equalsIgnoreCase( DefaultSchedulerMessageHandlerFactory.SCHEDULER_TASK_QUEUE)) { updateScheduledTaskStatus(view, manager, idealState); } } } // TODO: consider not setting the externalview of SCHEDULER_TASK_QUEUE at all. // Are there any entity that will be interested in its change? // For the resource with DisableExternalView option turned on in IdealState // We will not actually create or write the externalView to ZooKeeper. List<PropertyKey> keys = new ArrayList<PropertyKey>(); for(Iterator<ExternalView> it = newExtViews.iterator(); it.hasNext(); ) { ExternalView view = it.next(); String resourceName = view.getResourceName(); IdealState idealState = cache.getIdealState(resourceName); if (idealState != null && idealState.isExternalViewDisabled()) { it.remove(); // remove the external view if the external view exists if (curExtViews.containsKey(resourceName)) { LOG.info("Remove externalView for resource: " + resourceName); dataAccessor.removeProperty(keyBuilder.externalView(resourceName)); } } else { keys.add(keyBuilder.externalView(resourceName)); } } // add/update external-views if (newExtViews.size() > 0) { dataAccessor.setChildren(keys, newExtViews); } // remove dead external-views for (String resourceName : curExtViews.keySet()) { if (!resourceMap.keySet().contains(resourceName)) { LOG.info("Remove externalView for resource: " + resourceName); dataAccessor.removeProperty(keyBuilder.externalView(resourceName)); } } long endTime = System.currentTimeMillis(); LOG.info("END ExternalViewComputeStage.process() for cluster " + cache.getClusterName() + ". 
took: " + (endTime - startTime) + " ms"); } #location 35 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public static int numberOfListeners(String zkAddr, String path) throws Exception { Map<String, Set<String>> listenerMap = getListenersByZkPath(zkAddr); if (listenerMap.containsKey(path)) { return listenerMap.get(path).size(); } return 0; }
#vulnerable code public static int numberOfListeners(String zkAddr, String path) throws Exception { int count = 0; String splits[] = zkAddr.split(":"); Socket sock = new Socket(splits[0], Integer.parseInt(splits[1])); PrintWriter out = new PrintWriter(sock.getOutputStream(), true); BufferedReader in = new BufferedReader(new InputStreamReader(sock.getInputStream())); out.println("wchp"); String line = in.readLine(); while (line != null) { // System.out.println(line); if (line.equals(path)) { // System.out.println("match: " + line); String nextLine = in.readLine(); if (nextLine == null) { break; } // System.out.println(nextLine); while (nextLine.startsWith("\t0x")) { count++; nextLine = in.readLine(); if (nextLine == null) { break; } } } line = in.readLine(); } sock.close(); return count; } #location 38 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void process(ClusterEvent event) throws Exception { ClusterManager manager = event.getAttribute("clustermanager"); if (manager == null) { throw new StageException("ClusterManager attribute value is null"); } log.info("START ExternalViewComputeStage.process()"); ClusterDataAccessor dataAccessor = manager.getDataAccessor(); Map<String, ResourceGroup> resourceGroupMap = event .getAttribute(AttributeName.RESOURCE_GROUPS.toString()); if (resourceGroupMap == null) { throw new StageException("ResourceGroupMap attribute value is null"); } CurrentStateOutput currentStateOutput = event .getAttribute(AttributeName.CURRENT_STATE.toString()); for (String resourceGroupName : resourceGroupMap.keySet()) { ZNRecord viewRecord = new ZNRecord(); viewRecord.setId(resourceGroupName); ExternalView view = new ExternalView(viewRecord); ResourceGroup resourceGroup = resourceGroupMap.get(resourceGroupName); for (ResourceKey resource : resourceGroup.getResourceKeys()) { Map<String, String> currentStateMap = currentStateOutput .getCurrentStateMap(resourceGroupName, resource); if (currentStateMap != null && currentStateMap.size() > 0) { view.setStateMap(resource.getResourceKeyName(), currentStateMap); } } dataAccessor.setClusterProperty(ClusterPropertyType.EXTERNALVIEW, resourceGroupName, view.getRecord()); } log.info("START ExternalViewComputeStage.process()"); }
#vulnerable code @Override public void process(ClusterEvent event) throws Exception { ClusterManager manager = event.getAttribute("clustermanager"); if (manager == null) { throw new StageException("ClusterManager attribute value is null"); } log.info("START ExternalViewComputeStage.process()"); ClusterDataAccessor dataAccessor = manager.getDataAccessor(); Map<String, ResourceGroup> resourceGroupMap = event .getAttribute(AttributeName.RESOURCE_GROUPS.toString()); CurrentStateOutput currentStateOutput = event .getAttribute(AttributeName.CURRENT_STATE.toString()); for (String resourceGroupName : resourceGroupMap.keySet()) { ZNRecord viewRecord = new ZNRecord(); viewRecord.setId(resourceGroupName); ExternalView view = new ExternalView(viewRecord); ResourceGroup resourceGroup = resourceGroupMap.get(resourceGroupName); for (ResourceKey resource : resourceGroup.getResourceKeys()) { Map<String, String> currentStateMap = currentStateOutput .getCurrentStateMap(resourceGroupName, resource); if (currentStateMap != null && currentStateMap.size() > 0) { view.setStateMap(resource.getResourceKeyName(), currentStateMap); } } dataAccessor.setClusterProperty(ClusterPropertyType.EXTERNALVIEW, resourceGroupName, view.getRecord()); } log.info("START ExternalViewComputeStage.process()"); } #location 15 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void handleDataChange(String dataPath, Object data) { if (_zkClientForRoutingDataListener == null || _zkClientForRoutingDataListener.isClosed()) { return; } resetZkResources(); }
#vulnerable code @Override public void handleDataChange(String dataPath, Object data) { if (_zkClientForListener == null || _zkClientForListener.isClosed()) { return; } resetZkResources(); } #location 3 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code protected void handleNewSession() { boolean isConnected = _zkClient.waitUntilConnected(CONNECTIONTIMEOUT, TimeUnit.MILLISECONDS); while (!isConnected) { logger.error("Could NOT connect to zk server in " + CONNECTIONTIMEOUT + "ms. zkServer: " + _zkConnectString + ", expiredSessionId: " + _sessionId + ", clusterName: " + _clusterName); isConnected = _zkClient.waitUntilConnected(CONNECTIONTIMEOUT, TimeUnit.MILLISECONDS); } ZkConnection zkConnection = ((ZkConnection) _zkClient.getConnection()); synchronized (this) { _sessionId = Long.toHexString(zkConnection.getZookeeper().getSessionId()); } _baseDataAccessor.reset(); // reset all handlers so they have a chance to unsubscribe zk changes from zkclient // abandon all callback-handlers added in expired session resetHandlers(); _handlers = new ArrayList<CallbackHandler>(); logger.info("Handling new session, session id:" + _sessionId + ", instance:" + _instanceName + ", instanceTye: " + _instanceType + ", cluster: " + _clusterName); logger.info(zkConnection.getZookeeper()); if (!ZKUtil.isClusterSetup(_clusterName, _zkClient)) { throw new HelixException("Initial cluster structure is not set up for cluster:" + _clusterName); } // Read cluster config and see if instance can auto join the cluster boolean autoJoin = false; try { ConfigScope scope = new ConfigScopeBuilder().forCluster(getClusterName()) .build(); autoJoin = Boolean.parseBoolean(getConfigAccessor().get(scope, ALLOW_PARTICIPANT_AUTO_JOIN)); logger.info("Auto joining " + _clusterName +" is true"); } catch(Exception e) { } if (!ZKUtil.isInstanceSetup(_zkClient, _clusterName, _instanceName, _instanceType)) { if(!autoJoin) { throw new HelixException("Initial cluster structure is not set up for instance:" + _instanceName + " instanceType:" + _instanceType); } else { logger.info("Auto joining instance " + _instanceName); InstanceConfig instanceConfig = new InstanceConfig(_instanceName); String hostName = _instanceName; String port = ""; int lastPos = 
_instanceName.lastIndexOf("_"); if (lastPos > 0) { hostName = _instanceName.substring(0, lastPos); port = _instanceName.substring(lastPos + 1); } instanceConfig.setHostName(hostName); instanceConfig.setPort(port); instanceConfig.setInstanceEnabled(true); getClusterManagmentTool().addInstance(_clusterName, instanceConfig); } } if (_instanceType == InstanceType.PARTICIPANT || _instanceType == InstanceType.CONTROLLER_PARTICIPANT) { handleNewSessionAsParticipant(); } if (_instanceType == InstanceType.CONTROLLER || _instanceType == InstanceType.CONTROLLER_PARTICIPANT) { addControllerMessageListener(_messagingService.getExecutor()); MessageHandlerFactory defaultControllerMsgHandlerFactory = new DefaultControllerMessageHandlerFactory(); _messagingService.getExecutor() .registerMessageHandlerFactory(defaultControllerMsgHandlerFactory.getMessageType(), defaultControllerMsgHandlerFactory); MessageHandlerFactory defaultSchedulerMsgHandlerFactory = new DefaultSchedulerMessageHandlerFactory(this); _messagingService.getExecutor() .registerMessageHandlerFactory(defaultSchedulerMsgHandlerFactory.getMessageType(), defaultSchedulerMsgHandlerFactory); MessageHandlerFactory defaultParticipantErrorMessageHandlerFactory = new DefaultParticipantErrorMessageHandlerFactory(this); _messagingService.getExecutor() .registerMessageHandlerFactory(defaultParticipantErrorMessageHandlerFactory.getMessageType(), defaultParticipantErrorMessageHandlerFactory); // create a new leader-election handler for a new session if (_leaderElectionHandler != null) { _leaderElectionHandler.reset(); } _leaderElectionHandler = createCallBackHandler(new Builder(_clusterName).controller(), new DistClusterControllerElection(_zkConnectString), new EventType[] { EventType.NodeChildrenChanged, EventType.NodeDeleted, EventType.NodeCreated }, ChangeType.CONTROLLER); } if (_instanceType == InstanceType.PARTICIPANT || _instanceType == InstanceType.CONTROLLER_PARTICIPANT || (_instanceType == InstanceType.CONTROLLER && 
isLeader())) { initHandlers(); } }
#vulnerable code protected void handleNewSession() { boolean isConnected = _zkClient.waitUntilConnected(CONNECTIONTIMEOUT, TimeUnit.MILLISECONDS); while (!isConnected) { logger.error("Could NOT connect to zk server in " + CONNECTIONTIMEOUT + "ms. zkServer: " + _zkConnectString + ", expiredSessionId: " + _sessionId + ", clusterName: " + _clusterName); isConnected = _zkClient.waitUntilConnected(CONNECTIONTIMEOUT, TimeUnit.MILLISECONDS); } ZkConnection zkConnection = ((ZkConnection) _zkClient.getConnection()); synchronized (this) { _sessionId = Long.toHexString(zkConnection.getZookeeper().getSessionId()); } _baseDataAccessor.reset(); // reset all handlers so they have a chance to unsubscribe zk changes from zkclient // abandon all callback-handlers added in expired session resetHandlers(); _handlers = new ArrayList<CallbackHandler>(); logger.info("Handling new session, session id:" + _sessionId + ", instance:" + _instanceName + ", instanceTye: " + _instanceType + ", cluster: " + _clusterName); logger.info(zkConnection.getZookeeper()); if (!ZKUtil.isClusterSetup(_clusterName, _zkClient)) { throw new HelixException("Initial cluster structure is not set up for cluster:" + _clusterName); } if (!ZKUtil.isInstanceSetup(_zkClient, _clusterName, _instanceName, _instanceType)) { throw new HelixException("Initial cluster structure is not set up for instance:" + _instanceName + " instanceType:" + _instanceType); } if (_instanceType == InstanceType.PARTICIPANT || _instanceType == InstanceType.CONTROLLER_PARTICIPANT) { handleNewSessionAsParticipant(); } if (_instanceType == InstanceType.CONTROLLER || _instanceType == InstanceType.CONTROLLER_PARTICIPANT) { addControllerMessageListener(_messagingService.getExecutor()); MessageHandlerFactory defaultControllerMsgHandlerFactory = new DefaultControllerMessageHandlerFactory(); _messagingService.getExecutor() .registerMessageHandlerFactory(defaultControllerMsgHandlerFactory.getMessageType(), defaultControllerMsgHandlerFactory); 
MessageHandlerFactory defaultSchedulerMsgHandlerFactory = new DefaultSchedulerMessageHandlerFactory(this); _messagingService.getExecutor() .registerMessageHandlerFactory(defaultSchedulerMsgHandlerFactory.getMessageType(), defaultSchedulerMsgHandlerFactory); MessageHandlerFactory defaultParticipantErrorMessageHandlerFactory = new DefaultParticipantErrorMessageHandlerFactory(this); _messagingService.getExecutor() .registerMessageHandlerFactory(defaultParticipantErrorMessageHandlerFactory.getMessageType(), defaultParticipantErrorMessageHandlerFactory); // create a new leader-election handler for a new session if (_leaderElectionHandler != null) { _leaderElectionHandler.reset(); } _leaderElectionHandler = createCallBackHandler(new Builder(_clusterName).controller(), new DistClusterControllerElection(_zkConnectString), new EventType[] { EventType.NodeChildrenChanged, EventType.NodeDeleted, EventType.NodeCreated }, ChangeType.CONTROLLER); } if (_instanceType == InstanceType.PARTICIPANT || _instanceType == InstanceType.CONTROLLER_PARTICIPANT || (_instanceType == InstanceType.CONTROLLER && isLeader())) { initHandlers(); } } #location 45 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public Object deserialize(byte[] bytes) throws ZkMarshallingError { if (bytes == null || bytes.length == 0) { LOG.error("ZNode is empty."); return null; } ByteArrayInputStream bais = new ByteArrayInputStream(bytes); ZNRecord record = null; String id = null; Map<String, String> simpleFields = Maps.newHashMap(); Map<String, List<String>> listFields = Maps.newHashMap(); Map<String, Map<String, String>> mapFields = Maps.newHashMap(); byte[] rawPayload = null; try { JsonFactory f = new JsonFactory(); JsonParser jp = f.createJsonParser(bais); jp.nextToken(); // will return JsonToken.START_OBJECT (verify?) while (jp.nextToken() != JsonToken.END_OBJECT) { String fieldname = jp.getCurrentName(); jp.nextToken(); // move to value, or START_OBJECT/START_ARRAY if ("id".equals(fieldname)) { // contains an object id = jp.getText(); } else if ("simpleFields".equals(fieldname)) { while (jp.nextToken() != JsonToken.END_OBJECT) { String key = jp.getCurrentName(); jp.nextToken(); // move to value simpleFields.put(key, jp.getText()); } } else if ("mapFields".equals(fieldname)) { // user.setVerified(jp.getCurrentToken() == JsonToken.VALUE_TRUE); while (jp.nextToken() != JsonToken.END_OBJECT) { String key = jp.getCurrentName(); mapFields.put(key, new TreeMap<String, String>()); jp.nextToken(); // move to value while (jp.nextToken() != JsonToken.END_OBJECT) { String mapKey = jp.getCurrentName(); jp.nextToken(); // move to value mapFields.get(key).put(mapKey, jp.getText()); } } } else if ("listFields".equals(fieldname)) { // user.setUserImage(jp.getBinaryValue()); while (jp.nextToken() != JsonToken.END_OBJECT) { String key = jp.getCurrentName(); listFields.put(key, new ArrayList<String>()); jp.nextToken(); // move to value while (jp.nextToken() != JsonToken.END_ARRAY) { listFields.get(key).add(jp.getText()); } } } else if ("rawPayload".equals(fieldname)) { rawPayload = Base64.decode(jp.getText()); } else { throw new IllegalStateException("Unrecognized field '" + 
fieldname + "'!"); } } jp.close(); // ensure resources get cleaned up timely and properly if (id == null) { throw new IllegalStateException("ZNRecord id field is required!"); } record = new ZNRecord(id); record.setSimpleFields(simpleFields); record.setListFields(listFields); record.setMapFields(mapFields); record.setRawPayload(rawPayload); } catch (Exception e) { LOG.error("Exception during deserialization of bytes: " + new String(bytes), e); } return record; }
#vulnerable code @Override public Object deserialize(byte[] bytes) throws ZkMarshallingError { if (bytes == null || bytes.length == 0) { LOG.error("ZNode is empty."); return null; } ByteArrayInputStream bais = new ByteArrayInputStream(bytes); ZNRecord record = null; try { JsonFactory f = new JsonFactory(); JsonParser jp = f.createJsonParser(bais); jp.nextToken(); // will return JsonToken.START_OBJECT (verify?) while (jp.nextToken() != JsonToken.END_OBJECT) { String fieldname = jp.getCurrentName(); jp.nextToken(); // move to value, or START_OBJECT/START_ARRAY if ("id".equals(fieldname)) { // contains an object record = new ZNRecord(jp.getText()); } else if ("simpleFields".equals(fieldname)) { while (jp.nextToken() != JsonToken.END_OBJECT) { String key = jp.getCurrentName(); jp.nextToken(); // move to value record.setSimpleField(key, jp.getText()); } } else if ("mapFields".equals(fieldname)) { // user.setVerified(jp.getCurrentToken() == JsonToken.VALUE_TRUE); while (jp.nextToken() != JsonToken.END_OBJECT) { String key = jp.getCurrentName(); record.setMapField(key, new TreeMap<String, String>()); jp.nextToken(); // move to value while (jp.nextToken() != JsonToken.END_OBJECT) { String mapKey = jp.getCurrentName(); jp.nextToken(); // move to value record.getMapField(key).put(mapKey, jp.getText()); } } } else if ("listFields".equals(fieldname)) { // user.setUserImage(jp.getBinaryValue()); while (jp.nextToken() != JsonToken.END_OBJECT) { String key = jp.getCurrentName(); record.setListField(key, new ArrayList<String>()); jp.nextToken(); // move to value while (jp.nextToken() != JsonToken.END_ARRAY) { record.getListField(key).add(jp.getText()); } } } else if ("rawPayload".equals(fieldname)) { record.setRawPayload(Base64.decode(jp.getText())); } else { throw new IllegalStateException("Unrecognized field '" + fieldname + "'!"); } } jp.close(); // ensure resources get cleaned up timely and properly } catch (Exception e) { LOG.error("Exception during deserialization of bytes: 
" + new String(bytes), e); } return record; } #location 32 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void handleNewSession(String sessionId) { if (_zkClientForRoutingDataListener == null || _zkClientForRoutingDataListener.isClosed()) { return; } // Resubscribe _zkClientForRoutingDataListener.unsubscribeAll(); _zkClientForRoutingDataListener.subscribeRoutingDataChanges(this, this); resetZkResources(); }
#vulnerable code @Override public void handleNewSession(String sessionId) { if (_zkClientForListener == null || _zkClientForListener.isClosed()) { return; } // Resubscribe _zkClientForListener.unsubscribeAll(); _zkClientForListener.subscribeRoutingDataChanges(this, this); resetZkResources(); } #location 8 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code void preHandleMessage() throws Exception { if (!_message.isValid()) { String errorMessage = "Invalid Message, ensure that message: " + _message + " has all the required fields: " + Arrays.toString(Message.Attributes.values()); _statusUpdateUtil.logError(_message, HelixStateTransitionHandler.class, errorMessage, _manager); logger.error(errorMessage); throw new HelixException(errorMessage); } logger.info("handling message: " + _message.getMsgId() + " transit " + _message.getResourceName() + "." + _message.getPartitionName() + "|" + _message.getPartitionNames() + " from:" + _message.getFromState() + " to:" + _message.getToState() + ", relayedFrom: " + _message.getRelaySrcHost()); HelixDataAccessor accessor = _manager.getHelixDataAccessor(); String partitionName = _message.getPartitionName(); // Set start time right before invoke client logic _currentStateDelta.setStartTime(_message.getPartitionName(), System.currentTimeMillis()); StaleMessageValidateResult err = staleMessageValidator(); if (!err.isValid) { _statusUpdateUtil .logError(_message, HelixStateTransitionHandler.class, err.exception.getMessage(), _manager); logger.error(err.exception.getMessage()); throw err.exception; } // Reset the REQUESTED_STATE property if it exists. 
try { String instance = _manager.getInstanceName(); String sessionId = _message.getTgtSessionId(); String resource = _message.getResourceName(); ZNRecordBucketizer bucketizer = new ZNRecordBucketizer(_message.getBucketSize()); PropertyKey key = accessor.keyBuilder().currentState(instance, sessionId, resource, bucketizer.getBucketName(partitionName)); ZNRecord rec = new ZNRecord(resource); Map<String, String> map = new TreeMap<String, String>(); map.put(CurrentState.CurrentStateProperty.REQUESTED_STATE.name(), null); rec.getMapFields().put(partitionName, map); ZNRecordDelta delta = new ZNRecordDelta(rec, ZNRecordDelta.MergeOperation.SUBTRACT); List<ZNRecordDelta> deltaList = new ArrayList<ZNRecordDelta>(); deltaList.add(delta); CurrentState currStateUpdate = new CurrentState(resource); currStateUpdate.setDeltaList(deltaList); // Update the ZK current state of the node if (!accessor.updateProperty(key, currStateUpdate)) { logger.error("Fails to persist current state back to ZK for resource " + resource + " partition: " + partitionName); } } catch (Exception e) { logger.error("Error when removing " + CurrentState.CurrentStateProperty.REQUESTED_STATE.name() + " from current state.", e); StateTransitionError error = new StateTransitionError(ErrorType.FRAMEWORK, ErrorCode.ERROR, e); _stateModel.rollbackOnError(_message, _notificationContext, error); _statusUpdateUtil.logError( _message, HelixStateTransitionHandler.class, e, "Error when removing " + CurrentState.CurrentStateProperty.REQUESTED_STATE.name() + " from current state.", _manager); } }
#vulnerable code void preHandleMessage() throws Exception { if (!_message.isValid()) { String errorMessage = "Invalid Message, ensure that message: " + _message + " has all the required fields: " + Arrays.toString(Message.Attributes.values()); _statusUpdateUtil.logError(_message, HelixStateTransitionHandler.class, errorMessage, _manager); logger.error(errorMessage); throw new HelixException(errorMessage); } logger.info("handling message: " + _message.getMsgId() + " transit " + _message.getResourceName() + "." + _message.getPartitionName() + "|" + _message.getPartitionNames() + " from:" + _message.getFromState() + " to:" + _message.getToState() + ", relayedFrom: " + _message.getRelaySrcHost()); HelixDataAccessor accessor = _manager.getHelixDataAccessor(); String partitionName = _message.getPartitionName(); String fromState = _message.getFromState(); String toState = _message.getToState(); // Verify the fromState and current state of the stateModel // getting current state from state model will provide most up-to-date // current state. In case current state is null, partition is in initial // state and we are setting it in current state String state = _stateModel.getCurrentState() != null ? 
_stateModel.getCurrentState() : _currentStateDelta.getState(partitionName); // Set start time right before invoke client logic _currentStateDelta.setStartTime(_message.getPartitionName(), System.currentTimeMillis()); Exception err = null; if (toState.equalsIgnoreCase(state)) { // To state equals current state, we can just ignore the message err = new HelixDuplicatedStateTransitionException( String.format("Partition %s current state is same as toState (%s->%s) from message.", partitionName, fromState, toState)); } else if (fromState != null && !fromState.equals("*") && !fromState.equalsIgnoreCase(state)) { // If current state is neither toState nor fromState in message, there is a problem err = new HelixStateMismatchException(String.format( "Current state of stateModel does not match the fromState in Message, CurrentState: %s, Message: %s->%s, Partition: %s, from: %s, to: %s", state, fromState, toState, partitionName, _message.getMsgSrc(), _message.getTgtName())); } if (err != null) { _statusUpdateUtil.logError(_message, HelixStateTransitionHandler.class, err.getMessage(), _manager); logger.error(err.getMessage()); throw err; } // Reset the REQUESTED_STATE property if it exists. 
try { String instance = _manager.getInstanceName(); String sessionId = _message.getTgtSessionId(); String resource = _message.getResourceName(); ZNRecordBucketizer bucketizer = new ZNRecordBucketizer(_message.getBucketSize()); PropertyKey key = accessor.keyBuilder().currentState(instance, sessionId, resource, bucketizer.getBucketName(partitionName)); ZNRecord rec = new ZNRecord(resource); Map<String, String> map = new TreeMap<String, String>(); map.put(CurrentState.CurrentStateProperty.REQUESTED_STATE.name(), null); rec.getMapFields().put(partitionName, map); ZNRecordDelta delta = new ZNRecordDelta(rec, ZNRecordDelta.MergeOperation.SUBTRACT); List<ZNRecordDelta> deltaList = new ArrayList<ZNRecordDelta>(); deltaList.add(delta); CurrentState currStateUpdate = new CurrentState(resource); currStateUpdate.setDeltaList(deltaList); // Update the ZK current state of the node if (!accessor.updateProperty(key, currStateUpdate)) { logger.error("Fails to persist current state back to ZK for resource " + resource + " partition: " + partitionName); } } catch (Exception e) { logger.error("Error when removing " + CurrentState.CurrentStateProperty.REQUESTED_STATE.name() + " from current state.", e); StateTransitionError error = new StateTransitionError(ErrorType.FRAMEWORK, ErrorCode.ERROR, e); _stateModel.rollbackOnError(_message, _notificationContext, error); _statusUpdateUtil.logError( _message, HelixStateTransitionHandler.class, e, "Error when removing " + CurrentState.CurrentStateProperty.REQUESTED_STATE.name() + " from current state.", _manager); } } #location 27 #vulnerability type THREAD_SAFETY_VIOLATION
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException { DefaultMQPullConsumer consumer = new DefaultMQPullConsumer(MixAll.TOOLS_CONSUMER_GROUP, rpcHook); try { String topic = commandLine.getOptionValue('t').trim(); String charsetName = !commandLine.hasOption('c') ? "UTF-8" : commandLine.getOptionValue('c').trim(); String subExpression = !commandLine.hasOption('s') ? "*" : commandLine.getOptionValue('s').trim(); boolean printBody = !commandLine.hasOption('d') || Boolean.parseBoolean(commandLine.getOptionValue('d').trim()); consumer.start(); Set<MessageQueue> mqs = consumer.fetchSubscribeMessageQueues(topic); for (MessageQueue mq : mqs) { long minOffset = consumer.minOffset(mq); long maxOffset = consumer.maxOffset(mq); if (commandLine.hasOption('b')) { String timestampStr = commandLine.getOptionValue('b').trim(); long timeValue = timestampFormat(timestampStr); minOffset = consumer.searchOffset(mq, timeValue); } if (commandLine.hasOption('e')) { String timestampStr = commandLine.getOptionValue('e').trim(); long timeValue = timestampFormat(timestampStr); maxOffset = consumer.searchOffset(mq, timeValue); } System.out.printf("minOffset=%s, maxOffset=%s, %s", minOffset, maxOffset, mq); READQ: for (long offset = minOffset; offset < maxOffset; ) { try { PullResult pullResult = consumer.pull(mq, subExpression, offset, 32); offset = pullResult.getNextBeginOffset(); switch (pullResult.getPullStatus()) { case FOUND: printMessage(pullResult.getMsgFoundList(), charsetName, printBody); break; case NO_MATCHED_MSG: System.out.printf("%s no matched msg. status=%s, offset=%s", mq, pullResult.getPullStatus(), offset); break; case NO_NEW_MSG: case OFFSET_ILLEGAL: System.out.printf("%s print msg finished. 
status=%s, offset=%s", mq, pullResult.getPullStatus(), offset); break READQ; } } catch (Exception e) { e.printStackTrace(); break; } } } } catch (Exception e) { throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e); } finally { consumer.shutdown(); } }
#vulnerable code @Override public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException { DefaultMQPullConsumer consumer = new DefaultMQPullConsumer(MixAll.TOOLS_CONSUMER_GROUP, rpcHook); try { String topic = commandLine.getOptionValue('t').trim(); String charsetName = !commandLine.hasOption('c') ? "UTF-8" : commandLine.getOptionValue('c').trim(); String subExpression = !commandLine.hasOption('s') ? "*" : commandLine.getOptionValue('s').trim(); boolean printBody = !commandLine.hasOption('d') || Boolean.parseBoolean(commandLine.getOptionValue('d').trim()); consumer.start(); Set<MessageQueue> mqs = consumer.fetchSubscribeMessageQueues(topic); for (MessageQueue mq : mqs) { long minOffset = consumer.minOffset(mq); long maxOffset = consumer.maxOffset(mq); if (commandLine.hasOption('b')) { String timestampStr = commandLine.getOptionValue('b').trim(); long timeValue = timestampFormat(timestampStr); minOffset = consumer.searchOffset(mq, timeValue); } if (commandLine.hasOption('e')) { String timestampStr = commandLine.getOptionValue('e').trim(); long timeValue = timestampFormat(timestampStr); maxOffset = consumer.searchOffset(mq, timeValue); } System.out.printf("minOffset=%s, maxOffset=%s, %s", minOffset, maxOffset, mq); READQ: for (long offset = minOffset; offset < maxOffset; ) { try { PullResult pullResult = consumer.pull(mq, subExpression, offset, 32); offset = pullResult.getNextBeginOffset(); switch (pullResult.getPullStatus()) { case FOUND: printMessage(pullResult.getMsgFoundList(), charsetName, printBody); break; case NO_MATCHED_MSG: System.out.printf(mq + " no matched msg. status=%s, offset=%s", pullResult.getPullStatus(), offset); break; case NO_NEW_MSG: case OFFSET_ILLEGAL: System.out.printf(mq + " print msg finished. 
status=%s, offset=%s", pullResult.getPullStatus(), offset); break READQ; } } catch (Exception e) { e.printStackTrace(); break; } } } } catch (Exception e) { throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e); } finally { consumer.shutdown(); } } #location 51 #vulnerability type CHECKERS_PRINTF_ARGS
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void execute(final CommandLine commandLine, final Options options, RPCHook rpcHook) throws SubCommandException { String messageId = commandLine.getOptionValue('i').trim(); try { System.out.printf("ip=%s", MessageClientIDSetter.getIPStrFromID(messageId)); } catch (Exception e) { e.printStackTrace(); } try { String date = UtilAll.formatDate(MessageClientIDSetter.getNearlyTimeFromID(messageId), UtilAll.YYYY_MM_DD_HH_MM_SS_SSS); System.out.printf("date=%s", date); } catch (Exception e) { throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e); } }
#vulnerable code @Override public void execute(final CommandLine commandLine, final Options options, RPCHook rpcHook) throws SubCommandException { String messageId = commandLine.getOptionValue('i').trim(); try { System.out.printf("ip=" + MessageClientIDSetter.getIPStrFromID(messageId)); } catch (Exception e) { e.printStackTrace(); } try { String date = UtilAll.formatDate(MessageClientIDSetter.getNearlyTimeFromID(messageId), UtilAll.YYYY_MM_DD_HH_MM_SS_SSS); System.out.printf("date=" + date); } catch (Exception e) { throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e); } } #location 7 #vulnerability type CHECKERS_PRINTF_ARGS
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testConsumerStartWithInterval() { int msgSize = 100; String originMsgDCName = RandomUtils.getStringByUUID(); String msgBodyDCName = RandomUtils.getStringByUUID(); RMQNormalConsumer consumer1 = getConsumer(nsAddr, topic, tag, new RMQNormalListner(originMsgDCName, msgBodyDCName)); producer.send(tag, msgSize, 100); TestUtils.waitForMoment(5); getConsumer(nsAddr, consumer1.getConsumerGroup(), tag, new RMQNormalListner(originMsgDCName, msgBodyDCName)); TestUtils.waitForMoment(5); consumer1.getListner().waitForMessageConsume(producer.getAllMsgBody(), consumeTime); assertThat(VerifyUtils.getFilterdMessage(producer.getAllMsgBody(), consumer1.getListner().getAllMsgBody())) .containsExactlyElementsIn(producer.getAllMsgBody()); }
#vulnerable code @Test public void testConsumerStartWithInterval() { String tag = "jueyin"; int msgSize = 100; String originMsgDCName = RandomUtils.getStringByUUID(); String msgBodyDCName = RandomUtils.getStringByUUID(); RMQNormalConsumer consumer1 = getConsumer(nsAddr, topic, tag, new RMQNormalListner(originMsgDCName, msgBodyDCName)); producer.send(tag, msgSize, 100); TestUtils.waitForMoment(5); RMQNormalConsumer consumer2 = getConsumer(nsAddr, consumer1.getConsumerGroup(), tag, new RMQNormalListner(originMsgDCName, msgBodyDCName)); TestUtils.waitForMoment(5); consumer1.getListner().waitForMessageConsume(producer.getAllMsgBody(), consumeTime); assertThat(VerifyUtils.getFilterdMessage(producer.getAllMsgBody(), consumer1.getListner().getAllMsgBody())) .containsExactlyElementsIn(producer.getAllMsgBody()); } #location 16 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public void executeSendMessageHookBefore(final ChannelHandlerContext ctx, final RemotingCommand request, SendMessageContext context) { if (hasSendMessageHook()) { for (SendMessageHook hook : this.sendMessageHookList) { try { final SendMessageRequestHeader requestHeader = parseRequestHeader(request); if (null != requestHeader) { context.setProducerGroup(requestHeader.getProducerGroup()); context.setTopic(requestHeader.getTopic()); context.setBodyLength(request.getBody().length); context.setMsgProps(requestHeader.getProperties()); context.setBornHost(RemotingHelper.parseChannelRemoteAddr(ctx.channel())); context.setBrokerAddr(this.brokerController.getBrokerAddr()); context.setQueueId(requestHeader.getQueueId()); } hook.sendMessageBefore(context); if (requestHeader != null) { requestHeader.setProperties(context.getMsgProps()); } } catch (Throwable e) { // Ignore } } } }
#vulnerable code public void executeSendMessageHookBefore(final ChannelHandlerContext ctx, final RemotingCommand request, SendMessageContext context) { if (hasSendMessageHook()) { for (SendMessageHook hook : this.sendMessageHookList) { try { final SendMessageRequestHeader requestHeader = parseRequestHeader(request); if (null != requestHeader) { context.setProducerGroup(requestHeader.getProducerGroup()); context.setTopic(requestHeader.getTopic()); context.setBodyLength(request.getBody().length); context.setMsgProps(requestHeader.getProperties()); context.setBornHost(RemotingHelper.parseChannelRemoteAddr(ctx.channel())); context.setBrokerAddr(this.brokerController.getBrokerAddr()); context.setQueueId(requestHeader.getQueueId()); } hook.sendMessageBefore(context); requestHeader.setProperties(context.getMsgProps()); } catch (Throwable e) { } } } } #location 19 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testGetHalfMessageNull() { when(messageStore .getMessage(anyString(), anyString(), anyInt(), anyLong(), anyInt(), ArgumentMatchers.nullable(MessageFilter.class))) .thenReturn(null); PullResult result = transactionBridge.getHalfMessage(0, 0, 1); assertThat(result).isNull(); }
#vulnerable code @Test public void testGetHalfMessageNull() { when(messageStore .getMessage(anyString(), anyString(), anyInt(), anyLong(), anyInt(), ArgumentMatchers.nullable(MessageFilter.class))) .thenReturn(null); PullResult result = transactionBridge.getHalfMessage(0, 0, 1); assertThat(result.getPullStatus()).isNull(); } #location 7 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testGetHalfMessageNull() { when(messageStore .getMessage(anyString(), anyString(), anyInt(), anyLong(), anyInt(), ArgumentMatchers.nullable(MessageFilter.class))) .thenReturn(null); PullResult result = transactionBridge.getHalfMessage(0, 0, 1); assertThat(result).isNull(); }
#vulnerable code @Test public void testGetHalfMessageNull() { when(messageStore .getMessage(anyString(), anyString(), anyInt(), anyLong(), anyInt(), ArgumentMatchers.nullable(MessageFilter.class))) .thenReturn(null); PullResult result = transactionBridge.getHalfMessage(0, 0, 1); assertThat(result.getPullStatus()).isNull(); } #location 7 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public static void main0(String[] args, RPCHook rpcHook) { System.setProperty(RemotingCommand.REMOTING_VERSION_KEY, Integer.toString(MQVersion.CURRENT_VERSION)); //PackageConflictDetect.detectFastjson(); initCommand(); try { initLogback(); switch (args.length) { case 0: printHelp(); break; case 2: if (args[0].equals("help")) { SubCommand cmd = findSubCommand(args[1]); if (cmd != null) { Options options = ServerUtil.buildCommandlineOptions(new Options()); options = cmd.buildCommandlineOptions(options); if (options != null) { ServerUtil.printCommandLineHelp("mqadmin " + cmd.commandName(), options); } } else { System.out.printf("The sub command %s not exist.%n", args[1]); } break; } case 1: default: SubCommand cmd = findSubCommand(args[0]); if (cmd != null) { String[] subargs = parseSubArgs(args); Options options = ServerUtil.buildCommandlineOptions(new Options()); final CommandLine commandLine = ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(), subargs, cmd.buildCommandlineOptions(options), new PosixParser()); if (null == commandLine) { System.exit(-1); return; } if (commandLine.hasOption('n')) { String namesrvAddr = commandLine.getOptionValue('n'); System.setProperty(MixAll.NAMESRV_ADDR_PROPERTY, namesrvAddr); } cmd.execute(commandLine, options, rpcHook); } else { System.out.printf("The sub command %s not exist.%n", args[0]); } break; } } catch (Exception e) { e.printStackTrace(); } }
#vulnerable code public static void main0(String[] args, RPCHook rpcHook) { System.setProperty(RemotingCommand.REMOTING_VERSION_KEY, Integer.toString(MQVersion.CURRENT_VERSION)); //PackageConflictDetect.detectFastjson(); initCommand(); try { initLogback(); switch (args.length) { case 0: printHelp(); break; case 2: if (args[0].equals("help")) { SubCommand cmd = findSubCommand(args[1]); if (cmd != null) { Options options = ServerUtil.buildCommandlineOptions(new Options()); options = cmd.buildCommandlineOptions(options); if (options != null) { ServerUtil.printCommandLineHelp("mqadmin " + cmd.commandName(), options); } } else { System.out.printf("The sub command \'" + args[1] + "\' not exist.%n"); } break; } case 1: default: SubCommand cmd = findSubCommand(args[0]); if (cmd != null) { String[] subargs = parseSubArgs(args); Options options = ServerUtil.buildCommandlineOptions(new Options()); final CommandLine commandLine = ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(), subargs, cmd.buildCommandlineOptions(options), new PosixParser()); if (null == commandLine) { System.exit(-1); return; } if (commandLine.hasOption('n')) { String namesrvAddr = commandLine.getOptionValue('n'); System.setProperty(MixAll.NAMESRV_ADDR_PROPERTY, namesrvAddr); } cmd.execute(commandLine, options, rpcHook); } else { System.out.printf("The sub command \'" + args[0] + "\' not exist.%n"); } break; } } catch (Exception e) { e.printStackTrace(); } } #location 50 #vulnerability type CHECKERS_PRINTF_ARGS
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public static NamesrvController main0(String[] args) { System.setProperty(RemotingCommand.REMOTING_VERSION_KEY, Integer.toString(MQVersion.CURRENT_VERSION)); try { //PackageConflictDetect.detectFastjson(); Options options = ServerUtil.buildCommandlineOptions(new Options()); commandLine = ServerUtil.parseCmdLine("mqnamesrv", args, buildCommandlineOptions(options), new PosixParser()); if (null == commandLine) { System.exit(-1); return null; } final NamesrvConfig namesrvConfig = new NamesrvConfig(); final NettyServerConfig nettyServerConfig = new NettyServerConfig(); nettyServerConfig.setListenPort(9876); if (commandLine.hasOption('c')) { String file = commandLine.getOptionValue('c'); if (file != null) { InputStream in = new BufferedInputStream(new FileInputStream(file)); properties = new Properties(); properties.load(in); MixAll.properties2Object(properties, namesrvConfig); MixAll.properties2Object(properties, nettyServerConfig); namesrvConfig.setConfigStorePath(file); System.out.printf("load config properties file OK, %s%n", file); in.close(); } } if (commandLine.hasOption('p')) { MixAll.printObjectProperties(null, namesrvConfig); MixAll.printObjectProperties(null, nettyServerConfig); System.exit(0); } MixAll.properties2Object(ServerUtil.commandLine2Properties(commandLine), namesrvConfig); if (null == namesrvConfig.getRocketmqHome()) { System.out.printf("Please set the %s variable in your environment to match the location of the RocketMQ installation%n", MixAll.ROCKETMQ_HOME_ENV); System.exit(-2); } LoggerContext lc = (LoggerContext) LoggerFactory.getILoggerFactory(); JoranConfigurator configurator = new JoranConfigurator(); configurator.setContext(lc); lc.reset(); configurator.doConfigure(namesrvConfig.getRocketmqHome() + "/conf/logback_namesrv.xml"); final Logger log = LoggerFactory.getLogger(LoggerName.NAMESRV_LOGGER_NAME); MixAll.printObjectProperties(log, namesrvConfig); MixAll.printObjectProperties(log, nettyServerConfig); final NamesrvController 
controller = new NamesrvController(namesrvConfig, nettyServerConfig); // remember all configs to prevent discard controller.getConfiguration().registerConfig(properties); boolean initResult = controller.initialize(); if (!initResult) { controller.shutdown(); System.exit(-3); } Runtime.getRuntime().addShutdownHook(new ShutdownHookThread(log, new Callable<Void>() { @Override public Void call() throws Exception { controller.shutdown(); return null; } })); controller.start(); String tip = "The Name Server boot success. serializeType=" + RemotingCommand.getSerializeTypeConfigInThisServer(); log.info(tip); System.out.printf("%s%n", tip); return controller; } catch (Throwable e) { e.printStackTrace(); System.exit(-1); } return null; }
#vulnerable code public static NamesrvController main0(String[] args) { System.setProperty(RemotingCommand.REMOTING_VERSION_KEY, Integer.toString(MQVersion.CURRENT_VERSION)); try { //PackageConflictDetect.detectFastjson(); Options options = ServerUtil.buildCommandlineOptions(new Options()); commandLine = ServerUtil.parseCmdLine("mqnamesrv", args, buildCommandlineOptions(options), new PosixParser()); if (null == commandLine) { System.exit(-1); return null; } final NamesrvConfig namesrvConfig = new NamesrvConfig(); final NettyServerConfig nettyServerConfig = new NettyServerConfig(); nettyServerConfig.setListenPort(9876); if (commandLine.hasOption('c')) { String file = commandLine.getOptionValue('c'); if (file != null) { InputStream in = new BufferedInputStream(new FileInputStream(file)); properties = new Properties(); properties.load(in); MixAll.properties2Object(properties, namesrvConfig); MixAll.properties2Object(properties, nettyServerConfig); namesrvConfig.setConfigStorePath(file); System.out.printf("load config properties file OK, " + file + "%n"); in.close(); } } if (commandLine.hasOption('p')) { MixAll.printObjectProperties(null, namesrvConfig); MixAll.printObjectProperties(null, nettyServerConfig); System.exit(0); } MixAll.properties2Object(ServerUtil.commandLine2Properties(commandLine), namesrvConfig); if (null == namesrvConfig.getRocketmqHome()) { System.out.printf("Please set the %s variable in your environment to match the location of the RocketMQ installation%n", MixAll.ROCKETMQ_HOME_ENV); System.exit(-2); } LoggerContext lc = (LoggerContext) LoggerFactory.getILoggerFactory(); JoranConfigurator configurator = new JoranConfigurator(); configurator.setContext(lc); lc.reset(); configurator.doConfigure(namesrvConfig.getRocketmqHome() + "/conf/logback_namesrv.xml"); final Logger log = LoggerFactory.getLogger(LoggerName.NAMESRV_LOGGER_NAME); MixAll.printObjectProperties(log, namesrvConfig); MixAll.printObjectProperties(log, nettyServerConfig); final 
NamesrvController controller = new NamesrvController(namesrvConfig, nettyServerConfig); // remember all configs to prevent discard controller.getConfiguration().registerConfig(properties); boolean initResult = controller.initialize(); if (!initResult) { controller.shutdown(); System.exit(-3); } Runtime.getRuntime().addShutdownHook(new ShutdownHookThread(log, new Callable<Void>() { @Override public Void call() throws Exception { controller.shutdown(); return null; } })); controller.start(); String tip = "The Name Server boot success. serializeType=" + RemotingCommand.getSerializeTypeConfigInThisServer(); log.info(tip); System.out.printf(tip + "%n"); return controller; } catch (Throwable e) { e.printStackTrace(); System.exit(-1); } return null; } #location 78 #vulnerability type CHECKERS_PRINTF_ARGS
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testRegisterProducer() throws Exception { producerManager.registerProducer(group, clientInfo); Map<Channel, ClientChannelInfo> channelMap = producerManager.getGroupChannelTable().get(group); Channel channel1 = producerManager.findChannel("clientId"); assertThat(channelMap).isNotNull(); assertThat(channel1).isNotNull(); assertThat(channelMap.get(channel)).isEqualTo(clientInfo); assertThat(channel1).isEqualTo(channel); }
#vulnerable code @Test public void testRegisterProducer() throws Exception { producerManager.registerProducer(group, clientInfo); HashMap<Channel, ClientChannelInfo> channelMap = producerManager.getGroupChannelTable().get(group); Channel channel1 = producerManager.findChannel("clientId"); assertThat(channelMap).isNotNull(); assertThat(channel1).isNotNull(); assertThat(channelMap.get(channel)).isEqualTo(clientInfo); assertThat(channel1).isEqualTo(channel); } #location 8 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void execute(final CommandLine commandLine, final Options options, RPCHook rpcHook) throws SubCommandException { String messageId = commandLine.getOptionValue('i').trim(); try { System.out.printf("ip=%s", MessageClientIDSetter.getIPStrFromID(messageId)); } catch (Exception e) { e.printStackTrace(); } try { String date = UtilAll.formatDate(MessageClientIDSetter.getNearlyTimeFromID(messageId), UtilAll.YYYY_MM_DD_HH_MM_SS_SSS); System.out.printf("date=%s", date); } catch (Exception e) { throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e); } }
#vulnerable code @Override public void execute(final CommandLine commandLine, final Options options, RPCHook rpcHook) throws SubCommandException { String messageId = commandLine.getOptionValue('i').trim(); try { System.out.printf("ip=" + MessageClientIDSetter.getIPStrFromID(messageId)); } catch (Exception e) { e.printStackTrace(); } try { String date = UtilAll.formatDate(MessageClientIDSetter.getNearlyTimeFromID(messageId), UtilAll.YYYY_MM_DD_HH_MM_SS_SSS); System.out.printf("date=" + date); } catch (Exception e) { throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e); } } #location 14 #vulnerability type CHECKERS_PRINTF_ARGS
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Before public void init() throws NoSuchFieldException, SecurityException, IOException { Yaml ymal = new Yaml(); String home = System.getProperty(MixAll.ROCKETMQ_HOME_PROPERTY, System.getenv(MixAll.ROCKETMQ_HOME_ENV)); InputStream fis = null; if (home == null) { URL url = PlainAclPlugEngineTest.class.getResource("/"); home = url.toString(); home = home.substring(0, home.length() - 1).replace("file:/", "").replace("target/test-classes", ""); home = home + "src/test/resources"; String filePath = home + "/conf/transport.yml"; fis = new FileInputStream(new File(filePath)); } else { String filePath = home + "/conf/transport.yml"; fis = new FileInputStream(new File(filePath)); } transport = ymal.loadAs(fis, BorkerAccessControlTransport.class); ControllerParameters controllerParametersEntity = new ControllerParameters(); controllerParametersEntity.setFileHome(home); plainAclPlugEngine = new PlainAclPlugEngine(controllerParametersEntity); accessControl = new BorkerAccessControl(); accessControl.setAccount("rokcetmq"); accessControl.setPassword("aliyun11"); accessControl.setNetaddress("127.0.0.1"); accessControl.setRecognition("127.0.0.1:1"); accessControlTwo = new BorkerAccessControl(); accessControlTwo.setAccount("rokcet1"); accessControlTwo.setPassword("aliyun1"); accessControlTwo.setNetaddress("127.0.0.1"); accessControlTwo.setRecognition("127.0.0.1:2"); loginInfoMap = new ConcurrentHashMap<>(); FieldSetter.setField(plainAclPlugEngine, plainAclPlugEngine.getClass().getSuperclass().getDeclaredField("loginInfoMap"), loginInfoMap); }
#vulnerable code @Before public void init() throws NoSuchFieldException, SecurityException, IOException { Yaml ymal = new Yaml(); String home = System.getProperty(MixAll.ROCKETMQ_HOME_PROPERTY, System.getenv(MixAll.ROCKETMQ_HOME_ENV)); InputStream fis=null; if(home == null){ URL url = PlainAclPlugEngineTest.class.getResource("/conf/transport.yml"); if(url == null) { url = PlainAclPlugEngineTest.class.getResource("/"); home = url.toString(); home = home.substring(0, home.length()-1).replace("file:/", "").replace("target/test-classes", ""); home = home+"src/test/resources"; String filePath = home+"/conf/transport.yml"; fis = new FileInputStream(new File(filePath)); }else { fis = url.openStream(); url = PlainAclPlugEngineTest.class.getResource("/"); home = url.toString(); home = home.substring(0, home.length()-1).replace("file:/", ""); } }else { String filePath = home + "/conf/transport.yml"; fis = new FileInputStream(new File(filePath)); } transport = ymal.loadAs(fis, BorkerAccessControlTransport.class); ControllerParameters controllerParametersEntity = new ControllerParameters(); controllerParametersEntity.setFileHome(home); plainAclPlugEngine = new PlainAclPlugEngine(controllerParametersEntity); accessControl = new BorkerAccessControl(); accessControl.setAccount("rokcetmq"); accessControl.setPassword("aliyun11"); accessControl.setNetaddress("127.0.0.1"); accessControl.setRecognition("127.0.0.1:1"); accessControlTwo = new BorkerAccessControl(); accessControlTwo.setAccount("rokcet1"); accessControlTwo.setPassword("aliyun1"); accessControlTwo.setNetaddress("127.0.0.1"); accessControlTwo.setRecognition("127.0.0.1:2"); loginInfoMap = new ConcurrentHashMap<>(); FieldSetter.setField(plainAclPlugEngine, plainAclPlugEngine.getClass().getSuperclass().getDeclaredField("loginInfoMap"), loginInfoMap); } #location 10 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private void requestThenAssertResponse() throws Exception { requestThenAssertResponse(remotingClient); }
#vulnerable code private void requestThenAssertResponse() throws Exception { RemotingCommand response = remotingClient.invokeSync("localhost:8888", createRequest(), 1000 * 3); assertTrue(response != null); assertThat(response.getLanguage()).isEqualTo(LanguageCode.JAVA); assertThat(response.getExtFields()).hasSize(2); assertThat(response.getExtFields().get("messageTitle")).isEqualTo("Welcome"); } #location 4 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public LocalTransactionState checkLocalTransactionState(MessageExt msg) { System.out.printf("server checking TrMsg %s%n", msg); int value = transactionIndex.getAndIncrement(); if ((value % 6) == 0) { throw new RuntimeException("Could not find db"); } else if ((value % 5) == 0) { return LocalTransactionState.ROLLBACK_MESSAGE; } else if ((value % 4) == 0) { return LocalTransactionState.COMMIT_MESSAGE; } return LocalTransactionState.UNKNOW; }
#vulnerable code @Override public LocalTransactionState checkLocalTransactionState(MessageExt msg) { System.out.printf("server checking TrMsg " + msg.toString() + "%n"); int value = transactionIndex.getAndIncrement(); if ((value % 6) == 0) { throw new RuntimeException("Could not find db"); } else if ((value % 5) == 0) { return LocalTransactionState.ROLLBACK_MESSAGE; } else if ((value % 4) == 0) { return LocalTransactionState.COMMIT_MESSAGE; } return LocalTransactionState.UNKNOW; } #location 3 #vulnerability type CHECKERS_PRINTF_ARGS
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void should_get_consume_queue_offset_successfully_when_timestamp_is_skewing() throws InterruptedException { final int totalCount = 10; int queueId = 0; String topic = "FooBar"; AppendMessageResult[] appendMessageResults = putMessages(totalCount, topic, queueId, true); //Thread.sleep(10); StoreTestUtil.waitCommitLogReput((DefaultMessageStore) messageStore); int skewing = 2; ConsumeQueue consumeQueue = getDefaultMessageStore().findConsumeQueue(topic, queueId); for (AppendMessageResult appendMessageResult : appendMessageResults) { long offset = messageStore.getOffsetInQueueByTime(topic, queueId, appendMessageResult.getStoreTimestamp() + skewing); long offset2 = messageStore.getOffsetInQueueByTime(topic, queueId, appendMessageResult.getStoreTimestamp() - skewing); SelectMappedBufferResult indexBuffer = consumeQueue.getIndexBuffer(offset); SelectMappedBufferResult indexBuffer2 = consumeQueue.getIndexBuffer(offset2); assertThat(indexBuffer.getByteBuffer().getLong()).isEqualTo(appendMessageResult.getWroteOffset()); assertThat(indexBuffer.getByteBuffer().getInt()).isEqualTo(appendMessageResult.getWroteBytes()); assertThat(indexBuffer2.getByteBuffer().getLong()).isEqualTo(appendMessageResult.getWroteOffset()); assertThat(indexBuffer2.getByteBuffer().getInt()).isEqualTo(appendMessageResult.getWroteBytes()); indexBuffer.release(); indexBuffer2.release(); } }
#vulnerable code @Test public void should_get_consume_queue_offset_successfully_when_timestamp_is_skewing() throws InterruptedException { final int totalCount = 10; int queueId = 0; String topic = "FooBar"; AppendMessageResult[] appendMessageResults = putMessages(totalCount, topic, queueId, true); Thread.sleep(10); int skewing = 2; ConsumeQueue consumeQueue = getDefaultMessageStore().findConsumeQueue(topic, queueId); for (AppendMessageResult appendMessageResult : appendMessageResults) { long offset = messageStore.getOffsetInQueueByTime(topic, queueId, appendMessageResult.getStoreTimestamp() + skewing); long offset2 = messageStore.getOffsetInQueueByTime(topic, queueId, appendMessageResult.getStoreTimestamp() - skewing); SelectMappedBufferResult indexBuffer = consumeQueue.getIndexBuffer(offset); SelectMappedBufferResult indexBuffer2 = consumeQueue.getIndexBuffer(offset2); assertThat(indexBuffer.getByteBuffer().getLong()).isEqualTo(appendMessageResult.getWroteOffset()); assertThat(indexBuffer.getByteBuffer().getInt()).isEqualTo(appendMessageResult.getWroteBytes()); assertThat(indexBuffer2.getByteBuffer().getLong()).isEqualTo(appendMessageResult.getWroteOffset()); assertThat(indexBuffer2.getByteBuffer().getInt()).isEqualTo(appendMessageResult.getWroteBytes()); indexBuffer.release(); indexBuffer2.release(); } } #location 18 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Override public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException { DefaultMQPullConsumer consumer = new DefaultMQPullConsumer(MixAll.TOOLS_CONSUMER_GROUP, rpcHook); try { String topic = commandLine.getOptionValue('t').trim(); String charsetName = !commandLine.hasOption('c') ? "UTF-8" : commandLine.getOptionValue('c').trim(); String subExpression = !commandLine.hasOption('s') ? "*" : commandLine.getOptionValue('s').trim(); boolean printBody = !commandLine.hasOption('d') || Boolean.parseBoolean(commandLine.getOptionValue('d').trim()); consumer.start(); Set<MessageQueue> mqs = consumer.fetchSubscribeMessageQueues(topic); for (MessageQueue mq : mqs) { long minOffset = consumer.minOffset(mq); long maxOffset = consumer.maxOffset(mq); if (commandLine.hasOption('b')) { String timestampStr = commandLine.getOptionValue('b').trim(); long timeValue = timestampFormat(timestampStr); minOffset = consumer.searchOffset(mq, timeValue); } if (commandLine.hasOption('e')) { String timestampStr = commandLine.getOptionValue('e').trim(); long timeValue = timestampFormat(timestampStr); maxOffset = consumer.searchOffset(mq, timeValue); } System.out.printf("minOffset=%s, maxOffset=%s, %s", minOffset, maxOffset, mq); READQ: for (long offset = minOffset; offset < maxOffset; ) { try { PullResult pullResult = consumer.pull(mq, subExpression, offset, 32); offset = pullResult.getNextBeginOffset(); switch (pullResult.getPullStatus()) { case FOUND: printMessage(pullResult.getMsgFoundList(), charsetName, printBody); break; case NO_MATCHED_MSG: System.out.printf("%s no matched msg. status=%s, offset=%s", mq, pullResult.getPullStatus(), offset); break; case NO_NEW_MSG: case OFFSET_ILLEGAL: System.out.printf("%s print msg finished. 
status=%s, offset=%s", mq, pullResult.getPullStatus(), offset); break READQ; } } catch (Exception e) { e.printStackTrace(); break; } } } } catch (Exception e) { throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e); } finally { consumer.shutdown(); } }
#vulnerable code @Override public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException { DefaultMQPullConsumer consumer = new DefaultMQPullConsumer(MixAll.TOOLS_CONSUMER_GROUP, rpcHook); try { String topic = commandLine.getOptionValue('t').trim(); String charsetName = !commandLine.hasOption('c') ? "UTF-8" : commandLine.getOptionValue('c').trim(); String subExpression = !commandLine.hasOption('s') ? "*" : commandLine.getOptionValue('s').trim(); boolean printBody = !commandLine.hasOption('d') || Boolean.parseBoolean(commandLine.getOptionValue('d').trim()); consumer.start(); Set<MessageQueue> mqs = consumer.fetchSubscribeMessageQueues(topic); for (MessageQueue mq : mqs) { long minOffset = consumer.minOffset(mq); long maxOffset = consumer.maxOffset(mq); if (commandLine.hasOption('b')) { String timestampStr = commandLine.getOptionValue('b').trim(); long timeValue = timestampFormat(timestampStr); minOffset = consumer.searchOffset(mq, timeValue); } if (commandLine.hasOption('e')) { String timestampStr = commandLine.getOptionValue('e').trim(); long timeValue = timestampFormat(timestampStr); maxOffset = consumer.searchOffset(mq, timeValue); } System.out.printf("minOffset=%s, maxOffset=%s, %s", minOffset, maxOffset, mq); READQ: for (long offset = minOffset; offset < maxOffset; ) { try { PullResult pullResult = consumer.pull(mq, subExpression, offset, 32); offset = pullResult.getNextBeginOffset(); switch (pullResult.getPullStatus()) { case FOUND: printMessage(pullResult.getMsgFoundList(), charsetName, printBody); break; case NO_MATCHED_MSG: System.out.printf(mq + " no matched msg. status=%s, offset=%s", pullResult.getPullStatus(), offset); break; case NO_NEW_MSG: case OFFSET_ILLEGAL: System.out.printf(mq + " print msg finished. 
status=%s, offset=%s", pullResult.getPullStatus(), offset); break READQ; } } catch (Exception e) { e.printStackTrace(); break; } } } } catch (Exception e) { throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e); } finally { consumer.shutdown(); } } #location 47 #vulnerability type CHECKERS_PRINTF_ARGS
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code private void sendMsg(final DefaultMQAdminExt defaultMQAdminExt, final DefaultMQProducer defaultMQProducer, final String msgId) throws RemotingException, MQBrokerException, InterruptedException, MQClientException { try { MessageExt msg = defaultMQAdminExt.viewMessage(msgId); if (msg != null) { // resend msg by id System.out.printf("prepare resend msg. originalMsgId=%s", msgId); SendResult result = defaultMQProducer.send(msg); System.out.printf("%s", result); } else { System.out.printf("no message. msgId=%s", msgId); } } catch (Exception e) { e.printStackTrace(); } }
#vulnerable code private void sendMsg(final DefaultMQAdminExt defaultMQAdminExt, final DefaultMQProducer defaultMQProducer, final String msgId) throws RemotingException, MQBrokerException, InterruptedException, MQClientException { try { MessageExt msg = defaultMQAdminExt.viewMessage(msgId); if (msg != null) { // resend msg by id System.out.printf("prepare resend msg. originalMsgId=" + msgId); SendResult result = defaultMQProducer.send(msg); System.out.printf("%s", result); } else { System.out.printf("no message. msgId=" + msgId); } } catch (Exception e) { e.printStackTrace(); } } #location 7 #vulnerability type CHECKERS_PRINTF_ARGS
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testIPv6Check() throws UnknownHostException { InetAddress nonInternal = InetAddress.getByName("2408:4004:0180:8100:3FAA:1DDE:2B3F:898A"); InetAddress internal = InetAddress.getByName("FE80:0000:0000:0000:0000:0000:0000:FFFF"); assertThat(UtilAll.isInternalV6IP(nonInternal)).isFalse(); assertThat(UtilAll.isInternalV6IP(internal)).isTrue(); assertThat(UtilAll.ipToIPv6Str(nonInternal.getAddress()).toUpperCase()).isEqualTo("2408:4004:0180:8100:3FAA:1DDE:2B3F:898A"); }
#vulnerable code @Test public void testIPv6Check() { byte[] nonInternalIp = UtilAll.string2bytes("24084004018081003FAA1DDE2B3F898A"); byte[] internalIp = UtilAll.string2bytes("FEC0000000000000000000000000FFFF"); assertThat(UtilAll.isInternalV6IP(nonInternalIp)).isFalse(); assertThat(UtilAll.isInternalV6IP(internalIp)).isTrue(); assertThat(UtilAll.ipToIPv6Str(nonInternalIp).toUpperCase()).isEqualTo("2408:4004:0180:8100:3FAA:1DDE:2B3F:898A"); } #location 5 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public static NamesrvController main0(String[] args) { System.setProperty(RemotingCommand.REMOTING_VERSION_KEY, Integer.toString(MQVersion.CURRENT_VERSION)); try { //PackageConflictDetect.detectFastjson(); Options options = ServerUtil.buildCommandlineOptions(new Options()); commandLine = ServerUtil.parseCmdLine("mqnamesrv", args, buildCommandlineOptions(options), new PosixParser()); if (null == commandLine) { System.exit(-1); return null; } final NamesrvConfig namesrvConfig = new NamesrvConfig(); final NettyServerConfig nettyServerConfig = new NettyServerConfig(); nettyServerConfig.setListenPort(9876); if (commandLine.hasOption('c')) { String file = commandLine.getOptionValue('c'); if (file != null) { InputStream in = new BufferedInputStream(new FileInputStream(file)); properties = new Properties(); properties.load(in); MixAll.properties2Object(properties, namesrvConfig); MixAll.properties2Object(properties, nettyServerConfig); namesrvConfig.setConfigStorePath(file); System.out.printf("load config properties file OK, %s%n", file); in.close(); } } if (commandLine.hasOption('p')) { MixAll.printObjectProperties(null, namesrvConfig); MixAll.printObjectProperties(null, nettyServerConfig); System.exit(0); } MixAll.properties2Object(ServerUtil.commandLine2Properties(commandLine), namesrvConfig); if (null == namesrvConfig.getRocketmqHome()) { System.out.printf("Please set the %s variable in your environment to match the location of the RocketMQ installation%n", MixAll.ROCKETMQ_HOME_ENV); System.exit(-2); } LoggerContext lc = (LoggerContext) LoggerFactory.getILoggerFactory(); JoranConfigurator configurator = new JoranConfigurator(); configurator.setContext(lc); lc.reset(); configurator.doConfigure(namesrvConfig.getRocketmqHome() + "/conf/logback_namesrv.xml"); final Logger log = LoggerFactory.getLogger(LoggerName.NAMESRV_LOGGER_NAME); MixAll.printObjectProperties(log, namesrvConfig); MixAll.printObjectProperties(log, nettyServerConfig); final NamesrvController 
controller = new NamesrvController(namesrvConfig, nettyServerConfig); // remember all configs to prevent discard controller.getConfiguration().registerConfig(properties); boolean initResult = controller.initialize(); if (!initResult) { controller.shutdown(); System.exit(-3); } Runtime.getRuntime().addShutdownHook(new ShutdownHookThread(log, new Callable<Void>() { @Override public Void call() throws Exception { controller.shutdown(); return null; } })); controller.start(); String tip = "The Name Server boot success. serializeType=" + RemotingCommand.getSerializeTypeConfigInThisServer(); log.info(tip); System.out.printf("%s%n", tip); return controller; } catch (Throwable e) { e.printStackTrace(); System.exit(-1); } return null; }
#vulnerable code public static NamesrvController main0(String[] args) { System.setProperty(RemotingCommand.REMOTING_VERSION_KEY, Integer.toString(MQVersion.CURRENT_VERSION)); try { //PackageConflictDetect.detectFastjson(); Options options = ServerUtil.buildCommandlineOptions(new Options()); commandLine = ServerUtil.parseCmdLine("mqnamesrv", args, buildCommandlineOptions(options), new PosixParser()); if (null == commandLine) { System.exit(-1); return null; } final NamesrvConfig namesrvConfig = new NamesrvConfig(); final NettyServerConfig nettyServerConfig = new NettyServerConfig(); nettyServerConfig.setListenPort(9876); if (commandLine.hasOption('c')) { String file = commandLine.getOptionValue('c'); if (file != null) { InputStream in = new BufferedInputStream(new FileInputStream(file)); properties = new Properties(); properties.load(in); MixAll.properties2Object(properties, namesrvConfig); MixAll.properties2Object(properties, nettyServerConfig); namesrvConfig.setConfigStorePath(file); System.out.printf("load config properties file OK, " + file + "%n"); in.close(); } } if (commandLine.hasOption('p')) { MixAll.printObjectProperties(null, namesrvConfig); MixAll.printObjectProperties(null, nettyServerConfig); System.exit(0); } MixAll.properties2Object(ServerUtil.commandLine2Properties(commandLine), namesrvConfig); if (null == namesrvConfig.getRocketmqHome()) { System.out.printf("Please set the %s variable in your environment to match the location of the RocketMQ installation%n", MixAll.ROCKETMQ_HOME_ENV); System.exit(-2); } LoggerContext lc = (LoggerContext) LoggerFactory.getILoggerFactory(); JoranConfigurator configurator = new JoranConfigurator(); configurator.setContext(lc); lc.reset(); configurator.doConfigure(namesrvConfig.getRocketmqHome() + "/conf/logback_namesrv.xml"); final Logger log = LoggerFactory.getLogger(LoggerName.NAMESRV_LOGGER_NAME); MixAll.printObjectProperties(log, namesrvConfig); MixAll.printObjectProperties(log, nettyServerConfig); final 
NamesrvController controller = new NamesrvController(namesrvConfig, nettyServerConfig); // remember all configs to prevent discard controller.getConfiguration().registerConfig(properties); boolean initResult = controller.initialize(); if (!initResult) { controller.shutdown(); System.exit(-3); } Runtime.getRuntime().addShutdownHook(new ShutdownHookThread(log, new Callable<Void>() { @Override public Void call() throws Exception { controller.shutdown(); return null; } })); controller.start(); String tip = "The Name Server boot success. serializeType=" + RemotingCommand.getSerializeTypeConfigInThisServer(); log.info(tip); System.out.printf(tip + "%n"); return controller; } catch (Throwable e) { e.printStackTrace(); System.exit(-1); } return null; } #location 27 #vulnerability type CHECKERS_PRINTF_ARGS
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public static void main0(String[] args, RPCHook rpcHook) { System.setProperty(RemotingCommand.REMOTING_VERSION_KEY, Integer.toString(MQVersion.CURRENT_VERSION)); //PackageConflictDetect.detectFastjson(); initCommand(); try { initLogback(); switch (args.length) { case 0: printHelp(); break; case 2: if (args[0].equals("help")) { SubCommand cmd = findSubCommand(args[1]); if (cmd != null) { Options options = ServerUtil.buildCommandlineOptions(new Options()); options = cmd.buildCommandlineOptions(options); if (options != null) { ServerUtil.printCommandLineHelp("mqadmin " + cmd.commandName(), options); } } else { System.out.printf("The sub command %s not exist.%n", args[1]); } break; } case 1: default: SubCommand cmd = findSubCommand(args[0]); if (cmd != null) { String[] subargs = parseSubArgs(args); Options options = ServerUtil.buildCommandlineOptions(new Options()); final CommandLine commandLine = ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(), subargs, cmd.buildCommandlineOptions(options), new PosixParser()); if (null == commandLine) { System.exit(-1); return; } if (commandLine.hasOption('n')) { String namesrvAddr = commandLine.getOptionValue('n'); System.setProperty(MixAll.NAMESRV_ADDR_PROPERTY, namesrvAddr); } cmd.execute(commandLine, options, rpcHook); } else { System.out.printf("The sub command %s not exist.%n", args[0]); } break; } } catch (Exception e) { e.printStackTrace(); } }
#vulnerable code public static void main0(String[] args, RPCHook rpcHook) { System.setProperty(RemotingCommand.REMOTING_VERSION_KEY, Integer.toString(MQVersion.CURRENT_VERSION)); //PackageConflictDetect.detectFastjson(); initCommand(); try { initLogback(); switch (args.length) { case 0: printHelp(); break; case 2: if (args[0].equals("help")) { SubCommand cmd = findSubCommand(args[1]); if (cmd != null) { Options options = ServerUtil.buildCommandlineOptions(new Options()); options = cmd.buildCommandlineOptions(options); if (options != null) { ServerUtil.printCommandLineHelp("mqadmin " + cmd.commandName(), options); } } else { System.out.printf("The sub command \'" + args[1] + "\' not exist.%n"); } break; } case 1: default: SubCommand cmd = findSubCommand(args[0]); if (cmd != null) { String[] subargs = parseSubArgs(args); Options options = ServerUtil.buildCommandlineOptions(new Options()); final CommandLine commandLine = ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(), subargs, cmd.buildCommandlineOptions(options), new PosixParser()); if (null == commandLine) { System.exit(-1); return; } if (commandLine.hasOption('n')) { String namesrvAddr = commandLine.getOptionValue('n'); System.setProperty(MixAll.NAMESRV_ADDR_PROPERTY, namesrvAddr); } cmd.execute(commandLine, options, rpcHook); } else { System.out.printf("The sub command \'" + args[0] + "\' not exist.%n"); } break; } } catch (Exception e) { e.printStackTrace(); } } #location 24 #vulnerability type CHECKERS_PRINTF_ARGS
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testIPv6Check() throws UnknownHostException { InetAddress nonInternal = InetAddress.getByName("2408:4004:0180:8100:3FAA:1DDE:2B3F:898A"); InetAddress internal = InetAddress.getByName("FE80:0000:0000:0000:0000:0000:0000:FFFF"); assertThat(UtilAll.isInternalV6IP(nonInternal)).isFalse(); assertThat(UtilAll.isInternalV6IP(internal)).isTrue(); assertThat(UtilAll.ipToIPv6Str(nonInternal.getAddress()).toUpperCase()).isEqualTo("2408:4004:0180:8100:3FAA:1DDE:2B3F:898A"); }
#vulnerable code @Test public void testIPv6Check() { byte[] nonInternalIp = UtilAll.string2bytes("24084004018081003FAA1DDE2B3F898A"); byte[] internalIp = UtilAll.string2bytes("FEC0000000000000000000000000FFFF"); assertThat(UtilAll.isInternalV6IP(nonInternalIp)).isFalse(); assertThat(UtilAll.isInternalV6IP(internalIp)).isTrue(); assertThat(UtilAll.ipToIPv6Str(nonInternalIp).toUpperCase()).isEqualTo("2408:4004:0180:8100:3FAA:1DDE:2B3F:898A"); } #location 6 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void processRequest_UnRegisterProducer() throws Exception { brokerController.getProducerManager().registerProducer(group, clientChannelInfo); Map<Channel, ClientChannelInfo> channelMap = brokerController.getProducerManager().getGroupChannelTable().get(group); assertThat(channelMap).isNotNull(); assertThat(channelMap.get(channel)).isEqualTo(clientChannelInfo); RemotingCommand request = createUnRegisterProducerCommand(); RemotingCommand response = clientManageProcessor.processRequest(handlerContext, request); assertThat(response).isNotNull(); assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS); channelMap = brokerController.getProducerManager().getGroupChannelTable().get(group); assertThat(channelMap).isNull(); }
#vulnerable code @Test public void processRequest_UnRegisterProducer() throws Exception { brokerController.getProducerManager().registerProducer(group, clientChannelInfo); HashMap<Channel, ClientChannelInfo> channelMap = brokerController.getProducerManager().getGroupChannelTable().get(group); assertThat(channelMap).isNotNull(); assertThat(channelMap.get(channel)).isEqualTo(clientChannelInfo); RemotingCommand request = createUnRegisterProducerCommand(); RemotingCommand response = clientManageProcessor.processRequest(handlerContext, request); assertThat(response).isNotNull(); assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS); channelMap = brokerController.getProducerManager().getGroupChannelTable().get(group); assertThat(channelMap).isNull(); } #location 6 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public static void main(String[] args) throws MQClientException { DefaultMQPullConsumer consumer = new DefaultMQPullConsumer("please_rename_unique_group_name_5"); consumer.start(); Set<MessageQueue> mqs = consumer.fetchSubscribeMessageQueues("TopicTest1"); for (MessageQueue mq : mqs) { System.out.printf("Consume from the queue: %s%n", mq); SINGLE_MQ: while (true) { try { PullResult pullResult = consumer.pullBlockIfNotFound(mq, null, getMessageQueueOffset(mq), 32); System.out.printf("%s%n", pullResult); putMessageQueueOffset(mq, pullResult.getNextBeginOffset()); switch (pullResult.getPullStatus()) { case FOUND: break; case NO_MATCHED_MSG: break; case NO_NEW_MSG: break SINGLE_MQ; case OFFSET_ILLEGAL: break; default: break; } } catch (Exception e) { e.printStackTrace(); } } } consumer.shutdown(); }
#vulnerable code public static void main(String[] args) throws MQClientException { DefaultMQPullConsumer consumer = new DefaultMQPullConsumer("please_rename_unique_group_name_5"); consumer.start(); Set<MessageQueue> mqs = consumer.fetchSubscribeMessageQueues("TopicTest1"); for (MessageQueue mq : mqs) { System.out.printf("Consume from the queue: " + mq + "%n"); SINGLE_MQ: while (true) { try { PullResult pullResult = consumer.pullBlockIfNotFound(mq, null, getMessageQueueOffset(mq), 32); System.out.printf("%s%n", pullResult); putMessageQueueOffset(mq, pullResult.getNextBeginOffset()); switch (pullResult.getPullStatus()) { case FOUND: break; case NO_MATCHED_MSG: break; case NO_NEW_MSG: break SINGLE_MQ; case OFFSET_ILLEGAL: break; default: break; } } catch (Exception e) { e.printStackTrace(); } } } consumer.shutdown(); } #location 8 #vulnerability type CHECKERS_PRINTF_ARGS
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Override public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException { DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook); defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis())); try { defaultMQAdminExt.start(); String group = commandLine.getOptionValue('g').trim(); ConsumerConnection cc = defaultMQAdminExt.examineConsumerConnectionInfo(group); boolean jstack = commandLine.hasOption('s'); if (!commandLine.hasOption('i')) { int i = 1; long now = System.currentTimeMillis(); final TreeMap<String/* clientId */, ConsumerRunningInfo> criTable = new TreeMap<String, ConsumerRunningInfo>(); for (Connection conn : cc.getConnectionSet()) { try { ConsumerRunningInfo consumerRunningInfo = defaultMQAdminExt.getConsumerRunningInfo(group, conn.getClientId(), jstack); if (consumerRunningInfo != null) { criTable.put(conn.getClientId(), consumerRunningInfo); String filePath = now + "/" + conn.getClientId(); MixAll.string2FileNotSafe(consumerRunningInfo.formatString(), filePath); System.out.printf("%03d %-40s %-20s %s%n", i++, conn.getClientId(), MQVersion.getVersionDesc(conn.getVersion()), filePath); } } catch (Exception e) { e.printStackTrace(); } } if (!criTable.isEmpty()) { boolean subSame = ConsumerRunningInfo.analyzeSubscription(criTable); boolean rebalanceOK = subSame && ConsumerRunningInfo.analyzeRebalance(criTable); if (subSame) { System.out.printf("%n%nSame subscription in the same group of consumer"); System.out.printf("%n%nRebalance %s%n", rebalanceOK ? 
"OK" : "Failed"); Iterator<Entry<String, ConsumerRunningInfo>> it = criTable.entrySet().iterator(); while (it.hasNext()) { Entry<String, ConsumerRunningInfo> next = it.next(); String result = ConsumerRunningInfo.analyzeProcessQueue(next.getKey(), next.getValue()); if (result.length() > 0) { System.out.printf("%s", result); } } } else { System.out.printf("%n%nWARN: Different subscription in the same group of consumer!!!"); } } } else { String clientId = commandLine.getOptionValue('i').trim(); ConsumerRunningInfo consumerRunningInfo = defaultMQAdminExt.getConsumerRunningInfo(group, clientId, jstack); if (consumerRunningInfo != null) { System.out.printf("%s", consumerRunningInfo.formatString()); } } } catch (Exception e) { throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e); } finally { defaultMQAdminExt.shutdown(); } }
#vulnerable code @Override public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException { DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook); defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis())); try { defaultMQAdminExt.start(); String group = commandLine.getOptionValue('g').trim(); ConsumerConnection cc = defaultMQAdminExt.examineConsumerConnectionInfo(group); boolean jstack = commandLine.hasOption('s'); if (!commandLine.hasOption('i')) { int i = 1; long now = System.currentTimeMillis(); final TreeMap<String/* clientId */, ConsumerRunningInfo> criTable = new TreeMap<String, ConsumerRunningInfo>(); for (Connection conn : cc.getConnectionSet()) { try { ConsumerRunningInfo consumerRunningInfo = defaultMQAdminExt.getConsumerRunningInfo(group, conn.getClientId(), jstack); if (consumerRunningInfo != null) { criTable.put(conn.getClientId(), consumerRunningInfo); String filePath = now + "/" + conn.getClientId(); MixAll.string2FileNotSafe(consumerRunningInfo.formatString(), filePath); System.out.printf("%03d %-40s %-20s %s%n", i++, conn.getClientId(), MQVersion.getVersionDesc(conn.getVersion()), filePath); } } catch (Exception e) { e.printStackTrace(); } } if (!criTable.isEmpty()) { boolean subSame = ConsumerRunningInfo.analyzeSubscription(criTable); boolean rebalanceOK = subSame && ConsumerRunningInfo.analyzeRebalance(criTable); if (subSame) { System.out.printf("%n%nSame subscription in the same group of consumer"); System.out.printf("%n%nRebalance %s%n", rebalanceOK ? 
"OK" : "Failed"); Iterator<Entry<String, ConsumerRunningInfo>> it = criTable.entrySet().iterator(); while (it.hasNext()) { Entry<String, ConsumerRunningInfo> next = it.next(); String result = ConsumerRunningInfo.analyzeProcessQueue(next.getKey(), next.getValue()); if (result.length() > 0) { System.out.printf("%s", result); } } } else { System.out.printf("%n%nWARN: Different subscription in the same group of consumer!!!"); } } } else { String clientId = commandLine.getOptionValue('i').trim(); ConsumerRunningInfo consumerRunningInfo = defaultMQAdminExt.getConsumerRunningInfo(group, clientId, jstack); if (consumerRunningInfo != null) { System.out.printf(consumerRunningInfo.formatString()); } } } catch (Exception e) { throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e); } finally { defaultMQAdminExt.shutdown(); } } #location 64 #vulnerability type CHECKERS_PRINTF_ARGS
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code private void sendMsg(final DefaultMQAdminExt defaultMQAdminExt, final DefaultMQProducer defaultMQProducer, final String msgId) throws RemotingException, MQBrokerException, InterruptedException, MQClientException { try { MessageExt msg = defaultMQAdminExt.viewMessage(msgId); if (msg != null) { // resend msg by id System.out.printf("prepare resend msg. originalMsgId=%s", msgId); SendResult result = defaultMQProducer.send(msg); System.out.printf("%s", result); } else { System.out.printf("no message. msgId=%s", msgId); } } catch (Exception e) { e.printStackTrace(); } }
#vulnerable code private void sendMsg(final DefaultMQAdminExt defaultMQAdminExt, final DefaultMQProducer defaultMQProducer, final String msgId) throws RemotingException, MQBrokerException, InterruptedException, MQClientException { try { MessageExt msg = defaultMQAdminExt.viewMessage(msgId); if (msg != null) { // resend msg by id System.out.printf("prepare resend msg. originalMsgId=" + msgId); SendResult result = defaultMQProducer.send(msg); System.out.printf("%s", result); } else { System.out.printf("no message. msgId=" + msgId); } } catch (Exception e) { e.printStackTrace(); } } #location 11 #vulnerability type CHECKERS_PRINTF_ARGS
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code public static FiltersrvController createController(String[] args) { System.setProperty(RemotingCommand.REMOTING_VERSION_KEY, Integer.toString(MQVersion.CURRENT_VERSION)); if (null == System.getProperty(NettySystemConfig.COM_ROCKETMQ_REMOTING_SOCKET_SNDBUF_SIZE)) { NettySystemConfig.socketSndbufSize = 65535; } if (null == System.getProperty(NettySystemConfig.COM_ROCKETMQ_REMOTING_SOCKET_RCVBUF_SIZE)) { NettySystemConfig.socketRcvbufSize = 1024; } try { Options options = ServerUtil.buildCommandlineOptions(new Options()); final CommandLine commandLine = ServerUtil.parseCmdLine("mqfiltersrv", args, buildCommandlineOptions(options), new PosixParser()); if (null == commandLine) { System.exit(-1); return null; } final FiltersrvConfig filtersrvConfig = new FiltersrvConfig(); final NettyServerConfig nettyServerConfig = new NettyServerConfig(); if (commandLine.hasOption('c')) { String file = commandLine.getOptionValue('c'); if (file != null) { InputStream in = new BufferedInputStream(new FileInputStream(file)); Properties properties = new Properties(); properties.load(in); MixAll.properties2Object(properties, filtersrvConfig); System.out.printf("load config properties file OK, %s%n", file); in.close(); String port = properties.getProperty("listenPort"); if (port != null) { filtersrvConfig.setConnectWhichBroker(String.format("127.0.0.1:%s", port)); } } } nettyServerConfig.setListenPort(0); nettyServerConfig.setServerAsyncSemaphoreValue(filtersrvConfig.getFsServerAsyncSemaphoreValue()); nettyServerConfig.setServerCallbackExecutorThreads(filtersrvConfig .getFsServerCallbackExecutorThreads()); nettyServerConfig.setServerWorkerThreads(filtersrvConfig.getFsServerWorkerThreads()); if (commandLine.hasOption('p')) { MixAll.printObjectProperties(null, filtersrvConfig); MixAll.printObjectProperties(null, nettyServerConfig); System.exit(0); } MixAll.properties2Object(ServerUtil.commandLine2Properties(commandLine), filtersrvConfig); if (null == 
filtersrvConfig.getRocketmqHome()) { System.out.printf("Please set the %s variable in your environment to match the location of the RocketMQ installation%n", MixAll.ROCKETMQ_HOME_ENV); System.exit(-2); } LoggerContext lc = (LoggerContext) LoggerFactory.getILoggerFactory(); JoranConfigurator configurator = new JoranConfigurator(); configurator.setContext(lc); lc.reset(); configurator.doConfigure(filtersrvConfig.getRocketmqHome() + "/conf/logback_filtersrv.xml"); log = LoggerFactory.getLogger(LoggerName.FILTERSRV_LOGGER_NAME); final FiltersrvController controller = new FiltersrvController(filtersrvConfig, nettyServerConfig); boolean initResult = controller.initialize(); if (!initResult) { controller.shutdown(); System.exit(-3); } Runtime.getRuntime().addShutdownHook(new ShutdownHookThread(log, new Callable<Void>() { @Override public Void call() throws Exception { controller.shutdown(); return null; } })); return controller; } catch (Throwable e) { e.printStackTrace(); System.exit(-1); } return null; }
#vulnerable code public static FiltersrvController createController(String[] args) { System.setProperty(RemotingCommand.REMOTING_VERSION_KEY, Integer.toString(MQVersion.CURRENT_VERSION)); if (null == System.getProperty(NettySystemConfig.COM_ROCKETMQ_REMOTING_SOCKET_SNDBUF_SIZE)) { NettySystemConfig.socketSndbufSize = 65535; } if (null == System.getProperty(NettySystemConfig.COM_ROCKETMQ_REMOTING_SOCKET_RCVBUF_SIZE)) { NettySystemConfig.socketRcvbufSize = 1024; } try { Options options = ServerUtil.buildCommandlineOptions(new Options()); final CommandLine commandLine = ServerUtil.parseCmdLine("mqfiltersrv", args, buildCommandlineOptions(options), new PosixParser()); if (null == commandLine) { System.exit(-1); return null; } final FiltersrvConfig filtersrvConfig = new FiltersrvConfig(); final NettyServerConfig nettyServerConfig = new NettyServerConfig(); if (commandLine.hasOption('c')) { String file = commandLine.getOptionValue('c'); if (file != null) { InputStream in = new BufferedInputStream(new FileInputStream(file)); Properties properties = new Properties(); properties.load(in); MixAll.properties2Object(properties, filtersrvConfig); System.out.printf("load config properties file OK, " + file + "%n"); in.close(); String port = properties.getProperty("listenPort"); if (port != null) { filtersrvConfig.setConnectWhichBroker(String.format("127.0.0.1:%s", port)); } } } nettyServerConfig.setListenPort(0); nettyServerConfig.setServerAsyncSemaphoreValue(filtersrvConfig.getFsServerAsyncSemaphoreValue()); nettyServerConfig.setServerCallbackExecutorThreads(filtersrvConfig .getFsServerCallbackExecutorThreads()); nettyServerConfig.setServerWorkerThreads(filtersrvConfig.getFsServerWorkerThreads()); if (commandLine.hasOption('p')) { MixAll.printObjectProperties(null, filtersrvConfig); MixAll.printObjectProperties(null, nettyServerConfig); System.exit(0); } MixAll.properties2Object(ServerUtil.commandLine2Properties(commandLine), filtersrvConfig); if (null == 
filtersrvConfig.getRocketmqHome()) { System.out.printf("Please set the " + MixAll.ROCKETMQ_HOME_ENV + " variable in your environment to match the location of the RocketMQ installation%n"); System.exit(-2); } LoggerContext lc = (LoggerContext) LoggerFactory.getILoggerFactory(); JoranConfigurator configurator = new JoranConfigurator(); configurator.setContext(lc); lc.reset(); configurator.doConfigure(filtersrvConfig.getRocketmqHome() + "/conf/logback_filtersrv.xml"); log = LoggerFactory.getLogger(LoggerName.FILTERSRV_LOGGER_NAME); final FiltersrvController controller = new FiltersrvController(filtersrvConfig, nettyServerConfig); boolean initResult = controller.initialize(); if (!initResult) { controller.shutdown(); System.exit(-3); } Runtime.getRuntime().addShutdownHook(new ShutdownHookThread(log, new Callable<Void>() { @Override public Void call() throws Exception { controller.shutdown(); return null; } })); return controller; } catch (Throwable e) { e.printStackTrace(); System.exit(-1); } return null; } #location 32 #vulnerability type CHECKERS_PRINTF_ARGS
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Test public void unregisterProducer() throws Exception { producerManager.registerProducer(group, clientInfo); Map<Channel, ClientChannelInfo> channelMap = producerManager.getGroupChannelTable().get(group); assertThat(channelMap).isNotNull(); assertThat(channelMap.get(channel)).isEqualTo(clientInfo); Channel channel1 = producerManager.findChannel("clientId"); assertThat(channel1).isNotNull(); assertThat(channel1).isEqualTo(channel); producerManager.unregisterProducer(group, clientInfo); channelMap = producerManager.getGroupChannelTable().get(group); channel1 = producerManager.findChannel("clientId"); assertThat(channelMap).isNull(); assertThat(channel1).isNull(); }
#vulnerable code @Test public void unregisterProducer() throws Exception { producerManager.registerProducer(group, clientInfo); HashMap<Channel, ClientChannelInfo> channelMap = producerManager.getGroupChannelTable().get(group); assertThat(channelMap).isNotNull(); assertThat(channelMap.get(channel)).isEqualTo(clientInfo); Channel channel1 = producerManager.findChannel("clientId"); assertThat(channel1).isNotNull(); assertThat(channel1).isEqualTo(channel); producerManager.unregisterProducer(group, clientInfo); channelMap = producerManager.getGroupChannelTable().get(group); channel1 = producerManager.findChannel("clientId"); assertThat(channelMap).isNull(); assertThat(channel1).isNull(); } #location 6 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Override public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException { DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook); defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis())); DefaultMQProducer producer = new DefaultMQProducer(rpcHook); producer.setProducerGroup(Long.toString(System.currentTimeMillis())); try { defaultMQAdminExt.start(); producer.start(); ClusterInfo clusterInfoSerializeWrapper = defaultMQAdminExt.examineBrokerClusterInfo(); HashMap<String, Set<String>> clusterAddr = clusterInfoSerializeWrapper .getClusterAddrTable(); Set<String> clusterNames = null; long amount = !commandLine.hasOption('a') ? 50 : Long.parseLong(commandLine .getOptionValue('a').trim()); long size = !commandLine.hasOption('s') ? 128 : Long.parseLong(commandLine .getOptionValue('s').trim()); long interval = !commandLine.hasOption('i') ? 10 : Long.parseLong(commandLine .getOptionValue('i').trim()); boolean printAsTlog = commandLine.hasOption('p') && Boolean.parseBoolean(commandLine.getOptionValue('p').trim()); String machineRoom = !commandLine.hasOption('m') ? 
"noname" : commandLine .getOptionValue('m').trim(); if (commandLine.hasOption('c')) { clusterNames = new TreeSet<String>(); clusterNames.add(commandLine.getOptionValue('c').trim()); } else { clusterNames = clusterAddr.keySet(); } if (!printAsTlog) { System.out.printf("%-24s %-24s %-4s %-8s %-8s%n", "#Cluster Name", "#Broker Name", "#RT", "#successCount", "#failCount" ); } while (true) { for (String clusterName : clusterNames) { Set<String> brokerNames = clusterAddr.get(clusterName); if (brokerNames == null) { System.out.printf("cluster [%s] not exist", clusterName); break; } for (String brokerName : brokerNames) { Message msg = new Message(brokerName, getStringBySize(size).getBytes(MixAll.DEFAULT_CHARSET)); long start = 0; long end = 0; long elapsed = 0; int successCount = 0; int failCount = 0; for (int i = 0; i < amount; i++) { start = System.currentTimeMillis(); try { producer.send(msg); successCount++; end = System.currentTimeMillis(); } catch (Exception e) { failCount++; end = System.currentTimeMillis(); } if (i != 0) { elapsed += end - start; } } double rt = (double) elapsed / (amount - 1); if (!printAsTlog) { System.out.printf("%-24s %-24s %-8s %-16s %-16s%n", clusterName, brokerName, String.format("%.2f", rt), successCount, failCount ); } else { System.out.printf("%s", String.format("%s|%s|%s|%s|%s%n", getCurTime(), machineRoom, clusterName, brokerName, new BigDecimal(rt).setScale(0, BigDecimal.ROUND_HALF_UP))); } } } Thread.sleep(interval * 1000); } } catch (Exception e) { throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e); } finally { defaultMQAdminExt.shutdown(); producer.shutdown(); } }
#vulnerable code @Override public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException { DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook); defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis())); DefaultMQProducer producer = new DefaultMQProducer(rpcHook); producer.setProducerGroup(Long.toString(System.currentTimeMillis())); try { defaultMQAdminExt.start(); producer.start(); ClusterInfo clusterInfoSerializeWrapper = defaultMQAdminExt.examineBrokerClusterInfo(); HashMap<String, Set<String>> clusterAddr = clusterInfoSerializeWrapper .getClusterAddrTable(); Set<String> clusterNames = null; long amount = !commandLine.hasOption('a') ? 50 : Long.parseLong(commandLine .getOptionValue('a').trim()); long size = !commandLine.hasOption('s') ? 128 : Long.parseLong(commandLine .getOptionValue('s').trim()); long interval = !commandLine.hasOption('i') ? 10 : Long.parseLong(commandLine .getOptionValue('i').trim()); boolean printAsTlog = commandLine.hasOption('p') && Boolean.parseBoolean(commandLine.getOptionValue('p').trim()); String machineRoom = !commandLine.hasOption('m') ? 
"noname" : commandLine .getOptionValue('m').trim(); if (commandLine.hasOption('c')) { clusterNames = new TreeSet<String>(); clusterNames.add(commandLine.getOptionValue('c').trim()); } else { clusterNames = clusterAddr.keySet(); } if (!printAsTlog) { System.out.printf("%-24s %-24s %-4s %-8s %-8s%n", "#Cluster Name", "#Broker Name", "#RT", "#successCount", "#failCount" ); } while (true) { for (String clusterName : clusterNames) { Set<String> brokerNames = clusterAddr.get(clusterName); if (brokerNames == null) { System.out.printf("cluster [%s] not exist", clusterName); break; } for (String brokerName : brokerNames) { Message msg = new Message(brokerName, getStringBySize(size).getBytes(MixAll.DEFAULT_CHARSET)); long start = 0; long end = 0; long elapsed = 0; int successCount = 0; int failCount = 0; for (int i = 0; i < amount; i++) { start = System.currentTimeMillis(); try { producer.send(msg); successCount++; end = System.currentTimeMillis(); } catch (Exception e) { failCount++; end = System.currentTimeMillis(); } if (i != 0) { elapsed += end - start; } } double rt = (double) elapsed / (amount - 1); if (!printAsTlog) { System.out.printf("%-24s %-24s %-8s %-16s %-16s%n", clusterName, brokerName, String.format("%.2f", rt), successCount, failCount ); } else { System.out.printf(String.format("%s|%s|%s|%s|%s%n", getCurTime(), machineRoom, clusterName, brokerName, new BigDecimal(rt).setScale(0, BigDecimal.ROUND_HALF_UP))); } } } Thread.sleep(interval * 1000); } } catch (Exception e) { throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e); } finally { defaultMQAdminExt.shutdown(); producer.shutdown(); } } #location 92 #vulnerability type CHECKERS_PRINTF_ARGS
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code public static NamesrvController main0(String[] args) { try { NamesrvController controller = createNamesrvController(args); start(controller); String tip = "The Name Server boot success. serializeType=" + RemotingCommand.getSerializeTypeConfigInThisServer(); log.info(tip); System.out.printf("%s%n", tip); return controller; } catch (Throwable e) { e.printStackTrace(); System.exit(-1); } return null; }
#vulnerable code public static NamesrvController main0(String[] args) { System.setProperty(RemotingCommand.REMOTING_VERSION_KEY, Integer.toString(MQVersion.CURRENT_VERSION)); try { //PackageConflictDetect.detectFastjson(); Options options = ServerUtil.buildCommandlineOptions(new Options()); commandLine = ServerUtil.parseCmdLine("mqnamesrv", args, buildCommandlineOptions(options), new PosixParser()); if (null == commandLine) { System.exit(-1); return null; } final NamesrvConfig namesrvConfig = new NamesrvConfig(); final NettyServerConfig nettyServerConfig = new NettyServerConfig(); nettyServerConfig.setListenPort(9876); if (commandLine.hasOption('c')) { String file = commandLine.getOptionValue('c'); if (file != null) { InputStream in = new BufferedInputStream(new FileInputStream(file)); properties = new Properties(); properties.load(in); MixAll.properties2Object(properties, namesrvConfig); MixAll.properties2Object(properties, nettyServerConfig); namesrvConfig.setConfigStorePath(file); System.out.printf("load config properties file OK, %s%n", file); in.close(); } } if (commandLine.hasOption('p')) { MixAll.printObjectProperties(null, namesrvConfig); MixAll.printObjectProperties(null, nettyServerConfig); System.exit(0); } MixAll.properties2Object(ServerUtil.commandLine2Properties(commandLine), namesrvConfig); if (null == namesrvConfig.getRocketmqHome()) { System.out.printf("Please set the %s variable in your environment to match the location of the RocketMQ installation%n", MixAll.ROCKETMQ_HOME_ENV); System.exit(-2); } LoggerContext lc = (LoggerContext) LoggerFactory.getILoggerFactory(); JoranConfigurator configurator = new JoranConfigurator(); configurator.setContext(lc); lc.reset(); configurator.doConfigure(namesrvConfig.getRocketmqHome() + "/conf/logback_namesrv.xml"); final InternalLogger log = InternalLoggerFactory.getLogger(LoggerName.NAMESRV_LOGGER_NAME); MixAll.printObjectProperties(log, namesrvConfig); MixAll.printObjectProperties(log, nettyServerConfig); 
final NamesrvController controller = new NamesrvController(namesrvConfig, nettyServerConfig); // remember all configs to prevent discard controller.getConfiguration().registerConfig(properties); boolean initResult = controller.initialize(); if (!initResult) { controller.shutdown(); System.exit(-3); } Runtime.getRuntime().addShutdownHook(new ShutdownHookThread(log, new Callable<Void>() { @Override public Void call() throws Exception { controller.shutdown(); return null; } })); controller.start(); String tip = "The Name Server boot success. serializeType=" + RemotingCommand.getSerializeTypeConfigInThisServer(); log.info(tip); System.out.printf("%s%n", tip); return controller; } catch (Throwable e) { e.printStackTrace(); System.exit(-1); } return null; } #location 81 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Test public void testTwoConsumerWithSameGroup() { int msgSize = 20; String originMsgDCName = RandomUtils.getStringByUUID(); String msgBodyDCName = RandomUtils.getStringByUUID(); RMQNormalConsumer consumer1 = getConsumer(nsAddr, topic, tag, new RMQNormalListner(originMsgDCName, msgBodyDCName)); getConsumer(nsAddr, consumer1.getConsumerGroup(), tag, new RMQNormalListner(originMsgDCName, msgBodyDCName)); producer.send(tag, msgSize); Assert.assertEquals("Not all are sent", msgSize, producer.getAllUndupMsgBody().size()); consumer1.getListner().waitForMessageConsume(producer.getAllMsgBody(), consumeTime); assertThat(VerifyUtils.getFilterdMessage(producer.getAllMsgBody(), consumer1.getListner().getAllMsgBody())) .containsExactlyElementsIn(producer.getAllMsgBody()); }
#vulnerable code @Test public void testTwoConsumerWithSameGroup() { String tag = "jueyin"; int msgSize = 20; String originMsgDCName = RandomUtils.getStringByUUID(); String msgBodyDCName = RandomUtils.getStringByUUID(); RMQNormalConsumer consumer1 = getConsumer(nsAddr, topic, tag, new RMQNormalListner(originMsgDCName, msgBodyDCName)); RMQNormalConsumer consumer2 = getConsumer(nsAddr, consumer1.getConsumerGroup(), tag, new RMQNormalListner(originMsgDCName, msgBodyDCName)); producer.send(tag, msgSize); Assert.assertEquals("Not all are sent", msgSize, producer.getAllUndupMsgBody().size()); consumer1.getListner().waitForMessageConsume(producer.getAllMsgBody(), consumeTime); assertThat(VerifyUtils.getFilterdMessage(producer.getAllMsgBody(), consumer1.getListner().getAllMsgBody())) .containsExactlyElementsIn(producer.getAllMsgBody()); } #location 13 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Test public void testGetGroupChannelTable() throws Exception { producerManager.registerProducer(group, clientInfo); Map<Channel, ClientChannelInfo> oldMap = producerManager.getGroupChannelTable().get(group); producerManager.unregisterProducer(group, clientInfo); assertThat(oldMap.size()).isEqualTo(0); }
#vulnerable code @Test public void testGetGroupChannelTable() throws Exception { producerManager.registerProducer(group, clientInfo); HashMap<Channel, ClientChannelInfo> oldMap = producerManager.getGroupChannelTable().get(group); producerManager.unregisterProducer(group, clientInfo); assertThat(oldMap.size()).isNotEqualTo(0); } #location 7 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Test public void testCompose() throws Exception { Getter<ResultSet, Integer> getter = new Getter<ResultSet, Integer>() { @Override public Integer get(ResultSet target) throws Exception { return 3; } }; FieldMapper<ResultSet, Object> fieldMapper = new FieldMapper<ResultSet, Object>() { @Override public void map(ResultSet source, Object target) throws Exception { } }; FieldMapperColumnDefinition<JdbcColumnKey, ResultSet> compose = FieldMapperColumnDefinition.<JdbcColumnKey,ResultSet>identity().addRename("blop").addGetter(getter).addFieldMapper(fieldMapper); assertEquals("blop", compose.rename(new JdbcColumnKey("bar", -1)).getName()); assertEquals(fieldMapper, compose.getCustomFieldMapper()); assertEquals(new Integer(3), compose.getCustomGetter().get(null)); assertTrue(compose.hasCustomSource()); assertEquals(Integer.class, compose.getCustomSourceReturnType()); }
#vulnerable code @Test public void testCompose() throws Exception { Getter<ResultSet, Integer> getter = new Getter<ResultSet, Integer>() { @Override public Integer get(ResultSet target) throws Exception { return 3; } }; FieldMapperColumnDefinition<JdbcColumnKey, ResultSet> compose = FieldMapperColumnDefinition.compose( FieldMapperColumnDefinition.<JdbcColumnKey, ResultSet>renameDefinition("blop"), FieldMapperColumnDefinition.<JdbcColumnKey, ResultSet>customGetter(getter)); assertEquals("blop", compose.rename(new JdbcColumnKey("bar", -1)).getName()); assertEquals(new Integer(3), compose.getCustomGetter().get(null)); assertTrue(compose.hasCustomSource()); assertEquals(Integer.class, compose.getCustomSourceReturnType()); } #location 15 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code public static void main(String[] args) throws SQLException, Exception { AllBenchmark.runBenchmark(DbHelper.getConnection(args), DynamicJdbcMapperForEachBenchmark.class); }
#vulnerable code public static void main(String[] args) throws SQLException, Exception { AllBenchmark.runBenchmark(DbHelper.mockDb(), SmallBenchmarkObject.class, DynamicJdbcMapperForEachBenchmark.class, BenchmarkConstants.SINGLE_QUERY_SIZE, BenchmarkConstants.SINGLE_NB_ITERATION); } #location 2 #vulnerability type RESOURCE_LEAK
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code public <T> CsvMapperBuilder<T> newBuilder(final Class<T> target) { ClassMeta<T> classMeta = getClassMeta(target); CsvMapperBuilder<T> builder = new CsvMapperBuilder<T>(target, classMeta, aliases, customReaders, propertyNameMatcherFactory); builder.fieldMapperErrorHandler(fieldMapperErrorHandler); builder.mapperBuilderErrorHandler(mapperBuilderErrorHandler); builder.setDefaultDateFormat(defaultDateFormat); return builder; }
#vulnerable code public <T> CsvMapperBuilder<T> newBuilder(final Class<T> target) { CsvMapperBuilder<T> builder = new CsvMapperBuilder<T>(target, getClassMeta(target), aliases, customReaders, propertyNameMatcherFactory); builder.fieldMapperErrorHandler(fieldMapperErrorHandler); builder.mapperBuilderErrorHandler(mapperBuilderErrorHandler); builder.setDefaultDateFormat(defaultDateFormat); return builder; } #location 2 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code public ResultSetMapperBuilder<T> addMapping(String property, String column) { Setter<T, Object> setter = setterFactory.getSetter(target, property); addMapping(setter, column); return this; }
#vulnerable code public ResultSetMapperBuilder<T> addMapping(String property, String column) { Setter<T, Object> setter = setterFactory.getSetter(target, property); Mapper<ResultSet, T> fieldMapper; if (setter.getPropertyType().isPrimitive()) { fieldMapper = primitiveFieldMapper(column, setter); } else { fieldMapper = objectFieldMapper(column, setter); } fields.add(fieldMapper); return this; } #location 6 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code. Please generate the patch based on the following information.
#fixed code @Test public void testComposition() { CsvColumnDefinition compose = CsvColumnDefinition.dateFormatDefinition("yyyyMM").addRename("blop").addCustomReader( new CellValueReader<Integer>() { @Override public Integer read(char[] chars, int offset, int length, ParsingContext parsingContext) { return 3; } }); assertEquals("blop", compose.rename(new CsvColumnKey("bar", -1)).getName()); assertEquals("yyyyMM", compose.dateFormat()); assertEquals(new Integer(3), compose.getCustomReader().read(null, 0, 0 , null)); assertTrue(compose.hasCustomSource()); assertEquals(Integer.class, compose.getCustomSourceReturnType()); }
#vulnerable code @Test public void testComposition() { CsvColumnDefinition compose = CsvColumnDefinition.compose(CsvColumnDefinition.compose(CsvColumnDefinition.dateFormatDefinition("yyyyMM"), CsvColumnDefinition.renameDefinition("blop")), CsvColumnDefinition.customReaderDefinition(new CellValueReader<Integer>() { @Override public Integer read(char[] chars, int offset, int length, ParsingContext parsingContext) { return 3; } })); assertEquals("blop", compose.rename(new CsvColumnKey("bar", -1)).getName()); assertEquals("yyyyMM", compose.dateFormat()); assertEquals(new Integer(3), compose.getCustomReader().read(null, 0, 0 , null)); assertTrue(compose.hasCustomSource()); assertEquals(Integer.class, compose.getCustomSourceReturnType()); } #location 14 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code public <S, T> Instantiator<S, T> getInstantiator(final Class<S> source, final Class<T> target) throws NoSuchMethodException, SecurityException { final Constructor<T> constructor = getSmallerConstructor(target); if (constructor == null) { throw new NoSuchMethodException("No available constructor for " + target); } Object[] args; if (constructor.getParameterTypes().length == 0) { if (asmFactory != null && Modifier.isPublic(constructor.getModifiers())) { try { return asmFactory.createEmptyArgsInstatiantor(source, target); } catch (Exception e) { // fall back on reflection } } args = EMPTY_ARGS; } else { args = new Object[constructor.getParameterTypes().length]; for(int i = 0; i < args.length; i++) { if (constructor.getParameterTypes()[i].isPrimitive()) { args[i] = DEFAULT_VALUES.get(constructor.getParameterTypes()[i]); } } } constructor.setAccessible(true); return new StaticConstructorInstantiator<S, T>(constructor, args); }
#vulnerable code public <S, T> Instantiator<S, T> getInstantiator(final Class<S> source, final Class<T> target) throws NoSuchMethodException, SecurityException { final Constructor<T> constructor = getSmallerConstructor(target); Object[] args; if (constructor.getParameterTypes().length == 0) { if (asmFactory != null && Modifier.isPublic(constructor.getModifiers())) { try { return asmFactory.createEmptyArgsInstatiantor(source, target); } catch (Exception e) { // fall back on reflection } } args = EMPTY_ARGS; } else { args = new Object[constructor.getParameterTypes().length]; for(int i = 0; i < args.length; i++) { if (constructor.getParameterTypes()[i].isPrimitive()) { args[i] = DEFAULT_VALUES.get(constructor.getParameterTypes()[i]); } } } constructor.setAccessible(true); return new StaticConstructorInstantiator<S, T>(constructor, args); } #location 6 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.
#fixed code @Test public void testArrayElementConstructorInjectionWithIncompatibleConstructorUseIncompatibleOutlay() { ClassMeta<ObjectWithIncompatibleConstructor[]> classMeta = ReflectionService.newInstance().getRootClassMeta(ObjectWithIncompatibleConstructor[].class); PropertyFinder<ObjectWithIncompatibleConstructor[]> propertyFinder = classMeta.newPropertyFinder(); assertNotNull(propertyFinder.findProperty(matcher("1_arg1"))); assertNotNull(propertyFinder.findProperty(matcher("1_arg3"))); assertNull(propertyFinder.findProperty(matcher("1_arg2"))); }
#vulnerable code @Test public void testArrayElementConstructorInjectionWithIncompatibleConstructorUseIncompatibleOutlay() { ClassMeta<ObjectWithIncompatibleConstructor[]> classMeta = ReflectionService.newInstance().getClassMeta(ObjectWithIncompatibleConstructor[].class); PropertyFinder<ObjectWithIncompatibleConstructor[]> propertyFinder = classMeta.newPropertyFinder(); assertNotNull(propertyFinder.findProperty(matcher("1_arg1"))); assertNotNull(propertyFinder.findProperty(matcher("1_arg3"))); assertNull(propertyFinder.findProperty(matcher("1_arg2"))); } #location 5 #vulnerability type NULL_DEREFERENCE
Below is the vulnerable code, please generate the patch based on the following information.