Dataset schema:
_id: string (lengths 2 to 7)
title: string (lengths 3 to 140)
partition: string (3 classes)
text: string (lengths 73 to 34.1k)
language: string (1 class)
meta_information: dict
q4600
ClientSocketStats.recordCheckoutQueueLength
train
public void recordCheckoutQueueLength(SocketDestination dest, int queueLength) { if(dest != null) { getOrCreateNodeStats(dest).recordCheckoutQueueLength(null, queueLength); /* also record against the aggregate (null-destination) stats */ recordCheckoutQueueLength(null, queueLength); } else { this.checkoutQueueLengthHistogram.insert(queueLength); checkMonitoringInterval(); } }
java
{ "resource": "" }
q4601
ClientSocketStats.recordResourceRequestTimeUs
train
public void recordResourceRequestTimeUs(SocketDestination dest, long resourceRequestTimeUs) { if(dest != null) { getOrCreateNodeStats(dest).recordResourceRequestTimeUs(null, resourceRequestTimeUs); recordResourceRequestTimeUs(null, resourceRequestTimeUs); } else { this.resourceRequestTimeRequestCounter.addRequest(resourceRequestTimeUs * Time.NS_PER_US); } }
java
{ "resource": "" }
q4602
ClientSocketStats.recordResourceRequestQueueLength
train
public void recordResourceRequestQueueLength(SocketDestination dest, int queueLength) { if(dest != null) { getOrCreateNodeStats(dest).recordResourceRequestQueueLength(null, queueLength); recordResourceRequestQueueLength(null, queueLength); } else { this.resourceRequestQueueLengthHistogram.insert(queueLength); checkMonitoringInterval(); } }
java
{ "resource": "" }
q4603
ClientSocketStats.close
train
public void close() { Iterator<SocketDestination> it = getStatsMap().keySet().iterator(); while(it.hasNext()) { try { SocketDestination destination = it.next(); JmxUtils.unregisterMbean(JmxUtils.createObjectName(JmxUtils.getPackageName(ClientRequestExecutor.class), "stats_" + destination.toString().replace(':', '_') + identifierString)); } catch(Exception e) { /* best-effort JMX unregistration: ignore failures during close */ } } }
java
{ "resource": "" }
q4604
SocketStore.request
train
private <T> T request(ClientRequest<T> delegate, String operationName) { long startTimeMs = -1; long startTimeNs = -1; if(logger.isDebugEnabled()) { startTimeMs = System.currentTimeMillis(); } ClientRequestExecutor clientRequestExecutor = pool.checkout(destination); String debugMsgStr = ""; startTimeNs = System.nanoTime(); BlockingClientRequest<T> blockingClientRequest = null; try { blockingClientRequest = new BlockingClientRequest<T>(delegate, timeoutMs); clientRequestExecutor.addClientRequest(blockingClientRequest, timeoutMs, System.nanoTime() - startTimeNs); boolean awaitResult = blockingClientRequest.await(); if(awaitResult == false) { blockingClientRequest.timeOut(); } if(logger.isDebugEnabled()) debugMsgStr += "success"; return blockingClientRequest.getResult(); } catch(InterruptedException e) { if(logger.isDebugEnabled()) debugMsgStr += "unreachable: " + e.getMessage(); throw new UnreachableStoreException("Failure in " + operationName + " on " + destination + ": " + e.getMessage(), e); } catch(UnreachableStoreException e) { clientRequestExecutor.close(); if(logger.isDebugEnabled()) debugMsgStr += "failure: " + e.getMessage(); throw new UnreachableStoreException("Failure in " + operationName + " on " + destination + ": " + e.getMessage(), e.getCause()); } finally { if(blockingClientRequest != null && !blockingClientRequest.isComplete()) { // close the executor if we timed out clientRequestExecutor.close(); } // Record operation time long opTimeNs = Utils.elapsedTimeNs(startTimeNs, System.nanoTime()); if(stats != null) { stats.recordSyncOpTimeNs(destination, opTimeNs); } if(logger.isDebugEnabled()) { logger.debug("Sync request end, type: " + operationName + " requestRef: " + System.identityHashCode(delegate) + " totalTimeNs: " + opTimeNs + " start time: " + startTimeMs + " end time: " + System.currentTimeMillis() + " client:" + clientRequestExecutor.getSocketChannel().socket().getLocalAddress() + ":" + clientRequestExecutor.getSocketChannel().socket().getLocalPort() + " server: " + clientRequestExecutor.getSocketChannel() .socket() .getRemoteSocketAddress() + " outcome: " + debugMsgStr); } pool.checkin(destination, clientRequestExecutor); } }
java
{ "resource": "" }
q4605
SocketStore.requestAsync
train
private <T> void requestAsync(ClientRequest<T> delegate, NonblockingStoreCallback callback, long timeoutMs, String operationName) { pool.submitAsync(this.destination, delegate, callback, timeoutMs, operationName); }
java
{ "resource": "" }
q4606
StreamingStats.getAvgFetchKeysNetworkTimeMs
train
@JmxGetter(name = "avgFetchKeysNetworkTimeMs", description = "average time spent on network, for fetch keys") public double getAvgFetchKeysNetworkTimeMs() { return networkTimeCounterMap.get(Operation.FETCH_KEYS).getAvgEventValue() / Time.NS_PER_MS; }
java
{ "resource": "" }
q4607
StreamingStats.getAvgFetchEntriesNetworkTimeMs
train
@JmxGetter(name = "avgFetchEntriesNetworkTimeMs", description = "average time spent on network, for streaming operations") public double getAvgFetchEntriesNetworkTimeMs() { return networkTimeCounterMap.get(Operation.FETCH_ENTRIES).getAvgEventValue() / Time.NS_PER_MS; }
java
{ "resource": "" }
q4608
StreamingStats.getAvgUpdateEntriesNetworkTimeMs
train
@JmxGetter(name = "avgUpdateEntriesNetworkTimeMs", description = "average time spent on network, for streaming operations") public double getAvgUpdateEntriesNetworkTimeMs() { return networkTimeCounterMap.get(Operation.UPDATE_ENTRIES).getAvgEventValue() / Time.NS_PER_MS; }
java
{ "resource": "" }
q4609
StreamingStats.getAvgSlopUpdateNetworkTimeMs
train
@JmxGetter(name = "avgSlopUpdateNetworkTimeMs", description = "average time spent on network, for streaming operations") public double getAvgSlopUpdateNetworkTimeMs() { return networkTimeCounterMap.get(Operation.SLOP_UPDATE).getAvgEventValue() / Time.NS_PER_MS; }
java
{ "resource": "" }
q4610
SerializationUtils.getJavaClassFromSchemaInfo
train
public static String getJavaClassFromSchemaInfo(String schemaInfo) { final String ONLY_JAVA_CLIENTS_SUPPORTED = "Only Java clients are supported currently, so the format of the schema-info should be: <schema-info>java=foo.Bar</schema-info> where foo.Bar is the fully qualified name of the message."; if(StringUtils.isEmpty(schemaInfo)) throw new IllegalArgumentException("This serializer requires a non-empty schema-info."); String[] languagePairs = StringUtils.split(schemaInfo, ','); if(languagePairs.length > 1) throw new IllegalArgumentException(ONLY_JAVA_CLIENTS_SUPPORTED); String[] javaPair = StringUtils.split(languagePairs[0], '='); if(javaPair.length != 2 || !javaPair[0].trim().equals("java")) throw new IllegalArgumentException(ONLY_JAVA_CLIENTS_SUPPORTED); return javaPair[1].trim(); }
java
{ "resource": "" }
q4611
StoreDefinitionUtils.filterStores
train
public static List<StoreDefinition> filterStores(List<StoreDefinition> storeDefs, final boolean isReadOnly) { List<StoreDefinition> filteredStores = Lists.newArrayList(); for(StoreDefinition storeDef: storeDefs) { if(storeDef.getType().equals(ReadOnlyStorageConfiguration.TYPE_NAME) == isReadOnly) { filteredStores.add(storeDef); } } return filteredStores; }
java
{ "resource": "" }
q4612
StoreDefinitionUtils.getStoreNames
train
public static List<String> getStoreNames(List<StoreDefinition> storeDefList) { List<String> storeList = new ArrayList<String>(); for(StoreDefinition def: storeDefList) { storeList.add(def.getName()); } return storeList; }
java
{ "resource": "" }
q4613
StoreDefinitionUtils.getStoreNamesSet
train
public static Set<String> getStoreNamesSet(List<StoreDefinition> storeDefList) { HashSet<String> storeSet = new HashSet<String>(); for(StoreDefinition def: storeDefList) { storeSet.add(def.getName()); } return storeSet; }
java
{ "resource": "" }
q4614
StoreDefinitionUtils.getUniqueStoreDefinitionsWithCounts
train
public static HashMap<StoreDefinition, Integer> getUniqueStoreDefinitionsWithCounts(List<StoreDefinition> storeDefs) { HashMap<StoreDefinition, Integer> uniqueStoreDefs = Maps.newHashMap(); for(StoreDefinition storeDef: storeDefs) { if(uniqueStoreDefs.isEmpty()) { uniqueStoreDefs.put(storeDef, 1); } else { StoreDefinition sameStore = null; // Go over all the other stores to find if this is unique for(StoreDefinition uniqueStoreDef: uniqueStoreDefs.keySet()) { if(uniqueStoreDef.getReplicationFactor() == storeDef.getReplicationFactor() && uniqueStoreDef.getRoutingStrategyType() .compareTo(storeDef.getRoutingStrategyType()) == 0) { // Further check for the zone routing case if(uniqueStoreDef.getRoutingStrategyType() .compareTo(RoutingStrategyType.ZONE_STRATEGY) == 0) { boolean zonesSame = true; for(int zoneId: uniqueStoreDef.getZoneReplicationFactor().keySet()) { if(storeDef.getZoneReplicationFactor().get(zoneId) == null || storeDef.getZoneReplicationFactor().get(zoneId) != uniqueStoreDef.getZoneReplicationFactor() .get(zoneId)) { zonesSame = false; break; } } if(zonesSame) { sameStore = uniqueStoreDef; } } else { sameStore = uniqueStoreDef; } if(sameStore != null) { // Bump up the count int currentCount = uniqueStoreDefs.get(sameStore); uniqueStoreDefs.put(sameStore, currentCount + 1); break; } } } if(sameStore == null) { // New store uniqueStoreDefs.put(storeDef, 1); } } } return uniqueStoreDefs; }
java
{ "resource": "" }
q4615
StoreDefinitionUtils.isAvroSchema
train
public static boolean isAvroSchema(String serializerName) { return serializerName.equals(AVRO_GENERIC_VERSIONED_TYPE_NAME) || serializerName.equals(AVRO_GENERIC_TYPE_NAME) || serializerName.equals(AVRO_REFLECTIVE_TYPE_NAME) || serializerName.equals(AVRO_SPECIFIC_TYPE_NAME); }
java
{ "resource": "" }
q4616
StoreDefinitionUtils.validateIfAvroSchema
train
private static void validateIfAvroSchema(SerializerDefinition serializerDef) { if(serializerDef.getName().equals(AVRO_GENERIC_VERSIONED_TYPE_NAME) || serializerDef.getName().equals(AVRO_GENERIC_TYPE_NAME)) { SchemaEvolutionValidator.validateAllAvroSchemas(serializerDef); // check backwards compatibility if needed if(serializerDef.getName().equals(AVRO_GENERIC_VERSIONED_TYPE_NAME)) { SchemaEvolutionValidator.checkSchemaCompatibility(serializerDef); } } }
java
{ "resource": "" }
q4617
Histogram.insert
train
public synchronized void insert(long data) { resetIfNeeded(); long index = 0; if(data >= this.upperBound) { index = nBuckets - 1; } else if(data < 0) { logger.error(data + " can't be bucketed because it is negative!"); return; } else { index = data / step; } if(index < 0 || index >= nBuckets) { // This should be dead code. Defending against code changes in // future. logger.error(data + " can't be bucketed because index is not in range [0,nBuckets)."); return; } buckets[(int) index]++; sum += data; size++; }
java
{ "resource": "" }
q4618
RebootstrappingStore.checkAndAddNodeStore
train
private void checkAndAddNodeStore() { for(Node node: metadata.getCluster().getNodes()) { if(!routedStore.getInnerStores().containsKey(node.getId())) { if(!storeRepository.hasNodeStore(getName(), node.getId())) { storeRepository.addNodeStore(node.getId(), createNodeStore(node)); } routedStore.getInnerStores().put(node.getId(), storeRepository.getNodeStore(getName(), node.getId())); } } }
java
{ "resource": "" }
q4619
ResourcePoolConfig.setTimeout
train
public ResourcePoolConfig setTimeout(long timeout, TimeUnit unit) { if(timeout < 0) throw new IllegalArgumentException("The timeout must be a non-negative number."); this.timeoutNs = TimeUnit.NANOSECONDS.convert(timeout, unit); return this; }
java
{ "resource": "" }
q4620
KratiStorageEngine.assembleValues
train
private byte[] assembleValues(List<Versioned<byte[]>> values) throws IOException { ByteArrayOutputStream stream = new ByteArrayOutputStream(); DataOutputStream dataStream = new DataOutputStream(stream); for(Versioned<byte[]> value: values) { byte[] object = value.getValue(); dataStream.writeInt(object.length); dataStream.write(object); VectorClock clock = (VectorClock) value.getVersion(); dataStream.writeInt(clock.sizeInBytes()); dataStream.write(clock.toBytes()); } return stream.toByteArray(); }
java
{ "resource": "" }
q4621
KratiStorageEngine.disassembleValues
train
private List<Versioned<byte[]>> disassembleValues(byte[] values) throws IOException { if(values == null) return new ArrayList<Versioned<byte[]>>(0); List<Versioned<byte[]>> returnList = new ArrayList<Versioned<byte[]>>(); ByteArrayInputStream stream = new ByteArrayInputStream(values); DataInputStream dataStream = new DataInputStream(stream); while(dataStream.available() > 0) { byte[] object = new byte[dataStream.readInt()]; dataStream.readFully(object); /* readFully: plain read() may return fewer bytes than requested */ byte[] clockBytes = new byte[dataStream.readInt()]; dataStream.readFully(clockBytes); VectorClock clock = new VectorClock(clockBytes); returnList.add(new Versioned<byte[]>(object, clock)); } return returnList; }
java
{ "resource": "" }
q4622
PartitionScanFetchStreamRequestHandler.statusInfoMessage
train
protected void statusInfoMessage(final String tag) { if(logger.isInfoEnabled()) { logger.info(tag + " : [partition: " + currentPartition + ", partitionFetched: " + currentPartitionFetched + "] for store " + storageEngine.getName()); } }
java
{ "resource": "" }
q4623
StreamingSlopPusherJob.slopSize
train
private int slopSize(Versioned<Slop> slopVersioned) { int nBytes = 0; Slop slop = slopVersioned.getValue(); nBytes += slop.getKey().length(); nBytes += ((VectorClock) slopVersioned.getVersion()).sizeInBytes(); switch(slop.getOperation()) { case PUT: { nBytes += slop.getValue().length; break; } case DELETE: { break; } default: logger.error("Unknown slop operation: " + slop.getOperation()); } return nBytes; }
java
{ "resource": "" }
q4624
RESTClientFactory.getStoreClient
train
@Override public <K, V> StoreClient<K, V> getStoreClient(final String storeName, final InconsistencyResolver<Versioned<V>> resolver) { // wrap it in LazyStoreClient here so any direct calls to this method // returns a lazy client return new LazyStoreClient<K, V>(new Callable<StoreClient<K, V>>() { @Override public StoreClient<K, V> call() throws Exception { Store<K, V, Object> clientStore = getRawStore(storeName, resolver); return new RESTClient<K, V>(storeName, clientStore); } }, true); }
java
{ "resource": "" }
q4625
ConsistentRoutingStrategy.abs
train
private static int abs(int a) { if(a >= 0) return a; else if(a != Integer.MIN_VALUE) return -a; return Integer.MAX_VALUE; }
java
{ "resource": "" }
q4626
ConsistentRoutingStrategy.getMasterPartition
train
@Override public Integer getMasterPartition(byte[] key) { return abs(hash.hash(key)) % (Math.max(1, this.partitionToNode.length)); }
java
{ "resource": "" }
q4627
SlopPusherJob.isSlopDead
train
protected boolean isSlopDead(Cluster cluster, Set<String> storeNames, Slop slop) { // destination node no longer exists if(!cluster.getNodeIds().contains(slop.getNodeId())) { return true; } // destination store no longer exists if(!storeNames.contains(slop.getStoreName())) { return true; } // otherwise the slop is alive return false; }
java
{ "resource": "" }
q4628
SlopPusherJob.handleDeadSlop
train
protected void handleDeadSlop(SlopStorageEngine slopStorageEngine, Pair<ByteArray, Versioned<Slop>> keyAndVal) { Versioned<Slop> versioned = keyAndVal.getSecond(); // If configured to delete the dead slop if(voldemortConfig.getAutoPurgeDeadSlops()) { slopStorageEngine.delete(keyAndVal.getFirst(), versioned.getVersion()); if(getLogger().isDebugEnabled()) { getLogger().debug("Auto purging dead slop :" + versioned.getValue()); } } else { // Keep ignoring the dead slops if(getLogger().isDebugEnabled()) { getLogger().debug("Ignoring dead slop :" + versioned.getValue()); } } }
java
{ "resource": "" }
q4629
ClientRequestExecutorFactory.destroy
train
@Override public void destroy(SocketDestination dest, ClientRequestExecutor clientRequestExecutor) throws Exception { clientRequestExecutor.close(); int numDestroyed = destroyed.incrementAndGet(); if(stats != null) { stats.incrementCount(dest, ClientSocketStats.Tracked.CONNECTION_DESTROYED_EVENT); } if(logger.isDebugEnabled()) logger.debug("Destroyed socket " + numDestroyed + " connection to " + dest.getHost() + ":" + dest.getPort()); }
java
{ "resource": "" }
q4630
ClientConfigUtil.readSingleClientConfigAvro
train
@SuppressWarnings("unchecked") public static Properties readSingleClientConfigAvro(String configAvro) { Properties props = new Properties(); try { JsonDecoder decoder = new JsonDecoder(CLIENT_CONFIG_AVRO_SCHEMA, configAvro); GenericDatumReader<Object> datumReader = new GenericDatumReader<Object>(CLIENT_CONFIG_AVRO_SCHEMA); Map<Utf8, Utf8> flowMap = (Map<Utf8, Utf8>) datumReader.read(null, decoder); for(Utf8 key: flowMap.keySet()) { props.put(key.toString(), flowMap.get(key).toString()); } } catch(Exception e) { e.printStackTrace(); } return props; }
java
{ "resource": "" }
q4631
ClientConfigUtil.readMultipleClientConfigAvro
train
@SuppressWarnings("unchecked") public static Map<String, Properties> readMultipleClientConfigAvro(String configAvro) { Map<String, Properties> mapStoreToProps = Maps.newHashMap(); try { JsonDecoder decoder = new JsonDecoder(CLIENT_CONFIGS_AVRO_SCHEMA, configAvro); GenericDatumReader<Object> datumReader = new GenericDatumReader<Object>(CLIENT_CONFIGS_AVRO_SCHEMA); Map<Utf8, Map<Utf8, Utf8>> storeConfigs = (Map<Utf8, Map<Utf8, Utf8>>) datumReader.read(null, decoder); // Store config props to return back for(Utf8 storeName: storeConfigs.keySet()) { Properties props = new Properties(); Map<Utf8, Utf8> singleConfig = storeConfigs.get(storeName); for(Utf8 key: singleConfig.keySet()) { props.put(key.toString(), singleConfig.get(key).toString()); } if(storeName == null || storeName.length() == 0) { throw new Exception("Invalid store name found!"); } mapStoreToProps.put(storeName.toString(), props); } } catch(Exception e) { e.printStackTrace(); } return mapStoreToProps; }
java
{ "resource": "" }
q4632
ClientConfigUtil.writeSingleClientConfigAvro
train
public static String writeSingleClientConfigAvro(Properties props) { // TODO: Use a dedicated json lib. We shouldn't be manually manipulating json... String avroConfig = ""; boolean firstProp = true; for(String key: props.stringPropertyNames()) { if(firstProp) { firstProp = false; } else { avroConfig = avroConfig + ",\n"; } avroConfig = avroConfig + "\t\t\"" + key + "\": \"" + props.getProperty(key) + "\""; } if(avroConfig.isEmpty()) { return "{}"; } else { return "{\n" + avroConfig + "\n\t}"; } }
java
{ "resource": "" }
q4633
ClientConfigUtil.writeMultipleClientConfigAvro
train
public static String writeMultipleClientConfigAvro(Map<String, Properties> mapStoreToProps) { // TODO: Use a dedicated json lib. We shouldn't be manually manipulating json... String avroConfig = ""; boolean firstStore = true; for(String storeName: mapStoreToProps.keySet()) { if(firstStore) { firstStore = false; } else { avroConfig = avroConfig + ",\n"; } Properties props = mapStoreToProps.get(storeName); avroConfig = avroConfig + "\t\"" + storeName + "\": " + writeSingleClientConfigAvro(props); } return "{\n" + avroConfig + "\n}"; }
java
{ "resource": "" }
q4634
ClientConfigUtil.compareSingleClientConfigAvro
train
public static Boolean compareSingleClientConfigAvro(String configAvro1, String configAvro2) { Properties props1 = readSingleClientConfigAvro(configAvro1); Properties props2 = readSingleClientConfigAvro(configAvro2); return props1.equals(props2); }
java
{ "resource": "" }
q4635
ClientConfigUtil.compareMultipleClientConfigAvro
train
public static Boolean compareMultipleClientConfigAvro(String configAvro1, String configAvro2) { Map<String, Properties> mapStoreToProps1 = readMultipleClientConfigAvro(configAvro1); Map<String, Properties> mapStoreToProps2 = readMultipleClientConfigAvro(configAvro2); Set<String> keySet1 = mapStoreToProps1.keySet(); Set<String> keySet2 = mapStoreToProps2.keySet(); if(!keySet1.equals(keySet2)) { return false; } for(String storeName: keySet1) { Properties props1 = mapStoreToProps1.get(storeName); Properties props2 = mapStoreToProps2.get(storeName); if(!props1.equals(props2)) { return false; } } return true; }
java
{ "resource": "" }
q4636
AdminCommandAsyncJob.printHelp
train
public static void printHelp(PrintStream stream) { stream.println(); stream.println("Voldemort Admin Tool Async-Job Commands"); stream.println("---------------------------------------"); stream.println("list Get async job list from nodes."); stream.println("stop Stop async jobs on one node."); stream.println(); stream.println("To get more information on each command,"); stream.println("please try \'help async-job <command-name>\'."); stream.println(); }
java
{ "resource": "" }
q4637
BdbStorageConfiguration.removeStorageEngine
train
@Override public void removeStorageEngine(StorageEngine<ByteArray, byte[], byte[]> engine) { String storeName = engine.getName(); BdbStorageEngine bdbEngine = (BdbStorageEngine) engine; synchronized(lock) { // Only cleanup the environment if it is per store. We cannot // cleanup a shared 'Environment' object if(useOneEnvPerStore) { Environment environment = this.environments.get(storeName); if(environment == null) { // Nothing to clean up. return; } // Remove from the set of unreserved stores if needed. if(this.unreservedStores.remove(environment)) { logger.info("Removed environment for store name: " + storeName + " from unreserved stores"); } else { logger.info("No environment found in unreserved stores for store name: " + storeName); } // Try to delete the BDB directory associated File bdbDir = environment.getHome(); if(bdbDir.exists() && bdbDir.isDirectory()) { String bdbDirPath = bdbDir.getPath(); try { FileUtils.deleteDirectory(bdbDir); logger.info("Successfully deleted BDB directory : " + bdbDirPath + " for store name: " + storeName); } catch(IOException e) { logger.error("Unable to delete BDB directory: " + bdbDirPath + " for store name: " + storeName); } } // Remove the reference to BdbEnvironmentStats, which holds a // reference to the Environment BdbEnvironmentStats bdbEnvStats = bdbEngine.getBdbEnvironmentStats(); this.aggBdbStats.unTrackEnvironment(bdbEnvStats); // Unregister the JMX bean for Environment if(voldemortConfig.isJmxEnabled()) { ObjectName name = JmxUtils.createObjectName(JmxUtils.getPackageName(bdbEnvStats.getClass()), storeName); // Un-register the environment stats mbean JmxUtils.unregisterMbean(name); } // Cleanup the environment environment.close(); this.environments.remove(storeName); logger.info("Successfully closed the environment for store name : " + storeName); } } }
java
{ "resource": "" }
q4638
BdbStorageConfiguration.cleanLogs
train
@JmxOperation(description = "Forcefully invoke the log cleaning") public void cleanLogs() { synchronized(lock) { try { for(Environment environment: environments.values()) { environment.cleanLog(); } } catch(DatabaseException e) { throw new VoldemortException(e); } } }
java
{ "resource": "" }
q4639
BdbStorageConfiguration.update
train
public void update(StoreDefinition storeDef) { if(!useOneEnvPerStore) throw new VoldemortException("Memory foot print can be set only when using different environments per store"); String storeName = storeDef.getName(); Environment environment = environments.get(storeName); // change reservation amount of reserved store if(!unreservedStores.contains(environment) && storeDef.hasMemoryFootprint()) { EnvironmentMutableConfig mConfig = environment.getMutableConfig(); long currentCacheSize = mConfig.getCacheSize(); long newCacheSize = storeDef.getMemoryFootprintMB() * ByteUtils.BYTES_PER_MB; if(currentCacheSize != newCacheSize) { long newReservedCacheSize = this.reservedCacheSize - currentCacheSize + newCacheSize; // check that we leave a 'minimum' shared cache if((voldemortConfig.getBdbCacheSize() - newReservedCacheSize) < voldemortConfig.getBdbMinimumSharedCache()) { throw new StorageInitializationException("Reservation of " + storeDef.getMemoryFootprintMB() + " MB for store " + storeName + " violates minimum shared cache size of " + voldemortConfig.getBdbMinimumSharedCache()); } this.reservedCacheSize = newReservedCacheSize; adjustCacheSizes(); mConfig.setCacheSize(newCacheSize); environment.setMutableConfig(mConfig); logger.info("Setting private cache for store " + storeDef.getName() + " to " + newCacheSize); } } else { // we cannot support changing a reserved store to unreserved or vice // versa since the sharedCache param is not mutable throw new VoldemortException("Cannot switch between shared and private cache dynamically"); } }
java
{ "resource": "" }
q4640
Repartitioner.getBalancedNumberOfPrimaryPartitionsPerNode
train
public static HashMap<Integer, List<Integer>> getBalancedNumberOfPrimaryPartitionsPerNode(final Cluster nextCandidateCluster, Map<Integer, Integer> targetPartitionsPerZone) { HashMap<Integer, List<Integer>> numPartitionsPerNode = Maps.newHashMap(); for(Integer zoneId: nextCandidateCluster.getZoneIds()) { List<Integer> partitionsOnNode = Utils.distributeEvenlyIntoList(nextCandidateCluster.getNumberOfNodesInZone(zoneId), targetPartitionsPerZone.get(zoneId)); numPartitionsPerNode.put(zoneId, partitionsOnNode); } return numPartitionsPerNode; }
java
{ "resource": "" }
q4641
Repartitioner.getDonorsAndStealersForBalance
train
public static Pair<HashMap<Node, Integer>, HashMap<Node, Integer>> getDonorsAndStealersForBalance(final Cluster nextCandidateCluster, Map<Integer, List<Integer>> numPartitionsPerNodePerZone) { HashMap<Node, Integer> donorNodes = Maps.newHashMap(); HashMap<Node, Integer> stealerNodes = Maps.newHashMap(); HashMap<Integer, Integer> numNodesAssignedInZone = Maps.newHashMap(); for(Integer zoneId: nextCandidateCluster.getZoneIds()) { numNodesAssignedInZone.put(zoneId, 0); } for(Node node: nextCandidateCluster.getNodes()) { int zoneId = node.getZoneId(); int offset = numNodesAssignedInZone.get(zoneId); numNodesAssignedInZone.put(zoneId, offset + 1); int numPartitions = numPartitionsPerNodePerZone.get(zoneId).get(offset); if(numPartitions < node.getNumberOfPartitions()) { donorNodes.put(node, numPartitions); } else if(numPartitions > node.getNumberOfPartitions()) { stealerNodes.put(node, numPartitions); } } // Print out donor/stealer information for(Node node: donorNodes.keySet()) { System.out.println("Donor Node: " + node.getId() + ", zoneId " + node.getZoneId() + ", numPartitions " + node.getNumberOfPartitions() + ", target number of partitions " + donorNodes.get(node)); } for(Node node: stealerNodes.keySet()) { System.out.println("Stealer Node: " + node.getId() + ", zoneId " + node.getZoneId() + ", numPartitions " + node.getNumberOfPartitions() + ", target number of partitions " + stealerNodes.get(node)); } return new Pair<HashMap<Node, Integer>, HashMap<Node, Integer>>(donorNodes, stealerNodes); }
java
{ "resource": "" }
q4642
Repartitioner.repeatedlyBalanceContiguousPartitionsPerZone
train
public static Cluster repeatedlyBalanceContiguousPartitionsPerZone(final Cluster nextCandidateCluster, final int maxContiguousPartitionsPerZone) { System.out.println("Looping to evenly balance partitions across zones while limiting contiguous partitions"); // This loop is hard to make definitive. I.e., there are corner cases // for small clusters and/or clusters with few partitions for which it // may be impossible to achieve tight limits on contiguous run lengths. // Therefore, a constant number of loops are run. Note that once the // goal is reached, the loop becomes a no-op. int repeatContigBalance = 10; Cluster returnCluster = nextCandidateCluster; for(int i = 0; i < repeatContigBalance; i++) { returnCluster = balanceContiguousPartitionsPerZone(returnCluster, maxContiguousPartitionsPerZone); returnCluster = balancePrimaryPartitions(returnCluster, false); System.out.println("Completed round of balancing contiguous partitions: round " + (i + 1) + " of " + repeatContigBalance); } return returnCluster; }
java
{ "resource": "" }
q4643
Repartitioner.balanceContiguousPartitionsPerZone
train
public static Cluster balanceContiguousPartitionsPerZone(final Cluster nextCandidateCluster, final int maxContiguousPartitionsPerZone) { System.out.println("Balance number of contiguous partitions within a zone."); System.out.println("numPartitionsPerZone"); for(int zoneId: nextCandidateCluster.getZoneIds()) { System.out.println(zoneId + " : " + nextCandidateCluster.getNumberOfPartitionsInZone(zoneId)); } System.out.println("numNodesPerZone"); for(int zoneId: nextCandidateCluster.getZoneIds()) { System.out.println(zoneId + " : " + nextCandidateCluster.getNumberOfNodesInZone(zoneId)); } // Break up contiguous partitions within each zone HashMap<Integer, List<Integer>> partitionsToRemoveFromZone = Maps.newHashMap(); System.out.println("Contiguous partitions"); for(Integer zoneId: nextCandidateCluster.getZoneIds()) { System.out.println("\tZone: " + zoneId); Map<Integer, Integer> partitionToRunLength = PartitionBalanceUtils.getMapOfContiguousPartitions(nextCandidateCluster, zoneId); List<Integer> partitionsToRemoveFromThisZone = new ArrayList<Integer>(); for(Map.Entry<Integer, Integer> entry: partitionToRunLength.entrySet()) { if(entry.getValue() > maxContiguousPartitionsPerZone) { List<Integer> contiguousPartitions = new ArrayList<Integer>(entry.getValue()); for(int partitionId = entry.getKey(); partitionId < entry.getKey() + entry.getValue(); partitionId++) { contiguousPartitions.add(partitionId % nextCandidateCluster.getNumberOfPartitions()); } System.out.println("Contiguous partitions: " + contiguousPartitions); partitionsToRemoveFromThisZone.addAll(Utils.removeItemsToSplitListEvenly(contiguousPartitions, maxContiguousPartitionsPerZone)); } } partitionsToRemoveFromZone.put(zoneId, partitionsToRemoveFromThisZone); System.out.println("\t\tPartitions to remove: " + partitionsToRemoveFromThisZone); } Cluster returnCluster = Cluster.cloneCluster(nextCandidateCluster); Random r = new Random(); for(int zoneId: returnCluster.getZoneIds()) { for(int partitionId: partitionsToRemoveFromZone.get(zoneId)) { // Pick a random other zone Id List<Integer> otherZoneIds = new ArrayList<Integer>(); for(int otherZoneId: returnCluster.getZoneIds()) { if(otherZoneId != zoneId) { otherZoneIds.add(otherZoneId); } } int whichOtherZoneId = otherZoneIds.get(r.nextInt(otherZoneIds.size())); // Pick a random node from other zone ID int whichNodeOffset = r.nextInt(returnCluster.getNumberOfNodesInZone(whichOtherZoneId)); int whichNodeId = new ArrayList<Integer>(returnCluster.getNodeIdsInZone(whichOtherZoneId)).get(whichNodeOffset); // Steal partition from one zone to another! returnCluster = UpdateClusterUtils.createUpdatedCluster(returnCluster, whichNodeId, Lists.newArrayList(partitionId)); } } return returnCluster; }
java
{ "resource": "" }
q4644
Repartitioner.swapPartitions
train
public static Cluster swapPartitions(final Cluster nextCandidateCluster, final int nodeIdA, final int partitionIdA, final int nodeIdB, final int partitionIdB) { Cluster returnCluster = Cluster.cloneCluster(nextCandidateCluster); // Swap partitions between nodes! returnCluster = UpdateClusterUtils.createUpdatedCluster(returnCluster, nodeIdA, Lists.newArrayList(partitionIdB)); returnCluster = UpdateClusterUtils.createUpdatedCluster(returnCluster, nodeIdB, Lists.newArrayList(partitionIdA)); return returnCluster; }
java
{ "resource": "" }
q4645
Repartitioner.swapRandomPartitionsWithinZone
train
public static Cluster swapRandomPartitionsWithinZone(final Cluster nextCandidateCluster, final int zoneId) { Cluster returnCluster = Cluster.cloneCluster(nextCandidateCluster); Random r = new Random(); List<Integer> nodeIdsInZone = new ArrayList<Integer>(nextCandidateCluster.getNodeIdsInZone(zoneId)); if(nodeIdsInZone.size() == 0) { return returnCluster; } // Select random stealer node int stealerNodeOffset = r.nextInt(nodeIdsInZone.size()); Integer stealerNodeId = nodeIdsInZone.get(stealerNodeOffset); // Select random stealer partition List<Integer> stealerPartitions = returnCluster.getNodeById(stealerNodeId).getPartitionIds(); if(stealerPartitions.size() == 0) { return nextCandidateCluster; } int stealerPartitionOffset = r.nextInt(stealerPartitions.size()); int stealerPartitionId = stealerPartitions.get(stealerPartitionOffset); // Select random donor node List<Integer> donorNodeIds = new ArrayList<Integer>(); donorNodeIds.addAll(nodeIdsInZone); donorNodeIds.remove(stealerNodeId); if(donorNodeIds.isEmpty()) { // No donor nodes! return returnCluster; } int donorIdOffset = r.nextInt(donorNodeIds.size()); Integer donorNodeId = donorNodeIds.get(donorIdOffset); // Select random donor partition List<Integer> donorPartitions = returnCluster.getNodeById(donorNodeId).getPartitionIds(); if(donorPartitions.isEmpty()) { /* guard: r.nextInt(0) would throw if the donor owns no partitions */ return returnCluster; } int donorPartitionOffset = r.nextInt(donorPartitions.size()); int donorPartitionId = donorPartitions.get(donorPartitionOffset); return swapPartitions(returnCluster, stealerNodeId, stealerPartitionId, donorNodeId, donorPartitionId); }
java
{ "resource": "" }
q4646
Repartitioner.randomShufflePartitions
train
public static Cluster randomShufflePartitions(final Cluster nextCandidateCluster, final int randomSwapAttempts, final int randomSwapSuccesses, final List<Integer> randomSwapZoneIds, List<StoreDefinition> storeDefs) { List<Integer> zoneIds = null; if(randomSwapZoneIds.isEmpty()) { zoneIds = new ArrayList<Integer>(nextCandidateCluster.getZoneIds()); } else { zoneIds = new ArrayList<Integer>(randomSwapZoneIds); } List<Integer> nodeIds = new ArrayList<Integer>(); Cluster returnCluster = Cluster.cloneCluster(nextCandidateCluster); double currentUtility = new PartitionBalance(returnCluster, storeDefs).getUtility(); int successes = 0; for(int i = 0; i < randomSwapAttempts; i++) { // Iterate over zone ids to decide which node ids to include for // intra-zone swapping. // In future, if there is a need to support inter-zone swapping, // then just remove the // zone specific logic that populates nodeIdSet and add all nodes // from across all zones. int zoneIdOffset = i % zoneIds.size(); Set<Integer> nodeIdSet = nextCandidateCluster.getNodeIdsInZone(zoneIds.get(zoneIdOffset)); nodeIds = new ArrayList<Integer>(nodeIdSet); Collections.shuffle(zoneIds, new Random(System.currentTimeMillis())); Cluster shuffleResults = swapRandomPartitionsAmongNodes(returnCluster, nodeIds); double nextUtility = new PartitionBalance(shuffleResults, storeDefs).getUtility(); if(nextUtility < currentUtility) { System.out.println("Swap improved max-min ratio: " + currentUtility + " -> " + nextUtility + " (improvement " + successes + " on swap attempt " + i + ")"); successes++; returnCluster = shuffleResults; currentUtility = nextUtility; } if(successes >= randomSwapSuccesses) { // Enough successes, move on. break; } } return returnCluster; }
java
{ "resource": "" }
q4647
Repartitioner.swapGreedyRandomPartitions
train
public static Cluster swapGreedyRandomPartitions(final Cluster nextCandidateCluster, final List<Integer> nodeIds, final int greedySwapMaxPartitionsPerNode, final int greedySwapMaxPartitionsPerZone, List<StoreDefinition> storeDefs) { System.out.println("GreedyRandom : nodeIds:" + nodeIds); Cluster returnCluster = Cluster.cloneCluster(nextCandidateCluster); double currentUtility = new PartitionBalance(returnCluster, storeDefs).getUtility(); int nodeIdA = -1; int nodeIdB = -1; int partitionIdA = -1; int partitionIdB = -1; for(int nodeIdAPrime: nodeIds) { System.out.println("GreedyRandom : processing nodeId:" + nodeIdAPrime); List<Integer> partitionIdsAPrime = new ArrayList<Integer>(); partitionIdsAPrime.addAll(returnCluster.getNodeById(nodeIdAPrime).getPartitionIds()); Collections.shuffle(partitionIdsAPrime); int maxPartitionsInAPrime = Math.min(greedySwapMaxPartitionsPerNode, partitionIdsAPrime.size()); for(int offsetAPrime = 0; offsetAPrime < maxPartitionsInAPrime; offsetAPrime++) { Integer partitionIdAPrime = partitionIdsAPrime.get(offsetAPrime); List<Pair<Integer, Integer>> partitionIdsZone = new ArrayList<Pair<Integer, Integer>>(); for(int nodeIdBPrime: nodeIds) { if(nodeIdBPrime == nodeIdAPrime) continue; for(Integer partitionIdBPrime: returnCluster.getNodeById(nodeIdBPrime) .getPartitionIds()) { partitionIdsZone.add(new Pair<Integer, Integer>(nodeIdBPrime, partitionIdBPrime)); } } Collections.shuffle(partitionIdsZone); int maxPartitionsInZone = Math.min(greedySwapMaxPartitionsPerZone, partitionIdsZone.size()); for(int offsetZone = 0; offsetZone < maxPartitionsInZone; offsetZone++) { Integer nodeIdBPrime = partitionIdsZone.get(offsetZone).getFirst(); Integer partitionIdBPrime = partitionIdsZone.get(offsetZone).getSecond(); Cluster swapResult = swapPartitions(returnCluster, nodeIdAPrime, partitionIdAPrime, nodeIdBPrime, partitionIdBPrime); double swapUtility = new PartitionBalance(swapResult, storeDefs).getUtility(); if(swapUtility < currentUtility) { currentUtility = swapUtility; System.out.println(" -> " + currentUtility); nodeIdA = nodeIdAPrime; partitionIdA = partitionIdAPrime; nodeIdB = nodeIdBPrime; partitionIdB = partitionIdBPrime; } } } } if(nodeIdA == -1) { return returnCluster; } return swapPartitions(returnCluster, nodeIdA, partitionIdA, nodeIdB, partitionIdB); }
java
{ "resource": "" }
q4648
Repartitioner.greedyShufflePartitions
train
public static Cluster greedyShufflePartitions(final Cluster nextCandidateCluster, final int greedyAttempts, final int greedySwapMaxPartitionsPerNode, final int greedySwapMaxPartitionsPerZone, List<Integer> greedySwapZoneIds, List<StoreDefinition> storeDefs) { List<Integer> zoneIds = null; if(greedySwapZoneIds.isEmpty()) { zoneIds = new ArrayList<Integer>(nextCandidateCluster.getZoneIds()); } else { zoneIds = new ArrayList<Integer>(greedySwapZoneIds); } List<Integer> nodeIds = new ArrayList<Integer>(); Cluster returnCluster = Cluster.cloneCluster(nextCandidateCluster); double currentUtility = new PartitionBalance(returnCluster, storeDefs).getUtility(); for(int i = 0; i < greedyAttempts; i++) { // Iterate over zone ids to decide which node ids to include for // intra-zone swapping. // In future, if there is a need to support inter-zone swapping, // then just remove the // zone specific logic that populates nodeIdSet and add all nodes // from across all zones. int zoneIdOffset = i % zoneIds.size(); Set<Integer> nodeIdSet = nextCandidateCluster.getNodeIdsInZone(zoneIds.get(zoneIdOffset)); nodeIds = new ArrayList<Integer>(nodeIdSet); Collections.shuffle(zoneIds, new Random(System.currentTimeMillis())); Cluster shuffleResults = swapGreedyRandomPartitions(returnCluster, nodeIds, greedySwapMaxPartitionsPerNode, greedySwapMaxPartitionsPerZone, storeDefs); double nextUtility = new PartitionBalance(shuffleResults, storeDefs).getUtility(); System.out.println("Swap improved max-min ratio: " + currentUtility + " -> " + nextUtility + " (swap attempt " + i + " in zone " + zoneIds.get(zoneIdOffset) + ")"); returnCluster = shuffleResults; currentUtility = nextUtility; } return returnCluster; }
java
{ "resource": "" }
q4649
RestService.stopInner
train
@Override protected void stopInner() { /* * TODO REST-Server Need to handle inflight operations. What happens to * the existing async operations when a channel.close() is issued in * Netty? */ if(this.nettyServerChannel != null) { this.nettyServerChannel.close(); } if(allChannels != null) { allChannels.close().awaitUninterruptibly(); } this.bootstrap.releaseExternalResources(); }
java
{ "resource": "" }
q4650
RestServerRequestHandler.parseZoneId
train
protected int parseZoneId() { int result = -1; String zoneIdStr = this.request.getHeader(RestMessageHeaders.X_VOLD_ZONE_ID); if(zoneIdStr != null) { try { int zoneId = Integer.parseInt(zoneIdStr); if(zoneId < 0) { logger.error("ZoneId cannot be negative. Assuming the default zone id."); } else { result = zoneId; } } catch(NumberFormatException nfe) { logger.error("Exception when validating request. Incorrect zone id parameter. Cannot parse this to int: " + zoneIdStr, nfe); } } return result; }
java
{ "resource": "" }
q4651
RestServerRequestHandler.registerRequest
train
@Override protected void registerRequest(RestRequestValidator requestValidator, ChannelHandlerContext ctx, MessageEvent messageEvent) { // At this point we know the request is valid and we have a // error handler. So we construct the composite Voldemort // request object. CompositeVoldemortRequest<ByteArray, byte[]> requestObject = requestValidator.constructCompositeVoldemortRequestObject(); if(requestObject != null) { // Dropping dead requests from going to next handler long now = System.currentTimeMillis(); if(requestObject.getRequestOriginTimeInMs() + requestObject.getRoutingTimeoutInMs() <= now) { RestErrorHandler.writeErrorResponse(messageEvent, HttpResponseStatus.REQUEST_TIMEOUT, "current time: " + now + "\torigin time: " + requestObject.getRequestOriginTimeInMs() + "\ttimeout in ms: " + requestObject.getRoutingTimeoutInMs()); return; } else { Store store = getStore(requestValidator.getStoreName(), requestValidator.getParsedRoutingType()); if(store != null) { VoldemortStoreRequest voldemortStoreRequest = new VoldemortStoreRequest(requestObject, store, parseZoneId()); Channels.fireMessageReceived(ctx, voldemortStoreRequest); } else { logger.error("Error when getting store. Non Existing store name."); RestErrorHandler.writeErrorResponse(messageEvent, HttpResponseStatus.BAD_REQUEST, "Non Existing store name. Critical error."); return; } } } }
java
{ "resource": "" }
q4652
RebalanceController.getCurrentClusterState
train
private Pair<Cluster, List<StoreDefinition>> getCurrentClusterState() { // Retrieve the latest cluster metadata from the existing nodes Versioned<Cluster> currentVersionedCluster = adminClient.rebalanceOps.getLatestCluster(Utils.nodeListToNodeIdList(Lists.newArrayList(adminClient.getAdminClientCluster() .getNodes()))); Cluster cluster = currentVersionedCluster.getValue(); List<StoreDefinition> storeDefs = adminClient.rebalanceOps.getCurrentStoreDefinitions(cluster); return new Pair<Cluster, List<StoreDefinition>>(cluster, storeDefs); }
java
{ "resource": "" }
q4653
RebalanceController.executePlan
train
private void executePlan(RebalancePlan rebalancePlan) { logger.info("Starting to execute rebalance Plan!"); int batchCount = 0; int partitionStoreCount = 0; long totalTimeMs = 0; List<RebalanceBatchPlan> entirePlan = rebalancePlan.getPlan(); int numBatches = entirePlan.size(); int numPartitionStores = rebalancePlan.getPartitionStoresMoved(); for(RebalanceBatchPlan batchPlan: entirePlan) { logger.info("======== REBALANCING BATCH " + (batchCount + 1) + " ========"); RebalanceUtils.printBatchLog(batchCount, logger, batchPlan.toString()); long startTimeMs = System.currentTimeMillis(); // ACTUALLY DO A BATCH OF REBALANCING! executeBatch(batchCount, batchPlan); totalTimeMs += (System.currentTimeMillis() - startTimeMs); // Bump up the statistics batchCount++; partitionStoreCount += batchPlan.getPartitionStoreMoves(); batchStatusLog(batchCount, numBatches, partitionStoreCount, numPartitionStores, totalTimeMs); } }
java
{ "resource": "" }
q4654
RebalanceController.batchStatusLog
train
private void batchStatusLog(int batchCount, int numBatches, int partitionStoreCount, int numPartitionStores, long totalTimeMs) { // Calculate the estimated end time and pretty print stats double rate = 1; long estimatedTimeMs = 0; if(numPartitionStores > 0) { rate = partitionStoreCount / (double) numPartitionStores; /* double division: integer division would truncate the rate to 0 and break the estimate */ estimatedTimeMs = (long) (totalTimeMs / rate) - totalTimeMs; } StringBuilder sb = new StringBuilder(); sb.append("Batch Complete!") .append(Utils.NEWLINE) .append("\tbatches moved: ") .append(batchCount) .append(" out of ") .append(numBatches) .append(Utils.NEWLINE) .append("\tPartition stores moved: ") .append(partitionStoreCount) .append(" out of ") .append(numPartitionStores) .append(Utils.NEWLINE) .append("\tPercent done: ") .append(decimalFormatter.format(rate * 100.0)) .append(Utils.NEWLINE) .append("\tEstimated time left: ") .append(estimatedTimeMs) .append(" ms (") .append(TimeUnit.MILLISECONDS.toHours(estimatedTimeMs)) .append(" hours)"); RebalanceUtils.printBatchLog(batchCount, logger, sb.toString()); }
java
{ "resource": "" }
q4655
RebalanceController.executeBatch
train
private void executeBatch(int batchId, final RebalanceBatchPlan batchPlan) { final Cluster batchCurrentCluster = batchPlan.getCurrentCluster(); final List<StoreDefinition> batchCurrentStoreDefs = batchPlan.getCurrentStoreDefs(); final Cluster batchFinalCluster = batchPlan.getFinalCluster(); final List<StoreDefinition> batchFinalStoreDefs = batchPlan.getFinalStoreDefs(); try { final List<RebalanceTaskInfo> rebalanceTaskInfoList = batchPlan.getBatchPlan(); if(rebalanceTaskInfoList.isEmpty()) { RebalanceUtils.printBatchLog(batchId, logger, "Skipping batch " + batchId + " since it is empty."); // Even though there is no rebalancing work to do, cluster // metadata must be updated so that the server is aware of the // new cluster xml. adminClient.rebalanceOps.rebalanceStateChange(batchCurrentCluster, batchFinalCluster, batchCurrentStoreDefs, batchFinalStoreDefs, rebalanceTaskInfoList, false, true, false, false, true); return; } RebalanceUtils.printBatchLog(batchId, logger, "Starting batch " + batchId + "."); // Split the store definitions List<StoreDefinition> readOnlyStoreDefs = StoreDefinitionUtils.filterStores(batchFinalStoreDefs, true); List<StoreDefinition> readWriteStoreDefs = StoreDefinitionUtils.filterStores(batchFinalStoreDefs, false); boolean hasReadOnlyStores = readOnlyStoreDefs != null && readOnlyStoreDefs.size() > 0; boolean hasReadWriteStores = readWriteStoreDefs != null && readWriteStoreDefs.size() > 0; // STEP 1 - Cluster state change boolean finishedReadOnlyPhase = false; List<RebalanceTaskInfo> filteredRebalancePartitionPlanList = RebalanceUtils.filterTaskPlanWithStores(rebalanceTaskInfoList, readOnlyStoreDefs); rebalanceStateChange(batchId, batchCurrentCluster, batchCurrentStoreDefs, batchFinalCluster, batchFinalStoreDefs, filteredRebalancePartitionPlanList, hasReadOnlyStores, hasReadWriteStores, finishedReadOnlyPhase); // STEP 2 - Move RO data if(hasReadOnlyStores) { RebalanceBatchPlanProgressBar progressBar = batchPlan.getProgressBar(batchId); executeSubBatch(batchId, progressBar, batchCurrentCluster, batchCurrentStoreDefs, filteredRebalancePartitionPlanList, hasReadOnlyStores, hasReadWriteStores, finishedReadOnlyPhase); } // STEP 3 - Cluster change state finishedReadOnlyPhase = true; filteredRebalancePartitionPlanList = RebalanceUtils.filterTaskPlanWithStores(rebalanceTaskInfoList, readWriteStoreDefs); rebalanceStateChange(batchId, batchCurrentCluster, batchCurrentStoreDefs, batchFinalCluster, batchFinalStoreDefs, filteredRebalancePartitionPlanList, hasReadOnlyStores, hasReadWriteStores, finishedReadOnlyPhase); // STEP 4 - Move RW data if(hasReadWriteStores) { proxyPause(); RebalanceBatchPlanProgressBar progressBar = batchPlan.getProgressBar(batchId); executeSubBatch(batchId, progressBar, batchCurrentCluster, batchCurrentStoreDefs, filteredRebalancePartitionPlanList, hasReadOnlyStores, hasReadWriteStores, finishedReadOnlyPhase); } RebalanceUtils.printBatchLog(batchId, logger, "Successfully terminated batch " + batchId + "."); } catch(Exception e) { RebalanceUtils.printErrorLog(batchId, logger, "Error in batch " + batchId + " - " + e.getMessage(), e); throw new VoldemortException("Rebalance failed on batch " + batchId, e); } }
java
{ "resource": "" }
q4656
RebalanceController.proxyPause
train
private void proxyPause() { logger.info("Pausing after cluster state has changed to allow proxy bridges to be established. " + "Will start rebalancing work on servers in " + proxyPauseSec + " seconds."); try { Thread.sleep(TimeUnit.SECONDS.toMillis(proxyPauseSec)); } catch(InterruptedException e) { logger.warn("Sleep interrupted in proxy pause."); } }
java
{ "resource": "" }
q4657
RebalanceController.executeSubBatch
train
private void executeSubBatch(final int batchId, RebalanceBatchPlanProgressBar progressBar, final Cluster batchRollbackCluster, final List<StoreDefinition> batchRollbackStoreDefs, final List<RebalanceTaskInfo> rebalanceTaskPlanList, boolean hasReadOnlyStores, boolean hasReadWriteStores, boolean finishedReadOnlyStores) { RebalanceUtils.printBatchLog(batchId, logger, "Submitting rebalance tasks "); // Get an ExecutorService in place used for submitting our tasks ExecutorService service = RebalanceUtils.createExecutors(maxParallelRebalancing); // Sub-list of the above list final List<RebalanceTask> failedTasks = Lists.newArrayList(); final List<RebalanceTask> incompleteTasks = Lists.newArrayList(); // Semaphores for donor nodes - To avoid multiple disk sweeps Map<Integer, Semaphore> donorPermits = new HashMap<Integer, Semaphore>(); for(Node node: batchRollbackCluster.getNodes()) { donorPermits.put(node.getId(), new Semaphore(1)); } try { // List of tasks which will run asynchronously List<RebalanceTask> allTasks = executeTasks(batchId, progressBar, service, rebalanceTaskPlanList, donorPermits); RebalanceUtils.printBatchLog(batchId, logger, "All rebalance tasks submitted"); // Wait and shutdown after (infinite) timeout RebalanceUtils.executorShutDown(service, Long.MAX_VALUE); RebalanceUtils.printBatchLog(batchId, logger, "Finished waiting for executors"); // Collects all failures + incomplete tasks from the rebalance // tasks. List<Exception> failures = Lists.newArrayList(); for(RebalanceTask task: allTasks) { if(task.hasException()) { failedTasks.add(task); failures.add(task.getError()); } else if(!task.isComplete()) { incompleteTasks.add(task); } } if(failedTasks.size() > 0) { throw new VoldemortRebalancingException("Rebalance task terminated unsuccessfully on tasks " + failedTasks, failures); } // If there were no failures, then we could have had a genuine // timeout ( Rebalancing took longer than the operator expected ). // We should throw a VoldemortException and not a // VoldemortRebalancingException ( which will start reverting // metadata ). The operator may want to manually then resume the // process. if(incompleteTasks.size() > 0) { throw new VoldemortException("Rebalance tasks are still incomplete / running " + incompleteTasks); } } catch(VoldemortRebalancingException e) { logger.error("Failure while migrating partitions for rebalance task " + batchId); if(hasReadOnlyStores && hasReadWriteStores && finishedReadOnlyStores) { // Case 0 adminClient.rebalanceOps.rebalanceStateChange(null, batchRollbackCluster, null, batchRollbackStoreDefs, null, true, true, false, false, false); } else if(hasReadWriteStores && finishedReadOnlyStores) { // Case 4 adminClient.rebalanceOps.rebalanceStateChange(null, batchRollbackCluster, null, batchRollbackStoreDefs, null, false, true, false, false, false); } throw e; } finally { if(!service.isShutdown()) { RebalanceUtils.printErrorLog(batchId, logger, "Could not shutdown service cleanly for rebalance task " + batchId, null); service.shutdownNow(); } } }
java
{ "resource": "" }
q4658
ConsistencyCheck.determineConsistency
train
public static ConsistencyLevel determineConsistency(Map<Value, Set<ClusterNode>> versionNodeSetMap, int replicationFactor) { boolean fullyConsistent = true; Value latestVersion = null; for (Map.Entry<Value, Set<ClusterNode>> versionNodeSetEntry : versionNodeSetMap.entrySet()) { Value value = versionNodeSetEntry.getKey(); if (latestVersion == null) { latestVersion = value; } else if (value.isTimeStampLaterThan(latestVersion)) { latestVersion = value; } Set<ClusterNode> nodeSet = versionNodeSetEntry.getValue(); fullyConsistent = fullyConsistent && (nodeSet.size() == replicationFactor); } if (fullyConsistent) { return ConsistencyLevel.FULL; } else { // latest write consistent, effectively consistent if (latestVersion != null && versionNodeSetMap.get(latestVersion).size() == replicationFactor) { return ConsistencyLevel.LATEST_CONSISTENT; } // all other states inconsistent return ConsistencyLevel.INCONSISTENT; } }
java
{ "resource": "" }
q4659
ConsistencyCheck.cleanIneligibleKeys
train
public static void cleanIneligibleKeys(Map<ByteArray, Map<Value, Set<ClusterNode>>> keyVersionNodeSetMap, int requiredWrite) { Set<ByteArray> keysToDelete = new HashSet<ByteArray>(); for (Map.Entry<ByteArray, Map<Value, Set<ClusterNode>>> entry : keyVersionNodeSetMap.entrySet()) { Set<Value> valuesToDelete = new HashSet<Value>(); ByteArray key = entry.getKey(); Map<Value, Set<ClusterNode>> valueNodeSetMap = entry.getValue(); // mark version for deletion if not enough writes for (Map.Entry<Value, Set<ClusterNode>> versionNodeSetEntry : valueNodeSetMap.entrySet()) { Set<ClusterNode> nodeSet = versionNodeSetEntry.getValue(); if (nodeSet.size() < requiredWrite) { valuesToDelete.add(versionNodeSetEntry.getKey()); } } // delete versions for (Value v : valuesToDelete) { valueNodeSetMap.remove(v); } // mark key for deletion if no versions left if (valueNodeSetMap.size() == 0) { keysToDelete.add(key); } } // delete keys for (ByteArray k : keysToDelete) { keyVersionNodeSetMap.remove(k); } }
java
{ "resource": "" }
q4660
ConsistencyCheck.keyVersionToString
train
public static String keyVersionToString(ByteArray key, Map<Value, Set<ClusterNode>> versionMap, String storeName, Integer partitionId) { StringBuilder record = new StringBuilder(); for (Map.Entry<Value, Set<ClusterNode>> versionSet : versionMap.entrySet()) { Value value = versionSet.getKey(); Set<ClusterNode> nodeSet = versionSet.getValue(); record.append("BAD_KEY,"); record.append(storeName + ","); record.append(partitionId + ","); record.append(ByteUtils.toHexString(key.get()) + ","); record.append(nodeSet.toString().replace(", ", ";") + ","); record.append(value.toString()); } return record.toString(); }
java
{ "resource": "" }
q4661
GetMetadataResponseSender.sendResponse
train
@Override public void sendResponse(StoreStats performanceStats, boolean isFromLocalZone, long startTimeInMs) throws Exception { ChannelBuffer responseContent = ChannelBuffers.dynamicBuffer(this.responseValue.length); responseContent.writeBytes(responseValue); // 1. Create the Response object HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK); // 2. Set the right headers response.setHeader(CONTENT_TYPE, "binary"); response.setHeader(CONTENT_TRANSFER_ENCODING, "binary"); // 3. Copy the data into the payload response.setContent(responseContent); response.setHeader(CONTENT_LENGTH, response.getContent().readableBytes()); if(logger.isDebugEnabled()) { logger.debug("Response = " + response); } // Write the response to the Netty Channel this.messageEvent.getChannel().write(response); if(performanceStats != null && isFromLocalZone) { recordStats(performanceStats, startTimeInMs, Tracked.GET); } }
java
{ "resource": "" }
q4662
FailureDetectorConfig.setCluster
train
public FailureDetectorConfig setCluster(Cluster cluster) { Utils.notNull(cluster); this.cluster = cluster; /* * FIXME: this is the hacky way to refresh the admin connection * verifier, but it'll just work. The clean way to do so is to have a * centralized metadata management, and all references of cluster object * point to that. */ if(this.connectionVerifier instanceof AdminConnectionVerifier) { ((AdminConnectionVerifier) connectionVerifier).setCluster(cluster); } return this; }
java
{ "resource": "" }
q4663
FailureDetectorConfig.setNodes
train
@Deprecated public synchronized FailureDetectorConfig setNodes(Collection<Node> nodes) { Utils.notNull(nodes); this.nodes = new HashSet<Node>(nodes); return this; }
java
{ "resource": "" }
q4664
Cluster.hasNodeWithId
train
public boolean hasNodeWithId(int nodeId) { return nodesById.get(nodeId) != null; }
java
{ "resource": "" }
q4665
Cluster.cloneCluster
train
public static Cluster cloneCluster(Cluster cluster) { // Could add a better .clone() implementation that clones the derived // data structures. The constructor invoked by this clone implementation // can be slow for large numbers of partitions. Probably faster to copy // all the maps and stuff. return new Cluster(cluster.getName(), new ArrayList<Node>(cluster.getNodes()), new ArrayList<Zone>(cluster.getZones())); /*- * Historic "clone" code being kept in case this, for some reason, was the "right" way to be doing this. ClusterMapper mapper = new ClusterMapper(); return mapper.readCluster(new StringReader(mapper.writeCluster(cluster))); */ }
java
{ "resource": "" }
q4666
AdminClientPool.checkout
train
public AdminClient checkout() { if (isClosed.get()) { throw new IllegalStateException("Pool is closing"); } AdminClient client; // Try to get one from the Cache. while ((client = clientCache.poll()) != null) { if (!client.isClusterModified()) { return client; } else { // Cluster is Modified, after the AdminClient is created. Close it client.close(); } } // None is available, create new one. return createAdminClient(); }
java
{ "resource": "" }
q4667
AdminClientPool.checkin
train
public void checkin(AdminClient client) { if (isClosed.get()) { throw new IllegalStateException("Pool is closing"); } if (client == null) { throw new IllegalArgumentException("client is null"); } boolean isCheckedIn = clientCache.offer(client); if (!isCheckedIn) { // Cache is already full, close this AdminClient client.close(); } }
java
{ "resource": "" }
q4668
AdminClientPool.close
train
public void close() { boolean isPreviouslyClosed = isClosed.getAndSet(true); if (isPreviouslyClosed) { return; } AdminClient client; while ((client = clientCache.poll()) != null) { client.close(); } }
java
{ "resource": "" }
q4669
PartitionBalanceUtils.compressedListOfPartitionsInZone
train
public static String compressedListOfPartitionsInZone(final Cluster cluster, int zoneId) { Map<Integer, Integer> idToRunLength = PartitionBalanceUtils.getMapOfContiguousPartitions(cluster, zoneId); StringBuilder sb = new StringBuilder(); sb.append("["); boolean first = true; Set<Integer> sortedInitPartitionIds = new TreeSet<Integer>(idToRunLength.keySet()); for(int initPartitionId: sortedInitPartitionIds) { if(!first) { sb.append(", "); } else { first = false; } int runLength = idToRunLength.get(initPartitionId); if(runLength == 1) { sb.append(initPartitionId); } else { int endPartitionId = (initPartitionId + runLength - 1) % cluster.getNumberOfPartitions(); sb.append(initPartitionId).append("-").append(endPartitionId); } } sb.append("]"); return sb.toString(); }
java
{ "resource": "" }
q4670
PartitionBalanceUtils.getMapOfContiguousPartitions
train
public static Map<Integer, Integer> getMapOfContiguousPartitions(final Cluster cluster,
                                                                 int zoneId) {
    List<Integer> partitionIds = new ArrayList<Integer>(cluster.getPartitionIdsInZone(zoneId));
    Map<Integer, Integer> partitionIdToRunLength = Maps.newHashMap();
    if(partitionIds.isEmpty()) {
        return partitionIdToRunLength;
    }

    int lastPartitionId = partitionIds.get(0);
    int initPartitionId = lastPartitionId;

    for(int offset = 1; offset < partitionIds.size(); offset++) {
        int partitionId = partitionIds.get(offset);
        if(partitionId == lastPartitionId + 1) {
            lastPartitionId = partitionId;
            continue;
        }
        int runLength = lastPartitionId - initPartitionId + 1;
        partitionIdToRunLength.put(initPartitionId, runLength);
        initPartitionId = partitionId;
        lastPartitionId = initPartitionId;
    }

    int runLength = lastPartitionId - initPartitionId + 1;
    if(lastPartitionId == cluster.getNumberOfPartitions() - 1
       && partitionIdToRunLength.containsKey(0)) {
        // Special case of contiguity that wraps around the ring.
        partitionIdToRunLength.put(initPartitionId, runLength + partitionIdToRunLength.get(0));
        partitionIdToRunLength.remove(0);
    } else {
        partitionIdToRunLength.put(initPartitionId, runLength);
    }
    return partitionIdToRunLength;
}
java
{ "resource": "" }
q4671
PartitionBalanceUtils.getMapOfContiguousPartitionRunLengths
train
public static Map<Integer, Integer> getMapOfContiguousPartitionRunLengths(final Cluster cluster, int zoneId) { Map<Integer, Integer> idToRunLength = getMapOfContiguousPartitions(cluster, zoneId); Map<Integer, Integer> runLengthToCount = Maps.newHashMap(); if(idToRunLength.isEmpty()) { return runLengthToCount; } for(int runLength: idToRunLength.values()) { if(!runLengthToCount.containsKey(runLength)) { runLengthToCount.put(runLength, 0); } runLengthToCount.put(runLength, runLengthToCount.get(runLength) + 1); } return runLengthToCount; }
java
{ "resource": "" }
q4672
PartitionBalanceUtils.getPrettyMapOfContiguousPartitionRunLengths
train
public static String getPrettyMapOfContiguousPartitionRunLengths(final Cluster cluster, int zoneId) { Map<Integer, Integer> runLengthToCount = getMapOfContiguousPartitionRunLengths(cluster, zoneId); String prettyHistogram = "["; boolean first = true; Set<Integer> runLengths = new TreeSet<Integer>(runLengthToCount.keySet()); for(int runLength: runLengths) { if(first) { first = false; } else { prettyHistogram += ", "; } prettyHistogram += "{" + runLength + " : " + runLengthToCount.get(runLength) + "}"; } prettyHistogram += "]"; return prettyHistogram; }
java
{ "resource": "" }
q4673
PartitionBalanceUtils.getHotPartitionsDueToContiguity
train
public static String getHotPartitionsDueToContiguity(final Cluster cluster, int hotContiguityCutoff) { StringBuilder sb = new StringBuilder(); for(int zoneId: cluster.getZoneIds()) { Map<Integer, Integer> idToRunLength = getMapOfContiguousPartitions(cluster, zoneId); for(Integer initialPartitionId: idToRunLength.keySet()) { int runLength = idToRunLength.get(initialPartitionId); if(runLength < hotContiguityCutoff) continue; int hotPartitionId = (initialPartitionId + runLength) % cluster.getNumberOfPartitions(); Node hotNode = cluster.getNodeForPartitionId(hotPartitionId); sb.append("\tNode " + hotNode.getId() + " (" + hotNode.getHost() + ") has hot primary partition " + hotPartitionId + " that follows contiguous run of length " + runLength + Utils.NEWLINE); } } return sb.toString(); }
java
{ "resource": "" }
q4674
PartitionBalanceUtils.analyzeInvalidMetadataRate
train
public static String analyzeInvalidMetadataRate(final Cluster currentCluster,
                                                List<StoreDefinition> currentStoreDefs,
                                                final Cluster finalCluster,
                                                List<StoreDefinition> finalStoreDefs) {
    StringBuilder sb = new StringBuilder();
    sb.append("Dump of invalid metadata rates per zone").append(Utils.NEWLINE);

    HashMap<StoreDefinition, Integer> uniqueStores =
            StoreDefinitionUtils.getUniqueStoreDefinitionsWithCounts(currentStoreDefs);

    for(StoreDefinition currentStoreDef: uniqueStores.keySet()) {
        sb.append("Store exemplar: " + currentStoreDef.getName())
          .append(Utils.NEWLINE)
          .append("\tThere are " + uniqueStores.get(currentStoreDef) + " other similar stores.")
          .append(Utils.NEWLINE);

        StoreRoutingPlan currentSRP = new StoreRoutingPlan(currentCluster, currentStoreDef);
        StoreDefinition finalStoreDef = StoreUtils.getStoreDef(finalStoreDefs,
                                                               currentStoreDef.getName());
        StoreRoutingPlan finalSRP = new StoreRoutingPlan(finalCluster, finalStoreDef);

        // Only care about existing zones
        for(int zoneId: currentCluster.getZoneIds()) {
            int zonePrimariesCount = 0;
            int invalidMetadata = 0;

            // Examine nodes in current cluster in existing zone.
            for(int nodeId: currentCluster.getNodeIdsInZone(zoneId)) {
                // For every zone-primary in current cluster
                for(int zonePrimaryPartitionId: currentSRP.getZonePrimaryPartitionIds(nodeId)) {
                    zonePrimariesCount++;
                    // Determine if original zone-primary node is still some
                    // form of n-ary in final cluster. If not,
                    // InvalidMetadataException will fire.
                    if(!finalSRP.getZoneNAryPartitionIds(nodeId)
                                .contains(zonePrimaryPartitionId)) {
                        invalidMetadata++;
                    }
                }
            }
            float rate = invalidMetadata / (float) zonePrimariesCount;
            sb.append("\tZone " + zoneId)
              .append(" : total zone primaries " + zonePrimariesCount)
              .append(", # that trigger invalid metadata " + invalidMetadata)
              .append(" => " + rate)
              .append(Utils.NEWLINE);
        }
    }
    return sb.toString();
}
java
{ "resource": "" }
q4675
QueuedKeyedResourcePool.create
train
public static <K, V> QueuedKeyedResourcePool<K, V> create(ResourceFactory<K, V> factory, ResourcePoolConfig config) { return new QueuedKeyedResourcePool<K, V>(factory, config); }
java
{ "resource": "" }
q4676
QueuedKeyedResourcePool.create
train
public static <K, V> QueuedKeyedResourcePool<K, V> create(ResourceFactory<K, V> factory) { return create(factory, new ResourcePoolConfig()); }
java
{ "resource": "" }
q4677
QueuedKeyedResourcePool.internalNonBlockingGet
train
public V internalNonBlockingGet(K key) throws Exception { Pool<V> resourcePool = getResourcePoolForKey(key); return attemptNonBlockingCheckout(key, resourcePool); }
java
{ "resource": "" }
q4678
QueuedKeyedResourcePool.getNextUnexpiredResourceRequest
train
private AsyncResourceRequest<V> getNextUnexpiredResourceRequest(Queue<AsyncResourceRequest<V>> requestQueue) { AsyncResourceRequest<V> resourceRequest = requestQueue.poll(); while(resourceRequest != null) { if(resourceRequest.getDeadlineNs() < System.nanoTime()) { resourceRequest.handleTimeout(); resourceRequest = requestQueue.poll(); } else { break; } } return resourceRequest; }
java
{ "resource": "" }
q4679
QueuedKeyedResourcePool.processQueue
train
private boolean processQueue(K key) {
    Queue<AsyncResourceRequest<V>> requestQueue = getRequestQueueForKey(key);
    if(requestQueue.isEmpty()) {
        return false;
    }

    // Attempt to get a resource.
    Pool<V> resourcePool = getResourcePoolForKey(key);
    V resource = null;
    Exception ex = null;
    try {
        // Must attempt a non-blocking checkout to ensure resources are
        // created for the pool.
        resource = attemptNonBlockingCheckout(key, resourcePool);
    } catch(Exception e) {
        destroyResource(key, resourcePool, resource);
        ex = e;
        resource = null;
    }

    // We got neither a resource nor an exception, so no requests can be
    // processed; return.
    if(resource == null && ex == null) {
        return false;
    }

    // With a resource in hand, process the resource requests.
    AsyncResourceRequest<V> resourceRequest = getNextUnexpiredResourceRequest(requestQueue);
    if(resourceRequest == null) {
        if(resource != null) {
            // Did not use the resource! Check it in directly via super to
            // avoid a circular call to processQueue().
            try {
                super.checkin(key, resource);
            } catch(Exception e) {
                logger.error("Exception checking in resource: ", e);
            }
        } else {
            // Poor exception: there is no request to tag this exception onto,
            // so drop it on the floor and continue as usual.
        }
        return false;
    } else {
        // We have a request here.
        if(resource != null) {
            resourceRequest.useResource(resource);
        } else {
            resourceRequest.handleException(ex);
        }
        return true;
    }
}
java
{ "resource": "" }
q4680
QueuedKeyedResourcePool.checkin
train
@Override
public void checkin(K key, V resource) {
    super.checkin(key, resource);
    // NB: Blocking checkout calls for synchronous requests get the resource
    // checked in above before processQueueLoop() attempts checkout below.
    // There is therefore a risk that asynchronous requests will be starved.
    processQueueLoop(key);
}
java
{ "resource": "" }
q4681
QueuedKeyedResourcePool.destroyRequest
train
protected void destroyRequest(AsyncResourceRequest<V> resourceRequest) {
    if(resourceRequest != null) {
        try {
            // To hand control back to the owner of the AsyncResourceRequest,
            // treat "destroy" as an exception, since there is no resource to
            // pass into useResource and the timeout has not expired.
            Exception e = new UnreachableStoreException("Client request was terminated while waiting in the queue.");
            resourceRequest.handleException(e);
        } catch(Exception ex) {
            logger.error("Exception while destroying resource request:", ex);
        }
    }
}
java
{ "resource": "" }
q4682
QueuedKeyedResourcePool.destroyRequestQueue
train
private void destroyRequestQueue(Queue<AsyncResourceRequest<V>> requestQueue) { if(requestQueue != null) { AsyncResourceRequest<V> resourceRequest = requestQueue.poll(); while(resourceRequest != null) { destroyRequest(resourceRequest); resourceRequest = requestQueue.poll(); } } }
java
{ "resource": "" }
q4683
QueuedKeyedResourcePool.getRegisteredResourceRequestCount
train
public int getRegisteredResourceRequestCount(K key) {
    if(requestQueueMap.containsKey(key)) {
        Queue<AsyncResourceRequest<V>> requestQueue = getRequestQueueForExistingKey(key);
        // FYI: .size() is not constant time in the next call. ;)
        if(requestQueue != null) {
            return requestQueue.size();
        }
    }
    return 0;
}
java
{ "resource": "" }
q4684
QueuedKeyedResourcePool.getRegisteredResourceRequestCount
train
public int getRegisteredResourceRequestCount() {
    int count = 0;
    for(Entry<K, Queue<AsyncResourceRequest<V>>> entry: this.requestQueueMap.entrySet()) {
        // FYI: .size() is not constant time in the next call. ;)
        count += entry.getValue().size();
    }
    return count;
}
java
{ "resource": "" }
q4685
RebalanceScheduler.populateTasksByStealer
train
protected void populateTasksByStealer(List<StealerBasedRebalanceTask> sbTaskList) {
    // Setup mapping of stealers to work for this run.
    for(StealerBasedRebalanceTask task: sbTaskList) {
        if(task.getStealInfos().size() != 1) {
            throw new VoldemortException("StealerBasedRebalanceTasks should have a list of RebalancePartitionsInfo of length 1.");
        }

        RebalanceTaskInfo stealInfo = task.getStealInfos().get(0);
        int stealerId = stealInfo.getStealerId();
        if(!this.tasksByStealer.containsKey(stealerId)) {
            this.tasksByStealer.put(stealerId, new ArrayList<StealerBasedRebalanceTask>());
        }
        this.tasksByStealer.get(stealerId).add(task);
    }

    if(tasksByStealer.isEmpty()) {
        return;
    }

    // Shuffle order of each stealer's work list. This randomization helps to
    // get rid of any "patterns" in how rebalancing tasks were added to the
    // task list passed in.
    for(List<StealerBasedRebalanceTask> taskList: tasksByStealer.values()) {
        Collections.shuffle(taskList);
    }
}
java
{ "resource": "" }
q4686
RebalanceScheduler.scheduleNextTask
train
protected synchronized StealerBasedRebalanceTask scheduleNextTask(boolean executeService) {
    // Make sure there is work left to do.
    if(doneSignal.getCount() == 0) {
        logger.info("All tasks completion signaled... returning");
        return null;
    }

    // Limit the number of tasks outstanding.
    if(this.numTasksExecuting >= maxParallelRebalancing) {
        logger.info("Already executing " + this.numTasksExecuting
                    + " tasks, at or above the parallel rebalancing limit of "
                    + maxParallelRebalancing);
        return null;
    }

    // Shuffle the list of stealer IDs each time a new task to schedule needs
    // to be found. Randomizing the order should avoid prioritizing one
    // specific stealer's work ahead of all others.
    List<Integer> stealerIds = new ArrayList<Integer>(tasksByStealer.keySet());
    Collections.shuffle(stealerIds);
    for(int stealerId: stealerIds) {
        if(nodeIdsWithWork.contains(stealerId)) {
            logger.info("Stealer " + stealerId + " is already working... continuing");
            continue;
        }

        for(StealerBasedRebalanceTask sbTask: tasksByStealer.get(stealerId)) {
            int donorId = sbTask.getStealInfos().get(0).getDonorId();
            if(nodeIdsWithWork.contains(donorId)) {
                logger.info("Stealer " + stealerId + " Donor " + donorId
                            + " is already working... continuing");
                continue;
            }

            // Bookkeeping
            addNodesToWorkerList(Arrays.asList(stealerId, donorId));
            numTasksExecuting++;

            // Remove this task from the list, thus modifying the list being
            // iterated over. This is safe because we return directly out of
            // this branch.
            tasksByStealer.get(stealerId).remove(sbTask);
            try {
                if(executeService) {
                    logger.info("Stealer " + stealerId + " Donor " + donorId
                                + " going to schedule work");
                    service.execute(sbTask);
                }
            } catch(RejectedExecutionException ree) {
                logger.error("Stealer " + stealerId
                             + ": rebalancing task rejected by executor service.", ree);
                throw new VoldemortRebalancingException("Stealer " + stealerId
                                                        + ": rebalancing task rejected by executor service.");
            }
            return sbTask;
        }
    }
    printRemainingTasks(stealerIds);
    return null;
}
java
{ "resource": "" }
q4687
RebalanceScheduler.addNodesToWorkerList
train
public synchronized void addNodesToWorkerList(List<Integer> nodeIds) {
    // Bookkeeping for nodes that will be involved in the next task
    nodeIdsWithWork.addAll(nodeIds);
    logger.info("Node IDs with work: " + nodeIdsWithWork + " Newly added nodes " + nodeIds);
}
java
{ "resource": "" }
q4688
RebalanceScheduler.doneTask
train
public synchronized void doneTask(int stealerId, int donorId) {
    removeNodesFromWorkerList(Arrays.asList(stealerId, donorId));
    numTasksExecuting--;
    doneSignal.countDown();
    // Try and schedule more tasks now that resources may be available to do so.
    scheduleMoreTasks();
}
java
{ "resource": "" }
q4689
AggregatedBdbEnvironmentStats.collectLongMetric
train
private List<Long> collectLongMetric(String metricGetterName) { List<Long> vals = new ArrayList<Long>(); for(BdbEnvironmentStats envStats: environmentStatsTracked) { vals.add((Long) ReflectUtils.callMethod(envStats, BdbEnvironmentStats.class, metricGetterName, new Class<?>[0], new Object[0])); } return vals; }
java
{ "resource": "" }
q4690
ByteArray.toHexStrings
train
public static Iterable<String> toHexStrings(Iterable<ByteArray> arrays) { ArrayList<String> ret = new ArrayList<String>(); for(ByteArray array: arrays) ret.add(ByteUtils.toHexString(array.get())); return ret; }
java
{ "resource": "" }
q4691
GetResponseSender.sendResponse
train
@Override
public void sendResponse(StoreStats performanceStats,
                         boolean isFromLocalZone,
                         long startTimeInMs) throws Exception {
    /*
     * Pay attention to the code below. Note that in this method we wrap a
     * multiPart object with a mimeMessage. However, when writing to the
     * outputStream we only send the multiPart object and not the entire
     * mimeMessage. This is intentional.
     *
     * In the earlier version of this code we used to create a multiPart
     * object and just send that multiPart across the wire.
     *
     * However, we later discovered that upon setting the content of a
     * MimeBodyPart, JavaMail internally creates a DataHandler object
     * wrapping the object you passed in. The part's Content-Type header is
     * not updated immediately. In order to get the headers updated, one
     * needs to call MimeMessage.saveChanges() on the enclosing message,
     * which cascades down the MIME structure into a call to
     * MimeBodyPart.updateHeaders() on the body part. It's this
     * updateHeaders call that transfers the content type from the
     * DataHandler to the part's MIME Content-Type header.
     *
     * To make sure that the Content-Type headers are being updated (without
     * changing too much code), we decided to wrap the multiPart in a
     * mimeMessage, call mimeMessage.saveChanges() and then just send the
     * multiPart. This is to make sure the multiPart's headers are updated
     * accurately.
     */
    MimeMessage message = new MimeMessage(Session.getDefaultInstance(new Properties()));
    MimeMultipart multiPart = new MimeMultipart();
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    String base64Key = RestUtils.encodeVoldemortKey(key.get());
    String contentLocationKey = "/" + this.storeName + "/" + base64Key;

    for(Versioned<byte[]> versionedValue: versionedValues) {
        byte[] responseValue = versionedValue.getValue();
        VectorClock vectorClock = (VectorClock) versionedValue.getVersion();
        String eTag = RestUtils.getSerializedVectorClock(vectorClock);
        numVectorClockEntries += vectorClock.getVersionMap().size();

        // Create the individual body part for each versioned value of the
        // requested key
        MimeBodyPart body = new MimeBodyPart();
        try {
            // Add the right headers
            body.addHeader(CONTENT_TYPE, "application/octet-stream");
            body.addHeader(CONTENT_TRANSFER_ENCODING, "binary");
            body.addHeader(RestMessageHeaders.X_VOLD_VECTOR_CLOCK, eTag);
            body.setContent(responseValue, "application/octet-stream");
            body.addHeader(RestMessageHeaders.CONTENT_LENGTH,
                           Integer.toString(responseValue.length));
            multiPart.addBodyPart(body);
        } catch(MessagingException me) {
            logger.error("Exception while constructing body part", me);
            outputStream.close();
            throw me;
        }
    }

    message.setContent(multiPart);
    message.saveChanges();
    try {
        multiPart.writeTo(outputStream);
    } catch(Exception e) {
        logger.error("Exception while writing multipart to output stream", e);
        outputStream.close();
        throw e;
    }

    ChannelBuffer responseContent = ChannelBuffers.dynamicBuffer();
    responseContent.writeBytes(outputStream.toByteArray());

    // Create the Response object
    HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);

    // Set the right headers
    response.setHeader(CONTENT_TYPE, "multipart/binary");
    response.setHeader(CONTENT_TRANSFER_ENCODING, "binary");
    response.setHeader(CONTENT_LOCATION, contentLocationKey);

    // Copy the data into the payload
    response.setContent(responseContent);
    response.setHeader(CONTENT_LENGTH, response.getContent().readableBytes());

    // Write the response to the Netty Channel
    if(logger.isDebugEnabled()) {
        String keyStr = RestUtils.getKeyHexString(this.key);
        debugLog("GET",
                 this.storeName,
                 keyStr,
                 startTimeInMs,
                 System.currentTimeMillis(),
                 numVectorClockEntries);
    }
    this.messageEvent.getChannel().write(response);

    if(performanceStats != null && isFromLocalZone) {
        recordStats(performanceStats, startTimeInMs, Tracked.GET);
    }
    outputStream.close();
}
java
{ "resource": "" }
q4692
VoldemortConfig.getPublicConfigValue
train
public String getPublicConfigValue(String key) throws ConfigurationException { if (!allProps.containsKey(key)) { throw new UndefinedPropertyException("The requested config key does not exist."); } if (restrictedConfigs.contains(key)) { throw new ConfigurationException("The requested config key is not publicly available!"); } return allProps.get(key); }
java
{ "resource": "" }
q4693
QuotaLimitingStore.checkRateLimit
train
private void checkRateLimit(String quotaKey, Tracked trackedOp) {
    String quotaValue = null;
    try {
        if(!metadataStore.getQuotaEnforcingEnabledUnlocked()) {
            return;
        }
        quotaValue = quotaStore.cacheGet(quotaKey);

        // Store may not have any quotas
        if(quotaValue == null) {
            return;
        }

        // But, if it does
        float currentRate = getThroughput(trackedOp);
        float allowedRate = Float.parseFloat(quotaValue);

        // TODO the histogram should be reasonably accurate to do all
        // these things.. (ghost qps and all)

        // Report the current quota usage level
        quotaStats.reportQuotaUsed(trackedOp, Utils.safeGetPercentage(currentRate, allowedRate));

        // Check if we have exceeded the rate.
        if(currentRate > allowedRate) {
            quotaStats.reportRateLimitedOp(trackedOp);
            throw new QuotaExceededException("Exceeded rate limit for " + quotaKey
                                             + ". Maximum allowed : " + allowedRate
                                             + " Current: " + currentRate);
        }
    } catch(NumberFormatException nfe) {
        // Move on if we cannot parse the quota value properly.
        logger.debug("Invalid formatting of quota value for key " + quotaKey + " : "
                     + quotaValue);
    }
}
java
{ "resource": "" }
q4694
AsyncOperationService.submitOperation
train
public synchronized void submitOperation(int requestId, AsyncOperation operation) { if(this.operations.containsKey(requestId)) throw new VoldemortException("Request " + requestId + " already submitted to the system"); this.operations.put(requestId, operation); scheduler.scheduleNow(operation); logger.debug("Handling async operation " + requestId); }
java
{ "resource": "" }
q4695
AsyncOperationService.isComplete
train
public synchronized boolean isComplete(int requestId, boolean remove) { if (!operations.containsKey(requestId)) throw new VoldemortException("No operation with id " + requestId + " found"); if (operations.get(requestId).getStatus().isComplete()) { if (logger.isDebugEnabled()) logger.debug("Operation complete " + requestId); if (remove) operations.remove(requestId); return true; } return false; }
java
{ "resource": "" }
q4696
AsyncOperationService.getStatus
train
@JmxOperation(description = "Retrieve operation status") public String getStatus(int id) { try { return getOperationStatus(id).toString(); } catch(VoldemortException e) { return "No operation with id " + id + " found"; } }
java
{ "resource": "" }
q4697
AsyncOperationService.getAsyncOperationList
train
public List<Integer> getAsyncOperationList(boolean showCompleted) { /** * Create a copy using an immutable set to avoid a * {@link java.util.ConcurrentModificationException} */ Set<Integer> keySet = ImmutableSet.copyOf(operations.keySet()); if(showCompleted) return new ArrayList<Integer>(keySet); List<Integer> keyList = new ArrayList<Integer>(); for(int key: keySet) { AsyncOperation operation = operations.get(key); if(operation != null && !operation.getStatus().isComplete()) keyList.add(key); } return keyList; }
java
{ "resource": "" }
q4698
AsyncOperationService.stopAsyncOperation
train
@JmxOperation public String stopAsyncOperation(int requestId) { try { stopOperation(requestId); } catch(VoldemortException e) { return e.getMessage(); } return "Stopping operation " + requestId; }
java
{ "resource": "" }
q4699
RetentionEnforcingStore.updateStoreDefinition
train
@Override public void updateStoreDefinition(StoreDefinition storeDef) { this.storeDef = storeDef; if(storeDef.hasRetentionPeriod()) this.retentionTimeMs = storeDef.getRetentionDays() * Time.MS_PER_DAY; }
java
{ "resource": "" }