_id | title | partition | text | language | meta_information
---|---|---|---|---|---|
q4400
|
CoordinatorConfig.setProperties
|
train
|
private void setProperties(Properties properties) {
Props props = new Props(properties);
if(props.containsKey(BOOTSTRAP_URLS_PROPERTY)) {
setBootstrapURLs(props.getList(BOOTSTRAP_URLS_PROPERTY));
}
if(props.containsKey(FAT_CLIENTS_CONFIG_SOURCE)) {
setFatClientConfigSource(StoreClientConfigSource.get(props.getString(FAT_CLIENTS_CONFIG_SOURCE)));
}
if(props.containsKey(FAT_CLIENTS_CONFIG_FILE_PATH_PROPERTY)) {
setFatClientConfigPath(props.getString(FAT_CLIENTS_CONFIG_FILE_PATH_PROPERTY));
}
if(props.containsKey(METADATA_CHECK_INTERVAL_IN_MS)) {
setMetadataCheckIntervalInMs(props.getInt(METADATA_CHECK_INTERVAL_IN_MS));
}
if(props.containsKey(NETTY_SERVER_PORT)) {
setServerPort(props.getInt(NETTY_SERVER_PORT));
}
if(props.containsKey(NETTY_SERVER_BACKLOG)) {
setNettyServerBacklog(props.getInt(NETTY_SERVER_BACKLOG));
}
if(props.containsKey(COORDINATOR_CORE_THREADS)) {
setCoordinatorCoreThreads(props.getInt(COORDINATOR_CORE_THREADS));
}
if(props.containsKey(COORDINATOR_MAX_THREADS)) {
setCoordinatorMaxThreads(props.getInt(COORDINATOR_MAX_THREADS));
}
if(props.containsKey(COORDINATOR_QUEUED_REQUESTS)) {
setCoordinatorQueuedRequestsSize(props.getInt(COORDINATOR_QUEUED_REQUESTS));
}
if(props.containsKey(HTTP_MESSAGE_DECODER_MAX_INITIAL_LINE_LENGTH)) {
setHttpMessageDecoderMaxInitialLength(props.getInt(HTTP_MESSAGE_DECODER_MAX_INITIAL_LINE_LENGTH));
}
if(props.containsKey(HTTP_MESSAGE_DECODER_MAX_HEADER_SIZE)) {
setHttpMessageDecoderMaxHeaderSize(props.getInt(HTTP_MESSAGE_DECODER_MAX_HEADER_SIZE));
}
if(props.containsKey(HTTP_MESSAGE_DECODER_MAX_CHUNK_SIZE)) {
setHttpMessageDecoderMaxChunkSize(props.getInt(HTTP_MESSAGE_DECODER_MAX_CHUNK_SIZE));
}
if(props.containsKey(ADMIN_ENABLE)) {
setAdminServiceEnabled(props.getBoolean(ADMIN_ENABLE));
}
if(props.containsKey(ADMIN_PORT)) {
setAdminPort(props.getInt(ADMIN_PORT));
}
}
|
java
|
{
"resource": ""
}
|
q4401
|
CoordinatorConfig.setBootstrapURLs
|
train
|
public CoordinatorConfig setBootstrapURLs(List<String> bootstrapUrls) {
this.bootstrapURLs = Utils.notNull(bootstrapUrls);
if(this.bootstrapURLs.size() <= 0)
throw new IllegalArgumentException("Must provide at least one bootstrap URL.");
return this;
}
|
java
|
{
"resource": ""
}
|
q4402
|
HintedHandoff.sendOneAsyncHint
|
train
|
private void sendOneAsyncHint(final ByteArray slopKey,
final Versioned<byte[]> slopVersioned,
final List<Node> nodesToTry) {
Node nodeToHostHint = null;
boolean foundNode = false;
while(nodesToTry.size() > 0) {
nodeToHostHint = nodesToTry.remove(0);
if(!failedNodes.contains(nodeToHostHint) && failureDetector.isAvailable(nodeToHostHint)) {
foundNode = true;
break;
}
}
if(!foundNode) {
Slop slop = slopSerializer.toObject(slopVersioned.getValue());
logger.error("Trying to send an async hint but used up all nodes. key: "
+ slop.getKey() + " version: " + slopVersioned.getVersion().toString());
return;
}
final Node node = nodeToHostHint;
int nodeId = node.getId();
NonblockingStore nonblockingStore = nonblockingSlopStores.get(nodeId);
Utils.notNull(nonblockingStore);
final Long startNs = System.nanoTime();
NonblockingStoreCallback callback = new NonblockingStoreCallback() {
@Override
public void requestComplete(Object result, long requestTime) {
Slop slop = null;
boolean loggerDebugEnabled = logger.isDebugEnabled();
if(loggerDebugEnabled) {
slop = slopSerializer.toObject(slopVersioned.getValue());
}
Response<ByteArray, Object> response = new Response<ByteArray, Object>(node,
slopKey,
result,
requestTime);
if(response.getValue() instanceof Exception
&& !(response.getValue() instanceof ObsoleteVersionException)) {
if(!failedNodes.contains(node))
failedNodes.add(node);
if(response.getValue() instanceof UnreachableStoreException) {
UnreachableStoreException use = (UnreachableStoreException) response.getValue();
if(loggerDebugEnabled) {
logger.debug("Write of key " + slop.getKey() + " for "
+ slop.getNodeId() + " to node " + node
+ " failed due to unreachable: " + use.getMessage());
}
failureDetector.recordException(node, (System.nanoTime() - startNs)
/ Time.NS_PER_MS, use);
}
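// This node failed to host the hint; retry asynchronously on the next candidate node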
sendOneAsyncHint(slopKey, slopVersioned, nodesToTry);
}
if(loggerDebugEnabled)
logger.debug("Slop write of key " + slop.getKey() + " for node "
+ slop.getNodeId() + " to node " + node + " succeeded in "
+ (System.nanoTime() - startNs) + " ns");
failureDetector.recordSuccess(node, (System.nanoTime() - startNs) / Time.NS_PER_MS);
}
};
nonblockingStore.submitPutRequest(slopKey, slopVersioned, null, callback, timeoutMs);
}
|
java
|
{
"resource": ""
}
|
q4403
|
ExternalSorter.sorted
|
train
|
public Iterable<V> sorted(Iterator<V> input) {
ExecutorService executor = new ThreadPoolExecutor(this.numThreads,
this.numThreads,
1000L,
TimeUnit.MILLISECONDS,
new SynchronousQueue<Runnable>(),
new CallerRunsPolicy());
final AtomicInteger count = new AtomicInteger(0);
final List<File> tempFiles = Collections.synchronizedList(new ArrayList<File>());
while(input.hasNext()) {
final int segmentId = count.getAndIncrement();
final long segmentStartMs = System.currentTimeMillis();
logger.info("Segment " + segmentId + ": filling sort buffer for segment...");
@SuppressWarnings("unchecked")
final V[] buffer = (V[]) new Object[internalSortSize];
int segmentSizeIter = 0;
for(; segmentSizeIter < internalSortSize && input.hasNext(); segmentSizeIter++)
buffer[segmentSizeIter] = input.next();
final int segmentSize = segmentSizeIter;
logger.info("Segment " + segmentId + ": sort buffer filled...adding to sort queue.");
// sort and write out asynchronously
executor.execute(new Runnable() {
public void run() {
logger.info("Segment " + segmentId + ": sorting buffer.");
long start = System.currentTimeMillis();
Arrays.sort(buffer, 0, segmentSize, comparator);
long elapsed = System.currentTimeMillis() - start;
logger.info("Segment " + segmentId + ": sort completed in " + elapsed
+ " ms, writing to temp file.");
// write out values to a temp file
try {
File tempFile = File.createTempFile("segment-", ".dat", tempDir);
tempFile.deleteOnExit();
tempFiles.add(tempFile);
OutputStream os = new BufferedOutputStream(new FileOutputStream(tempFile),
bufferSize);
if(gzip)
os = new GZIPOutputStream(os);
DataOutputStream output = new DataOutputStream(os);
for(int i = 0; i < segmentSize; i++)
writeValue(output, buffer[i]);
output.close();
} catch(IOException e) {
throw new VoldemortException(e);
}
long segmentElapsed = System.currentTimeMillis() - segmentStartMs;
logger.info("Segment " + segmentId + ": completed processing of segment in "
+ segmentElapsed + " ms.");
}
});
}
// wait for all sorting to complete
executor.shutdown();
try {
executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
// create iterator over sorted values
return new DefaultIterable<V>(new ExternalSorterIterator(tempFiles, bufferSize
/ tempFiles.size()));
} catch(InterruptedException e) {
throw new RuntimeException(e);
}
}
|
java
|
{
"resource": ""
}
|
q4404
|
PerformParallelPutRequests.processResponse
|
train
|
private void processResponse(Response<ByteArray, Object> response, Pipeline pipeline) {
if(response == null) {
logger.warn("RoutingTimedout on waiting for async ops; parallelResponseToWait: "
+ numNodesPendingResponse + "; preferred-1: " + (preferred - 1)
+ "; quorumOK: " + quorumSatisfied + "; zoneOK: " + zonesSatisfied);
} else {
numNodesPendingResponse = numNodesPendingResponse - 1;
numResponsesGot = numResponsesGot + 1;
if(response.getValue() instanceof Exception
&& !(response.getValue() instanceof ObsoleteVersionException)) {
if(logger.isDebugEnabled()) {
logger.debug("PUT {key:" + key + "} handling async put error");
}
if(response.getValue() instanceof QuotaExceededException) {
/**
* TODO Not sure if we need to count this Exception for
* stats or silently ignore it and just log a warning. While
* a QuotaExceededException thrown from other places means the
* operation failed, this one does not fail the operation
* but instead stores slops. Introduce a new Exception on the
* client side just to monitor how many async writes fail on
* exceeding quota?
*
*/
if(logger.isDebugEnabled()) {
logger.debug("Received quota exceeded exception after a successful "
+ pipeline.getOperation().getSimpleName() + " call on node "
+ response.getNode().getId() + ", store '"
+ pipelineData.getStoreName() + "', master-node '"
+ pipelineData.getMaster().getId() + "'");
}
} else if(handleResponseError(response, pipeline, failureDetector)) {
if(logger.isDebugEnabled()) {
logger.debug("PUT {key:" + key
+ "} severe async put error, exiting parallel put stage");
}
return;
}
if(PipelineRoutedStore.isSlopableFailure(response.getValue())
|| response.getValue() instanceof QuotaExceededException) {
/**
* We want to slop ParallelPuts which fail due to
* QuotaExceededException.
*
* TODO Though this is not the right way of doing things, in
* order to avoid inconsistencies and data loss, we chose to
* slop the quota-failed parallel puts.
*
* As a long term solution - 1) either Quota management
* should be hidden completely in a routing layer like the
* Coordinator, or 2) the Server should be able to
* distinguish between serial and parallel puts and should
* only enforce quotas for serial puts.
*
*/
pipelineData.getSynchronizer().tryDelegateSlop(response.getNode());
}
if(logger.isDebugEnabled()) {
logger.debug("PUT {key:" + key + "} handled async put error");
}
} else {
pipelineData.incrementSuccesses();
failureDetector.recordSuccess(response.getNode(), response.getRequestTime());
pipelineData.getZoneResponses().add(response.getNode().getZoneId());
}
}
}
|
java
|
{
"resource": ""
}
|
q4405
|
PerformParallelPutRequests.isZonesSatisfied
|
train
|
private boolean isZonesSatisfied() {
boolean zonesSatisfied = false;
if(pipelineData.getZonesRequired() == null) {
zonesSatisfied = true;
} else {
int numZonesSatisfied = pipelineData.getZoneResponses().size();
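// zonesRequired presumably counts the remote zones required in addition to the local zone, hence the +1 below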
if(numZonesSatisfied >= (pipelineData.getZonesRequired() + 1)) {
zonesSatisfied = true;
}
}
return zonesSatisfied;
}
|
java
|
{
"resource": ""
}
|
q4406
|
ClientConfig.setIdleConnectionTimeout
|
train
|
public ClientConfig setIdleConnectionTimeout(long idleConnectionTimeout, TimeUnit unit) {
if (idleConnectionTimeout <= 0) {
this.idleConnectionTimeoutMs = -1;
} else {
if(unit.toMinutes(idleConnectionTimeout) < 10) {
throw new IllegalArgumentException("idleConnectionTimeout should be minimum of 10 minutes");
}
this.idleConnectionTimeoutMs = unit.toMillis(idleConnectionTimeout);
}
return this;
}
|
java
|
{
"resource": ""
}
|
q4407
|
ClientConfig.setNodeBannagePeriod
|
train
|
@Deprecated
public ClientConfig setNodeBannagePeriod(int nodeBannagePeriod, TimeUnit unit) {
this.failureDetectorBannagePeriod = unit.toMillis(nodeBannagePeriod);
return this;
}
|
java
|
{
"resource": ""
}
|
q4408
|
ClientConfig.setThreadIdleTime
|
train
|
@Deprecated
public ClientConfig setThreadIdleTime(long threadIdleTime, TimeUnit unit) {
this.threadIdleMs = unit.toMillis(threadIdleTime);
return this;
}
|
java
|
{
"resource": ""
}
|
q4409
|
ClientRequestExecutorPool.close
|
train
|
@Override
public void close(SocketDestination destination) {
factory.setLastClosedTimestamp(destination);
queuedPool.reset(destination);
}
|
java
|
{
"resource": ""
}
|
q4410
|
ClientRequestExecutorPool.close
|
train
|
@Override
public void close() {
// unregister MBeans
if(stats != null) {
try {
if(this.jmxEnabled)
JmxUtils.unregisterMbean(getAggregateMetricName());
} catch(Exception e) {}
stats.close();
}
factory.close();
queuedPool.close();
}
|
java
|
{
"resource": ""
}
|
q4411
|
HadoopStoreBuilderUtils.readFileContents
|
train
|
public static String readFileContents(FileSystem fs, Path path, int bufferSize)
throws IOException {
if(bufferSize <= 0)
return new String();
FSDataInputStream input = fs.open(path);
byte[] buffer = new byte[bufferSize];
ByteArrayOutputStream stream = new ByteArrayOutputStream();
while(true) {
int read = input.read(buffer);
if(read < 0) {
break;
} else {
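// Trim the buffer to the bytes actually read so only valid data is written below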
buffer = ByteUtils.copy(buffer, 0, read);
}
stream.write(buffer);
}
return new String(stream.toByteArray());
}
|
java
|
{
"resource": ""
}
|
q4412
|
HadoopStoreBuilderUtils.getDataChunkFiles
|
train
|
public static FileStatus[] getDataChunkFiles(FileSystem fs,
Path path,
final int partitionId,
final int replicaType) throws IOException {
return fs.listStatus(path, new PathFilter() {
public boolean accept(Path input) {
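// Accept only data chunk files named "<partitionId>_<replicaType>_<chunkId>.data"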
if(input.getName().matches("^" + Integer.toString(partitionId) + "_"
+ Integer.toString(replicaType) + "_[\\d]+\\.data")) {
return true;
} else {
return false;
}
}
});
}
|
java
|
{
"resource": ""
}
|
q4413
|
PerformSerialRequests.isSatisfied
|
train
|
private boolean isSatisfied() {
if(pipelineData.getZonesRequired() != null) {
return ((pipelineData.getSuccesses() >= required) && (pipelineData.getZoneResponses()
.size() >= (pipelineData.getZonesRequired() + 1)));
} else {
return pipelineData.getSuccesses() >= required;
}
}
|
java
|
{
"resource": ""
}
|
q4414
|
RebalanceBatchPlan.getCrossZonePartitionStoreMoves
|
train
|
public int getCrossZonePartitionStoreMoves() {
int xzonePartitionStoreMoves = 0;
for (RebalanceTaskInfo info : batchPlan) {
Node donorNode = finalCluster.getNodeById(info.getDonorId());
Node stealerNode = finalCluster.getNodeById(info.getStealerId());
if(donorNode.getZoneId() != stealerNode.getZoneId()) {
xzonePartitionStoreMoves += info.getPartitionStoreMoves();
}
}
return xzonePartitionStoreMoves;
}
|
java
|
{
"resource": ""
}
|
q4415
|
RebalanceBatchPlan.getDonorId
|
train
|
protected int getDonorId(StoreRoutingPlan currentSRP,
StoreRoutingPlan finalSRP,
int stealerZoneId,
int stealerNodeId,
int stealerPartitionId) {
int stealerZoneNAry = finalSRP.getZoneNaryForNodesPartition(stealerZoneId,
stealerNodeId,
stealerPartitionId);
int donorZoneId;
if(currentSRP.zoneNAryExists(stealerZoneId, stealerZoneNAry, stealerPartitionId)) {
// Steal from local n-ary (since one exists).
donorZoneId = stealerZoneId;
} else {
// Steal from zone that hosts primary partition Id.
int currentMasterNodeId = currentSRP.getNodeIdForPartitionId(stealerPartitionId);
donorZoneId = currentCluster.getNodeById(currentMasterNodeId).getZoneId();
}
return currentSRP.getNodeIdForZoneNary(donorZoneId, stealerZoneNAry, stealerPartitionId);
}
|
java
|
{
"resource": ""
}
|
q4416
|
RestErrorHandler.handleExceptions
|
train
|
protected void handleExceptions(MessageEvent messageEvent, Exception exception) {
logger.error("Unknown exception. Internal Server Error.", exception);
writeErrorResponse(messageEvent,
HttpResponseStatus.INTERNAL_SERVER_ERROR,
"Internal Server Error");
}
|
java
|
{
"resource": ""
}
|
q4417
|
RestErrorHandler.writeErrorResponse
|
train
|
public static void writeErrorResponse(MessageEvent messageEvent,
HttpResponseStatus status,
String message) {
// Create the Response object
HttpResponse response = new DefaultHttpResponse(HTTP_1_1, status);
response.setHeader(CONTENT_TYPE, "text/plain; charset=UTF-8");
response.setContent(ChannelBuffers.copiedBuffer("Failure: " + status.toString() + ". "
+ message + "\r\n", CharsetUtil.UTF_8));
response.setHeader(CONTENT_LENGTH, response.getContent().readableBytes());
// Write the response to the Netty Channel
messageEvent.getChannel().write(response);
}
|
java
|
{
"resource": ""
}
|
q4418
|
RestPutRequestValidator.parseAndValidateRequest
|
train
|
@Override
public boolean parseAndValidateRequest() {
boolean result = false;
if(!super.parseAndValidateRequest() || !hasVectorClock(this.isVectorClockOptional)
|| !hasContentLength() || !hasContentType()) {
result = false;
} else {
result = true;
}
return result;
}
|
java
|
{
"resource": ""
}
|
q4419
|
RestPutRequestValidator.hasContentLength
|
train
|
protected boolean hasContentLength() {
boolean result = false;
String contentLength = this.request.getHeader(RestMessageHeaders.CONTENT_LENGTH);
if(contentLength != null) {
try {
Long.parseLong(contentLength);
result = true;
} catch(NumberFormatException nfe) {
logger.error("Exception when validating put request. Incorrect content length parameter. Cannot parse this to long: "
+ contentLength + ". Details: " + nfe.getMessage(),
nfe);
RestErrorHandler.writeErrorResponse(this.messageEvent,
HttpResponseStatus.BAD_REQUEST,
"Incorrect content length parameter. Cannot parse this to long: "
+ contentLength + ". Details: "
+ nfe.getMessage());
}
} else {
logger.error("Error when validating put request. Missing Content-Length header.");
RestErrorHandler.writeErrorResponse(this.messageEvent,
HttpResponseStatus.BAD_REQUEST,
"Missing Content-Length header");
}
return result;
}
|
java
|
{
"resource": ""
}
|
q4420
|
RestPutRequestValidator.hasContentType
|
train
|
protected boolean hasContentType() {
boolean result = false;
if(this.request.getHeader(RestMessageHeaders.CONTENT_TYPE) != null) {
result = true;
} else {
logger.error("Error when validating put request. Missing Content-Type header.");
RestErrorHandler.writeErrorResponse(this.messageEvent,
HttpResponseStatus.BAD_REQUEST,
"Missing Content-Type header");
}
return result;
}
|
java
|
{
"resource": ""
}
|
q4421
|
RestPutRequestValidator.parseValue
|
train
|
private void parseValue() {
ChannelBuffer content = this.request.getContent();
this.parsedValue = new byte[content.capacity()];
content.readBytes(parsedValue);
}
|
java
|
{
"resource": ""
}
|
q4422
|
HdfsFailedFetchLock.handleIOException
|
train
|
private void handleIOException(IOException e, String action, int attempt)
throws VoldemortException, InterruptedException {
if ( // any of the following happens, we need to bubble up
// FileSystem instance got closed, somehow
e.getMessage().contains("Filesystem closed") ||
// HDFS permission issues
ExceptionUtils.recursiveClassEquals(e, AccessControlException.class)) {
throw new VoldemortException("Got an IOException we cannot recover from while trying to " +
action + ". Attempt # " + attempt + "/" + maxAttempts + ". Will not try again.", e);
} else {
logFailureAndWait(action, IO_EXCEPTION, attempt, e);
}
}
|
java
|
{
"resource": ""
}
|
q4423
|
AbstractFailureDetector.setAvailable
|
train
|
private boolean setAvailable(NodeStatus nodeStatus, boolean isAvailable) {
synchronized(nodeStatus) {
boolean previous = nodeStatus.isAvailable();
nodeStatus.setAvailable(isAvailable);
nodeStatus.setLastChecked(getConfig().getTime().getMilliseconds());
return previous;
}
}
|
java
|
{
"resource": ""
}
|
q4424
|
AdminParserUtils.acceptsDir
|
train
|
public static void acceptsDir(OptionParser parser) {
parser.acceptsAll(Arrays.asList(OPT_D, OPT_DIR), "directory path for input/output")
.withRequiredArg()
.describedAs("dir-path")
.ofType(String.class);
}
|
java
|
{
"resource": ""
}
|
q4425
|
AdminParserUtils.acceptsFile
|
train
|
public static void acceptsFile(OptionParser parser) {
parser.acceptsAll(Arrays.asList(OPT_F, OPT_FILE), "file path for input/output")
.withRequiredArg()
.describedAs("file-path")
.ofType(String.class);
}
|
java
|
{
"resource": ""
}
|
q4426
|
AdminParserUtils.acceptsFormat
|
train
|
public static void acceptsFormat(OptionParser parser) {
parser.accepts(OPT_FORMAT, "format of key or entry, could be hex, json or binary")
.withRequiredArg()
.describedAs("hex | json | binary")
.ofType(String.class);
}
|
java
|
{
"resource": ""
}
|
q4427
|
AdminParserUtils.acceptsNodeSingle
|
train
|
public static void acceptsNodeSingle(OptionParser parser) {
parser.acceptsAll(Arrays.asList(OPT_N, OPT_NODE), "node id")
.withRequiredArg()
.describedAs("node-id")
.ofType(Integer.class);
}
|
java
|
{
"resource": ""
}
|
q4428
|
AdminParserUtils.acceptsUrl
|
train
|
public static void acceptsUrl(OptionParser parser) {
parser.acceptsAll(Arrays.asList(OPT_U, OPT_URL), "bootstrap url")
.withRequiredArg()
.describedAs("url")
.ofType(String.class);
}
|
java
|
{
"resource": ""
}
|
q4429
|
AdminParserUtils.acceptsZone
|
train
|
public static void acceptsZone(OptionParser parser) {
parser.acceptsAll(Arrays.asList(OPT_Z, OPT_ZONE), "zone id")
.withRequiredArg()
.describedAs("zone-id")
.ofType(Integer.class);
}
|
java
|
{
"resource": ""
}
|
q4430
|
AdminParserUtils.acceptsHex
|
train
|
public static void acceptsHex(OptionParser parser) {
parser.acceptsAll(Arrays.asList(OPT_X, OPT_HEX), "fetch key/entry by key value of hex type")
.withRequiredArg()
.describedAs("key-list")
.withValuesSeparatedBy(',')
.ofType(String.class);
}
|
java
|
{
"resource": ""
}
|
q4431
|
AdminParserUtils.acceptsJson
|
train
|
public static void acceptsJson(OptionParser parser) {
parser.acceptsAll(Arrays.asList(OPT_J, OPT_JSON),
"fetch key/entry by key value of json type")
.withRequiredArg()
.describedAs("key-list")
.withValuesSeparatedBy(',')
.ofType(String.class);
}
|
java
|
{
"resource": ""
}
|
q4432
|
AdminParserUtils.acceptsNodeMultiple
|
train
|
public static void acceptsNodeMultiple(OptionParser parser) {
parser.acceptsAll(Arrays.asList(OPT_N, OPT_NODE), "node id list")
.withRequiredArg()
.describedAs("node-id-list")
.withValuesSeparatedBy(',')
.ofType(Integer.class);
}
|
java
|
{
"resource": ""
}
|
q4433
|
AdminParserUtils.acceptsPartition
|
train
|
public static void acceptsPartition(OptionParser parser) {
parser.acceptsAll(Arrays.asList(OPT_P, OPT_PARTITION), "partition id list")
.withRequiredArg()
.describedAs("partition-id-list")
.withValuesSeparatedBy(',')
.ofType(Integer.class);
}
|
java
|
{
"resource": ""
}
|
q4434
|
AdminParserUtils.checkRequired
|
train
|
public static void checkRequired(OptionSet options, String opt) throws VoldemortException {
List<String> opts = Lists.newArrayList();
opts.add(opt);
checkRequired(options, opts);
}
|
java
|
{
"resource": ""
}
|
q4435
|
AdminParserUtils.checkRequired
|
train
|
public static void checkRequired(OptionSet options, String opt1, String opt2)
throws VoldemortException {
List<String> opts = Lists.newArrayList();
opts.add(opt1);
opts.add(opt2);
checkRequired(options, opts);
}
|
java
|
{
"resource": ""
}
|
q4436
|
AdminParserUtils.checkRequired
|
train
|
public static void checkRequired(OptionSet options, List<String> opts)
throws VoldemortException {
List<String> optCopy = Lists.newArrayList();
for(String opt: opts) {
if(options.has(opt)) {
optCopy.add(opt);
}
}
if(optCopy.size() < 1) {
System.err.println("Please specify one of the following options:");
for(String opt: opts) {
System.err.println("--" + opt);
}
Utils.croak("Missing required option.");
}
if(optCopy.size() > 1) {
System.err.println("Conflicting options:");
for(String opt: optCopy) {
System.err.println("--" + opt);
}
Utils.croak("Conflicting options detected.");
}
}
|
java
|
{
"resource": ""
}
|
q4437
|
RestCoordinatorRequestHandler.registerRequest
|
train
|
@Override
protected void registerRequest(RestRequestValidator requestValidator,
ChannelHandlerContext ctx,
MessageEvent messageEvent) {
// At this point we know the request is valid and we have a
// error handler. So we construct the composite Voldemort
// request object.
CompositeVoldemortRequest<ByteArray, byte[]> requestObject = requestValidator.constructCompositeVoldemortRequestObject();
if(requestObject != null) {
DynamicTimeoutStoreClient<ByteArray, byte[]> storeClient = null;
if(!requestValidator.getStoreName().equalsIgnoreCase(RestMessageHeaders.SCHEMATA_STORE)) {
storeClient = this.fatClientMap.get(requestValidator.getStoreName());
if(storeClient == null) {
logger.error("Error when getting store. Non Existing store client.");
RestErrorHandler.writeErrorResponse(messageEvent,
HttpResponseStatus.BAD_REQUEST,
"Non Existing store client. Critical error.");
return;
}
} else {
requestObject.setOperationType(VoldemortOpCode.GET_METADATA_OP_CODE);
}
CoordinatorStoreClientRequest coordinatorRequest = new CoordinatorStoreClientRequest(requestObject,
storeClient);
Channels.fireMessageReceived(ctx, coordinatorRequest);
}
}
|
java
|
{
"resource": ""
}
|
q4438
|
RestDeleteErrorHandler.handleExceptions
|
train
|
@Override
public void handleExceptions(MessageEvent messageEvent, Exception exception) {
if(exception instanceof InvalidMetadataException) {
logger.error("Exception when deleting. The requested key does not exist in this partition",
exception);
writeErrorResponse(messageEvent,
HttpResponseStatus.REQUESTED_RANGE_NOT_SATISFIABLE,
"The requested key does not exist in this partition");
} else if(exception instanceof PersistenceFailureException) {
logger.error("Exception when deleting. Operation failed", exception);
writeErrorResponse(messageEvent,
HttpResponseStatus.INTERNAL_SERVER_ERROR,
"Operation failed");
} else if(exception instanceof UnsupportedOperationException) {
logger.error("Exception when deleting. Operation not supported in read-only store ",
exception);
writeErrorResponse(messageEvent,
HttpResponseStatus.METHOD_NOT_ALLOWED,
"Operation not supported in read-only store");
} else if(exception instanceof StoreTimeoutException) {
String errorDescription = "DELETE Request timed out: " + exception.getMessage();
logger.error(errorDescription);
writeErrorResponse(messageEvent, HttpResponseStatus.REQUEST_TIMEOUT, errorDescription);
} else if(exception instanceof InsufficientOperationalNodesException) {
String errorDescription = "DELETE Request failed: " + exception.getMessage();
logger.error(errorDescription);
writeErrorResponse(messageEvent,
HttpResponseStatus.INTERNAL_SERVER_ERROR,
errorDescription);
} else {
super.handleExceptions(messageEvent, exception);
}
}
|
java
|
{
"resource": ""
}
|
q4439
|
StoreStats.recordPutTimeAndSize
|
train
|
public void recordPutTimeAndSize(long timeNS, long valueSize, long keySize) {
recordTime(Tracked.PUT, timeNS, 0, valueSize, keySize, 0);
}
|
java
|
{
"resource": ""
}
|
q4440
|
StoreStats.recordGetAllTime
|
train
|
public void recordGetAllTime(long timeNS,
int requested,
int returned,
long totalValueBytes,
long totalKeyBytes) {
recordTime(Tracked.GET_ALL,
timeNS,
requested - returned,
totalValueBytes,
totalKeyBytes,
requested);
}
|
java
|
{
"resource": ""
}
|
q4441
|
StoreStats.recordTime
|
train
|
private void recordTime(Tracked op,
long timeNS,
long numEmptyResponses,
long valueSize,
long keySize,
long getAllAggregateRequests) {
counters.get(op).addRequest(timeNS,
numEmptyResponses,
valueSize,
keySize,
getAllAggregateRequests);
if (logger.isTraceEnabled() && !storeName.contains("aggregate") && !storeName.contains("voldsys$"))
logger.trace("Store '" + storeName + "' logged a " + op.toString() + " request taking " +
((double) timeNS / voldemort.utils.Time.NS_PER_MS) + " ms");
}
|
java
|
{
"resource": ""
}
|
q4442
|
VoldemortUtils.getCommaSeparatedStringValues
|
train
|
public static List<String> getCommaSeparatedStringValues(String paramValue, String type) {
List<String> commaSeparatedProps = Lists.newArrayList();
for(String url: Utils.COMMA_SEP.split(paramValue.trim()))
if(url.trim().length() > 0)
commaSeparatedProps.add(url);
if(commaSeparatedProps.size() == 0) {
throw new RuntimeException("Number of " + type + " should be greater than zero");
}
return commaSeparatedProps;
}
|
java
|
{
"resource": ""
}
|
q4443
|
CoordinatorProxyService.initializeFatClient
|
train
|
private synchronized void initializeFatClient(String storeName, Properties storeClientProps) {
// updates the coordinator metadata with recent stores and cluster xml
updateCoordinatorMetadataWithLatestState();
logger.info("Creating a Fat client for store: " + storeName);
SocketStoreClientFactory fatClientFactory = getFatClientFactory(this.coordinatorConfig.getBootstrapURLs(),
storeClientProps);
if(this.fatClientMap == null) {
this.fatClientMap = new HashMap<String, DynamicTimeoutStoreClient<ByteArray, byte[]>>();
}
DynamicTimeoutStoreClient<ByteArray, byte[]> fatClient = new DynamicTimeoutStoreClient<ByteArray, byte[]>(storeName,
fatClientFactory,
1,
this.coordinatorMetadata.getStoreDefs(),
this.coordinatorMetadata.getClusterXmlStr());
this.fatClientMap.put(storeName, fatClient);
}
|
java
|
{
"resource": ""
}
|
q4444
|
RebalanceBatchPlanProgressBar.completeTask
|
train
|
synchronized public void completeTask(int taskId, int partitionStoresMigrated) {
tasksInFlight.remove(taskId);
numTasksCompleted++;
numPartitionStoresMigrated += partitionStoresMigrated;
updateProgressBar();
}
|
java
|
{
"resource": ""
}
|
q4445
|
RebalanceBatchPlanProgressBar.getPrettyProgressBar
|
train
|
synchronized public String getPrettyProgressBar() {
StringBuilder sb = new StringBuilder();
double taskRate = numTasksCompleted / (double) totalTaskCount;
double partitionStoreRate = numPartitionStoresMigrated / (double) totalPartitionStoreCount;
long deltaTimeMs = System.currentTimeMillis() - startTimeMs;
long taskTimeRemainingMs = Long.MAX_VALUE;
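// Estimate remaining time as elapsed * (fraction remaining / fraction completed)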
if(taskRate > 0) {
taskTimeRemainingMs = (long) (deltaTimeMs * ((1.0 / taskRate) - 1.0));
}
long partitionStoreTimeRemainingMs = Long.MAX_VALUE;
if(partitionStoreRate > 0) {
partitionStoreTimeRemainingMs = (long) (deltaTimeMs * ((1.0 / partitionStoreRate) - 1.0));
}
// Title line
sb.append("Progress update on rebalancing batch " + batchId).append(Utils.NEWLINE);
// Tasks in flight update
sb.append("There are currently " + tasksInFlight.size() + " rebalance tasks executing: ")
.append(tasksInFlight)
.append(".")
.append(Utils.NEWLINE);
// Tasks completed update
sb.append("\t" + numTasksCompleted + " out of " + totalTaskCount
+ " rebalance tasks complete.")
.append(Utils.NEWLINE)
.append("\t")
.append(decimalFormatter.format(taskRate * 100.0))
.append("% done, estimate ")
.append(taskTimeRemainingMs)
.append(" ms (")
.append(TimeUnit.MILLISECONDS.toMinutes(taskTimeRemainingMs))
.append(" minutes) remaining.")
.append(Utils.NEWLINE);
// Partition-stores migrated update
sb.append("\t" + numPartitionStoresMigrated + " out of " + totalPartitionStoreCount
+ " partition-stores migrated.")
.append(Utils.NEWLINE)
.append("\t")
.append(decimalFormatter.format(partitionStoreRate * 100.0))
.append("% done, estimate ")
.append(partitionStoreTimeRemainingMs)
.append(" ms (")
.append(TimeUnit.MILLISECONDS.toMinutes(partitionStoreTimeRemainingMs))
.append(" minutes) remaining.")
.append(Utils.NEWLINE);
return sb.toString();
}
|
java
|
{
"resource": ""
}
|
q4446
|
FetchStreamRequestHandler.progressInfoMessage
|
train
|
protected void progressInfoMessage(final String tag) {
if(logger.isInfoEnabled()) {
long totalTimeS = (System.currentTimeMillis() - startTimeMs) / Time.MS_PER_SECOND;
logger.info(tag + " : scanned " + scanned + " and fetched " + fetched + " for store '"
+ storageEngine.getName() + "' partitionIds:" + partitionIds + " in "
+ totalTimeS + " s");
}
}
|
java
|
{
"resource": ""
}
|
q4447
|
FetchStreamRequestHandler.sendMessage
|
train
|
protected void sendMessage(DataOutputStream outputStream, Message message) throws IOException {
long startNs = System.nanoTime();
ProtoUtils.writeMessage(outputStream, message);
if(streamStats != null) {
streamStats.reportNetworkTime(operation,
Utils.elapsedTimeNs(startNs, System.nanoTime()));
}
}
|
java
|
{
"resource": ""
}
|
q4448
|
FetchStreamRequestHandler.reportStorageOpTime
|
train
|
protected void reportStorageOpTime(long startNs) {
if(streamStats != null) {
streamStats.reportStreamingScan(operation);
streamStats.reportStorageTime(operation,
Utils.elapsedTimeNs(startNs, System.nanoTime()));
}
}
|
java
|
{
"resource": ""
}
|
q4449
|
SocketServer.awaitStartupCompletion
|
train
|
public void awaitStartupCompletion() {
try {
Object obj = startedStatusQueue.take();
if(obj instanceof Throwable)
throw new VoldemortException((Throwable) obj);
} catch(InterruptedException e) {
// this is okay, if we are interrupted we can stop waiting
}
}
|
java
|
{
"resource": ""
}
|
q4450
|
StreamingClient.streamingSlopPut
|
train
|
protected synchronized void streamingSlopPut(ByteArray key,
Versioned<byte[]> value,
String storeName,
int failedNodeId) throws IOException {
Slop slop = new Slop(storeName,
Slop.Operation.PUT,
key,
value.getValue(),
null,
failedNodeId,
new Date());
ByteArray slopKey = slop.makeKey();
Versioned<byte[]> slopValue = new Versioned<byte[]>(slopSerializer.toBytes(slop),
value.getVersion());
Node failedNode = adminClient.getAdminClientCluster().getNodeById(failedNodeId);
HandoffToAnyStrategy slopRoutingStrategy = new HandoffToAnyStrategy(adminClient.getAdminClientCluster(),
true,
failedNode.getZoneId());
// node Id which will receive the slop
int slopDestination = slopRoutingStrategy.routeHint(failedNode).get(0).getId();
VAdminProto.PartitionEntry partitionEntry = VAdminProto.PartitionEntry.newBuilder()
.setKey(ProtoUtils.encodeBytes(slopKey))
.setVersioned(ProtoUtils.encodeVersioned(slopValue))
.build();
VAdminProto.UpdatePartitionEntriesRequest.Builder updateRequest = VAdminProto.UpdatePartitionEntriesRequest.newBuilder()
.setStore(SLOP_STORE)
.setPartitionEntry(partitionEntry);
DataOutputStream outputStream = nodeIdStoreToOutputStreamRequest.get(new Pair<String, Integer>(SLOP_STORE,
slopDestination));
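// The first message to a destination must wrap the entry in a full VoldemortAdminRequest;
// once the stream is initialized, only the update request itself is sent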
if(nodeIdStoreInitialized.get(new Pair<String, Integer>(SLOP_STORE, slopDestination))) {
ProtoUtils.writeMessage(outputStream, updateRequest.build());
} else {
ProtoUtils.writeMessage(outputStream,
VAdminProto.VoldemortAdminRequest.newBuilder()
.setType(VAdminProto.AdminRequestType.UPDATE_PARTITION_ENTRIES)
.setUpdatePartitionEntries(updateRequest)
.build());
outputStream.flush();
nodeIdStoreInitialized.put(new Pair<String, Integer>(SLOP_STORE, slopDestination), true);
}
throttler.maybeThrottle(1);
}
|
java
|
{
"resource": ""
}
|
q4451
|
StreamingClient.synchronousInvokeCallback
|
train
|
@SuppressWarnings("rawtypes")
private void synchronousInvokeCallback(Callable call) {
Future future = streamingSlopResults.submit(call);
try {
future.get();
} catch(InterruptedException e1) {
logger.error("Callback failed", e1);
throw new VoldemortException("Callback failed");
} catch(ExecutionException e1) {
logger.error("Callback failed during execution", e1);
throw new VoldemortException("Callback failed during execution");
}
}
|
java
|
{
"resource": ""
}
|
q4452
|
HttpHookRunnable.handleResponse
|
train
|
protected void handleResponse(int responseCode, InputStream inputStream) {
BufferedReader rd = null;
try {
// Buffer the result into a string
rd = new BufferedReader(new InputStreamReader(inputStream));
StringBuilder sb = new StringBuilder();
String line;
while((line = rd.readLine()) != null) {
sb.append(line);
}
log.info("HttpHook [" + hookName + "] received " + responseCode + " response: " + sb);
} catch (IOException e) {
log.error("Error while reading response for HttpHook [" + hookName + "]", e);
} finally {
if (rd != null) {
try {
rd.close();
} catch (IOException e) {
// no-op
}
}
}
}
|
java
|
{
"resource": ""
}
|
q4453
|
BaseStreamingClient.addStoreToSession
|
train
|
@SuppressWarnings({ "unchecked", "rawtypes" })
protected void addStoreToSession(String store) {
Exception initializationException = null;
storeNames.add(store);
for(Node node: nodesToStream) {
SocketDestination destination = null;
SocketAndStreams sands = null;
try {
destination = new SocketDestination(node.getHost(),
node.getAdminPort(),
RequestFormatType.ADMIN_PROTOCOL_BUFFERS);
sands = streamingSocketPool.checkout(destination);
DataOutputStream outputStream = sands.getOutputStream();
DataInputStream inputStream = sands.getInputStream();
nodeIdStoreToSocketRequest.put(new Pair(store, node.getId()), destination);
nodeIdStoreToOutputStreamRequest.put(new Pair(store, node.getId()), outputStream);
nodeIdStoreToInputStreamRequest.put(new Pair(store, node.getId()), inputStream);
nodeIdStoreToSocketAndStreams.put(new Pair(store, node.getId()), sands);
nodeIdStoreInitialized.put(new Pair(store, node.getId()), false);
remoteStoreDefs = adminClient.metadataMgmtOps.getRemoteStoreDefList(node.getId())
.getValue();
} catch(Exception e) {
logger.error(e);
try {
close(sands.getSocket());
streamingSocketPool.checkin(destination, sands);
} catch(Exception ioE) {
logger.error(ioE);
}
if(!faultyNodes.contains(node.getId()))
faultyNodes.add(node.getId());
initializationException = e;
}
}
if(initializationException != null)
throw new VoldemortException(initializationException);
if(store.equals("slop"))
return;
boolean foundStore = false;
for(StoreDefinition remoteStoreDef: remoteStoreDefs) {
if(remoteStoreDef.getName().equals(store)) {
RoutingStrategyFactory factory = new RoutingStrategyFactory();
RoutingStrategy storeRoutingStrategy = factory.updateRoutingStrategy(remoteStoreDef,
adminClient.getAdminClientCluster());
storeToRoutingStrategy.put(store, storeRoutingStrategy);
validateSufficientNodesAvailable(blackListedNodes, remoteStoreDef);
foundStore = true;
break;
}
}
if(!foundStore) {
logger.error("Store Name not found on the cluster");
throw new VoldemortException("Store Name not found on the cluster");
}
}
|
java
|
{
"resource": ""
}
|
q4454
|
BaseStreamingClient.removeStoreFromSession
|
train
|
@SuppressWarnings({})
public synchronized void removeStoreFromSession(List<String> storeNameToRemove) {
logger.info("closing the Streaming session for a few stores");
commitToVoldemort(storeNameToRemove);
cleanupSessions(storeNameToRemove);
}
|
java
|
{
"resource": ""
}
|
q4455
|
BaseStreamingClient.blacklistNode
|
train
|
@SuppressWarnings({ "rawtypes", "unchecked" })
public void blacklistNode(int nodeId) {
Collection<Node> nodesInCluster = adminClient.getAdminClientCluster().getNodes();
if(blackListedNodes == null) {
blackListedNodes = new ArrayList();
}
blackListedNodes.add(nodeId);
for(Node node: nodesInCluster) {
if(node.getId() == nodeId) {
nodesToStream.remove(node);
break;
}
}
for(String store: storeNames) {
try {
SocketAndStreams sands = nodeIdStoreToSocketAndStreams.get(new Pair(store, nodeId));
close(sands.getSocket());
SocketDestination destination = nodeIdStoreToSocketRequest.get(new Pair(store,
nodeId));
streamingSocketPool.checkin(destination, sands);
} catch(Exception ioE) {
logger.error(ioE);
}
}
}
|
java
|
{
"resource": ""
}
|
q4456
|
BaseStreamingClient.commitToVoldemort
|
train
|
@SuppressWarnings({ "unchecked", "rawtypes", "unused" })
private void commitToVoldemort(List<String> storeNamesToCommit) {
if(logger.isDebugEnabled()) {
logger.debug("Trying to commit to Voldemort");
}
boolean hasError = false;
if(nodesToStream == null || nodesToStream.size() == 0) {
if(logger.isDebugEnabled()) {
logger.debug("No nodes to stream to. Returning.");
}
return;
}
for(Node node: nodesToStream) {
for(String store: storeNamesToCommit) {
if(!nodeIdStoreInitialized.get(new Pair(store, node.getId())))
continue;
nodeIdStoreInitialized.put(new Pair(store, node.getId()), false);
DataOutputStream outputStream = nodeIdStoreToOutputStreamRequest.get(new Pair(store,
node.getId()));
try {
ProtoUtils.writeEndOfStream(outputStream);
outputStream.flush();
DataInputStream inputStream = nodeIdStoreToInputStreamRequest.get(new Pair(store,
node.getId()));
VAdminProto.UpdatePartitionEntriesResponse.Builder updateResponse = ProtoUtils.readToBuilder(inputStream,
VAdminProto.UpdatePartitionEntriesResponse.newBuilder());
if(updateResponse.hasError()) {
hasError = true;
}
} catch(IOException e) {
logger.error("Exception during commit", e);
hasError = true;
if(!faultyNodes.contains(node.getId()))
faultyNodes.add(node.getId());
}
}
}
if(streamingresults == null) {
logger.warn("StreamingSession may not have been initialized since Variable streamingresults is null. Skipping callback ");
return;
}
// remove redundant callbacks
if(hasError) {
logger.info("Invoking the Recovery Callback");
Future future = streamingresults.submit(recoveryCallback);
try {
future.get();
} catch(InterruptedException e1) {
MARKED_BAD = true;
logger.error("Recovery Callback failed", e1);
throw new VoldemortException("Recovery Callback failed");
} catch(ExecutionException e1) {
MARKED_BAD = true;
logger.error("Recovery Callback failed during execution", e1);
throw new VoldemortException("Recovery Callback failed during execution");
}
} else {
if(logger.isDebugEnabled()) {
logger.debug("Commit successful");
logger.debug("calling checkpoint callback");
}
Future future = streamingresults.submit(checkpointCallback);
try {
future.get();
} catch(InterruptedException e1) {
logger.warn("Checkpoint callback failed!", e1);
} catch(ExecutionException e1) {
logger.warn("Checkpoint callback failed during execution!", e1);
}
}
}
|
java
|
{
"resource": ""
}
|
q4457
|
BaseStreamingClient.cleanupSessions
|
train
|
@SuppressWarnings({ "rawtypes", "unchecked" })
private void cleanupSessions(List<String> storeNamesToCleanUp) {
logger.info("Performing cleanup");
for(String store: storeNamesToCleanUp) {
for(Node node: nodesToStream) {
try {
SocketAndStreams sands = nodeIdStoreToSocketAndStreams.get(new Pair(store,
node.getId()));
close(sands.getSocket());
SocketDestination destination = nodeIdStoreToSocketRequest.get(new Pair(store,
node.getId()));
streamingSocketPool.checkin(destination, sands);
} catch(Exception ioE) {
logger.error(ioE);
}
}
}
cleanedUp = true;
}
|
java
|
{
"resource": ""
}
|
q4458
|
InsufficientOperationalNodesException.stripNodeIds
|
train
|
private static List<Integer> stripNodeIds(List<Node> nodeList) {
List<Integer> nodeidList = new ArrayList<Integer>();
if(nodeList != null) {
for(Node node: nodeList) {
nodeidList.add(node.getId());
}
}
return nodeidList;
}
|
java
|
{
"resource": ""
}
|
q4459
|
InsufficientOperationalNodesException.difference
|
train
|
private static List<Node> difference(List<Node> listA, List<Node> listB) {
if(listA != null && listB != null)
listA.removeAll(listB);
return listA;
}
|
java
|
{
"resource": ""
}
|
q4460
|
SchemaEvolutionValidator.main
|
train
|
public static void main(String[] args) {
if(args.length != 2) {
System.out.println("Usage: SchemaEvolutionValidator pathToOldSchema pathToNewSchema");
return;
}
Schema oldSchema;
Schema newSchema;
try {
oldSchema = Schema.parse(new File(args[0]));
} catch(Exception ex) {
oldSchema = null;
System.out.println("Could not open or parse the old schema (" + args[0] + ") due to "
+ ex);
}
try {
newSchema = Schema.parse(new File(args[1]));
} catch(Exception ex) {
newSchema = null;
System.out.println("Could not open or parse the new schema (" + args[1] + ") due to "
+ ex);
}
if(oldSchema == null || newSchema == null) {
return;
}
System.out.println("Comparing: ");
System.out.println("\t" + args[0]);
System.out.println("\t" + args[1]);
List<Message> messages = SchemaEvolutionValidator.checkBackwardCompatibility(oldSchema,
newSchema,
oldSchema.getName());
Level maxLevel = Level.ALL;
for(Message message: messages) {
System.out.println(message.getLevel() + ": " + message.getMessage());
if(message.getLevel().isGreaterOrEqual(maxLevel)) {
maxLevel = message.getLevel();
}
}
if(maxLevel.isGreaterOrEqual(Level.ERROR)) {
System.out.println(Level.ERROR
+ ": The schema is not backward compatible. New clients will not be able to read existing data.");
} else if(maxLevel.isGreaterOrEqual(Level.WARN)) {
System.out.println(Level.WARN
+ ": The schema is partially backward compatible, but old clients will not be able to read data serialized in the new format.");
} else {
System.out.println(Level.INFO
+ ": The schema is backward compatible. Old and new clients will be able to read records serialized by one another.");
}
}
|
java
|
{
"resource": ""
}
|
q4461
|
SchemaEvolutionValidator.validateAllAvroSchemas
|
train
|
public static void validateAllAvroSchemas(SerializerDefinition avroSerDef) {
Map<Integer, String> schemaVersions = avroSerDef.getAllSchemaInfoVersions();
if(schemaVersions.size() < 1) {
throw new VoldemortException("No schema specified");
}
for(Map.Entry<Integer, String> entry: schemaVersions.entrySet()) {
Integer schemaVersionNumber = entry.getKey();
String schemaStr = entry.getValue();
try {
Schema.parse(schemaStr);
} catch(Exception e) {
throw new VoldemortException("Unable to parse Avro schema version :"
+ schemaVersionNumber + ", schema string :"
+ schemaStr);
}
}
}
|
java
|
{
"resource": ""
}
|
q4462
|
AbstractStoreBuilderConfigurable.getPartition
|
train
|
public int getPartition(byte[] key,
byte[] value,
int numReduceTasks) {
try {
/**
* {@link partitionId} is the Voldemort primary partition that this
* record belongs to.
*/
int partitionId = ByteUtils.readInt(value, ByteUtils.SIZE_OF_INT);
/**
* This is the base number we will ultimately mod by {@link numReduceTasks}
* to determine which reduce task to shuffle to.
*/
int magicNumber = partitionId;
if (getSaveKeys() && !buildPrimaryReplicasOnly) {
/**
* When saveKeys is enabled (which also implies we are generating
* READ_ONLY_V2 format files), then we are generating files with
* a replica type, with one file per replica.
*
* Each replica is sent to a different reducer, and thus the
* {@link magicNumber} is scaled accordingly.
*
* The downside to this is that it is pretty wasteful. The files
* generated for each replica are identical to one another, so
* there's no point in generating them independently in many
* reducers.
*
* This is one of the reasons why buildPrimaryReplicasOnly was
* written. In this mode, we only generate the files for the
* primary replica, which means the number of reducers is
* minimized and {@link magicNumber} does not need to be scaled.
*/
int replicaType = (int) ByteUtils.readBytes(value,
2 * ByteUtils.SIZE_OF_INT,
ByteUtils.SIZE_OF_BYTE);
magicNumber = magicNumber * getStoreDef().getReplicationFactor() + replicaType;
}
if (!getReducerPerBucket()) {
/**
* Partition files can be split in many chunks in order to limit the
* maximum file size downloaded and handled by Voldemort servers.
*
* {@link chunkId} represents which chunk of the partition the current
* record belongs to.
*/
int chunkId = ReadOnlyUtils.chunk(key, getNumChunks());
/**
* When reducerPerBucket is disabled, all chunks are sent to a
* different reducer. This increases parallelism at the expense
* of adding more load on Hadoop.
*
* {@link magicNumber} is thus scaled accordingly, in order to
* leverage the extra reducers available to us.
*/
magicNumber = magicNumber * getNumChunks() + chunkId;
}
/**
* Finally, we mod {@link magicNumber} by {@link numReduceTasks},
* since the MapReduce framework expects the return of this function
* to be bounded by the number of reduce tasks running in the job.
*/
return magicNumber % numReduceTasks;
} catch (Exception e) {
throw new VoldemortException("Caught exception in getPartition()!" +
" key: " + ByteUtils.toHexString(key) +
", value: " + ByteUtils.toHexString(value) +
", numReduceTasks: " + numReduceTasks, e);
}
}
|
java
|
{
"resource": ""
}
|
q4463
|
HadoopStoreWriter.initFileStreams
|
train
|
@NotThreadsafe
private void initFileStreams(int chunkId) {
/**
* {@link Set#add(Object)} returns false if the element already existed in the set.
* This ensures we initialize the resources for each chunk only once.
*/
if (chunksHandled.add(chunkId)) {
try {
this.indexFileSizeInBytes[chunkId] = 0L;
this.valueFileSizeInBytes[chunkId] = 0L;
this.checkSumDigestIndex[chunkId] = CheckSum.getInstance(checkSumType);
this.checkSumDigestValue[chunkId] = CheckSum.getInstance(checkSumType);
this.position[chunkId] = 0;
this.taskIndexFileName[chunkId] = new Path(FileOutputFormat.getOutputPath(conf),
getStoreName() + "."
+ Integer.toString(chunkId) + "_"
+ this.taskId + INDEX_FILE_EXTENSION
+ fileExtension);
this.taskValueFileName[chunkId] = new Path(FileOutputFormat.getOutputPath(conf),
getStoreName() + "."
+ Integer.toString(chunkId) + "_"
+ this.taskId + DATA_FILE_EXTENSION
+ fileExtension);
if(this.fs == null)
this.fs = this.taskIndexFileName[chunkId].getFileSystem(conf);
if(isValidCompressionEnabled) {
this.indexFileStream[chunkId] = new DataOutputStream(new BufferedOutputStream(new GZIPOutputStream(fs.create(this.taskIndexFileName[chunkId]),
DEFAULT_BUFFER_SIZE)));
this.valueFileStream[chunkId] = new DataOutputStream(new BufferedOutputStream(new GZIPOutputStream(fs.create(this.taskValueFileName[chunkId]),
DEFAULT_BUFFER_SIZE)));
} else {
this.indexFileStream[chunkId] = fs.create(this.taskIndexFileName[chunkId]);
this.valueFileStream[chunkId] = fs.create(this.taskValueFileName[chunkId]);
}
fs.setPermission(this.taskIndexFileName[chunkId],
new FsPermission(HadoopStoreBuilder.HADOOP_FILE_PERMISSION));
logger.info("Setting permission to 755 for " + this.taskIndexFileName[chunkId]);
fs.setPermission(this.taskValueFileName[chunkId],
new FsPermission(HadoopStoreBuilder.HADOOP_FILE_PERMISSION));
logger.info("Setting permission to 755 for " + this.taskValueFileName[chunkId]);
logger.info("Opening " + this.taskIndexFileName[chunkId] + " and "
+ this.taskValueFileName[chunkId] + " for writing.");
} catch(IOException e) {
throw new RuntimeException("Failed to open Input/OutputStream", e);
}
}
}
|
java
|
{
"resource": ""
}
|
q4464
|
RestGetRequestValidator.parseAndValidateRequest
|
train
|
@Override
public boolean parseAndValidateRequest() {
if(!super.parseAndValidateRequest()) {
return false;
}
isGetVersionRequest = hasGetVersionRequestHeader();
if(isGetVersionRequest && this.parsedKeys.size() > 1) {
RestErrorHandler.writeErrorResponse(messageEvent,
HttpResponseStatus.BAD_REQUEST,
"Get version request cannot have multiple keys");
return false;
}
return true;
}
|
java
|
{
"resource": ""
}
|
q4465
|
HadoopUtils.getMetadataFromSequenceFile
|
train
|
public static Map<String, String> getMetadataFromSequenceFile(FileSystem fs, Path path) {
try {
Configuration conf = new Configuration();
conf.setInt("io.file.buffer.size", 4096);
SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
SequenceFile.Metadata meta = reader.getMetadata();
reader.close();
TreeMap<Text, Text> map = meta.getMetadata();
Map<String, String> values = new HashMap<String, String>();
for(Map.Entry<Text, Text> entry: map.entrySet())
values.put(entry.getKey().toString(), entry.getValue().toString());
return values;
} catch(IOException e) {
throw new RuntimeException(e);
}
}
|
java
|
{
"resource": ""
}
|
q4466
|
BdbNativeBackup.recordBackupSet
|
train
|
private void recordBackupSet(File backupDir) throws IOException {
String[] filesInEnv = env.getHome().list();
SimpleDateFormat format = new SimpleDateFormat("yyyy_MM_dd_kk_mm_ss");
String recordFileName = "backupset-" + format.format(new Date());
File recordFile = new File(backupDir, recordFileName);
if(recordFile.exists()) {
recordFile.renameTo(new File(backupDir, recordFileName + ".old"));
}
PrintStream backupRecord = new PrintStream(new FileOutputStream(recordFile));
backupRecord.println("Lastfile:" + Long.toHexString(backupHelper.getLastFileInBackupSet()));
if(filesInEnv != null) {
for(String file: filesInEnv) {
if(file.endsWith(BDB_EXT))
backupRecord.println(file);
}
}
backupRecord.close();
}
|
java
|
{
"resource": ""
}
|
q4467
|
BdbNativeBackup.cleanStaleFiles
|
train
|
private void cleanStaleFiles(File backupDir, AsyncOperationStatus status) {
String[] filesInEnv = env.getHome().list();
String[] filesInBackupDir = backupDir.list();
if(filesInEnv != null && filesInBackupDir != null) {
HashSet<String> envFileSet = new HashSet<String>();
for(String file: filesInEnv)
envFileSet.add(file);
// delete all files in backup which are currently not in environment
for(String file: filesInBackupDir) {
if(file.endsWith(BDB_EXT) && !envFileSet.contains(file)) {
status.setStatus("Deleting stale jdb file :" + file);
File staleJdbFile = new File(backupDir, file);
staleJdbFile.delete();
}
}
}
}
|
java
|
{
"resource": ""
}
|
q4468
|
BdbNativeBackup.verifiedCopyFile
|
train
|
private void verifiedCopyFile(File sourceFile, File destFile) throws IOException {
if(!destFile.exists()) {
destFile.createNewFile();
}
FileInputStream source = null;
FileOutputStream destination = null;
LogVerificationInputStream verifyStream = null;
try {
source = new FileInputStream(sourceFile);
destination = new FileOutputStream(destFile);
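// Wrap the source in a LogVerificationInputStream so BDB JE log records are checksum-verified while copying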
verifyStream = new LogVerificationInputStream(env, source, sourceFile.getName());
final byte[] buf = new byte[LOGVERIFY_BUFSIZE];
while(true) {
final int len = verifyStream.read(buf);
if(len < 0) {
break;
}
destination.write(buf, 0, len);
}
} finally {
if(verifyStream != null) {
verifyStream.close();
}
if(destination != null) {
destination.close();
}
}
}
|
java
|
{
"resource": ""
}
|
q4469
|
ZoneRoutingStrategy.getReplicatingPartitionList
|
train
|
@Override
public List<Integer> getReplicatingPartitionList(int index) {
List<Node> preferenceNodesList = new ArrayList<Node>(getNumReplicas());
List<Integer> replicationPartitionsList = new ArrayList<Integer>(getNumReplicas());
// Copy Zone based Replication Factor
HashMap<Integer, Integer> requiredRepFactor = new HashMap<Integer, Integer>();
requiredRepFactor.putAll(zoneReplicationFactor);
// Cross-check if individual zone replication factor equals global
int sum = 0;
for(Integer zoneRepFactor: requiredRepFactor.values()) {
sum += zoneRepFactor;
}
if(sum != getNumReplicas())
throw new IllegalArgumentException("Number of zone replicas is not equal to the total replication factor");
if(getPartitionToNode().length == 0) {
return new ArrayList<Integer>(0);
}
for(int i = 0; i < getPartitionToNode().length; i++) {
// add this one if we haven't already, and it can satisfy some zone
// replicationFactor
Node currentNode = getNodeByPartition(index);
if(!preferenceNodesList.contains(currentNode)) {
preferenceNodesList.add(currentNode);
if(checkZoneRequirement(requiredRepFactor, currentNode.getZoneId()))
replicationPartitionsList.add(index);
}
// if we have enough, go home
if(replicationPartitionsList.size() >= getNumReplicas())
return replicationPartitionsList;
// move to next clockwise slot on the ring
index = (index + 1) % getPartitionToNode().length;
}
// we don't have enough, but that may be okay
return replicationPartitionsList;
}
|
java
|
{
"resource": ""
}
|
q4470
|
ZoneRoutingStrategy.checkZoneRequirement
|
train
|
private boolean checkZoneRequirement(HashMap<Integer, Integer> requiredRepFactor, int zoneId) {
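// Returns true if this zone still needs replicas, decrementing its remaining count; false otherwise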
if(requiredRepFactor.containsKey(zoneId)) {
if(requiredRepFactor.get(zoneId) == 0) {
return false;
} else {
requiredRepFactor.put(zoneId, requiredRepFactor.get(zoneId) - 1);
return true;
}
}
return false;
}
|
java
|
{
"resource": ""
}
|
q4471
|
CoordinatorAdminUtils.acceptsUrlMultiple
|
train
|
public static void acceptsUrlMultiple(OptionParser parser) {
parser.acceptsAll(Arrays.asList(OPT_U, OPT_URL), "coordinator bootstrap urls")
.withRequiredArg()
.describedAs("url-list")
.withValuesSeparatedBy(',')
.ofType(String.class);
}
|
java
|
{
"resource": ""
}
|
q4472
|
CoordinatorAdminUtils.copyArrayCutFirst
|
train
|
public static String[] copyArrayCutFirst(String[] arr) {
if(arr.length > 1) {
String[] arrCopy = new String[arr.length - 1];
System.arraycopy(arr, 1, arrCopy, 0, arrCopy.length);
return arrCopy;
} else {
return new String[0];
}
}
|
java
|
{
"resource": ""
}
|
q4473
|
CoordinatorAdminUtils.copyArrayAddFirst
|
train
|
public static String[] copyArrayAddFirst(String[] arr, String add) {
String[] arrCopy = new String[arr.length + 1];
arrCopy[0] = add;
System.arraycopy(arr, 0, arrCopy, 1, arr.length);
return arrCopy;
}
|
java
|
{
"resource": ""
}
|
q4474
|
MetadataStore.put
|
train
|
@SuppressWarnings("unchecked")
public void put(String key, Versioned<Object> value) {
// acquire write lock
writeLock.lock();
try {
if(this.storeNames.contains(key) || key.equals(STORES_KEY)) {
// Check for backwards compatibility
List<StoreDefinition> storeDefinitions = (List<StoreDefinition>) value.getValue();
StoreDefinitionUtils.validateSchemasAsNeeded(storeDefinitions);
// If the put is on the entire stores.xml key, delete the
// additional stores which do not exist in the specified
// stores.xml
Set<String> storeNamesToDelete = new HashSet<String>();
for(String storeName: this.storeNames) {
storeNamesToDelete.add(storeName);
}
// Add / update the list of store definitions specified in the
// value
StoreDefinitionsMapper mapper = new StoreDefinitionsMapper();
// Update the STORES directory and the corresponding entry in
// metadata cache
Set<String> specifiedStoreNames = new HashSet<String>();
for(StoreDefinition storeDef: storeDefinitions) {
specifiedStoreNames.add(storeDef.getName());
String storeDefStr = mapper.writeStore(storeDef);
Versioned<String> versionedValueStr = new Versioned<String>(storeDefStr,
value.getVersion());
this.storeDefinitionsStorageEngine.put(storeDef.getName(),
versionedValueStr,
"");
// Update the metadata cache
this.metadataCache.put(storeDef.getName(),
new Versioned<Object>(storeDefStr, value.getVersion()));
}
if(key.equals(STORES_KEY)) {
storeNamesToDelete.removeAll(specifiedStoreNames);
resetStoreDefinitions(storeNamesToDelete);
}
// Re-initialize the store definitions
initStoreDefinitions(value.getVersion());
// Update routing strategies
updateRoutingStrategies(getCluster(), getStoreDefList());
} else if(METADATA_KEYS.contains(key)) {
// try inserting into inner store first
putInner(key, convertObjectToString(key, value));
// cache all keys if innerStore put succeeded
metadataCache.put(key, value);
// do special stuff if needed
if(CLUSTER_KEY.equals(key)) {
updateRoutingStrategies((Cluster) value.getValue(), getStoreDefList());
} else if(NODE_ID_KEY.equals(key)) {
initNodeId(getNodeIdNoLock());
} else if(SYSTEM_STORES_KEY.equals(key))
throw new VoldemortException("Cannot overwrite system store definitions");
} else {
throw new VoldemortException("Unhandled Key:" + key + " for MetadataStore put()");
}
} finally {
writeLock.unlock();
}
}
|
java
|
{
"resource": ""
}
|
q4475
|
MetadataStore.updateStoreDefinitions
|
train
|
@SuppressWarnings("unchecked")
public void updateStoreDefinitions(Versioned<byte[]> valueBytes) {
// acquire write lock
writeLock.lock();
try {
Versioned<String> value = new Versioned<String>(ByteUtils.getString(valueBytes.getValue(),
"UTF-8"),
valueBytes.getVersion());
Versioned<Object> valueObject = convertStringToObject(STORES_KEY, value);
StoreDefinitionsMapper mapper = new StoreDefinitionsMapper();
List<StoreDefinition> storeDefinitions = (List<StoreDefinition>) valueObject.getValue();
// Check for backwards compatibility
StoreDefinitionUtils.validateSchemasAsNeeded(storeDefinitions);
StoreDefinitionUtils.validateNewStoreDefsAreNonBreaking(getStoreDefList(), storeDefinitions);
// Go through each store definition and do a corresponding put
for(StoreDefinition storeDef: storeDefinitions) {
if(!this.storeNames.contains(storeDef.getName())) {
throw new VoldemortException("Cannot update a store which does not exist !");
}
String storeDefStr = mapper.writeStore(storeDef);
Versioned<String> versionedValueStr = new Versioned<String>(storeDefStr,
value.getVersion());
this.storeDefinitionsStorageEngine.put(storeDef.getName(), versionedValueStr, "");
// Update the metadata cache
this.metadataCache.put(storeDef.getName(),
new Versioned<Object>(storeDefStr, value.getVersion()));
}
// Re-initialize the store definitions
initStoreDefinitions(value.getVersion());
// Update routing strategies
            // TODO: Make this more fine-grained, i.e., only update listeners for
            // a specific store.
updateRoutingStrategies(getCluster(), getStoreDefList());
} finally {
writeLock.unlock();
}
}
|
java
|
{
"resource": ""
}
|
q4476
|
MetadataStore.put
|
train
|
@Override
public void put(ByteArray keyBytes, Versioned<byte[]> valueBytes, byte[] transforms)
throws VoldemortException {
// acquire write lock
writeLock.lock();
try {
String key = ByteUtils.getString(keyBytes.get(), "UTF-8");
Versioned<String> value = new Versioned<String>(ByteUtils.getString(valueBytes.getValue(),
"UTF-8"),
valueBytes.getVersion());
Versioned<Object> valueObject = convertStringToObject(key, value);
this.put(key, valueObject);
} finally {
writeLock.unlock();
}
}
|
java
|
{
"resource": ""
}
|
q4477
|
MetadataStore.makeStoreDefinitionMap
|
train
|
private HashMap<String, StoreDefinition> makeStoreDefinitionMap(List<StoreDefinition> storeDefs) {
HashMap<String, StoreDefinition> storeDefMap = new HashMap<String, StoreDefinition>();
for(StoreDefinition storeDef: storeDefs)
storeDefMap.put(storeDef.getName(), storeDef);
return storeDefMap;
}
|
java
|
{
"resource": ""
}
|
q4478
|
MetadataStore.updateRoutingStrategies
|
train
|
private void updateRoutingStrategies(Cluster cluster, List<StoreDefinition> storeDefs) {
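        // Recompute the routing strategy for every store, refresh the cached
        // ROUTING_STRATEGY_KEY entry and notify any registered store listeners.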
// acquire write lock
writeLock.lock();
try {
VectorClock clock = new VectorClock();
if(metadataCache.containsKey(ROUTING_STRATEGY_KEY))
clock = (VectorClock) metadataCache.get(ROUTING_STRATEGY_KEY).getVersion();
logger.info("Updating routing strategy for all stores");
HashMap<String, StoreDefinition> storeDefMap = makeStoreDefinitionMap(storeDefs);
HashMap<String, RoutingStrategy> routingStrategyMap = createRoutingStrategyMap(cluster,
storeDefMap);
this.metadataCache.put(ROUTING_STRATEGY_KEY,
new Versioned<Object>(routingStrategyMap,
clock.incremented(getNodeId(),
System.currentTimeMillis())));
for(String storeName: storeNameTolisteners.keySet()) {
RoutingStrategy updatedRoutingStrategy = routingStrategyMap.get(storeName);
if(updatedRoutingStrategy != null) {
try {
for(MetadataStoreListener listener: storeNameTolisteners.get(storeName)) {
listener.updateRoutingStrategy(updatedRoutingStrategy);
listener.updateStoreDefinition(storeDefMap.get(storeName));
}
} catch(Exception e) {
if(logger.isEnabledFor(Level.WARN))
logger.warn(e, e);
}
}
}
} finally {
writeLock.unlock();
}
}
|
java
|
{
"resource": ""
}
|
q4479
|
MetadataStore.addRebalancingState
|
train
|
public void addRebalancingState(final RebalanceTaskInfo stealInfo) {
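        // Move the server into REBALANCING_MASTER_SERVER state if it is currently
        // NORMAL, then record the steal information in the persisted rebalancer state.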
// acquire write lock
writeLock.lock();
try {
// Move into rebalancing state
if(ByteUtils.getString(get(SERVER_STATE_KEY, null).get(0).getValue(), "UTF-8")
.compareTo(VoldemortState.NORMAL_SERVER.toString()) == 0) {
put(SERVER_STATE_KEY, VoldemortState.REBALANCING_MASTER_SERVER);
initCache(SERVER_STATE_KEY);
}
// Add the steal information
RebalancerState rebalancerState = getRebalancerState();
if(!rebalancerState.update(stealInfo)) {
throw new VoldemortException("Could not add steal information " + stealInfo
+ " since a plan for the same donor node "
+ stealInfo.getDonorId() + " ( "
+ rebalancerState.find(stealInfo.getDonorId())
+ " ) already exists");
}
put(MetadataStore.REBALANCING_STEAL_INFO, rebalancerState);
initCache(REBALANCING_STEAL_INFO);
} finally {
writeLock.unlock();
}
}
|
java
|
{
"resource": ""
}
|
q4480
|
MetadataStore.deleteRebalancingState
|
train
|
public void deleteRebalancingState(RebalanceTaskInfo stealInfo) {
// acquire write lock
writeLock.lock();
try {
RebalancerState rebalancerState = getRebalancerState();
if(!rebalancerState.remove(stealInfo))
throw new IllegalArgumentException("Couldn't find " + stealInfo + " in "
+ rebalancerState + " while deleting");
if(rebalancerState.isEmpty()) {
logger.debug("Cleaning all rebalancing state");
cleanAllRebalancingState();
} else {
put(REBALANCING_STEAL_INFO, rebalancerState);
initCache(REBALANCING_STEAL_INFO);
}
} finally {
writeLock.unlock();
}
}
|
java
|
{
"resource": ""
}
|
q4481
|
MetadataStore.setOfflineState
|
train
|
public void setOfflineState(boolean setToOffline) {
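        // Toggle between NORMAL_SERVER and OFFLINE_SERVER, disabling (or re-enabling)
        // slop streaming, partition streaming and read-only fetches to match.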
// acquire write lock
writeLock.lock();
try {
String currentState = ByteUtils.getString(get(SERVER_STATE_KEY, null).get(0).getValue(),
"UTF-8");
if(setToOffline) {
// from NORMAL_SERVER to OFFLINE_SERVER
if(currentState.equals(VoldemortState.NORMAL_SERVER.toString())) {
put(SERVER_STATE_KEY, VoldemortState.OFFLINE_SERVER);
initCache(SERVER_STATE_KEY);
put(SLOP_STREAMING_ENABLED_KEY, false);
initCache(SLOP_STREAMING_ENABLED_KEY);
put(PARTITION_STREAMING_ENABLED_KEY, false);
initCache(PARTITION_STREAMING_ENABLED_KEY);
put(READONLY_FETCH_ENABLED_KEY, false);
initCache(READONLY_FETCH_ENABLED_KEY);
} else if(currentState.equals(VoldemortState.OFFLINE_SERVER.toString())) {
logger.warn("Already in OFFLINE_SERVER state.");
return;
} else {
logger.error("Cannot enter OFFLINE_SERVER state from " + currentState);
throw new VoldemortException("Cannot enter OFFLINE_SERVER state from "
+ currentState);
}
} else {
// from OFFLINE_SERVER to NORMAL_SERVER
if(currentState.equals(VoldemortState.NORMAL_SERVER.toString())) {
logger.warn("Already in NORMAL_SERVER state.");
return;
} else if(currentState.equals(VoldemortState.OFFLINE_SERVER.toString())) {
put(SERVER_STATE_KEY, VoldemortState.NORMAL_SERVER);
initCache(SERVER_STATE_KEY);
put(SLOP_STREAMING_ENABLED_KEY, true);
initCache(SLOP_STREAMING_ENABLED_KEY);
put(PARTITION_STREAMING_ENABLED_KEY, true);
initCache(PARTITION_STREAMING_ENABLED_KEY);
put(READONLY_FETCH_ENABLED_KEY, true);
initCache(READONLY_FETCH_ENABLED_KEY);
init();
initNodeId(getNodeIdNoLock());
} else {
logger.error("Cannot enter NORMAL_SERVER state from " + currentState);
throw new VoldemortException("Cannot enter NORMAL_SERVER state from "
+ currentState);
}
}
} finally {
writeLock.unlock();
}
}
|
java
|
{
"resource": ""
}
|
q4482
|
MetadataStore.addStoreDefinition
|
train
|
public void addStoreDefinition(StoreDefinition storeDef) {
// acquire write lock
writeLock.lock();
try {
// Check if store already exists
if(this.storeNames.contains(storeDef.getName())) {
throw new VoldemortException("Store already exists !");
}
// Check for backwards compatibility
StoreDefinitionUtils.validateSchemaAsNeeded(storeDef);
// Otherwise add to the STORES directory
StoreDefinitionsMapper mapper = new StoreDefinitionsMapper();
String storeDefStr = mapper.writeStore(storeDef);
Versioned<String> versionedValueStr = new Versioned<String>(storeDefStr);
this.storeDefinitionsStorageEngine.put(storeDef.getName(), versionedValueStr, null);
// Update the metadata cache
this.metadataCache.put(storeDef.getName(), new Versioned<Object>(storeDefStr));
// Re-initialize the store definitions. This is primarily required
// to re-create the value for key: 'stores.xml'. This is necessary
// for backwards compatibility.
initStoreDefinitions(null);
updateRoutingStrategies(getCluster(), getStoreDefList());
} finally {
writeLock.unlock();
}
}
|
java
|
{
"resource": ""
}
|
q4483
|
MetadataStore.deleteStoreDefinition
|
train
|
public void deleteStoreDefinition(String storeName) {
// acquire write lock
writeLock.lock();
try {
// Check if store exists
if(!this.storeNames.contains(storeName)) {
throw new VoldemortException("Requested store to be deleted does not exist !");
}
// Otherwise remove from the STORES directory. Note: The version
// argument is not required here since the
// ConfigurationStorageEngine simply ignores this.
this.storeDefinitionsStorageEngine.delete(storeName, null);
// Update the metadata cache
this.metadataCache.remove(storeName);
// Re-initialize the store definitions. This is primarily required
// to re-create the value for key: 'stores.xml'. This is necessary
// for backwards compatibility.
initStoreDefinitions(null);
} finally {
writeLock.unlock();
}
}
|
java
|
{
"resource": ""
}
|
q4484
|
MetadataStore.isValidStore
|
train
|
public boolean isValidStore(String name) {
readLock.lock();
try {
            return this.storeNames.contains(name);
} finally {
readLock.unlock();
}
}
|
java
|
{
"resource": ""
}
|
q4485
|
MetadataStore.init
|
train
|
private void init() {
logger.info("metadata init().");
writeLock.lock();
try {
// Required keys
initCache(CLUSTER_KEY);
            // If the store definitions storage engine is available, rebuild the
            // per-store metadata from it; otherwise fall back to the legacy STORES_KEY
if(this.storeDefinitionsStorageEngine != null) {
initStoreDefinitions(null);
} else {
initCache(STORES_KEY);
}
// Initialize system store in the metadata cache
initSystemCache();
initSystemRoutingStrategies(getCluster());
// Initialize with default if not present
initCache(SLOP_STREAMING_ENABLED_KEY, true);
initCache(PARTITION_STREAMING_ENABLED_KEY, true);
initCache(READONLY_FETCH_ENABLED_KEY, true);
initCache(QUOTA_ENFORCEMENT_ENABLED_KEY, true);
initCache(REBALANCING_STEAL_INFO, new RebalancerState(new ArrayList<RebalanceTaskInfo>()));
initCache(SERVER_STATE_KEY, VoldemortState.NORMAL_SERVER.toString());
initCache(REBALANCING_SOURCE_CLUSTER_XML, null);
initCache(REBALANCING_SOURCE_STORES_XML, null);
} finally {
writeLock.unlock();
}
}
|
java
|
{
"resource": ""
}
|
q4486
|
MetadataStore.initStoreDefinitions
|
train
|
private void initStoreDefinitions(Version storesXmlVersion) {
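        // Rebuild the per-store metadata cache, the list of known store names and the
        // composite 'stores.xml' entry from the individual definitions persisted in
        // the store definitions storage engine.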
if(this.storeDefinitionsStorageEngine == null) {
throw new VoldemortException("The store definitions directory is empty");
}
String allStoreDefinitions = "<stores>";
Version finalStoresXmlVersion = null;
if(storesXmlVersion != null) {
finalStoresXmlVersion = storesXmlVersion;
}
this.storeNames.clear();
ClosableIterator<Pair<String, Versioned<String>>> storesIterator = this.storeDefinitionsStorageEngine.entries();
// Some test setups may result in duplicate entries for 'store' element.
// Do the de-dup here
Map<String, Versioned<String>> storeNameToDefMap = new HashMap<String, Versioned<String>>();
Version maxVersion = null;
while(storesIterator.hasNext()) {
Pair<String, Versioned<String>> storeDetail = storesIterator.next();
String storeName = storeDetail.getFirst();
Versioned<String> versionedStoreDef = storeDetail.getSecond();
storeNameToDefMap.put(storeName, versionedStoreDef);
Version curVersion = versionedStoreDef.getVersion();
// Get the highest version from all the store entries
if(maxVersion == null) {
maxVersion = curVersion;
} else if(maxVersion.compare(curVersion) == Occurred.BEFORE) {
maxVersion = curVersion;
}
}
// If the specified version is null, assign highest Version to
// 'stores.xml' key
if(finalStoresXmlVersion == null) {
finalStoresXmlVersion = maxVersion;
}
// Go through all the individual stores and update metadata
for(Entry<String, Versioned<String>> storeEntry: storeNameToDefMap.entrySet()) {
String storeName = storeEntry.getKey();
Versioned<String> versionedStoreDef = storeEntry.getValue();
// Add all the store names to the list of storeNames
this.storeNames.add(storeName);
this.metadataCache.put(storeName, new Versioned<Object>(versionedStoreDef.getValue(),
versionedStoreDef.getVersion()));
}
Collections.sort(this.storeNames);
for(String storeName: this.storeNames) {
Versioned<String> versionedStoreDef = storeNameToDefMap.get(storeName);
// Stitch together to form the complete store definition list.
allStoreDefinitions += versionedStoreDef.getValue();
}
allStoreDefinitions += "</stores>";
// Update cache with the composite store definition list.
metadataCache.put(STORES_KEY,
convertStringToObject(STORES_KEY,
new Versioned<String>(allStoreDefinitions,
finalStoresXmlVersion)));
}
|
java
|
{
"resource": ""
}
|
q4487
|
MetadataStore.resetStoreDefinitions
|
train
|
private void resetStoreDefinitions(Set<String> storeNamesToDelete) {
        // Remove each store from the metadata cache, the store definitions storage
        // engine and the list of known store names
for(String storeName: storeNamesToDelete) {
this.metadataCache.remove(storeName);
this.storeDefinitionsStorageEngine.delete(storeName, null);
this.storeNames.remove(storeName);
}
}
|
java
|
{
"resource": ""
}
|
q4488
|
MetadataStore.initSystemCache
|
train
|
private synchronized void initSystemCache() {
List<StoreDefinition> value = storeMapper.readStoreList(new StringReader(SystemStoreConstants.SYSTEM_STORE_SCHEMA));
metadataCache.put(SYSTEM_STORES_KEY, new Versioned<Object>(value));
}
|
java
|
{
"resource": ""
}
|
q4489
|
DynamicTimeoutStoreClient.getWithCustomTimeout
|
train
|
public List<Versioned<V>> getWithCustomTimeout(CompositeVoldemortRequest<K, V> requestWrapper) {
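        // Perform a GET with the caller-supplied routing timeout, re-bootstrapping and
        // retrying on InvalidMetadataException up to metadataRefreshAttempts times.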
validateTimeout(requestWrapper.getRoutingTimeoutInMs());
for(int attempts = 0; attempts < this.metadataRefreshAttempts; attempts++) {
try {
long startTimeInMs = System.currentTimeMillis();
String keyHexString = "";
if(logger.isDebugEnabled()) {
ByteArray key = (ByteArray) requestWrapper.getKey();
keyHexString = RestUtils.getKeyHexString(key);
debugLogStart("GET",
requestWrapper.getRequestOriginTimeInMs(),
startTimeInMs,
keyHexString);
}
List<Versioned<V>> items = store.get(requestWrapper);
if(logger.isDebugEnabled()) {
int vcEntrySize = 0;
for(Versioned<V> vc: items) {
vcEntrySize += ((VectorClock) vc.getVersion()).getVersionMap().size();
}
debugLogEnd("GET",
requestWrapper.getRequestOriginTimeInMs(),
startTimeInMs,
System.currentTimeMillis(),
keyHexString,
vcEntrySize);
}
return items;
} catch(InvalidMetadataException e) {
logger.info("Received invalid metadata exception during get [ " + e.getMessage()
+ " ] on store '" + storeName + "'. Rebootstrapping");
bootStrap();
}
}
throw new VoldemortException(this.metadataRefreshAttempts
+ " metadata refresh attempts failed.");
}
|
java
|
{
"resource": ""
}
|
q4490
|
DynamicTimeoutStoreClient.putWithCustomTimeout
|
train
|
public Version putWithCustomTimeout(CompositeVoldemortRequest<K, V> requestWrapper) {
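        // A put is implemented as a GET (to obtain the current version) followed by a
        // versioned PUT; both steps are charged against the caller's routing timeout.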
validateTimeout(requestWrapper.getRoutingTimeoutInMs());
List<Versioned<V>> versionedValues;
long startTime = System.currentTimeMillis();
String keyHexString = "";
if(logger.isDebugEnabled()) {
ByteArray key = (ByteArray) requestWrapper.getKey();
keyHexString = RestUtils.getKeyHexString(key);
logger.debug("PUT requested for key: " + keyHexString + " , for store: "
+ this.storeName + " at time(in ms): " + startTime
+ " . Nested GET and PUT VERSION requests to follow ---");
}
        // We use the full timeout for doing the GET. Here we are being optimistic
        // that the subsequent put will be fast enough for all the steps to finish
        // within the allotted time.
requestWrapper.setResolveConflicts(true);
versionedValues = getWithCustomTimeout(requestWrapper);
Versioned<V> versioned = getItemOrThrow(requestWrapper.getKey(), null, versionedValues);
long endTime = System.currentTimeMillis();
if(versioned == null)
versioned = Versioned.value(requestWrapper.getRawValue(), new VectorClock());
else
versioned.setObject(requestWrapper.getRawValue());
// This should not happen unless there's a bug in the
// getWithCustomTimeout
long timeLeft = requestWrapper.getRoutingTimeoutInMs() - (endTime - startTime);
if(timeLeft <= 0) {
throw new StoreTimeoutException("PUT request timed out");
}
CompositeVersionedPutVoldemortRequest<K, V> putVersionedRequestObject = new CompositeVersionedPutVoldemortRequest<K, V>(requestWrapper.getKey(),
versioned,
timeLeft);
putVersionedRequestObject.setRequestOriginTimeInMs(requestWrapper.getRequestOriginTimeInMs());
Version result = putVersionedWithCustomTimeout(putVersionedRequestObject);
long endTimeInMs = System.currentTimeMillis();
if(logger.isDebugEnabled()) {
logger.debug("PUT response received for key: " + keyHexString + " , for store: "
+ this.storeName + " at time(in ms): " + endTimeInMs);
}
return result;
}
|
java
|
{
"resource": ""
}
|
q4491
|
DynamicTimeoutStoreClient.putVersionedWithCustomTimeout
|
train
|
public Version putVersionedWithCustomTimeout(CompositeVoldemortRequest<K, V> requestWrapper)
throws ObsoleteVersionException {
validateTimeout(requestWrapper.getRoutingTimeoutInMs());
for(int attempts = 0; attempts < this.metadataRefreshAttempts; attempts++) {
try {
String keyHexString = "";
long startTimeInMs = System.currentTimeMillis();
if(logger.isDebugEnabled()) {
ByteArray key = (ByteArray) requestWrapper.getKey();
keyHexString = RestUtils.getKeyHexString(key);
debugLogStart("PUT_VERSION",
requestWrapper.getRequestOriginTimeInMs(),
startTimeInMs,
keyHexString);
}
store.put(requestWrapper);
if(logger.isDebugEnabled()) {
debugLogEnd("PUT_VERSION",
requestWrapper.getRequestOriginTimeInMs(),
startTimeInMs,
System.currentTimeMillis(),
keyHexString,
0);
}
return requestWrapper.getValue().getVersion();
} catch(InvalidMetadataException e) {
logger.info("Received invalid metadata exception during put [ " + e.getMessage()
+ " ] on store '" + storeName + "'. Rebootstrapping");
bootStrap();
}
}
throw new VoldemortException(this.metadataRefreshAttempts
+ " metadata refresh attempts failed.");
}
|
java
|
{
"resource": ""
}
|
q4492
|
DynamicTimeoutStoreClient.getAllWithCustomTimeout
|
train
|
public Map<K, List<Versioned<V>>> getAllWithCustomTimeout(CompositeVoldemortRequest<K, V> requestWrapper) {
validateTimeout(requestWrapper.getRoutingTimeoutInMs());
Map<K, List<Versioned<V>>> items = null;
for(int attempts = 0;; attempts++) {
if(attempts >= this.metadataRefreshAttempts)
throw new VoldemortException(this.metadataRefreshAttempts
+ " metadata refresh attempts failed.");
try {
                String keysHexString = "";
long startTimeInMs = System.currentTimeMillis();
if(logger.isDebugEnabled()) {
Iterable<ByteArray> keys = (Iterable<ByteArray>) requestWrapper.getIterableKeys();
                    keysHexString = getKeysHexString(keys);
debugLogStart("GET_ALL",
requestWrapper.getRequestOriginTimeInMs(),
startTimeInMs,
                                  keysHexString);
}
items = store.getAll(requestWrapper);
if(logger.isDebugEnabled()) {
int vcEntrySize = 0;
for(List<Versioned<V>> item: items.values()) {
for(Versioned<V> vc: item) {
vcEntrySize += ((VectorClock) vc.getVersion()).getVersionMap().size();
}
}
debugLogEnd("GET_ALL",
requestWrapper.getRequestOriginTimeInMs(),
startTimeInMs,
System.currentTimeMillis(),
                                keysHexString,
vcEntrySize);
}
return items;
} catch(InvalidMetadataException e) {
logger.info("Received invalid metadata exception during getAll [ "
+ e.getMessage() + " ] on store '" + storeName + "'. Rebootstrapping");
bootStrap();
}
}
}
|
java
|
{
"resource": ""
}
|
q4493
|
DynamicTimeoutStoreClient.deleteWithCustomTimeout
|
train
|
public boolean deleteWithCustomTimeout(CompositeVoldemortRequest<K, V> deleteRequestObject) {
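        // If no version was supplied, first GET the current version and then issue the
        // versioned DELETE within whatever time remains of the routing timeout.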
List<Versioned<V>> versionedValues;
validateTimeout(deleteRequestObject.getRoutingTimeoutInMs());
        boolean hasVersion = deleteRequestObject.getVersion() != null;
String keyHexString = "";
if(!hasVersion) {
long startTimeInMs = System.currentTimeMillis();
if(logger.isDebugEnabled()) {
ByteArray key = (ByteArray) deleteRequestObject.getKey();
keyHexString = RestUtils.getKeyHexString(key);
logger.debug("DELETE without version requested for key: " + keyHexString
+ " , for store: " + this.storeName + " at time(in ms): "
+ startTimeInMs + " . Nested GET and DELETE requests to follow ---");
}
            // We use the full timeout for doing the GET. Here we are being optimistic
            // that the subsequent delete will be fast enough for all the steps to
            // finish within the allotted time.
deleteRequestObject.setResolveConflicts(true);
versionedValues = getWithCustomTimeout(deleteRequestObject);
Versioned<V> versioned = getItemOrThrow(deleteRequestObject.getKey(),
null,
versionedValues);
if(versioned == null) {
return false;
}
long timeLeft = deleteRequestObject.getRoutingTimeoutInMs()
- (System.currentTimeMillis() - startTimeInMs);
// This should not happen unless there's a bug in the
// getWithCustomTimeout
if(timeLeft < 0) {
throw new StoreTimeoutException("DELETE request timed out");
}
// Update the version and the new timeout
deleteRequestObject.setVersion(versioned.getVersion());
deleteRequestObject.setRoutingTimeoutInMs(timeLeft);
}
        long deleteVersionStartTimeInMs = System.currentTimeMillis();
if(logger.isDebugEnabled()) {
ByteArray key = (ByteArray) deleteRequestObject.getKey();
keyHexString = RestUtils.getKeyHexString(key);
debugLogStart("DELETE",
deleteRequestObject.getRequestOriginTimeInMs(),
                          deleteVersionStartTimeInMs,
keyHexString);
}
boolean result = store.delete(deleteRequestObject);
if(logger.isDebugEnabled()) {
debugLogEnd("DELETE",
deleteRequestObject.getRequestOriginTimeInMs(),
                        deleteVersionStartTimeInMs,
System.currentTimeMillis(),
keyHexString,
0);
}
if(!hasVersion && logger.isDebugEnabled()) {
logger.debug("DELETE without version response received for key: " + keyHexString
+ ", for store: " + this.storeName + " at time(in ms): "
+ System.currentTimeMillis());
}
return result;
}
|
java
|
{
"resource": ""
}
|
q4494
|
DynamicTimeoutStoreClient.debugLogStart
|
train
|
private void debugLogStart(String operationType,
Long originTimeInMS,
Long requestReceivedTimeInMs,
String keyString) {
long durationInMs = requestReceivedTimeInMs - originTimeInMS;
logger.debug("Received a new request. Operation Type: " + operationType + " , key(s): "
+ keyString + " , Store: " + this.storeName + " , Origin time (in ms): "
+ originTimeInMS + " . Request received at time(in ms): "
+ requestReceivedTimeInMs
+ " , Duration from RESTClient to CoordinatorFatClient(in ms): "
+ durationInMs);
}
|
java
|
{
"resource": ""
}
|
q4495
|
DynamicTimeoutStoreClient.debugLogEnd
|
train
|
    private void debugLogEnd(String operationType,
                             Long originTimeInMs,
                             Long requestStartTimeInMs,
                             Long responseReceivedTimeInMs,
                             String keyString,
                             int numVectorClockEntries) {
        long durationInMs = responseReceivedTimeInMs - requestStartTimeInMs;
        logger.debug("Received a response from voldemort server for Operation Type: "
                     + operationType
                     + " , For key(s): "
                     + keyString
                     + " , Store: "
                     + this.storeName
                     + " , Origin time of request (in ms): "
                     + originTimeInMs
                     + " , Response received at time (in ms): "
                     + responseReceivedTimeInMs
                     + " . Request sent at (in ms): "
                     + requestStartTimeInMs
                     + " , Num vector clock entries: "
                     + numVectorClockEntries
                     + " , Duration from CoordinatorFatClient back to CoordinatorFatClient(in ms): "
                     + durationInMs);
    }
|
java
|
{
"resource": ""
}
|
q4496
|
UpdateClusterUtils.updateNode
|
train
|
public static Node updateNode(Node node, List<Integer> partitionsList) {
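        // Return a copy of the node with the same identity, host and ports but the
        // given partition list.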
return new Node(node.getId(),
node.getHost(),
node.getHttpPort(),
node.getSocketPort(),
node.getAdminPort(),
node.getZoneId(),
partitionsList);
}
|
java
|
{
"resource": ""
}
|
q4497
|
UpdateClusterUtils.addPartitionToNode
|
train
|
public static Node addPartitionToNode(final Node node, Integer donatedPartition) {
return UpdateClusterUtils.addPartitionsToNode(node, Sets.newHashSet(donatedPartition));
}
|
java
|
{
"resource": ""
}
|
q4498
|
UpdateClusterUtils.removePartitionFromNode
|
train
|
public static Node removePartitionFromNode(final Node node, Integer donatedPartition) {
return UpdateClusterUtils.removePartitionsFromNode(node, Sets.newHashSet(donatedPartition));
}
|
java
|
{
"resource": ""
}
|
q4499
|
UpdateClusterUtils.addPartitionsToNode
|
train
|
public static Node addPartitionsToNode(final Node node, final Set<Integer> donatedPartitions) {
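        // Return a copy of the node whose partition list additionally contains the
        // donated partitions, kept in sorted order.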
List<Integer> deepCopy = new ArrayList<Integer>(node.getPartitionIds());
deepCopy.addAll(donatedPartitions);
Collections.sort(deepCopy);
return updateNode(node, deepCopy);
}
|
java
|
{
"resource": ""
}
|