Unnamed: 0 (int64, 0–6.45k) | func (stringlengths 37–143k) | target (class label, 2 classes) | project (stringlengths 33–157) |
---|---|---|---|
113 |
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
| 0true
|
src_main_java_jsr166e_ForkJoinPool.java
|
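The snippet above is the classic jsr166e fallback for obtaining sun.misc.Unsafe when the caller is not on the boot classpath and Unsafe.getUnsafe() throws a SecurityException. Below is a minimal sketch of how such an action is typically driven; the surrounding getUnsafe() helper is an assumption based on the usual jsr166e pattern, not part of this row:

```java
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

final class UnsafeHolder {
    // Hypothetical helper wrapping the privileged action shown in the row above.
    static sun.misc.Unsafe getUnsafe() {
        try {
            return sun.misc.Unsafe.getUnsafe(); // only works for boot-classpath callers
        } catch (SecurityException tryReflectionInstead) {
            try {
                return AccessController.doPrivileged(
                        new PrivilegedExceptionAction<sun.misc.Unsafe>() {
                            public sun.misc.Unsafe run() throws Exception {
                                Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
                                // Scan static fields until one holds the Unsafe singleton.
                                for (java.lang.reflect.Field f : k.getDeclaredFields()) {
                                    f.setAccessible(true);
                                    Object x = f.get(null);
                                    if (k.isInstance(x)) return k.cast(x);
                                }
                                throw new NoSuchFieldError("the Unsafe");
                            }
                        });
            } catch (PrivilegedActionException e) {
                throw new RuntimeException("Could not initialize intrinsics", e.getCause());
            }
        }
    }
}
```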
4,483 |
public class RecoveryTarget extends AbstractComponent {
public static class Actions {
public static final String FILES_INFO = "index/shard/recovery/filesInfo";
public static final String FILE_CHUNK = "index/shard/recovery/fileChunk";
public static final String CLEAN_FILES = "index/shard/recovery/cleanFiles";
public static final String TRANSLOG_OPS = "index/shard/recovery/translogOps";
public static final String PREPARE_TRANSLOG = "index/shard/recovery/prepareTranslog";
public static final String FINALIZE = "index/shard/recovery/finalize";
}
private final ThreadPool threadPool;
private final TransportService transportService;
private final IndicesService indicesService;
private final RecoverySettings recoverySettings;
private final ConcurrentMapLong<RecoveryStatus> onGoingRecoveries = ConcurrentCollections.newConcurrentMapLong();
@Inject
public RecoveryTarget(Settings settings, ThreadPool threadPool, TransportService transportService, IndicesService indicesService,
IndicesLifecycle indicesLifecycle, RecoverySettings recoverySettings) {
super(settings);
this.threadPool = threadPool;
this.transportService = transportService;
this.indicesService = indicesService;
this.recoverySettings = recoverySettings;
transportService.registerHandler(Actions.FILES_INFO, new FilesInfoRequestHandler());
transportService.registerHandler(Actions.FILE_CHUNK, new FileChunkTransportRequestHandler());
transportService.registerHandler(Actions.CLEAN_FILES, new CleanFilesRequestHandler());
transportService.registerHandler(Actions.PREPARE_TRANSLOG, new PrepareForTranslogOperationsRequestHandler());
transportService.registerHandler(Actions.TRANSLOG_OPS, new TranslogOperationsRequestHandler());
transportService.registerHandler(Actions.FINALIZE, new FinalizeRecoveryRequestHandler());
indicesLifecycle.addListener(new IndicesLifecycle.Listener() {
@Override
public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard) {
if (indexShard != null) {
removeAndCleanOnGoingRecovery(findRecoveryByShard(indexShard));
}
}
});
}
public RecoveryStatus peerRecoveryStatus(ShardId shardId) {
RecoveryStatus peerRecoveryStatus = findRecoveryByShardId(shardId);
if (peerRecoveryStatus == null) {
return null;
}
// update how long it takes if we are still recovering...
if (peerRecoveryStatus.startTime > 0 && peerRecoveryStatus.stage != RecoveryStatus.Stage.DONE) {
peerRecoveryStatus.time = System.currentTimeMillis() - peerRecoveryStatus.startTime;
}
return peerRecoveryStatus;
}
public void cancelRecovery(IndexShard indexShard) {
RecoveryStatus recoveryStatus = findRecoveryByShard(indexShard);
// it might be if the recovery source got canceled first
if (recoveryStatus == null) {
return;
}
if (recoveryStatus.sentCanceledToSource) {
return;
}
recoveryStatus.cancel();
try {
if (recoveryStatus.recoveryThread != null) {
recoveryStatus.recoveryThread.interrupt();
}
// give it a grace period so the cancellation ack can actually be sent to the source
final long sleepTime = 100;
final long maxSleepTime = 10000;
long rounds = Math.round(maxSleepTime / sleepTime);
while (!recoveryStatus.sentCanceledToSource && rounds > 0) {
rounds--;
try {
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
break; // interrupted - step out!
}
}
} finally {
removeAndCleanOnGoingRecovery(recoveryStatus);
}
}
public void startRecovery(final StartRecoveryRequest request, final InternalIndexShard indexShard, final RecoveryListener listener) {
try {
indexShard.recovering("from " + request.sourceNode());
} catch (IllegalIndexShardStateException e) {
// that's fine, since we might be called concurrently, just ignore this, we are already recovering
listener.onIgnoreRecovery(false, "already in recovering process, " + e.getMessage());
return;
}
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
// create a new recovery status, and process...
RecoveryStatus recoveryStatus = new RecoveryStatus(request.recoveryId(), indexShard);
onGoingRecoveries.put(recoveryStatus.recoveryId, recoveryStatus);
doRecovery(request, recoveryStatus, listener);
}
});
}
public void retryRecovery(final StartRecoveryRequest request, final RecoveryStatus status, final RecoveryListener listener) {
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
doRecovery(request, status, listener);
}
});
}
private void doRecovery(final StartRecoveryRequest request, final RecoveryStatus recoveryStatus, final RecoveryListener listener) {
if (request.sourceNode() == null) {
listener.onIgnoreRecovery(false, "No node to recover from, retry on next cluster state update");
return;
}
final InternalIndexShard shard = recoveryStatus.indexShard;
if (shard == null) {
listener.onIgnoreRecovery(false, "shard missing locally, stop recovery");
return;
}
if (shard.state() == IndexShardState.CLOSED) {
listener.onIgnoreRecovery(false, "local shard closed, stop recovery");
return;
}
if (recoveryStatus.isCanceled()) {
// don't remove it, the cancellation code will remove it...
listener.onIgnoreRecovery(false, "canceled recovery");
return;
}
recoveryStatus.recoveryThread = Thread.currentThread();
try {
logger.trace("[{}][{}] starting recovery from {}", request.shardId().index().name(), request.shardId().id(), request.sourceNode());
StopWatch stopWatch = new StopWatch().start();
RecoveryResponse recoveryResponse = transportService.submitRequest(request.sourceNode(), RecoverySource.Actions.START_RECOVERY, request, new FutureTransportResponseHandler<RecoveryResponse>() {
@Override
public RecoveryResponse newInstance() {
return new RecoveryResponse();
}
}).txGet();
if (shard.state() == IndexShardState.CLOSED) {
removeAndCleanOnGoingRecovery(recoveryStatus);
listener.onIgnoreRecovery(false, "local shard closed, stop recovery");
return;
}
stopWatch.stop();
if (logger.isDebugEnabled()) {
logger.debug("recovery completed from [{}], took [{}]", request.shardId(), request.sourceNode(), stopWatch.totalTime());
} else if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder();
sb.append('[').append(request.shardId().index().name()).append(']').append('[').append(request.shardId().id()).append("] ");
sb.append("recovery completed from ").append(request.sourceNode()).append(", took[").append(stopWatch.totalTime()).append("]\n");
sb.append(" phase1: recovered_files [").append(recoveryResponse.phase1FileNames.size()).append("]").append(" with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1TotalSize)).append("]")
.append(", took [").append(timeValueMillis(recoveryResponse.phase1Time)).append("], throttling_wait [").append(timeValueMillis(recoveryResponse.phase1ThrottlingWaitTime)).append(']')
.append("\n");
sb.append(" : reusing_files [").append(recoveryResponse.phase1ExistingFileNames.size()).append("] with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1ExistingTotalSize)).append("]\n");
sb.append(" phase2: start took [").append(timeValueMillis(recoveryResponse.startTime)).append("]\n");
sb.append(" : recovered [").append(recoveryResponse.phase2Operations).append("]").append(" transaction log operations")
.append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]")
.append("\n");
sb.append(" phase3: recovered [").append(recoveryResponse.phase3Operations).append("]").append(" transaction log operations")
.append(", took [").append(timeValueMillis(recoveryResponse.phase3Time)).append("]");
logger.trace(sb.toString());
}
removeAndCleanOnGoingRecovery(recoveryStatus);
listener.onRecoveryDone();
} catch (Throwable e) {
// logger.trace("[{}][{}] Got exception on recovery", e, request.shardId().index().name(), request.shardId().id());
if (recoveryStatus.isCanceled()) {
// don't remove it, the cancellation code will remove it...
listener.onIgnoreRecovery(false, "canceled recovery");
return;
}
if (shard.state() == IndexShardState.CLOSED) {
removeAndCleanOnGoingRecovery(recoveryStatus);
listener.onIgnoreRecovery(false, "local shard closed, stop recovery");
return;
}
Throwable cause = ExceptionsHelper.unwrapCause(e);
if (cause instanceof RecoveryEngineException) {
// unwrap an exception that was thrown as part of the recovery
cause = cause.getCause();
}
// unwrap a second time, in case we have a doubly wrapped transport exception
cause = ExceptionsHelper.unwrapCause(cause);
if (cause instanceof RecoveryEngineException) {
// unwrap an exception that was thrown as part of the recovery
cause = cause.getCause();
}
// here we check for exceptions that need to be retried (and in that case we don't removeAndClean)
if (cause instanceof IndexShardNotStartedException || cause instanceof IndexMissingException || cause instanceof IndexShardMissingException) {
// if the target is not ready yet, retry
listener.onRetryRecovery(TimeValue.timeValueMillis(500), recoveryStatus);
return;
}
if (cause instanceof DelayRecoveryException) {
listener.onRetryRecovery(TimeValue.timeValueMillis(500), recoveryStatus);
return;
}
// here, we check against ignore recovery options
// in general, no need to clean the shard on ignored recovery, since we want to try and reuse it later
// it will get deleted in the IndicesStore if all are allocated and no shard exists on this node...
removeAndCleanOnGoingRecovery(recoveryStatus);
if (cause instanceof ConnectTransportException) {
listener.onIgnoreRecovery(true, "source node disconnected (" + request.sourceNode() + ")");
return;
}
if (cause instanceof IndexShardClosedException) {
listener.onIgnoreRecovery(true, "source shard is closed (" + request.sourceNode() + ")");
return;
}
if (cause instanceof AlreadyClosedException) {
listener.onIgnoreRecovery(true, "source shard is closed (" + request.sourceNode() + ")");
return;
}
logger.trace("[{}][{}] recovery from [{}] failed", e, request.shardId().index().name(), request.shardId().id(), request.sourceNode());
listener.onRecoveryFailure(new RecoveryFailedException(request, e), true);
}
}
public static interface RecoveryListener {
void onRecoveryDone();
void onRetryRecovery(TimeValue retryAfter, RecoveryStatus status);
void onIgnoreRecovery(boolean removeShard, String reason);
void onRecoveryFailure(RecoveryFailedException e, boolean sendShardFailure);
}
@Nullable
private RecoveryStatus findRecoveryByShardId(ShardId shardId) {
for (RecoveryStatus recoveryStatus : onGoingRecoveries.values()) {
if (recoveryStatus.shardId.equals(shardId)) {
return recoveryStatus;
}
}
return null;
}
@Nullable
private RecoveryStatus findRecoveryByShard(IndexShard indexShard) {
for (RecoveryStatus recoveryStatus : onGoingRecoveries.values()) {
if (recoveryStatus.indexShard == indexShard) {
return recoveryStatus;
}
}
return null;
}
private void removeAndCleanOnGoingRecovery(@Nullable RecoveryStatus status) {
if (status == null) {
return;
}
// clean it from the on going recoveries since it is being closed
status = onGoingRecoveries.remove(status.recoveryId);
if (status == null) {
return;
}
// just mark it as canceled as well, just in case there are in flight requests
// coming from the recovery target
status.cancel();
// clean open index outputs
Set<Entry<String, IndexOutput>> entrySet = status.cancleAndClearOpenIndexInputs();
Iterator<Entry<String, IndexOutput>> iterator = entrySet.iterator();
while (iterator.hasNext()) {
Map.Entry<String, IndexOutput> entry = iterator.next();
synchronized (entry.getValue()) {
IOUtils.closeWhileHandlingException(entry.getValue());
}
iterator.remove();
}
status.checksums = null;
}
class PrepareForTranslogOperationsRequestHandler extends BaseTransportRequestHandler<RecoveryPrepareForTranslogOperationsRequest> {
@Override
public RecoveryPrepareForTranslogOperationsRequest newInstance() {
return new RecoveryPrepareForTranslogOperationsRequest();
}
@Override
public String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel) throws Exception {
RecoveryStatus onGoingRecovery = onGoingRecoveries.get(request.recoveryId());
if (onGoingRecovery == null) {
// shard is getting closed on us
throw new IndexShardClosedException(request.shardId());
}
if (onGoingRecovery.isCanceled()) {
onGoingRecovery.sentCanceledToSource = true;
throw new IndexShardClosedException(request.shardId());
}
onGoingRecovery.stage = RecoveryStatus.Stage.TRANSLOG;
onGoingRecovery.indexShard.performRecoveryPrepareForTranslog();
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
class FinalizeRecoveryRequestHandler extends BaseTransportRequestHandler<RecoveryFinalizeRecoveryRequest> {
@Override
public RecoveryFinalizeRecoveryRequest newInstance() {
return new RecoveryFinalizeRecoveryRequest();
}
@Override
public String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
public void messageReceived(RecoveryFinalizeRecoveryRequest request, TransportChannel channel) throws Exception {
RecoveryStatus onGoingRecovery = onGoingRecoveries.get(request.recoveryId());
if (onGoingRecovery == null) {
// shard is getting closed on us
throw new IndexShardClosedException(request.shardId());
}
if (onGoingRecovery.isCanceled()) {
onGoingRecovery.sentCanceledToSource = true;
throw new IndexShardClosedException(request.shardId());
}
onGoingRecovery.stage = RecoveryStatus.Stage.FINALIZE;
onGoingRecovery.indexShard.performRecoveryFinalization(false, onGoingRecovery);
onGoingRecovery.time = System.currentTimeMillis() - onGoingRecovery.startTime;
onGoingRecovery.stage = RecoveryStatus.Stage.DONE;
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
class TranslogOperationsRequestHandler extends BaseTransportRequestHandler<RecoveryTranslogOperationsRequest> {
@Override
public RecoveryTranslogOperationsRequest newInstance() {
return new RecoveryTranslogOperationsRequest();
}
@Override
public String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
public void messageReceived(RecoveryTranslogOperationsRequest request, TransportChannel channel) throws Exception {
RecoveryStatus onGoingRecovery = onGoingRecoveries.get(request.recoveryId());
if (onGoingRecovery == null) {
// shard is getting closed on us
throw new IndexShardClosedException(request.shardId());
}
if (onGoingRecovery.isCanceled()) {
onGoingRecovery.sentCanceledToSource = true;
throw new IndexShardClosedException(request.shardId());
}
InternalIndexShard shard = (InternalIndexShard) indicesService.indexServiceSafe(request.shardId().index().name()).shardSafe(request.shardId().id());
for (Translog.Operation operation : request.operations()) {
if (onGoingRecovery.isCanceled()) {
onGoingRecovery.sentCanceledToSource = true;
throw new IndexShardClosedException(request.shardId());
}
shard.performRecoveryOperation(operation);
onGoingRecovery.currentTranslogOperations++;
}
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
class FilesInfoRequestHandler extends BaseTransportRequestHandler<RecoveryFilesInfoRequest> {
@Override
public RecoveryFilesInfoRequest newInstance() {
return new RecoveryFilesInfoRequest();
}
@Override
public String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
public void messageReceived(RecoveryFilesInfoRequest request, TransportChannel channel) throws Exception {
RecoveryStatus onGoingRecovery = onGoingRecoveries.get(request.recoveryId());
if (onGoingRecovery == null) {
// shard is getting closed on us
throw new IndexShardClosedException(request.shardId());
}
if (onGoingRecovery.isCanceled()) {
onGoingRecovery.sentCanceledToSource = true;
throw new IndexShardClosedException(request.shardId());
}
onGoingRecovery.phase1FileNames = request.phase1FileNames;
onGoingRecovery.phase1FileSizes = request.phase1FileSizes;
onGoingRecovery.phase1ExistingFileNames = request.phase1ExistingFileNames;
onGoingRecovery.phase1ExistingFileSizes = request.phase1ExistingFileSizes;
onGoingRecovery.phase1TotalSize = request.phase1TotalSize;
onGoingRecovery.phase1ExistingTotalSize = request.phase1ExistingTotalSize;
onGoingRecovery.stage = RecoveryStatus.Stage.INDEX;
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
class CleanFilesRequestHandler extends BaseTransportRequestHandler<RecoveryCleanFilesRequest> {
@Override
public RecoveryCleanFilesRequest newInstance() {
return new RecoveryCleanFilesRequest();
}
@Override
public String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
public void messageReceived(RecoveryCleanFilesRequest request, TransportChannel channel) throws Exception {
RecoveryStatus onGoingRecovery = onGoingRecoveries.get(request.recoveryId());
if (onGoingRecovery == null) {
// shard is getting closed on us
throw new IndexShardClosedException(request.shardId());
}
if (onGoingRecovery.isCanceled()) {
onGoingRecovery.sentCanceledToSource = true;
throw new IndexShardClosedException(request.shardId());
}
Store store = onGoingRecovery.indexShard.store();
// first, we go and move files that were created with the recovery id suffix to
the actual names, it's ok if we have a corrupted index here, since we have replicas
// to recover from in case of a full cluster shutdown just when this code executes...
String prefix = "recovery." + onGoingRecovery.startTime + ".";
Set<String> filesToRename = Sets.newHashSet();
for (String existingFile : store.directory().listAll()) {
if (existingFile.startsWith(prefix)) {
filesToRename.add(existingFile.substring(prefix.length(), existingFile.length()));
}
}
Exception failureToRename = null;
if (!filesToRename.isEmpty()) {
// first, go and delete the existing ones
final Directory directory = store.directory();
for (String file : filesToRename) {
try {
directory.deleteFile(file);
} catch (Throwable ex) {
logger.debug("failed to delete file [{}]", ex, file);
}
}
for (String fileToRename : filesToRename) {
// now, rename the files... and fail if it won't work
store.renameFile(prefix + fileToRename, fileToRename);
}
}
// now write checksums
store.writeChecksums(onGoingRecovery.checksums);
for (String existingFile : store.directory().listAll()) {
// don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete checksum)
if (!request.snapshotFiles().contains(existingFile) && !Store.isChecksum(existingFile)) {
try {
store.directory().deleteFile(existingFile);
} catch (Exception e) {
// ignore, we don't really care, will get deleted later on
}
}
}
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
class FileChunkTransportRequestHandler extends BaseTransportRequestHandler<RecoveryFileChunkRequest> {
@Override
public RecoveryFileChunkRequest newInstance() {
return new RecoveryFileChunkRequest();
}
@Override
public String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel) throws Exception {
RecoveryStatus onGoingRecovery = onGoingRecoveries.get(request.recoveryId());
if (onGoingRecovery == null) {
// shard is getting closed on us
throw new IndexShardClosedException(request.shardId());
}
if (onGoingRecovery.isCanceled()) {
onGoingRecovery.sentCanceledToSource = true;
throw new IndexShardClosedException(request.shardId());
}
Store store = onGoingRecovery.indexShard.store();
IndexOutput indexOutput;
if (request.position() == 0) {
// first request
onGoingRecovery.checksums.remove(request.name());
indexOutput = onGoingRecovery.removeOpenIndexOutputs(request.name());
IOUtils.closeWhileHandlingException(indexOutput);
// we create an output with no checksum: because of seeks, the bytes streamed here are not
// the file's sequential contents, so we will create the checksum file once copying is done
// also, we check if the file already exists, if it does, we create a file name based
// on the current recovery "id" and later we make the switch, the reason for that is that
// we only want to overwrite the index files once we copied all over, and not create a
// case where the index is half moved
String fileName = request.name();
if (store.directory().fileExists(fileName)) {
fileName = "recovery." + onGoingRecovery.startTime + "." + fileName;
}
indexOutput = onGoingRecovery.openAndPutIndexOutput(request.name(), fileName, store);
} else {
indexOutput = onGoingRecovery.getOpenIndexOutput(request.name());
}
if (indexOutput == null) {
// shard is getting closed on us
throw new IndexShardClosedException(request.shardId());
}
boolean success = false;
synchronized (indexOutput) {
try {
if (recoverySettings.rateLimiter() != null) {
recoverySettings.rateLimiter().pause(request.content().length());
}
BytesReference content = request.content();
if (!content.hasArray()) {
content = content.toBytesArray();
}
indexOutput.writeBytes(content.array(), content.arrayOffset(), content.length());
onGoingRecovery.currentFilesSize.addAndGet(request.length());
if (indexOutput.getFilePointer() == request.length()) {
// we are done
indexOutput.close();
// write the checksum
if (request.checksum() != null) {
onGoingRecovery.checksums.put(request.name(), request.checksum());
}
store.directory().sync(Collections.singleton(request.name()));
IndexOutput remove = onGoingRecovery.removeOpenIndexOutputs(request.name());
assert remove == indexOutput;
}
success = true;
} finally {
if (!success || onGoingRecovery.isCanceled()) {
IndexOutput remove = onGoingRecovery.removeOpenIndexOutputs(request.name());
assert remove == indexOutput;
IOUtils.closeWhileHandlingException(indexOutput);
}
}
}
if (onGoingRecovery.isCanceled()) {
onGoingRecovery.sentCanceledToSource = true;
throw new IndexShardClosedException(request.shardId());
}
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_indices_recovery_RecoveryTarget.java
|
423 |
public class TransportRestoreSnapshotAction extends TransportMasterNodeOperationAction<RestoreSnapshotRequest, RestoreSnapshotResponse> {
private final RestoreService restoreService;
@Inject
public TransportRestoreSnapshotAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, RestoreService restoreService) {
super(settings, transportService, clusterService, threadPool);
this.restoreService = restoreService;
}
@Override
protected String executor() {
return ThreadPool.Names.SNAPSHOT;
}
@Override
protected String transportAction() {
return RestoreSnapshotAction.NAME;
}
@Override
protected RestoreSnapshotRequest newRequest() {
return new RestoreSnapshotRequest();
}
@Override
protected RestoreSnapshotResponse newResponse() {
return new RestoreSnapshotResponse();
}
@Override
protected ClusterBlockException checkBlock(RestoreSnapshotRequest request, ClusterState state) {
return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, "");
}
@Override
protected void masterOperation(final RestoreSnapshotRequest request, ClusterState state, final ActionListener<RestoreSnapshotResponse> listener) throws ElasticsearchException {
RestoreService.RestoreRequest restoreRequest =
new RestoreService.RestoreRequest("restore_snapshot[" + request.snapshot() + "]", request.repository(), request.snapshot())
.indices(request.indices())
.indicesOptions(request.indicesOptions())
.renamePattern(request.renamePattern())
.renameReplacement(request.renameReplacement())
.includeGlobalState(request.includeGlobalState())
.settings(request.settings())
.masterNodeTimeout(request.masterNodeTimeout());
restoreService.restoreSnapshot(restoreRequest, new RestoreSnapshotListener() {
@Override
public void onResponse(RestoreInfo restoreInfo) {
if (restoreInfo == null) {
if (request.waitForCompletion()) {
restoreService.addListener(new RestoreService.RestoreCompletionListener() {
SnapshotId snapshotId = new SnapshotId(request.repository(), request.snapshot());
@Override
public void onRestoreCompletion(SnapshotId snapshotId, RestoreInfo snapshot) {
if (this.snapshotId.equals(snapshotId)) {
listener.onResponse(new RestoreSnapshotResponse(snapshot));
restoreService.removeListener(this);
}
}
});
} else {
listener.onResponse(new RestoreSnapshotResponse(null));
}
} else {
listener.onResponse(new RestoreSnapshotResponse(restoreInfo));
}
}
@Override
public void onFailure(Throwable t) {
listener.onFailure(t);
}
});
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_admin_cluster_snapshots_restore_TransportRestoreSnapshotAction.java
|
2,165 |
class FixedBitSetIterator extends FilteredDocIdSetIterator {
FixedBitSetIterator(DocIdSetIterator innerIter) {
super(innerIter);
}
@Override
protected boolean match(int doc) {
return matchDoc(doc);
}
}
| 1no label
|
src_main_java_org_elasticsearch_common_lucene_docset_MatchDocIdSet.java
|
82 |
@SuppressWarnings("serial")
static final class MapReduceValuesToDoubleTask<K,V>
extends BulkTask<K,V,Double> {
final ObjectToDouble<? super V> transformer;
final DoubleByDoubleToDouble reducer;
final double basis;
double result;
MapReduceValuesToDoubleTask<K,V> rights, nextRight;
MapReduceValuesToDoubleTask
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
MapReduceValuesToDoubleTask<K,V> nextRight,
ObjectToDouble<? super V> transformer,
double basis,
DoubleByDoubleToDouble reducer) {
super(p, b, i, f, t); this.nextRight = nextRight;
this.transformer = transformer;
this.basis = basis; this.reducer = reducer;
}
public final Double getRawResult() { return result; }
public final void compute() {
final ObjectToDouble<? super V> transformer;
final DoubleByDoubleToDouble reducer;
if ((transformer = this.transformer) != null &&
(reducer = this.reducer) != null) {
double r = this.basis;
for (int i = baseIndex, f, h; batch > 0 &&
(h = ((f = baseLimit) + i) >>> 1) > i;) {
addToPendingCount(1);
(rights = new MapReduceValuesToDoubleTask<K,V>
(this, batch >>>= 1, baseLimit = h, f, tab,
rights, transformer, r, reducer)).fork();
}
for (Node<K,V> p; (p = advance()) != null; )
r = reducer.apply(r, transformer.apply(p.val));
result = r;
CountedCompleter<?> c;
for (c = firstComplete(); c != null; c = c.nextComplete()) {
@SuppressWarnings("unchecked") MapReduceValuesToDoubleTask<K,V>
t = (MapReduceValuesToDoubleTask<K,V>)c,
s = t.rights;
while (s != null) {
t.result = reducer.apply(t.result, s.result);
s = t.rights = s.nextRight;
}
}
}
}
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
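The task above is the internal fork/join engine behind ConcurrentHashMapV8's bulk value reduction: each task repeatedly halves its range and forks a right-hand subtask, then folds partial results back through the CountedCompleter completion chain. A hedged usage sketch, assuming the public reduceValuesToDouble(parallelismThreshold, transformer, basis, reducer) entry point of jsr166e (an assumption; only the internal task appears in this row):

```java
import jsr166e.ConcurrentHashMapV8;

public class SumValuesExample {
    public static void main(String[] args) {
        ConcurrentHashMapV8<String, Integer> map = new ConcurrentHashMapV8<String, Integer>();
        map.put("a", 1);
        map.put("b", 2);
        map.put("c", 3);
        // parallelismThreshold = 1 forks aggressively; the transformer maps each
        // value to a double, the reducer folds pairs, and basis is the identity.
        double sum = map.reduceValuesToDouble(
                1L,
                new ConcurrentHashMapV8.ObjectToDouble<Integer>() {
                    public double apply(Integer v) { return v.doubleValue(); }
                },
                0.0,
                new ConcurrentHashMapV8.DoubleByDoubleToDouble() {
                    public double apply(double a, double b) { return a + b; }
                });
        System.out.println(sum); // 6.0
    }
}
```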
65 |
public interface TitanIndexQuery {
/**
* Specifies the maximum number of elements to return
*
* @param limit
* @return
*/
public TitanIndexQuery limit(int limit);
/**
* Specifies the offset of the query. Query results will be retrieved starting at the given offset.
* @param offset
* @return
*/
public TitanIndexQuery offset(int offset);
/**
* Adds the given parameter to the list of parameters of this query.
* Parameters are passed right through to the indexing backend to modify the query behavior.
* @param para
* @return
*/
public TitanIndexQuery addParameter(Parameter para);
/**
* Adds the given parameters to the list of parameters of this query.
* Parameters are passed right through to the indexing backend to modify the query behavior.
* @param paras
* @return
*/
public TitanIndexQuery addParameters(Iterable<Parameter> paras);
/**
* Adds the given parameters to the list of parameters of this query.
* Parameters are passed right through to the indexing backend to modify the query behavior.
* @param paras
* @return
*/
public TitanIndexQuery addParameters(Parameter... paras);
/**
* Sets the element identifier string that is used by this query builder as the token to identify key references
* in the query string.
* <p/>
* For example, in the query 'v.name: Tom' the element identifier is 'v.'
*
*
* @param identifier The element identifier which must not be blank
* @return This query builder
*/
public TitanIndexQuery setElementIdentifier(String identifier);
/**
* Returns all vertices that match the query in the indexing backend.
*
* @return
*/
public Iterable<Result<Vertex>> vertices();
/**
* Returns all edges that match the query in the indexing backend.
*
* @return
*/
public Iterable<Result<Edge>> edges();
/**
* Returns all properties that match the query in the indexing backend.
*
* @return
*/
public Iterable<Result<TitanProperty>> properties();
/**
* Container of a query result with its score.
* @param <V>
*/
public interface Result<V extends Element> {
/**
* Returns the element that matches the query
*
* @return
*/
public V getElement();
/**
* Returns the score of the result with respect to the query (if available)
* @return
*/
public double getScore();
}
}
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_TitanIndexQuery.java
|
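The Javadoc above describes a fluent API for querying an external index directly. A hedged usage sketch, assuming the usual TitanGraph.indexQuery(indexName, queryString) entry point and an external index named "search" (both assumptions, not shown in this row):

```java
import com.thinkaurelius.titan.core.TitanFactory;
import com.thinkaurelius.titan.core.TitanGraph;
import com.thinkaurelius.titan.core.TitanIndexQuery;
import com.tinkerpop.blueprints.Vertex;

public class IndexQueryExample {
    public static void main(String[] args) {
        TitanGraph graph = TitanFactory.open("/tmp/titan"); // path is illustrative
        // "v." is the element identifier described by setElementIdentifier() above,
        // so "v.name:Tom" restricts the match to the vertex property 'name'.
        for (TitanIndexQuery.Result<Vertex> result :
                graph.indexQuery("search", "v.name:Tom").limit(10).offset(0).vertices()) {
            System.out.println(result.getElement() + " score=" + result.getScore());
        }
        graph.shutdown();
    }
}
```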
87 |
public class ODFACommandStream implements OCommandStream {
public static final int BUFFER_SIZE = 1024;
private Reader reader;
private CharBuffer buffer;
private final Set<Character> separators = new HashSet<Character>(Arrays.asList(';', '\n'));
private int position;
private int start;
private int end;
private StringBuilder partialResult;
private State state;
public ODFACommandStream(String commands) {
reader = new StringReader(commands);
init();
}
public ODFACommandStream(File file) throws FileNotFoundException {
reader = new BufferedReader(new FileReader(file));
init();
}
private void init() {
buffer = CharBuffer.allocate(BUFFER_SIZE);
buffer.flip();
}
@Override
public boolean hasNext() {
try {
fillBuffer();
return buffer.hasRemaining();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private void fillBuffer() throws IOException {
if (!buffer.hasRemaining()) {
buffer.clear();
reader.read(buffer);
buffer.flip();
}
}
@Override
public String nextCommand() {
try {
fillBuffer();
partialResult = new StringBuilder();
state = State.S;
start = 0;
end = -1;
position = 0;
Symbol s = null;
while (state != State.E) {
s = nextSymbol();
final State newState = transition(state, s);
if (state == State.S && newState != State.S)
start = position;
if (newState == State.A)
end = position;
if (newState == State.F)
throw new IllegalStateException("Unexpected end of file");
state = newState;
position++;
}
if (s == Symbol.EOF) {
position--;
if (end == -1) {
start = 0;
end = 0;
}
}
final String result;
if (partialResult.length() > 0) {
if (end > 0) {
result = partialResult.append(buffer.subSequence(start, end + 1).toString()).toString();
} else {
partialResult.setLength(partialResult.length() + end + 1);
result = partialResult.toString();
}
} else {
result = buffer.subSequence(start, end + 1).toString();
}
buffer.position(buffer.position() + position);
return result;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private Symbol nextSymbol() throws IOException {
Symbol s;
if (buffer.position() + position < buffer.limit()) {
s = symbol(buffer.charAt(position));
} else {
buffer.compact();
int read = reader.read(buffer);
buffer.flip();
if (read == 0) {
// There is something in source, but buffer is full
if (state != State.S)
partialResult.append(buffer.subSequence(start, position).toString());
start = 0;
end = end - position;
buffer.clear();
read = reader.read(buffer);
buffer.flip();
position = 0;
}
if (read == -1) {
s = Symbol.EOF;
} else {
s = symbol(buffer.charAt(position));
}
}
return s;
}
private State transition(State s, Symbol c) {
switch (s) {
case S:
switch (c) {
case LATTER:
return State.A;
case WS:
return State.S;
case AP:
return State.B;
case QT:
return State.C;
case SEP:
return State.S;
case EOF:
return State.E;
}
break;
case A:
case D:
switch (c) {
case LATTER:
return State.A;
case WS:
return State.D;
case AP:
return State.B;
case QT:
return State.C;
case SEP:
return State.E;
case EOF:
return State.E;
}
break;
case B:
switch (c) {
case LATTER:
return State.B;
case WS:
return State.B;
case AP:
return State.A;
case QT:
return State.B;
case SEP:
return State.B;
case EOF:
return State.F;
}
break;
case C:
switch (c) {
case LATTER:
return State.C;
case WS:
return State.C;
case AP:
return State.C;
case QT:
return State.A;
case SEP:
return State.C;
case EOF:
return State.F;
}
break;
case E:
return State.E;
case F:
return State.F;
}
throw new IllegalStateException();
}
@Override
public void close() {
try {
reader.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public Symbol symbol(Character c) {
if (c.equals('\''))
return Symbol.AP;
if (c.equals('"'))
return Symbol.QT;
if (separators.contains(c))
return Symbol.SEP;
if (Character.isWhitespace(c))
return Symbol.WS;
return Symbol.LATTER;
}
private enum State {
S, A, B, C, D, E, F
}
private enum Symbol {
LATTER, WS, QT, AP, SEP, EOF
}
}
| 1no label
|
commons_src_main_java_com_orientechnologies_common_console_ODFACommandStream.java
|
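A minimal usage sketch for the stream above, grounded in the constructors and methods shown in this row: commands are split on ';' and newlines, while separators inside single or double quotes (states B and C of the transition table) are preserved:

```java
import com.orientechnologies.common.console.ODFACommandStream;

public class CommandStreamExample {
    public static void main(String[] args) {
        ODFACommandStream stream =
                new ODFACommandStream("select from V;insert into V set name = 'a;b'");
        try {
            while (stream.hasNext()) {
                // Prints "select from V", then "insert into V set name = 'a;b'";
                // the quoted ';' does not terminate the second command.
                System.out.println(stream.nextCommand());
            }
        } finally {
            stream.close();
        }
    }
}
```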
5,877 |
public class QueryParseElement implements SearchParseElement {
@Override
public void parse(XContentParser parser, SearchContext context) throws Exception {
context.parsedQuery(context.queryParserService().parse(parser));
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_query_QueryParseElement.java
|
857 |
@SuppressWarnings("unchecked")
public class ORole extends ODocumentWrapper {
private static final long serialVersionUID = 1L;
public static final String ADMIN = "admin";
public static final String CLASS_NAME = "ORole";
public enum ALLOW_MODES {
DENY_ALL_BUT, ALLOW_ALL_BUT
}
// CRUD OPERATIONS
private static Map<Integer, String> PERMISSION_BIT_NAMES;
public final static int PERMISSION_NONE = 0;
public final static int PERMISSION_CREATE = registerPermissionBit(0, "Create");
public final static int PERMISSION_READ = registerPermissionBit(1, "Read");
public final static int PERMISSION_UPDATE = registerPermissionBit(2, "Update");
public final static int PERMISSION_DELETE = registerPermissionBit(3, "Delete");
public final static int PERMISSION_ALL = PERMISSION_CREATE + PERMISSION_READ + PERMISSION_UPDATE
+ PERMISSION_DELETE;
protected final static byte STREAM_DENY = 0;
protected final static byte STREAM_ALLOW = 1;
protected ALLOW_MODES mode = ALLOW_MODES.DENY_ALL_BUT;
protected ORole parentRole;
protected Map<String, Byte> rules = new LinkedHashMap<String, Byte>();
/**
* Constructor used in unmarshalling.
*/
public ORole() {
}
public ORole(final String iName, final ORole iParent, final ALLOW_MODES iAllowMode) {
super(CLASS_NAME);
document.field("name", iName);
parentRole = iParent;
document.field("inheritedRole", iParent != null ? iParent.getDocument() : null);
setMode(iAllowMode);
document.field("rules", new HashMap<String, Number>());
}
/**
* Create the role by reading the source document.
*/
public ORole(final ODocument iSource) {
fromStream(iSource);
}
@Override
@OBeforeDeserialization
public void fromStream(final ODocument iSource) {
if (document != null)
return;
document = iSource;
try {
mode = ((Number) document.field("mode")).byteValue() == STREAM_ALLOW ? ALLOW_MODES.ALLOW_ALL_BUT : ALLOW_MODES.DENY_ALL_BUT;
} catch (Exception ex) {
OLogManager.instance().error(this, "illegal mode " + ex.getMessage());
mode = ALLOW_MODES.DENY_ALL_BUT;
}
final OIdentifiable role = document.field("inheritedRole");
parentRole = role != null ? document.getDatabase().getMetadata().getSecurity().getRole(role) : null;
final Map<String, Number> storedRules = document.field("rules");
if (storedRules != null)
for (Entry<String, Number> a : storedRules.entrySet()) {
rules.put(a.getKey().toLowerCase(), a.getValue().byteValue());
}
if (getName().equals("admin") && !hasRule(ODatabaseSecurityResources.BYPASS_RESTRICTED))
// FIX 1.5.1 TO ASSIGN database.bypassRestricted rule to the role
addRule(ODatabaseSecurityResources.BYPASS_RESTRICTED, ORole.PERMISSION_ALL).save();
}
public boolean allow(final String iResource, final int iCRUDOperation) {
// CHECK FOR SECURITY AS DIRECT RESOURCE
final Byte access = rules.get(iResource);
if (access != null) {
final byte mask = (byte) iCRUDOperation;
return (access.byteValue() & mask) == mask;
} else if (parentRole != null)
// DELEGATE TO THE PARENT ROLE IF ANY
return parentRole.allow(iResource, iCRUDOperation);
return mode == ALLOW_MODES.ALLOW_ALL_BUT;
}
public boolean hasRule(final String iResource) {
return rules.containsKey(iResource.toLowerCase());
}
public ORole addRule(final String iResource, final int iOperation) {
rules.put(iResource.toLowerCase(), (byte) iOperation);
document.field("rules", rules);
return this;
}
/**
* Grant a permission on the resource.
*
* @param iResource
* Requested resource
* @param iOperation
* Permission to grant/add
* @return
*/
public ORole grant(final String iResource, final int iOperation) {
final Byte current = rules.get(iResource);
byte currentValue = current == null ? PERMISSION_NONE : current.byteValue();
currentValue |= (byte) iOperation;
rules.put(iResource.toLowerCase(), currentValue);
document.field("rules", rules);
return this;
}
/**
* Revoke a permission on the resource.
*
* @param iResource
* Requested resource
* @param iOperation
* Permission to revoke
*/
public ORole revoke(final String iResource, final int iOperation) {
if (iOperation == PERMISSION_NONE)
return this;
final Byte current = rules.get(iResource);
byte currentValue;
if (current == null)
currentValue = PERMISSION_NONE;
else {
currentValue = current.byteValue();
currentValue &= ~(byte) iOperation;
}
rules.put(iResource.toLowerCase(), currentValue);
document.field("rules", rules);
return this;
}
public String getName() {
return document.field("name");
}
public ALLOW_MODES getMode() {
return mode;
}
public ORole setMode(final ALLOW_MODES iMode) {
this.mode = iMode;
document.field("mode", mode == ALLOW_MODES.ALLOW_ALL_BUT ? STREAM_ALLOW : STREAM_DENY);
return this;
}
public ORole getParentRole() {
return parentRole;
}
public ORole setParentRole(final ORole iParent) {
this.parentRole = iParent;
document.field("inheritedRole", parentRole != null ? parentRole.getDocument() : null);
return this;
}
@Override
public ORole save() {
document.save(ORole.class.getSimpleName());
return this;
}
public Map<String, Byte> getRules() {
return Collections.unmodifiableMap(rules);
}
@Override
public String toString() {
return getName();
}
/**
* Convert the permission code to a readable string.
*
* @param iPermission
* Permission to convert
* @return String representation of the permission
*/
public static String permissionToString(final int iPermission) {
int permission = iPermission;
final StringBuilder returnValue = new StringBuilder();
for (Entry<Integer, String> p : PERMISSION_BIT_NAMES.entrySet()) {
if ((permission & p.getKey()) == p.getKey()) {
if (returnValue.length() > 0)
returnValue.append(", ");
returnValue.append(p.getValue());
permission &= ~p.getKey();
}
}
if (permission != 0) {
if (returnValue.length() > 0)
returnValue.append(", ");
returnValue.append("Unknown 0x");
returnValue.append(Integer.toHexString(permission));
}
return returnValue.toString();
}
public static int registerPermissionBit(final int iBitNo, final String iName) {
if (iBitNo < 0 || iBitNo > 31)
throw new IndexOutOfBoundsException("Permission bit number must be positive and less than 32");
final int value = 1 << iBitNo;
if (PERMISSION_BIT_NAMES == null)
PERMISSION_BIT_NAMES = new HashMap<Integer, String>();
if (PERMISSION_BIT_NAMES.containsKey(value))
throw new IndexOutOfBoundsException("Permission bit number " + String.valueOf(iBitNo) + " already in use");
PERMISSION_BIT_NAMES.put(value, iName);
return value;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_metadata_security_ORole.java
|
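The permission constants above are single bits (1 << iBitNo) that are combined by OR-ing: grant() ORs a bit in, revoke() ANDs its complement out, and allow() tests the bits with a mask. A short worked sketch of that arithmetic, using the values registered above (Create=1, Read=2, Update=4, Delete=8):

```java
public class PermissionBitsExample {
    public static void main(String[] args) {
        byte rights = 0;                       // PERMISSION_NONE
        rights |= (byte) (1 | 2);              // grant Create + Read   -> 0b0011
        rights &= ~(byte) 1;                   // revoke Create         -> 0b0010
        boolean canRead = (rights & 2) == 2;   // the mask check used by allow()
        System.out.println(rights + " canRead=" + canRead); // prints "2 canRead=true"
    }
}
```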
66 |
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_FLD_GROUP")
@Cache(usage= CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blCMSElements")
public class FieldGroupImpl implements FieldGroup {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "FieldGroupId")
@GenericGenerator(
name="FieldGroupId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="FieldGroupImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.cms.field.domain.FieldGroupImpl")
}
)
@Column(name = "FLD_GROUP_ID")
protected Long id;
@Column (name = "NAME")
protected String name;
@Column (name = "INIT_COLLAPSED_FLAG")
protected Boolean initCollapsedFlag = false;
@OneToMany(mappedBy = "fieldGroup", targetEntity = FieldDefinitionImpl.class, cascade = {CascadeType.ALL})
@Cascade(value={org.hibernate.annotations.CascadeType.ALL, org.hibernate.annotations.CascadeType.DELETE_ORPHAN})
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blCMSElements")
@OrderBy("fieldOrder")
@BatchSize(size = 20)
protected List<FieldDefinition> fieldDefinitions;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public String getName() {
return name;
}
@Override
public void setName(String name) {
this.name = name;
}
@Override
public Boolean getInitCollapsedFlag() {
return initCollapsedFlag;
}
@Override
public void setInitCollapsedFlag(Boolean initCollapsedFlag) {
this.initCollapsedFlag = initCollapsedFlag;
}
@Override
public List<FieldDefinition> getFieldDefinitions() {
return fieldDefinitions;
}
@Override
public void setFieldDefinitions(List<FieldDefinition> fieldDefinitions) {
this.fieldDefinitions = fieldDefinitions;
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_field_domain_FieldGroupImpl.java
|
251 |
public class BufferedChecksumIndexOutput extends BufferedIndexOutput {
private final IndexOutput delegate;
private final BufferedIndexOutput bufferedDelegate;
private final Checksum digest;
public BufferedChecksumIndexOutput(IndexOutput delegate, Checksum digest) {
super(delegate instanceof BufferedIndexOutput ? ((BufferedIndexOutput) delegate).getBufferSize() : BufferedIndexOutput.DEFAULT_BUFFER_SIZE);
if (delegate instanceof BufferedIndexOutput) {
bufferedDelegate = (BufferedIndexOutput) delegate;
this.delegate = delegate;
} else {
this.delegate = delegate;
bufferedDelegate = null;
}
this.digest = digest;
}
public Checksum digest() {
return digest;
}
public IndexOutput underlying() {
return this.delegate;
}
// don't override it, the base class method simply reads from input and writes to this output
// @Override public void copyBytes(IndexInput input, long numBytes) throws IOException {
// delegate.copyBytes(input, numBytes);
// }
@Override
public void close() throws IOException {
try {
super.close();
} finally {
delegate.close();
}
}
@Override
protected void flushBuffer(byte[] b, int offset, int len) throws IOException {
if (bufferedDelegate != null) {
bufferedDelegate.flushBuffer(b, offset, len);
} else {
delegate.writeBytes(b, offset, len);
}
digest.update(b, offset, len);
}
// don't override it, the base class method simply reads from input and writes to this output
// @Override public void copyBytes(IndexInput input, long numBytes) throws IOException {
// delegate.copyBytes(input, numBytes);
// }
@Override
public void flush() throws IOException {
try {
super.flush();
} finally {
delegate.flush();
}
}
@Override
public void seek(long pos) throws IOException {
// seek might be called on files, which means that the checksum is not file checksum
// but a checksum of the bytes written to this stream, which is the same for each
// type of file in lucene
super.seek(pos);
delegate.seek(pos);
}
@Override
public long length() throws IOException {
return delegate.length();
}
@Override
public void setLength(long length) throws IOException {
delegate.setLength(length);
}
@Override
public String toString() {
return delegate.toString();
}
}
| 0true
|
src_main_java_org_apache_lucene_store_BufferedChecksumIndexOutput.java
|
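The wrapper above feeds every flushed buffer into the supplied Checksum from flushBuffer(). A hedged usage sketch with a java.util.zip.CRC32 digest, assuming the Lucene 4.x Directory.createOutput(name, context) signature:

```java
import java.util.zip.CRC32;
import org.apache.lucene.store.BufferedChecksumIndexOutput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;

public class ChecksumOutputExample {
    public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexOutput raw = dir.createOutput("_0.dat", IOContext.DEFAULT);
        BufferedChecksumIndexOutput out = new BufferedChecksumIndexOutput(raw, new CRC32());
        out.writeBytes(new byte[]{1, 2, 3}, 0, 3);
        out.flush(); // the digest only sees bytes once they leave the buffer
        System.out.println("crc=" + out.digest().getValue());
        out.close();
        dir.close();
    }
}
```

Note that the digest only reflects bytes that have been flushed out of the buffer, hence the explicit flush() before reading it.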
581 |
class ShardOptimizeResponse extends BroadcastShardOperationResponse {
ShardOptimizeResponse() {
}
public ShardOptimizeResponse(String index, int shardId) {
super(index, shardId);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_optimize_ShardOptimizeResponse.java
|
204 |
public abstract class AbstractHydratedCacheManager implements CacheEventListener, HydratedCacheManager, HydratedAnnotationManager {
private static final Log LOG = LogFactory.getLog(AbstractHydratedCacheManager.class);
private Map<String, HydrationDescriptor> hydrationDescriptors = Collections.synchronizedMap(new HashMap(100));
@Override
public HydrationDescriptor getHydrationDescriptor(Object entity) {
if (hydrationDescriptors.containsKey(entity.getClass().getName())) {
return hydrationDescriptors.get(entity.getClass().getName());
}
HydrationDescriptor descriptor = new HydrationDescriptor();
Class<?> topEntityClass = getTopEntityClass(entity);
HydrationScanner scanner = new HydrationScanner(topEntityClass, entity.getClass());
scanner.init();
descriptor.setHydratedMutators(scanner.getCacheMutators());
Map<String, Method[]> mutators = scanner.getIdMutators();
if (mutators.size() != 1) {
throw new RuntimeException("Broadleaf Commerce Hydrated Cache currently only supports entities with a single @Id annotation.");
}
Method[] singleMutators = mutators.values().iterator().next();
descriptor.setIdMutators(singleMutators);
String cacheRegion = scanner.getCacheRegion();
if (cacheRegion == null || "".equals(cacheRegion)) {
cacheRegion = topEntityClass.getName();
}
descriptor.setCacheRegion(cacheRegion);
hydrationDescriptors.put(entity.getClass().getName(), descriptor);
return descriptor;
}
protected Class<?> getTopEntityClass(Object entity) {
Class<?> myClass = entity.getClass();
Class<?> superClass = entity.getClass().getSuperclass();
while (superClass != null && superClass.getName().startsWith("org.broadleaf")) {
myClass = superClass;
superClass = superClass.getSuperclass();
}
return myClass;
}
@Override
public void dispose() {
if (LOG.isInfoEnabled()) {
LOG.info("Disposing of all hydrated cache members");
}
hydrationDescriptors.clear();
}
@Override
public Object clone() throws CloneNotSupportedException {
return this;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_cache_engine_AbstractHydratedCacheManager.java
|
3,741 |
public class WanReplicationServiceImpl
implements WanReplicationService {
private final Node node;
private final ILogger logger;
private final Map<String, WanReplicationPublisherDelegate> wanReplications = initializeWanReplicationPublisherMapping();
public WanReplicationServiceImpl(Node node) {
this.node = node;
this.logger = node.getLogger(WanReplicationServiceImpl.class.getName());
}
@Override
@SuppressWarnings("SynchronizeOnThis")
public WanReplicationPublisher getWanReplicationPublisher(String name) {
WanReplicationPublisherDelegate wr = wanReplications.get(name);
if (wr != null) {
return wr;
}
synchronized (this) {
wr = wanReplications.get(name);
if (wr != null) {
return wr;
}
WanReplicationConfig wanReplicationConfig = node.getConfig().getWanReplicationConfig(name);
if (wanReplicationConfig == null) {
return null;
}
List<WanTargetClusterConfig> targets = wanReplicationConfig.getTargetClusterConfigs();
WanReplicationEndpoint[] targetEndpoints = new WanReplicationEndpoint[targets.size()];
int count = 0;
for (WanTargetClusterConfig targetClusterConfig : targets) {
WanReplicationEndpoint target;
if (targetClusterConfig.getReplicationImpl() != null) {
try {
target = ClassLoaderUtil
.newInstance(node.getConfigClassLoader(), targetClusterConfig.getReplicationImpl());
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
} else {
target = new WanNoDelayReplication();
}
String groupName = targetClusterConfig.getGroupName();
String password = targetClusterConfig.getGroupPassword();
String[] addresses = new String[targetClusterConfig.getEndpoints().size()];
targetClusterConfig.getEndpoints().toArray(addresses);
target.init(node, groupName, password, addresses);
targetEndpoints[count++] = target;
}
wr = new WanReplicationPublisherDelegate(name, targetEndpoints);
wanReplications.put(name, wr);
return wr;
}
}
@Override
public void handleEvent(final Packet packet) {
// TODO: decide which thread this should execute on
node.nodeEngine.getExecutionService().execute("hz:wan", new Runnable() {
@Override
public void run() {
final Data data = packet.getData();
try {
WanReplicationEvent replicationEvent = (WanReplicationEvent) node.nodeEngine.toObject(data);
String serviceName = replicationEvent.getServiceName();
ReplicationSupportingService service = node.nodeEngine.getService(serviceName);
service.onReplicationEvent(replicationEvent);
} catch (Exception e) {
logger.severe(e);
}
}
});
}
@Override
public void shutdown() {
synchronized (this) {
for (WanReplicationPublisherDelegate wanReplication : wanReplications.values()) {
WanReplicationEndpoint[] wanReplicationEndpoints = wanReplication.getEndpoints();
if (wanReplicationEndpoints != null) {
for (WanReplicationEndpoint wanReplicationEndpoint : wanReplicationEndpoints) {
if (wanReplicationEndpoint != null) {
wanReplicationEndpoint.shutdown();
}
}
}
}
wanReplications.clear();
}
}
private ConcurrentHashMap<String, WanReplicationPublisherDelegate> initializeWanReplicationPublisherMapping() {
return new ConcurrentHashMap<String, WanReplicationPublisherDelegate>(2);
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_wan_impl_WanReplicationServiceImpl.java
|
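getWanReplicationPublisher() above uses the check-then-lock-then-recheck idiom: a lock-free fast-path lookup, then a second lookup under synchronized before constructing and caching the publisher, so concurrent callers never build it twice. A minimal generic sketch of that pattern (all names illustrative):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public final class LazyRegistry<K, V> {
    public interface Factory<A, B> { B create(A key); }

    private final Map<K, V> cache = new ConcurrentHashMap<K, V>();

    public V getOrCreate(K key, Factory<? super K, ? extends V> factory) {
        V value = cache.get(key);     // fast path, no lock
        if (value != null) {
            return value;
        }
        synchronized (this) {         // slow path
            value = cache.get(key);   // re-check under the lock
            if (value == null) {
                value = factory.create(key);
                cache.put(key, value);
            }
            return value;
        }
    }
}
```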
670 |
public class DeleteWarmerRequestBuilder extends AcknowledgedRequestBuilder<DeleteWarmerRequest, DeleteWarmerResponse, DeleteWarmerRequestBuilder> {
public DeleteWarmerRequestBuilder(IndicesAdminClient indicesClient) {
super((InternalIndicesAdminClient) indicesClient, new DeleteWarmerRequest());
}
public DeleteWarmerRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}
/**
* The name (or wildcard expression) of the index warmer to delete, or null
* to delete all warmers.
*/
public DeleteWarmerRequestBuilder setNames(String... names) {
request.names(names);
return this;
}
/**
* Specifies what types of requested indices to ignore and how to handle wildcard indices expressions.
*
* For example, indices that don't exist.
*/
public DeleteWarmerRequestBuilder setIndicesOptions(IndicesOptions options) {
request.indicesOptions(options);
return this;
}
@Override
protected void doExecute(ActionListener<DeleteWarmerResponse> listener) {
((IndicesAdminClient) client).deleteWarmer(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_warmer_delete_DeleteWarmerRequestBuilder.java
|
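A hedged usage sketch for the builder above, assuming the usual IndicesAdminClient.prepareDeleteWarmer() entry point (an assumption; the row shows only the builder itself):

```java
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
import org.elasticsearch.client.Client;

public class DeleteWarmerExample {
    public static void deleteWarmers(Client client) {
        DeleteWarmerResponse response = client.admin().indices()
                .prepareDeleteWarmer()
                .setIndices("logs-*")       // indices to target
                .setNames("full_text_*")    // warmer name wildcard; null would delete all
                .execute().actionGet();
        System.out.println("acknowledged=" + response.isAcknowledged());
    }
}
```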
1,464 |
public class IndexRoutingTable implements Iterable<IndexShardRoutingTable> {
private final String index;
// note, we assume that when the index routing is created, ShardRoutings are created for all possible number of
// shards with state set to UNASSIGNED
private final ImmutableOpenIntMap<IndexShardRoutingTable> shards;
private final ImmutableList<ShardRouting> allShards;
private final ImmutableList<ShardRouting> allActiveShards;
private final AtomicInteger counter = new AtomicInteger();
IndexRoutingTable(String index, ImmutableOpenIntMap<IndexShardRoutingTable> shards) {
this.index = index;
this.shards = shards;
ImmutableList.Builder<ShardRouting> allShards = ImmutableList.builder();
ImmutableList.Builder<ShardRouting> allActiveShards = ImmutableList.builder();
for (IntObjectCursor<IndexShardRoutingTable> cursor : shards) {
for (ShardRouting shardRouting : cursor.value) {
allShards.add(shardRouting);
if (shardRouting.active()) {
allActiveShards.add(shardRouting);
}
}
}
this.allShards = allShards.build();
this.allActiveShards = allActiveShards.build();
}
/**
* Return the index id
*
* @return id of the index
*/
public String index() {
return this.index;
}
/**
* Return the index id
*
* @return id of the index
*/
public String getIndex() {
return index();
}
/**
* creates a new {@link IndexRoutingTable} with all shard versions normalized
*
* @return new {@link IndexRoutingTable}
*/
public IndexRoutingTable normalizeVersions() {
IndexRoutingTable.Builder builder = new Builder(this.index);
for (IntObjectCursor<IndexShardRoutingTable> cursor : shards) {
builder.addIndexShard(cursor.value.normalizeVersions());
}
return builder.build();
}
public void validate(RoutingTableValidation validation, MetaData metaData) {
if (!metaData.hasIndex(index())) {
validation.addIndexFailure(index(), "Exists in routing does not exists in metadata");
return;
}
IndexMetaData indexMetaData = metaData.index(index());
for (String failure : validate(indexMetaData)) {
validation.addIndexFailure(index, failure);
}
}
/**
* validate based on a meta data, returning failures found
*/
public List<String> validate(IndexMetaData indexMetaData) {
ArrayList<String> failures = new ArrayList<String>();
// check the number of shards
if (indexMetaData.numberOfShards() != shards().size()) {
Set<Integer> expected = Sets.newHashSet();
for (int i = 0; i < indexMetaData.numberOfShards(); i++) {
expected.add(i);
}
for (IndexShardRoutingTable indexShardRoutingTable : this) {
expected.remove(indexShardRoutingTable.shardId().id());
}
failures.add("Wrong number of shards in routing table, missing: " + expected);
}
// check the replicas
for (IndexShardRoutingTable indexShardRoutingTable : this) {
int routingNumberOfReplicas = indexShardRoutingTable.size() - 1;
if (routingNumberOfReplicas != indexMetaData.numberOfReplicas()) {
failures.add("Shard [" + indexShardRoutingTable.shardId().id()
+ "] routing table has wrong number of replicas, expected [" + indexMetaData.numberOfReplicas() + "], got [" + routingNumberOfReplicas + "]");
}
for (ShardRouting shardRouting : indexShardRoutingTable) {
if (!shardRouting.index().equals(index())) {
failures.add("shard routing has an index [" + shardRouting.index() + "] that is different than the routing table");
}
}
}
return failures;
}
@Override
public UnmodifiableIterator<IndexShardRoutingTable> iterator() {
return shards.valuesIt();
}
/**
* Calculates the number of nodes that hold one or more shards of this index
* {@link IndexRoutingTable} excluding the nodes with the node ids give as
* the <code>excludedNodes</code> parameter.
*
* @param excludedNodes id of nodes that will be excluded
* @return number of distinct nodes this index has at least one shard allocated on
*/
public int numberOfNodesShardsAreAllocatedOn(String... excludedNodes) {
Set<String> nodes = Sets.newHashSet();
for (IndexShardRoutingTable shardRoutingTable : this) {
for (ShardRouting shardRouting : shardRoutingTable) {
if (shardRouting.assignedToNode()) {
String currentNodeId = shardRouting.currentNodeId();
boolean excluded = false;
if (excludedNodes != null) {
for (String excludedNode : excludedNodes) {
if (currentNodeId.equals(excludedNode)) {
excluded = true;
break;
}
}
}
if (!excluded) {
nodes.add(currentNodeId);
}
}
}
}
return nodes.size();
}
public ImmutableOpenIntMap<IndexShardRoutingTable> shards() {
return shards;
}
public ImmutableOpenIntMap<IndexShardRoutingTable> getShards() {
return shards();
}
public IndexShardRoutingTable shard(int shardId) {
return shards.get(shardId);
}
/**
* Returns <code>true</code> if all shards are primary and active. Otherwise <code>false</code>.
*/
public boolean allPrimaryShardsActive() {
return primaryShardsActive() == shards().size();
}
/**
* Calculates the number of primary shards in active state in routing table
*
* @return number of active primary shards
*/
public int primaryShardsActive() {
int counter = 0;
for (IndexShardRoutingTable shardRoutingTable : this) {
if (shardRoutingTable.primaryShard().active()) {
counter++;
}
}
return counter;
}
/**
* Returns <code>true</code> if all primary shards are in
* {@link ShardRoutingState#UNASSIGNED} state. Otherwise <code>false</code>.
*/
public boolean allPrimaryShardsUnassigned() {
return primaryShardsUnassigned() == shards.size();
}
/**
* Calculates the number of primary shards in the routing table the are in
* {@link ShardRoutingState#UNASSIGNED} state.
*/
public int primaryShardsUnassigned() {
int counter = 0;
for (IndexShardRoutingTable shardRoutingTable : this) {
if (shardRoutingTable.primaryShard().unassigned()) {
counter++;
}
}
return counter;
}
/**
* Returns a {@link List} of shards that match one of the states listed in {@link ShardRoutingState states}
*
* @param states a set of {@link ShardRoutingState states}
* @return a {@link List} of shards that match one of the given {@link ShardRoutingState states}
*/
public List<ShardRouting> shardsWithState(ShardRoutingState... states) {
List<ShardRouting> shards = newArrayList();
for (IndexShardRoutingTable shardRoutingTable : this) {
shards.addAll(shardRoutingTable.shardsWithState(states));
}
return shards;
}
/**
* Returns an unordered iterator over all shards (including replicas).
*/
public ShardsIterator randomAllShardsIt() {
return new PlainShardsIterator(allShards, counter.incrementAndGet());
}
/**
* Returns an unordered iterator over all active shards (including replicas).
*/
public ShardsIterator randomAllActiveShardsIt() {
return new PlainShardsIterator(allActiveShards, counter.incrementAndGet());
}
/**
* A group shards iterator where each group ({@link ShardIterator})
* is an iterator across a shard replication group.
*/
public GroupShardsIterator groupByShardsIt() {
// use list here since we need to maintain identity across shards
ArrayList<ShardIterator> set = new ArrayList<ShardIterator>(shards.size());
for (IndexShardRoutingTable indexShard : this) {
set.add(indexShard.shardsIt());
}
return new GroupShardsIterator(set);
}
/**
* A group shards iterator where each group is a single {@link ShardRouting} and a group
* is created for each shard routing.
* <p/>
* <p>This basically means that components that use the {@link GroupShardsIterator} will iterate
* over *all* the shards (all the replicas) within the index.</p>
*/
public GroupShardsIterator groupByAllIt() {
// use list here since we need to maintain identity across shards
ArrayList<ShardIterator> set = new ArrayList<ShardIterator>();
for (IndexShardRoutingTable indexShard : this) {
for (ShardRouting shardRouting : indexShard) {
set.add(shardRouting.shardsIt());
}
}
return new GroupShardsIterator(set);
}
public void validate() throws RoutingValidationException {
}
public static Builder builder(String index) {
return new Builder(index);
}
public static class Builder {
private final String index;
private final ImmutableOpenIntMap.Builder<IndexShardRoutingTable> shards = ImmutableOpenIntMap.builder();
public Builder(String index) {
this.index = index;
}
/**
* Reads an {@link IndexRoutingTable} from a {@link StreamInput}
*
* @param in {@link StreamInput} to read the {@link IndexRoutingTable} from
* @return {@link IndexRoutingTable} read
* @throws IOException if an error occurs while reading from the stream
*/
public static IndexRoutingTable readFrom(StreamInput in) throws IOException {
String index = in.readString();
Builder builder = new Builder(index);
int size = in.readVInt();
for (int i = 0; i < size; i++) {
builder.addIndexShard(IndexShardRoutingTable.Builder.readFromThin(in, index));
}
return builder.build();
}
/**
* Writes an {@link IndexRoutingTable} to a {@link StreamOutput}.
*
* @param index {@link IndexRoutingTable} to write
* @param out {@link StreamOutput} to write to
* @throws IOException if an error occurs while writing to the stream
*/
public static void writeTo(IndexRoutingTable index, StreamOutput out) throws IOException {
out.writeString(index.index());
out.writeVInt(index.shards.size());
for (IndexShardRoutingTable indexShard : index) {
IndexShardRoutingTable.Builder.writeToThin(indexShard, out);
}
}
/**
* Initializes a new empty index, as if it was created from an API.
*/
public Builder initializeAsNew(IndexMetaData indexMetaData) {
return initializeEmpty(indexMetaData, true);
}
/**
* Initializes a new empty index, as if it was recovered (rather than created via an API).
*/
public Builder initializeAsRecovery(IndexMetaData indexMetaData) {
return initializeEmpty(indexMetaData, false);
}
/**
* Initializes a new empty index, to be restored from a snapshot
*/
public Builder initializeAsNewRestore(IndexMetaData indexMetaData, RestoreSource restoreSource) {
return initializeAsRestore(indexMetaData, restoreSource, true);
}
/**
* Initializes an existing index, to be restored from a snapshot
*/
public Builder initializeAsRestore(IndexMetaData indexMetaData, RestoreSource restoreSource) {
return initializeAsRestore(indexMetaData, restoreSource, false);
}
/**
* Initializes an index, to be restored from a snapshot
*/
private Builder initializeAsRestore(IndexMetaData indexMetaData, RestoreSource restoreSource, boolean asNew) {
if (!shards.isEmpty()) {
throw new ElasticsearchIllegalStateException("trying to initialize an index with fresh shards, but already has shards created");
}
for (int shardId = 0; shardId < indexMetaData.numberOfShards(); shardId++) {
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(indexMetaData.index(), shardId), !asNew);
for (int i = 0; i <= indexMetaData.numberOfReplicas(); i++) {
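// i == 0 is the primary; only the primary carries the restore source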
indexShardRoutingBuilder.addShard(new ImmutableShardRouting(index, shardId, null, null, i == 0 ? restoreSource : null, i == 0, ShardRoutingState.UNASSIGNED, 0));
}
shards.put(shardId, indexShardRoutingBuilder.build());
}
return this;
}
/**
* Initializes a new empty index, with an option to control if its from an API or not.
*/
private Builder initializeEmpty(IndexMetaData indexMetaData, boolean asNew) {
if (!shards.isEmpty()) {
throw new ElasticsearchIllegalStateException("trying to initialize an index with fresh shards, but already has shards created");
}
for (int shardId = 0; shardId < indexMetaData.numberOfShards(); shardId++) {
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(indexMetaData.index(), shardId), !asNew);
for (int i = 0; i <= indexMetaData.numberOfReplicas(); i++) {
indexShardRoutingBuilder.addShard(new ImmutableShardRouting(index, shardId, null, i == 0, ShardRoutingState.UNASSIGNED, 0));
}
shards.put(shardId, indexShardRoutingBuilder.build());
}
return this;
}
public Builder addReplica() {
for (IntCursor cursor : shards.keys()) {
int shardId = cursor.value;
// version 0, will get updated when reroute will happen
ImmutableShardRouting shard = new ImmutableShardRouting(index, shardId, null, false, ShardRoutingState.UNASSIGNED, 0);
shards.put(shardId,
new IndexShardRoutingTable.Builder(shards.get(shard.id())).addShard(shard).build()
);
}
return this;
}
public Builder removeReplica() {
for (IntCursor cursor : shards.keys()) {
int shardId = cursor.value;
IndexShardRoutingTable indexShard = shards.get(shardId);
if (indexShard.replicaShards().isEmpty()) {
// nothing to do here!
return this;
}
// re-add all the current ones
IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(indexShard.shardId(), indexShard.primaryAllocatedPostApi());
for (ShardRouting shardRouting : indexShard) {
builder.addShard(new ImmutableShardRouting(shardRouting));
}
// first check if there is one that is not assigned to a node, and remove it
boolean removed = false;
for (ShardRouting shardRouting : indexShard) {
if (!shardRouting.primary() && !shardRouting.assignedToNode()) {
builder.removeShard(shardRouting);
removed = true;
break;
}
}
if (!removed) {
for (ShardRouting shardRouting : indexShard) {
if (!shardRouting.primary()) {
builder.removeShard(shardRouting);
removed = true;
break;
}
}
}
shards.put(shardId, builder.build());
}
return this;
}
public Builder addIndexShard(IndexShardRoutingTable indexShard) {
shards.put(indexShard.shardId().id(), indexShard);
return this;
}
/**
* Clears the post allocation flag for the specified shard
*/
public Builder clearPostAllocationFlag(ShardId shardId) {
assert this.index.equals(shardId.index().name());
IndexShardRoutingTable indexShard = shards.get(shardId.id());
shards.put(indexShard.shardId().id(), new IndexShardRoutingTable(indexShard.shardId(), indexShard.shards(), false));
return this;
}
/**
* Adds a new shard routing (makes a copy of it), with reference data used from the index shard routing table
* if it needs to be created.
*/
public Builder addShard(IndexShardRoutingTable refData, ShardRouting shard) {
IndexShardRoutingTable indexShard = shards.get(shard.id());
if (indexShard == null) {
indexShard = new IndexShardRoutingTable.Builder(refData.shardId(), refData.primaryAllocatedPostApi()).addShard(new ImmutableShardRouting(shard)).build();
} else {
indexShard = new IndexShardRoutingTable.Builder(indexShard).addShard(new ImmutableShardRouting(shard)).build();
}
shards.put(indexShard.shardId().id(), indexShard);
return this;
}
public IndexRoutingTable build() throws RoutingValidationException {
IndexRoutingTable indexRoutingTable = new IndexRoutingTable(index, shards.build());
indexRoutingTable.validate();
return indexRoutingTable;
}
}
public String prettyPrint() {
StringBuilder sb = new StringBuilder("-- index [" + index + "]\n");
for (IndexShardRoutingTable indexShard : this) {
sb.append("----shard_id [").append(indexShard.shardId().index().name()).append("][").append(indexShard.shardId().id()).append("]\n");
for (ShardRouting shard : indexShard) {
sb.append("--------").append(shard.shortSummary()).append("\n");
}
}
return sb.toString();
}
}
| 1no label
|
src_main_java_org_elasticsearch_cluster_routing_IndexRoutingTable.java
|
355 |
future.andThen(new ExecutionCallback<Map<String, List<Integer>>>() {
@Override
public void onResponse(Map<String, List<Integer>> response) {
listenerResults.putAll(response);
semaphore.release();
}
@Override
public void onFailure(Throwable t) {
semaphore.release();
}
});
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_mapreduce_ClientMapReduceTest.java
|
181 |
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
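// scan Unsafe's static fields for the singleton instance instead of hard-coding the field name "theUnsafe"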
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
| 0true
|
src_main_java_jsr166y_LinkedTransferQueue.java
|
711 |
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_PRODUCT_OPTION")
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region = "blStandardElements")
@AdminPresentationClass(friendlyName = "ProductOptionImpl_baseProductOption", populateToOneFields=PopulateToOneFieldsEnum.TRUE)
public class ProductOptionImpl implements ProductOption, AdminMainEntity {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator= "ProductOptionId")
@GenericGenerator(
name="ProductOptionId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="ProductOptionImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.core.catalog.domain.ProductOptionImpl")
}
)
@Column(name = "PRODUCT_OPTION_ID")
protected Long id;
@Column(name = "OPTION_TYPE")
@AdminPresentation(friendlyName = "productOption_Type", fieldType = SupportedFieldType.BROADLEAF_ENUMERATION, broadleafEnumeration = "org.broadleafcommerce.core.catalog.service.type.ProductOptionType")
protected String type;
@Column(name = "ATTRIBUTE_NAME")
@AdminPresentation(friendlyName = "productOption_name", helpText = "productOption_nameHelp")
protected String attributeName;
@Column(name = "LABEL")
@AdminPresentation(friendlyName = "productOption_Label", helpText = "productOption_labelHelp",
prominent = true,
translatable = true)
protected String label;
@Column(name = "REQUIRED")
@AdminPresentation(friendlyName = "productOption_Required")
protected Boolean required;
@Column(name = "USE_IN_SKU_GENERATION")
@AdminPresentation(friendlyName = "productOption_UseInSKUGeneration")
private Boolean useInSkuGeneration;
@Column(name = "DISPLAY_ORDER")
@AdminPresentation(friendlyName = "productOption_displayOrder")
protected Integer displayOrder;
@Column(name = "VALIDATION_TYPE")
@AdminPresentation(friendlyName = "productOption_validationType", group = "productOption_validation", fieldType = SupportedFieldType.BROADLEAF_ENUMERATION, broadleafEnumeration = "org.broadleafcommerce.core.catalog.service.type.ProductOptionValidationType")
private String productOptionValidationType;
@Column(name = "VALIDATION_STRING")
@AdminPresentation(friendlyName = "productOption_validationSring", group = "productOption_validation")
protected String validationString;
@Column(name = "ERROR_CODE")
@AdminPresentation(friendlyName = "productOption_errorCode", group = "productOption_validation")
protected String errorCode;
@Column(name = "ERROR_MESSAGE")
@AdminPresentation(friendlyName = "productOption_errorMessage", group = "productOption_validation")
protected String errorMessage;
@OneToMany(mappedBy = "productOption", targetEntity = ProductOptionValueImpl.class, cascade = {CascadeType.ALL})
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@OrderBy(value = "displayOrder")
@AdminPresentationCollection(addType = AddMethodType.PERSIST, friendlyName = "ProductOptionImpl_Allowed_Values")
protected List<ProductOptionValue> allowedValues = new ArrayList<ProductOptionValue>();
@ManyToMany(fetch = FetchType.LAZY, targetEntity = ProductImpl.class)
@JoinTable(name = "BLC_PRODUCT_OPTION_XREF", joinColumns = @JoinColumn(name = "PRODUCT_OPTION_ID", referencedColumnName = "PRODUCT_OPTION_ID"), inverseJoinColumns = @JoinColumn(name = "PRODUCT_ID", referencedColumnName = "PRODUCT_ID"))
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@BatchSize(size = 50)
protected List<Product> products = new ArrayList<Product>();
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public ProductOptionType getType() {
return ProductOptionType.getInstance(type);
}
@Override
public void setType(ProductOptionType type) {
this.type = type == null ? null : type.getType();
}
@Override
public String getAttributeName() {
return attributeName;
}
@Override
public void setAttributeName(String attributeName) {
this.attributeName = attributeName;
}
@Override
public String getLabel() {
return DynamicTranslationProvider.getValue(this, "label", label);
}
@Override
public void setLabel(String label) {
this.label = label;
}
@Override
public Boolean getRequired() {
return required;
}
@Override
public void setRequired(Boolean required) {
this.required = required;
}
@Override
public Integer getDisplayOrder() {
return displayOrder;
}
@Override
public void setDisplayOrder(Integer displayOrder) {
this.displayOrder = displayOrder;
}
@Override
public List<Product> getProducts() {
return products;
}
@Override
public void setProducts(List<Product> products){
this.products = products;
}
@Override
public List<ProductOptionValue> getAllowedValues() {
return allowedValues;
}
@Override
public void setAllowedValues(List<ProductOptionValue> allowedValues) {
this.allowedValues = allowedValues;
}
@Override
public Boolean getUseInSkuGeneration() {
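// defaults to true when the flag has never been set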
return (useInSkuGeneration == null) ? true : useInSkuGeneration;
}
@Override
public void setUseInSkuGeneration(Boolean useInSkuGeneration) {
this.useInSkuGeneration = useInSkuGeneration;
}
@Override
public ProductOptionValidationType getProductOptionValidationType() {
return ProductOptionValidationType.getInstance(productOptionValidationType);
}
@Override
public void setProductOptionValidationType(ProductOptionValidationType productOptionValidationType) {
this.productOptionValidationType = productOptionValidationType == null ? null : productOptionValidationType.getType();
}
@Override
public String getValidationString() {
return validationString;
}
@Override
public void setValidationString(String validationString) {
this.validationString = validationString;
}
@Override
public String getErrorCode() {
return errorCode;
}
@Override
public void setErrorCode(String errorCode) {
this.errorCode = errorCode;
}
@Override
public String getErrorMessage() {
return errorMessage;
}
@Override
public void setErrorMessage(String errorMessage) {
this.errorMessage = errorMessage;
}
@Override
public String getMainEntityName() {
return getLabel();
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_catalog_domain_ProductOptionImpl.java
|
645 |
public class DeleteIndexTemplateRequestBuilder extends MasterNodeOperationRequestBuilder<DeleteIndexTemplateRequest, DeleteIndexTemplateResponse, DeleteIndexTemplateRequestBuilder> {
public DeleteIndexTemplateRequestBuilder(IndicesAdminClient indicesClient) {
super((InternalIndicesAdminClient) indicesClient, new DeleteIndexTemplateRequest());
}
public DeleteIndexTemplateRequestBuilder(IndicesAdminClient indicesClient, String name) {
super((InternalIndicesAdminClient) indicesClient, new DeleteIndexTemplateRequest(name));
}
@Override
protected void doExecute(ActionListener<DeleteIndexTemplateResponse> listener) {
((IndicesAdminClient) client).deleteTemplate(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_template_delete_DeleteIndexTemplateRequestBuilder.java
|
3,550 |
public class TopicService implements ManagedService, RemoteService, EventPublishingService {
public static final String SERVICE_NAME = "hz:impl:topicService";
public static final int ORDERING_LOCKS_LENGTH = 1000;
final ConcurrentMap<String, LocalTopicStatsImpl> statsMap = new ConcurrentHashMap<String, LocalTopicStatsImpl>();
private final Lock[] orderingLocks = new Lock[ORDERING_LOCKS_LENGTH];
private NodeEngine nodeEngine;
private final ConstructorFunction<String, LocalTopicStatsImpl> localTopicStatsConstructorFunction =
new ConstructorFunction<String, LocalTopicStatsImpl>() {
public LocalTopicStatsImpl createNew(String mapName) {
return new LocalTopicStatsImpl();
}
};
private EventService eventService;
private ILogger logger;
@Override
public void init(NodeEngine nodeEngine, Properties properties) {
this.nodeEngine = nodeEngine;
for (int i = 0; i < orderingLocks.length; i++) {
orderingLocks[i] = new ReentrantLock();
}
eventService = nodeEngine.getEventService();
this.logger = nodeEngine.getLogger(TopicService.class);
}
@Override
public void reset() {
statsMap.clear();
}
@Override
public void shutdown(boolean terminate) {
reset();
}
public Lock getOrderLock(String key) {
int index = getOrderLockIndex(key);
return orderingLocks[index];
}
private int getOrderLockIndex(String key) {
int hash = key.hashCode();
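// Math.abs(Integer.MIN_VALUE) overflows and stays negative, so map that hash to bucket 0 explicitly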
if (hash == Integer.MIN_VALUE) {
return 0;
} else {
return Math.abs(hash) % orderingLocks.length;
}
}
@Override
public TopicProxy createDistributedObject(String name) {
if (isGlobalOrderingEnabled(name)) {
return new TotalOrderedTopicProxy(name, nodeEngine, this);
} else {
return new TopicProxy(name, nodeEngine, this);
}
}
private boolean isGlobalOrderingEnabled(String name) {
TopicConfig topicConfig = nodeEngine.getConfig().findTopicConfig(name);
return topicConfig.isGlobalOrderingEnabled();
}
@Override
public void destroyDistributedObject(String objectId) {
statsMap.remove(objectId);
}
@Override
public void dispatchEvent(Object event, Object listener) {
TopicEvent topicEvent = (TopicEvent) event;
Object msgObject = nodeEngine.toObject(topicEvent.data);
ClusterService clusterService = nodeEngine.getClusterService();
MemberImpl member = clusterService.getMember(topicEvent.publisherAddress);
if (member == null) {
if (logger.isLoggable(Level.INFO)) {
logger.info("Dropping message " + msgObject + " from unknown address:" + topicEvent.publisherAddress);
}
return;
}
Message message = new Message(topicEvent.name, msgObject, topicEvent.publishTime, member);
incrementReceivedMessages(topicEvent.name);
MessageListener messageListener = (MessageListener) listener;
messageListener.onMessage(message);
}
public LocalTopicStatsImpl getLocalTopicStats(String name) {
return getOrPutSynchronized(statsMap, name, statsMap, localTopicStatsConstructorFunction);
}
public void incrementPublishes(String topicName) {
getLocalTopicStats(topicName).incrementPublishes();
}
public void incrementReceivedMessages(String topicName) {
getLocalTopicStats(topicName).incrementReceives();
}
public void publishEvent(String name, TopicEvent event) {
Collection<EventRegistration> registrations = eventService.getRegistrations(TopicService.SERVICE_NAME, name);
eventService.publishEvent(TopicService.SERVICE_NAME, registrations, event, name.hashCode());
}
public String addMessageListener(String name, MessageListener listener) {
EventRegistration eventRegistration = eventService.registerListener(TopicService.SERVICE_NAME, name, listener);
return eventRegistration.getId();
}
public boolean removeMessageListener(String name, String registrationId) {
return eventService.deregisterListener(TopicService.SERVICE_NAME, name, registrationId);
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_topic_TopicService.java
|
274 |
public final class ExceptionsHelper {
private static final ESLogger logger = Loggers.getLogger(ExceptionsHelper.class);
public static RuntimeException convertToRuntime(Throwable t) {
if (t instanceof RuntimeException) {
return (RuntimeException) t;
}
return new ElasticsearchException(t.getMessage(), t);
}
public static ElasticsearchException convertToElastic(Throwable t) {
if (t instanceof ElasticsearchException) {
return (ElasticsearchException) t;
}
return new ElasticsearchException(t.getMessage(), t);
}
public static RestStatus status(Throwable t) {
if (t instanceof ElasticsearchException) {
return ((ElasticsearchException) t).status();
}
return RestStatus.INTERNAL_SERVER_ERROR;
}
public static Throwable unwrapCause(Throwable t) {
int counter = 0;
Throwable result = t;
while (result instanceof ElasticsearchWrapperException) {
if (result.getCause() == null) {
return result;
}
if (result.getCause() == result) {
return result;
}
if (counter++ > 10) {
// dear god, if we got more than 10 levels down, WTF? just bail
logger.warn("Exception cause unwrapping ran for 10 levels...", t);
return result;
}
result = result.getCause();
}
return result;
}
public static String detailedMessage(Throwable t) {
return detailedMessage(t, false, 0);
}
public static String detailedMessage(Throwable t, boolean newLines, int initialCounter) {
if (t == null) {
return "Unknown";
}
int counter = initialCounter + 1;
if (t.getCause() != null) {
StringBuilder sb = new StringBuilder();
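// walk the cause chain, rendering each level as ClassName[message]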
while (t != null) {
sb.append(t.getClass().getSimpleName());
if (t.getMessage() != null) {
sb.append("[");
sb.append(t.getMessage());
sb.append("]");
}
if (!newLines) {
sb.append("; ");
}
t = t.getCause();
if (t != null) {
if (newLines) {
sb.append("\n");
for (int i = 0; i < counter; i++) {
sb.append("\t");
}
} else {
sb.append("nested: ");
}
}
counter++;
}
return sb.toString();
} else {
return t.getClass().getSimpleName() + "[" + t.getMessage() + "]";
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_ExceptionsHelper.java
|
3,425 |
public static class PostJoinProxyOperation
extends AbstractOperation {
private Collection<ProxyInfo> proxies;
public PostJoinProxyOperation() {
}
public PostJoinProxyOperation(Collection<ProxyInfo> proxies) {
this.proxies = proxies;
}
@Override
public void run()
throws Exception {
if (proxies != null && proxies.size() > 0) {
NodeEngine nodeEngine = getNodeEngine();
ProxyServiceImpl proxyService = getService();
for (ProxyInfo proxy : proxies) {
final ProxyRegistry registry = getOrPutIfAbsent(proxyService.registries, proxy.serviceName,
proxyService.registryConstructor);
DistributedObjectFuture future = registry.createProxy(proxy.objectName, false, false);
if (future != null) {
final DistributedObject object = future.get();
if (object instanceof InitializingObject) {
nodeEngine.getExecutionService().execute(ExecutionService.SYSTEM_EXECUTOR, new Runnable() {
public void run() {
try {
((InitializingObject) object).initialize();
} catch (Exception e) {
getLogger().warning("Error while initializing proxy: " + object, e);
}
}
});
}
}
}
}
}
@Override
public String getServiceName() {
return ProxyServiceImpl.SERVICE_NAME;
}
@Override
public boolean returnsResponse() {
return false;
}
@Override
protected void writeInternal(ObjectDataOutput out)
throws IOException {
super.writeInternal(out);
int len = proxies != null ? proxies.size() : 0;
out.writeInt(len);
if (len > 0) {
for (ProxyInfo proxy : proxies) {
out.writeUTF(proxy.serviceName);
out.writeObject(proxy.objectName); // writing as object for backward-compatibility
}
}
}
@Override
protected void readInternal(ObjectDataInput in)
throws IOException {
super.readInternal(in);
int len = in.readInt();
if (len > 0) {
proxies = new ArrayList<ProxyInfo>(len);
for (int i = 0; i < len; i++) {
ProxyInfo proxy = new ProxyInfo(in.readUTF(), (String) in.readObject());
proxies.add(proxy);
}
}
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_spi_impl_ProxyServiceImpl.java
|
233 |
public abstract class OAbstractMapCache<T extends Map<ORID, ORecordInternal<?>>> implements OCache {
protected final OSharedResourceAdaptiveExternal lock = new OSharedResourceAdaptiveExternal(
OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean(), 0,
true);
protected final T cache;
private final AtomicBoolean enabled = new AtomicBoolean(false);
public OAbstractMapCache(T cache) {
this.cache = cache;
}
@Override
public void startup() {
enable();
}
@Override
public void shutdown() {
disable();
}
@Override
public boolean isEnabled() {
return enabled.get();
}
@Override
public boolean enable() {
return enabled.compareAndSet(false, true);
}
@Override
public boolean disable() {
clear();
return enabled.compareAndSet(true, false);
}
@Override
public ORecordInternal<?> get(final ORID id) {
if (!isEnabled())
return null;
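// note: reads also take the exclusive lock, since the backing map may not be thread-safe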
lock.acquireExclusiveLock();
try {
return cache.get(id);
} finally {
lock.releaseExclusiveLock();
}
}
@Override
public ORecordInternal<?> put(final ORecordInternal<?> record) {
if (!isEnabled())
return null;
lock.acquireExclusiveLock();
try {
return cache.put(record.getIdentity(), record);
} finally {
lock.releaseExclusiveLock();
}
}
@Override
public ORecordInternal<?> remove(final ORID id) {
if (!isEnabled())
return null;
lock.acquireExclusiveLock();
try {
return cache.remove(id);
} finally {
lock.releaseExclusiveLock();
}
}
@Override
public void clear() {
if (!isEnabled())
return;
lock.acquireExclusiveLock();
try {
cache.clear();
} finally {
lock.releaseExclusiveLock();
}
}
@Override
public int size() {
lock.acquireSharedLock();
try {
return cache.size();
} finally {
lock.releaseSharedLock();
}
}
@Override
public Collection<ORID> keys() {
lock.acquireExclusiveLock();
try {
return new ArrayList<ORID>(cache.keySet());
} finally {
lock.releaseExclusiveLock();
}
}
@Override
public void lock(final ORID id) {
lock.acquireExclusiveLock();
}
@Override
public void unlock(final ORID id) {
lock.releaseExclusiveLock();
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_cache_OAbstractMapCache.java
|
2,053 |
public class GetEntryViewOperation extends KeyBasedMapOperation {
private EntryView<Data, Data> result;
public GetEntryViewOperation(String name, Data dataKey) {
super(name, dataKey);
}
public GetEntryViewOperation() {
}
public void run() {
MapService mapService = getService();
RecordStore recordStore = mapService.getRecordStore(getPartitionId(), name);
Record record = recordStore.getRecord(dataKey);
if (record != null) {
result = mapService.createSimpleEntryView(record.getKey(), mapService.toData(record.getValue()), record);
}
}
@Override
public Object getResponse() {
return result;
}
@Override
public String toString() {
return "GetEntryViewOperation{" +
'}';
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_operation_GetEntryViewOperation.java
|
5,807 |
private static class CustomWeightedSpanTermExtractor extends WeightedSpanTermExtractor {
public CustomWeightedSpanTermExtractor() {
super();
}
public CustomWeightedSpanTermExtractor(String defaultField) {
super(defaultField);
}
@Override
protected void extractUnknownQuery(Query query,
Map<String, WeightedSpanTerm> terms) throws IOException {
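// unwrap score/filter wrapper queries so the highlighter can extract terms from the inner query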
if (query instanceof FunctionScoreQuery) {
query = ((FunctionScoreQuery) query).getSubQuery();
extract(query, terms);
} else if (query instanceof FiltersFunctionScoreQuery) {
query = ((FiltersFunctionScoreQuery) query).getSubQuery();
extract(query, terms);
} else if (query instanceof XFilteredQuery) {
query = ((XFilteredQuery) query).getQuery();
extract(query, terms);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_highlight_CustomQueryScorer.java
|
1,686 |
public abstract class OAbstractCommandResultListener implements OCommandResultListener {
private final Set<ODocument> fetchedRecordsToSend = new HashSet<ODocument>();
private Map<String, Integer> fetchPlan;
public abstract boolean isEmpty();
protected void fetchRecord(final Object iRecord) {
if (fetchPlan != null && iRecord instanceof ORecordInternal<?>) {
final ORecordInternal<?> record = (ORecordInternal<?>) iRecord;
final OFetchListener listener = new ORemoteFetchListener(fetchedRecordsToSend);
final OFetchContext context = new ORemoteFetchContext();
OFetchHelper.fetch(record, record, fetchPlan, listener, context, "");
}
}
@Override
public void end() {
}
public void setFetchPlan(final String iText) {
fetchPlan = iText != null ? OFetchHelper.buildFetchPlan(iText) : null;
}
public Set<ODocument> getFetchedRecordsToSend() {
return fetchedRecordsToSend;
}
}
| 1no label
|
server_src_main_java_com_orientechnologies_orient_server_network_protocol_binary_OAbstractCommandResultListener.java
|
487 |
public class ClearIndicesCacheAction extends IndicesAction<ClearIndicesCacheRequest, ClearIndicesCacheResponse, ClearIndicesCacheRequestBuilder> {
public static final ClearIndicesCacheAction INSTANCE = new ClearIndicesCacheAction();
public static final String NAME = "indices/cache/clear";
private ClearIndicesCacheAction() {
super(NAME);
}
@Override
public ClearIndicesCacheResponse newResponse() {
return new ClearIndicesCacheResponse();
}
@Override
public ClearIndicesCacheRequestBuilder newRequestBuilder(IndicesAdminClient client) {
return new ClearIndicesCacheRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_cache_clear_ClearIndicesCacheAction.java
|
936 |
public class OfferRuleType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, OfferRuleType> TYPES = new LinkedHashMap<String, OfferRuleType>();
public static final OfferRuleType ORDER = new OfferRuleType("ORDER", "Order");
public static final OfferRuleType FULFILLMENT_GROUP = new OfferRuleType("FULFILLMENT_GROUP", "Fulfillment Group");
public static final OfferRuleType CUSTOMER = new OfferRuleType("CUSTOMER", "Customer");
public static final OfferRuleType TIME = new OfferRuleType("TIME", "Time");
public static final OfferRuleType REQUEST = new OfferRuleType("REQUEST", "Request");
public static OfferRuleType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public OfferRuleType() {
//do nothing
}
public OfferRuleType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public void setType(final String type) {
this.type = type;
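// self-register so getInstance(type) can resolve this enumeration value later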
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
OfferRuleType other = (OfferRuleType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_service_type_OfferRuleType.java
|
2,024 |
@Service("blUserDetailsService")
public class UserDetailsServiceImpl implements UserDetailsService {
@Resource(name = "blCustomerService")
protected CustomerService customerService;
@Resource(name = "blRoleService")
protected RoleService roleService;
@Override
public UserDetails loadUserByUsername(String username) throws UsernameNotFoundException, DataAccessException {
Customer customer = customerService.readCustomerByUsername(username);
if (customer == null) {
throw new UsernameNotFoundException("The customer was not found");
}
List<GrantedAuthority> grantedAuthorities = createGrantedAuthorities(roleService.findCustomerRolesByCustomerId(customer.getId()));
return new CustomerUserDetails(customer.getId(), username, customer.getPassword(), !customer.isDeactivated(), true, !customer.isPasswordChangeRequired(), true, grantedAuthorities);
}
protected List<GrantedAuthority> createGrantedAuthorities(List<CustomerRole> customerRoles) {
boolean roleUserFound = false;
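// ensure every customer is granted at least ROLE_USER, even if no explicit role maps to it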
List<GrantedAuthority> grantedAuthorities = new ArrayList<GrantedAuthority>();
for (CustomerRole role : customerRoles) {
grantedAuthorities.add(new SimpleGrantedAuthority(role.getRoleName()));
if (role.getRoleName().equals("ROLE_USER")) {
roleUserFound = true;
}
}
if (!roleUserFound) {
grantedAuthorities.add(new SimpleGrantedAuthority("ROLE_USER"));
}
return grantedAuthorities;
}
}
| 1no label
|
core_broadleaf-profile_src_main_java_org_broadleafcommerce_profile_core_service_UserDetailsServiceImpl.java
|
176 |
public class PageHandlerMapping extends BLCAbstractHandlerMapping {
private String controllerName="blPageController";
public static final String BLC_RULE_MAP_PARAM = "blRuleMap";
// The following attribute is set in BroadleafProcessURLFilter
public static final String REQUEST_DTO = "blRequestDTO";
@Resource(name = "blPageService")
private PageService pageService;
public static final String PAGE_ATTRIBUTE_NAME = "BLC_PAGE";
@Override
protected Object getHandlerInternal(HttpServletRequest request) throws Exception {
BroadleafRequestContext context = BroadleafRequestContext.getBroadleafRequestContext();
if (context != null && context.getRequestURIWithoutContext() != null) {
PageDTO page = pageService.findPageByURI(context.getSandbox(), context.getLocale(), context.getRequestURIWithoutContext(), buildMvelParameters(request), context.isSecure());
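// NullPageDTO is a sentinel for "no page at this URI"; only real pages map to the controller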
if (page != null && ! (page instanceof NullPageDTO)) {
context.getRequest().setAttribute(PAGE_ATTRIBUTE_NAME, page);
return controllerName;
}
}
return null;
}
/**
* MVEL is used to process the content targeting rules.
*
* @param request the current HTTP request
* @return a map of parameters for MVEL rule evaluation
*/
private Map<String,Object> buildMvelParameters(HttpServletRequest request) {
TimeDTO timeDto = new TimeDTO(SystemTime.asCalendar());
RequestDTO requestDto = (RequestDTO) request.getAttribute(REQUEST_DTO);
Map<String, Object> mvelParameters = new HashMap<String, Object>();
mvelParameters.put("time", timeDto);
mvelParameters.put("request", requestDto);
Map<String,Object> blcRuleMap = (Map<String,Object>) request.getAttribute(BLC_RULE_MAP_PARAM);
if (blcRuleMap != null) {
for (String mapKey : blcRuleMap.keySet()) {
mvelParameters.put(mapKey, blcRuleMap.get(mapKey));
}
}
return mvelParameters;
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_web_PageHandlerMapping.java
|
840 |
private class Async {
final DiscoveryNodes nodes;
final CountDown expectedOps;
final ClearScrollRequest request;
final List<Tuple<String, Long>[]> contexts = new ArrayList<Tuple<String, Long>[]>();
final AtomicReference<Throwable> expHolder;
final ActionListener<ClearScrollResponse> listener;
private Async(ClearScrollRequest request, ActionListener<ClearScrollResponse> listener, ClusterState clusterState) {
int expectedOps = 0;
this.nodes = clusterState.nodes();
if (request.getScrollIds().size() == 1 && "_all".equals(request.getScrollIds().get(0))) {
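// clearing "_all" sends one clear-all request per node, so expect one response per node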
expectedOps = nodes.size();
} else {
for (String parsedScrollId : request.getScrollIds()) {
Tuple<String, Long>[] context = parseScrollId(parsedScrollId).getContext();
expectedOps += context.length;
this.contexts.add(context);
}
}
this.request = request;
this.listener = listener;
this.expHolder = new AtomicReference<Throwable>();
this.expectedOps = new CountDown(expectedOps);
}
public void run() {
if (expectedOps.isCountedDown()) {
listener.onResponse(new ClearScrollResponse(true));
return;
}
if (contexts.isEmpty()) {
for (final DiscoveryNode node : nodes) {
searchServiceTransportAction.sendClearAllScrollContexts(node, request, new ActionListener<Boolean>() {
@Override
public void onResponse(Boolean success) {
onFreedContext();
}
@Override
public void onFailure(Throwable e) {
onFailedFreedContext(e, node);
}
});
}
} else {
for (Tuple<String, Long>[] context : contexts) {
for (Tuple<String, Long> target : context) {
final DiscoveryNode node = nodes.get(target.v1());
if (node == null) {
onFreedContext();
continue;
}
searchServiceTransportAction.sendFreeContext(node, target.v2(), request, new ActionListener<Boolean>() {
@Override
public void onResponse(Boolean success) {
onFreedContext();
}
@Override
public void onFailure(Throwable e) {
onFailedFreedContext(e, node);
}
});
}
}
}
}
void onFreedContext() {
if (expectedOps.countDown()) {
boolean succeeded = expHolder.get() == null;
listener.onResponse(new ClearScrollResponse(succeeded));
}
}
void onFailedFreedContext(Throwable e, DiscoveryNode node) {
logger.warn("Clear SC failed on node[{}]", e, node);
if (expectedOps.countDown()) {
listener.onResponse(new ClearScrollResponse(false));
} else {
expHolder.set(e);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_search_TransportClearScrollAction.java
|
250 |
public interface CurrencyCodeIdentifiable {
public String getCurrencyCode();
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_currency_util_CurrencyCodeIdentifiable.java
|
3,903 |
public class NestedFilterParser implements FilterParser {
public static final String NAME = "nested";
@Inject
public NestedFilterParser() {
}
@Override
public String[] names() {
return new String[]{NAME, Strings.toCamelCase(NAME)};
}
@Override
public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
Query query = null;
boolean queryFound = false;
Filter filter = null;
boolean filterFound = false;
float boost = 1.0f;
boolean join = true;
String path = null;
boolean cache = false;
CacheKeyFilter.Key cacheKey = null;
String filterName = null;
// we need a late binding filter so we can inject a parent nested filter into inner nested queries
NestedQueryParser.LateBindingParentFilter currentParentFilterContext = NestedQueryParser.parentFilterContext.get();
NestedQueryParser.LateBindingParentFilter usAsParentFilter = new NestedQueryParser.LateBindingParentFilter();
NestedQueryParser.parentFilterContext.set(usAsParentFilter);
try {
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("query".equals(currentFieldName)) {
queryFound = true;
query = parseContext.parseInnerQuery();
} else if ("filter".equals(currentFieldName)) {
filterFound = true;
filter = parseContext.parseInnerFilter();
} else {
throw new QueryParsingException(parseContext.index(), "[nested] filter does not support [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if ("join".equals(currentFieldName)) {
join = parser.booleanValue();
} else if ("path".equals(currentFieldName)) {
path = parser.text();
} else if ("boost".equals(currentFieldName)) {
boost = parser.floatValue();
} else if ("_scope".equals(currentFieldName)) {
throw new QueryParsingException(parseContext.index(), "the [_scope] support in [nested] filter has been removed, use nested filter as a facet_filter in the relevant facet");
} else if ("_name".equals(currentFieldName)) {
filterName = parser.text();
} else if ("_cache".equals(currentFieldName)) {
cache = parser.booleanValue();
} else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
cacheKey = new CacheKeyFilter.Key(parser.text());
} else {
throw new QueryParsingException(parseContext.index(), "[nested] filter does not support [" + currentFieldName + "]");
}
}
}
if (!queryFound && !filterFound) {
throw new QueryParsingException(parseContext.index(), "[nested] requires either 'query' or 'filter' field");
}
if (path == null) {
throw new QueryParsingException(parseContext.index(), "[nested] requires 'path' field");
}
if (query == null && filter == null) {
return null;
}
if (filter != null) {
query = new XConstantScoreQuery(filter);
}
query.setBoost(boost);
MapperService.SmartNameObjectMapper mapper = parseContext.smartObjectMapper(path);
if (mapper == null) {
throw new QueryParsingException(parseContext.index(), "[nested] failed to find nested object under path [" + path + "]");
}
ObjectMapper objectMapper = mapper.mapper();
if (objectMapper == null) {
throw new QueryParsingException(parseContext.index(), "[nested] failed to find nested object under path [" + path + "]");
}
if (!objectMapper.nested().isNested()) {
throw new QueryParsingException(parseContext.index(), "[nested] nested object under path [" + path + "] is not of nested type");
}
Filter childFilter = parseContext.cacheFilter(objectMapper.nestedTypeFilter(), null);
usAsParentFilter.filter = childFilter;
// wrap the child query to only work on the nested path type
query = new XFilteredQuery(query, childFilter);
Filter parentFilter = currentParentFilterContext;
if (parentFilter == null) {
parentFilter = NonNestedDocsFilter.INSTANCE;
// don't do special parent filtering, since we might have same nested mapping on two different types
//if (mapper.hasDocMapper()) {
// // filter based on the type...
// parentFilter = mapper.docMapper().typeFilter();
//}
parentFilter = parseContext.cacheFilter(parentFilter, null);
}
Filter nestedFilter;
if (join) {
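// join matching nested docs back to their parent docs (scores are irrelevant for a filter)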
ToParentBlockJoinQuery joinQuery = new ToParentBlockJoinQuery(query, parentFilter, ScoreMode.None);
nestedFilter = new QueryWrapperFilter(joinQuery);
} else {
nestedFilter = new QueryWrapperFilter(query);
}
if (cache) {
nestedFilter = parseContext.cacheFilter(nestedFilter, cacheKey);
}
if (filterName != null) {
parseContext.addNamedFilter(filterName, nestedFilter);
}
return nestedFilter;
} finally {
// restore the thread local one...
NestedQueryParser.parentFilterContext.set(currentParentFilterContext);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_query_NestedFilterParser.java
|
4,802 |
public class RestClusterStateAction extends BaseRestHandler {
private final SettingsFilter settingsFilter;
@Inject
public RestClusterStateAction(Settings settings, Client client, RestController controller,
SettingsFilter settingsFilter) {
super(settings, client);
controller.registerHandler(RestRequest.Method.GET, "/_cluster/state", this);
controller.registerHandler(RestRequest.Method.GET, "/_cluster/state/{metric}", this);
controller.registerHandler(RestRequest.Method.GET, "/_cluster/state/{metric}/{indices}", this);
this.settingsFilter = settingsFilter;
}
@Override
public void handleRequest(final RestRequest request, final RestChannel channel) {
final ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest();
clusterStateRequest.listenerThreaded(false);
clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
final String[] indices = Strings.splitStringByCommaToArray(request.param("indices", "_all"));
boolean isAllIndicesOnly = indices.length == 1 && "_all".equals(indices[0]);
if (!isAllIndicesOnly) {
clusterStateRequest.indices(indices);
}
Set<String> metrics = Strings.splitStringByCommaToSet(request.param("metric", "_all"));
boolean isAllMetricsOnly = metrics.size() == 1 && metrics.contains("_all");
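// only narrow the state request when specific metrics were asked for; "_all" keeps everything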
if (!isAllMetricsOnly) {
clusterStateRequest.nodes(metrics.contains("nodes"));
clusterStateRequest.routingTable(metrics.contains("routing_table"));
clusterStateRequest.metaData(metrics.contains("metadata"));
clusterStateRequest.blocks(metrics.contains("blocks"));
clusterStateRequest.indexTemplates(request.paramAsStringArray("index_templates", Strings.EMPTY_ARRAY));
}
client.admin().cluster().state(clusterStateRequest, new ActionListener<ClusterStateResponse>() {
@Override
public void onResponse(ClusterStateResponse response) {
try {
XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
builder.startObject();
builder.field(Fields.CLUSTER_NAME, response.getClusterName().value());
response.getState().settingsFilter(settingsFilter).toXContent(builder, request);
builder.endObject();
channel.sendResponse(new XContentRestResponse(request, RestStatus.OK, builder));
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
if (logger.isDebugEnabled()) {
logger.debug("failed to handle cluster state", e);
}
try {
channel.sendResponse(new XContentThrowableRestResponse(request, e));
} catch (IOException e1) {
logger.error("Failed to send failure response", e1);
}
}
});
}
static final class Fields {
static final XContentBuilderString CLUSTER_NAME = new XContentBuilderString("cluster_name");
}
}
| 1no label
|
src_main_java_org_elasticsearch_rest_action_admin_cluster_state_RestClusterStateAction.java
|
8 |
abstract static class Async extends ForkJoinTask<Void>
implements Runnable, AsynchronousCompletionTask {
public final Void getRawResult() { return null; }
public final void setRawResult(Void v) { }
public final void run() { exec(); }
}
| 0true
|
src_main_java_jsr166e_CompletableFuture.java
|
547 |
final class TransactionProxy {
private static final ThreadLocal<Boolean> THREAD_FLAG = new ThreadLocal<Boolean>();
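// set while a transaction is active on the current thread; used to reject nested transactions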
private final TransactionOptions options;
private final HazelcastClient client;
private final long threadId = Thread.currentThread().getId();
private final ClientConnection connection;
private SerializableXID sXid;
private String txnId;
private State state = NO_TXN;
private long startTime;
TransactionProxy(HazelcastClient client, TransactionOptions options, ClientConnection connection) {
this.options = options;
this.client = client;
this.connection = connection;
}
public String getTxnId() {
return txnId;
}
public State getState() {
return state;
}
public long getTimeoutMillis() {
return options.getTimeoutMillis();
}
void begin() {
try {
if (state == ACTIVE) {
throw new IllegalStateException("Transaction is already active");
}
checkThread();
if (THREAD_FLAG.get() != null) {
throw new IllegalStateException("Nested transactions are not allowed!");
}
THREAD_FLAG.set(Boolean.TRUE);
startTime = Clock.currentTimeMillis();
txnId = invoke(new CreateTransactionRequest(options, sXid));
state = ACTIVE;
} catch (Exception e) {
closeConnection();
throw ExceptionUtil.rethrow(e);
}
}
public void prepare() {
try {
if (state != ACTIVE) {
throw new TransactionNotActiveException("Transaction is not active");
}
checkThread();
checkTimeout();
invoke(new PrepareTransactionRequest());
state = PREPARED;
} catch (Exception e) {
state = ROLLING_BACK;
closeConnection();
throw ExceptionUtil.rethrow(e);
}
}
void commit(boolean prepareAndCommit) {
try {
if (prepareAndCommit && state != ACTIVE) {
throw new TransactionNotActiveException("Transaction is not active");
}
if (!prepareAndCommit && state != PREPARED) {
throw new TransactionNotActiveException("Transaction is not prepared");
}
checkThread();
checkTimeout();
invoke(new CommitTransactionRequest(prepareAndCommit));
state = COMMITTED;
} catch (Exception e) {
state = ROLLING_BACK;
throw ExceptionUtil.rethrow(e);
} finally {
closeConnection();
}
}
void rollback() {
try {
if (state == NO_TXN || state == ROLLED_BACK) {
throw new IllegalStateException("Transaction is not active");
}
if (state == ROLLING_BACK) {
state = ROLLED_BACK;
return;
}
checkThread();
try {
invoke(new RollbackTransactionRequest());
} catch (Exception ignored) {
}
state = ROLLED_BACK;
} finally {
closeConnection();
}
}
SerializableXID getXid() {
return sXid;
}
void setXid(SerializableXID xid) {
this.sXid = xid;
}
private void closeConnection() {
THREAD_FLAG.set(null);
// try {
// connection.release();
// } catch (IOException e) {
// IOUtil.closeResource(connection);
// }
}
private void checkThread() {
if (threadId != Thread.currentThread().getId()) {
throw new IllegalStateException("Transaction cannot span multiple threads!");
}
}
private void checkTimeout() {
if (startTime + options.getTimeoutMillis() < Clock.currentTimeMillis()) {
throw new TransactionException("Transaction is timed-out!");
}
}
private <T> T invoke(ClientRequest request) {
if (request instanceof BaseTransactionRequest) {
((BaseTransactionRequest) request).setTxnId(txnId);
((BaseTransactionRequest) request).setClientThreadId(threadId);
}
final SerializationService ss = client.getSerializationService();
final ClientInvocationServiceImpl invocationService = (ClientInvocationServiceImpl) client.getInvocationService();
try {
final Future f = invocationService.send(request, connection);
return ss.toObject(f.get());
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
}
}
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_txn_TransactionProxy.java
|
1,158 |
public class OSQLMethodSize extends OAbstractSQLMethod {
public static final String NAME = "size";
public OSQLMethodSize() {
super(NAME);
}
@Override
public Object execute(final OIdentifiable iCurrentRecord, final OCommandContext iContext, final Object ioResult,
final Object[] iMethodParams) {
final Number size;
if (ioResult != null) {
if (ioResult instanceof ORecord<?>)
size = 1;
else
size = OMultiValue.getSize(ioResult);
} else
size = 0;
return size;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_method_misc_OSQLMethodSize.java
|
1,089 |
public class OSQLPredicate extends OBaseParser implements OCommandPredicate {
protected Set<OProperty> properties = new HashSet<OProperty>();
protected OSQLFilterCondition rootCondition;
protected List<String> recordTransformed;
protected List<OSQLFilterItemParameter> parameterItems;
protected int braces;
protected OCommandContext context;
public OSQLPredicate() {
}
public OSQLPredicate(final String iText) {
text(iText);
}
protected void throwSyntaxErrorException(final String iText) {
final String syntax = getSyntax();
if (syntax.equals("?"))
throw new OCommandSQLParsingException(iText, parserText, parserGetPreviousPosition());
throw new OCommandSQLParsingException(iText + ". Use " + syntax, parserText, parserGetPreviousPosition());
}
public OSQLPredicate text(final String iText) {
if (iText == null)
throw new OCommandSQLParsingException("Query text is null");
try {
parserText = iText;
parserTextUpperCase = parserText.toUpperCase(Locale.ENGLISH);
parserSetCurrentPosition(0);
parserSkipWhiteSpaces();
rootCondition = (OSQLFilterCondition) extractConditions(null);
optimize();
} catch (OQueryParsingException e) {
if (e.getText() == null)
// QUERY EXCEPTION BUT WITHOUT TEXT: NEST IT
throw new OQueryParsingException("Error on parsing query", parserText, parserGetCurrentPosition(), e);
throw e;
} catch (Throwable t) {
throw new OQueryParsingException("Error on parsing query", parserText, parserGetCurrentPosition(), t);
}
return this;
}
public Object evaluate() {
return evaluate(null, null, null);
}
public Object evaluate(final OCommandContext iContext) {
return evaluate(null, null, iContext);
}
public Object evaluate(final ORecord<?> iRecord, ODocument iCurrentResult, final OCommandContext iContext) {
if (rootCondition == null)
return true;
return rootCondition.evaluate((ORecordSchemaAware<?>) iRecord, iCurrentResult, iContext);
}
private Object extractConditions(final OSQLFilterCondition iParentCondition) {
final int oldPosition = parserGetCurrentPosition();
parserNextWord(true, " )=><,\r\n");
final String word = parserGetLastWord();
if (word.length() > 0 && (word.equalsIgnoreCase("SELECT") || word.equalsIgnoreCase("TRAVERSE"))) {
// SUB QUERY
final StringBuilder embedded = new StringBuilder();
OStringSerializerHelper.getEmbedded(parserText, oldPosition - 1, -1, embedded);
parserSetCurrentPosition(oldPosition + embedded.length() + 1);
return new OSQLSynchQuery<Object>(embedded.toString());
}
parserSetCurrentPosition(oldPosition);
OSQLFilterCondition currentCondition = extractCondition();
// CHECK IF THERE IS ANOTHER CONDITION ON RIGHT
while (parserSkipWhiteSpaces()) {
if (!parserIsEnded() && parserGetCurrentChar() == ')')
return currentCondition;
final OQueryOperator nextOperator = extractConditionOperator();
if (nextOperator == null)
return currentCondition;
if (nextOperator.precedence > currentCondition.getOperator().precedence) {
// SWAP ITEMS
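// THE HIGHER-PRECEDENCE OPERATOR BINDS TIGHTER, SO IT TAKES THE CURRENT RIGHT OPERAND AS ITS LEFT SIDE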
final OSQLFilterCondition subCondition = new OSQLFilterCondition(currentCondition.right, nextOperator);
currentCondition.right = subCondition;
subCondition.right = extractConditionItem(false, 1);
} else {
final OSQLFilterCondition parentCondition = new OSQLFilterCondition(currentCondition, nextOperator);
parentCondition.right = extractConditions(parentCondition);
currentCondition = parentCondition;
}
}
// END OF TEXT
return currentCondition;
}
protected OSQLFilterCondition extractCondition() {
if (!parserSkipWhiteSpaces())
// END OF TEXT
return null;
// EXTRACT ITEMS
Object left = extractConditionItem(true, 1);
if (left != null && checkForEnd(left.toString()))
return null;
OQueryOperator oper;
final Object right;
if (left instanceof OQueryOperator && ((OQueryOperator) left).isUnary()) {
oper = (OQueryOperator) left;
left = extractConditionItem(false, 1);
right = null;
} else {
oper = extractConditionOperator();
if (oper instanceof OQueryOperatorNot)
// SPECIAL CASE: READ NEXT OPERATOR
oper = new OQueryOperatorNot(extractConditionOperator());
right = oper != null ? extractConditionItem(false, oper.expectedRightWords) : null;
}
// CREATE THE CONDITION OBJECT
return new OSQLFilterCondition(left, oper, right);
}
protected boolean checkForEnd(final String iWord) {
if (iWord != null
&& (iWord.equals(OCommandExecutorSQLSelect.KEYWORD_ORDER) || iWord.equals(OCommandExecutorSQLSelect.KEYWORD_LIMIT) || iWord
.equals(OCommandExecutorSQLSelect.KEYWORD_SKIP))) {
parserMoveCurrentPosition(iWord.length() * -1);
return true;
}
return false;
}
private OQueryOperator extractConditionOperator() {
if (!parserSkipWhiteSpaces())
// END OF PARSING: JUST RETURN
return null;
if (parserGetCurrentChar() == ')')
// FOUND ')': JUST RETURN
return null;
final OQueryOperator[] operators = OSQLEngine.getInstance().getRecordOperators();
final String[] candidateOperators = new String[operators.length];
for (int i = 0; i < candidateOperators.length; ++i)
candidateOperators[i] = operators[i].keyword;
final int operatorPos = parserNextChars(true, false, candidateOperators);
if (operatorPos == -1) {
parserGoBack();
return null;
}
final OQueryOperator op = operators[operatorPos];
if (op.expectsParameters) {
// PARSE PARAMETERS IF ANY
parserGoBack();
parserNextWord(true, " 0123456789'\"");
final String word = parserGetLastWord();
final List<String> params = new ArrayList<String>();
// CHECK FOR PARAMETERS
if (word.length() > op.keyword.length() && word.charAt(op.keyword.length()) == OStringSerializerHelper.EMBEDDED_BEGIN) {
int paramBeginPos = parserGetCurrentPosition() - (word.length() - op.keyword.length());
parserSetCurrentPosition(OStringSerializerHelper.getParameters(parserText, paramBeginPos, -1, params));
} else if (!word.equals(op.keyword))
throw new OQueryParsingException("Malformed usage of operator '" + op.toString() + "'. Parsed operator is: " + word);
try {
// CONFIGURE COULD INSTANTIATE A NEW OBJECT: ACT AS A FACTORY
return op.configure(params);
} catch (Exception e) {
throw new OQueryParsingException("Syntax error using the operator '" + op.toString() + "'. Syntax is: " + op.getSyntax());
}
} else
parserMoveCurrentPosition(+1);
return op;
}
private Object extractConditionItem(final boolean iAllowOperator, final int iExpectedWords) {
final Object[] result = new Object[iExpectedWords];
for (int i = 0; i < iExpectedWords; ++i) {
parserNextWord(false, " =><,\r\n");
String word = parserGetLastWord();
if (word.length() == 0)
break;
final String uWord = word.toUpperCase();
final int lastPosition = parserIsEnded() ? parserText.length() : parserGetCurrentPosition();
if (word.length() > 0 && word.charAt(0) == OStringSerializerHelper.EMBEDDED_BEGIN) {
braces++;
// SUB-CONDITION
parserSetCurrentPosition(lastPosition - word.length() + 1);
final Object subCondition = extractConditions(null);
if (!parserSkipWhiteSpaces() || parserGetCurrentChar() == ')') {
braces--;
parserMoveCurrentPosition(+1);
}
result[i] = subCondition;
} else if (word.charAt(0) == OStringSerializerHelper.LIST_BEGIN) {
// COLLECTION OF ELEMENTS
parserSetCurrentPosition(lastPosition - word.length());
final List<String> stringItems = new ArrayList<String>();
parserSetCurrentPosition(OStringSerializerHelper.getCollection(parserText, parserGetCurrentPosition(), stringItems));
result[i] = convertCollectionItems(stringItems);
parserMoveCurrentPosition(+1);
} else if (uWord.startsWith(OSQLFilterItemFieldAll.NAME + OStringSerializerHelper.EMBEDDED_BEGIN)) {
result[i] = new OSQLFilterItemFieldAll(this, word);
} else if (uWord.startsWith(OSQLFilterItemFieldAny.NAME + OStringSerializerHelper.EMBEDDED_BEGIN)) {
result[i] = new OSQLFilterItemFieldAny(this, word);
} else {
if (uWord.equals("NOT")) {
if (iAllowOperator)
return new OQueryOperatorNot();
else {
// GET THE NEXT VALUE
parserNextWord(false, " )=><,\r\n");
final String nextWord = parserGetLastWord();
if (nextWord.length() > 0) {
word += " " + nextWord;
if (word.endsWith(")"))
word = word.substring(0, word.length() - 1);
}
}
} else if (uWord.equals("AND"))
// SPECIAL CASE IN "BETWEEN X AND Y"
result[i] = word;
while (word.endsWith(")")) {
final int openParenthesis = word.indexOf('(');
if (openParenthesis == -1) {
// DISCARD END PARENTHESIS
word = word.substring(0, word.length() - 1);
parserMoveCurrentPosition(-1);
} else
break;
}
result[i] = OSQLHelper.parseValue(this, this, word, context);
}
}
return iExpectedWords == 1 ? result[0] : result;
}
private List<Object> convertCollectionItems(List<String> stringItems) {
List<Object> coll = new ArrayList<Object>();
for (String s : stringItems) {
coll.add(OSQLHelper.parseValue(this, this, s, context));
}
return coll;
}
public OSQLFilterCondition getRootCondition() {
return rootCondition;
}
@Override
public String toString() {
if (rootCondition != null)
return "Parsed: " + rootCondition.toString();
return "Unparsed: " + parserText;
}
/**
* Binds parameter values to the parameter items collected during parsing.
*
* @param iArgs
*          parameter values keyed by position (Integer) or by name
*/
public void bindParameters(final Map<Object, Object> iArgs) {
if (parameterItems == null || iArgs == null || iArgs.size() == 0)
return;
for (Entry<Object, Object> entry : iArgs.entrySet()) {
if (entry.getKey() instanceof Integer)
parameterItems.get(((Integer) entry.getKey())).setValue(entry.getValue());
else {
String paramName = entry.getKey().toString();
for (OSQLFilterItemParameter value : parameterItems) {
if (value.getName().equalsIgnoreCase(paramName)) {
value.setValue(entry.getValue());
break;
}
}
}
}
}
public OSQLFilterItemParameter addParameter(final String iName) {
final String name;
if (iName.charAt(0) == OStringSerializerHelper.PARAMETER_NAMED) {
name = iName.substring(1);
// CHECK THE PARAMETER NAME IS CORRECT
if (!OStringSerializerHelper.isAlphanumeric(name)) {
throw new OQueryParsingException("Parameter name '" + name + "' is invalid, only alphanumeric characters are allowed");
}
} else
name = iName;
final OSQLFilterItemParameter param = new OSQLFilterItemParameter(name);
if (parameterItems == null)
parameterItems = new ArrayList<OSQLFilterItemParameter>();
parameterItems.add(param);
return param;
}
public void setRootCondition(final OSQLFilterCondition iCondition) {
rootCondition = iCondition;
}
protected void optimize() {
if (rootCondition != null)
computePrefetchFieldList(rootCondition, new HashSet<String>());
}
protected Set<String> computePrefetchFieldList(final OSQLFilterCondition iCondition, final Set<String> iFields) {
Object left = iCondition.getLeft();
Object right = iCondition.getRight();
if (left instanceof OSQLFilterItemField) {
((OSQLFilterItemField) left).setPreLoadedFields(iFields);
iFields.add(((OSQLFilterItemField) left).getRoot());
} else if (left instanceof OSQLFilterCondition)
computePrefetchFieldList((OSQLFilterCondition) left, iFields);
if (right instanceof OSQLFilterItemField) {
((OSQLFilterItemField) right).setPreLoadedFields(iFields);
iFields.add(((OSQLFilterItemField) right).getRoot());
} else if (right instanceof OSQLFilterCondition)
computePrefetchFieldList((OSQLFilterCondition) right, iFields);
return iFields;
}
}
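/*
 * Added usage sketch (not part of the original source): demonstrates how positional and
 * named arguments flow into bindParameters(). The predicate instance and its parsed text
 * are assumed; only methods defined above are used.
 */
class OSQLPredicateBindSketch {
static void demo(OSQLPredicate predicate) {
java.util.Map<Object, Object> args = new java.util.HashMap<Object, Object>();
args.put(0, "Rome"); // positional: Integer keys index into parameterItems
args.put("status", "OPEN"); // named: matched case-insensitively against parameter names
predicate.bindParameters(args);
}
}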
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_filter_OSQLPredicate.java
|
125 |
final EntryAdapter<String, String> listener = new EntryAdapter<String, String>() {
public void onEntryEvent(EntryEvent<String, String> event) {
latch.countDown();
}
};
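/*
 * Added usage sketch (hedged; the map and latch wiring are assumptions): shows how the
 * adapter above is typically registered and awaited in a Hazelcast test.
 */
// map.addEntryListener(listener, true); // true -> include values in events
// latch.await(30, TimeUnit.SECONDS);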
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_ClientReconnectTest.java
|
1,692 |
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_ADMIN_PASSWORD_TOKEN")
public class ForgotPasswordSecurityTokenImpl implements ForgotPasswordSecurityToken {
private static final long serialVersionUID = 1L;
@Id
@Column(name = "PASSWORD_TOKEN", nullable = false)
protected String token;
@Column(name = "CREATE_DATE", nullable = false)
@Temporal(TemporalType.TIMESTAMP)
protected Date createDate;
@Column(name = "TOKEN_USED_DATE")
@Temporal(TemporalType.TIMESTAMP)
protected Date tokenUsedDate;
@Column(name = "ADMIN_USER_ID", nullable = false)
protected Long adminUserId;
@Column(name = "TOKEN_USED_FLAG", nullable = false)
protected boolean tokenUsedFlag;
public String getToken() {
return token;
}
public void setToken(String token) {
this.token = token;
}
public Date getCreateDate() {
return createDate;
}
public void setCreateDate(Date createDate) {
this.createDate = createDate;
}
public Date getTokenUsedDate() {
return tokenUsedDate;
}
public void setTokenUsedDate(Date tokenUsedDate) {
this.tokenUsedDate = tokenUsedDate;
}
public Long getAdminUserId() {
return adminUserId;
}
public void setAdminUserId(Long adminUserId) {
this.adminUserId = adminUserId;
}
public boolean isTokenUsedFlag() {
return tokenUsedFlag;
}
public void setTokenUsedFlag(boolean tokenUsedFlag) {
this.tokenUsedFlag = tokenUsedFlag;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ForgotPasswordSecurityTokenImpl that = (ForgotPasswordSecurityTokenImpl) o;
if (token != null ? !token.equals(that.token) : that.token != null) return false;
return true;
}
@Override
public int hashCode() {
return token != null ? token.hashCode() : 0;
}
}
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_security_domain_ForgotPasswordSecurityTokenImpl.java
|
167 |
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_URL_HANDLER")
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region = "blStandardElements")
@AdminPresentationClass(populateToOneFields = PopulateToOneFieldsEnum.TRUE, friendlyName = "URLHandlerImpl_friendyName")
public class URLHandlerImpl implements URLHandler, Serializable, AdminMainEntity {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "URLHandlerID")
@GenericGenerator(
name="URLHandlerID",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="URLHandlerImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.cms.url.domain.URLHandlerImpl")
}
)
@Column(name = "URL_HANDLER_ID")
@AdminPresentation(friendlyName = "URLHandlerImpl_ID", order = 1, group = "URLHandlerImpl_friendyName", groupOrder = 1, visibility = VisibilityEnum.HIDDEN_ALL)
protected Long id;
@AdminPresentation(friendlyName = "URLHandlerImpl_incomingURL", order = 1, group = "URLHandlerImpl_friendyName", prominent = true, groupOrder = 1)
@Column(name = "INCOMING_URL", nullable = false)
@Index(name="INCOMING_URL_INDEX", columnNames={"INCOMING_URL"})
protected String incomingURL;
@Column(name = "NEW_URL", nullable = false)
@AdminPresentation(friendlyName = "URLHandlerImpl_newURL", order = 1, group = "URLHandlerImpl_friendyName", prominent = true, groupOrder = 1)
protected String newURL;
@Column(name = "URL_REDIRECT_TYPE")
@AdminPresentation(friendlyName = "URLHandlerImpl_redirectType", order = 4, group = "URLHandlerImpl_friendyName", fieldType = SupportedFieldType.BROADLEAF_ENUMERATION, broadleafEnumeration = "org.broadleafcommerce.cms.url.type.URLRedirectType", groupOrder = 2, prominent = true)
protected String urlRedirectType;
/* (non-Javadoc)
* @see org.broadleafcommerce.common.url.URLHandler#getId()
*/
@Override
public Long getId() {
return id;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.url.URLHandler#setId(java.lang.Long)
*/
@Override
public void setId(Long id) {
this.id = id;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.url.URLHandler#getIncomingURL()
*/
@Override
public String getIncomingURL() {
return incomingURL;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.url.URLHandler#setIncomingURL(java.lang.String)
*/
@Override
public void setIncomingURL(String incomingURL) {
this.incomingURL = incomingURL;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.url.URLHandler#getNewURL()
*/
@Override
public String getNewURL() {
return newURL;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.url.URLHandler#setNewURL(java.lang.String)
*/
@Override
public void setNewURL(String newURL) {
this.newURL = newURL;
}
@Override
public URLRedirectType getUrlRedirectType() {
return URLRedirectType.getInstance(urlRedirectType);
}
@Override
public void setUrlRedirectType(URLRedirectType redirectType) {
this.urlRedirectType = redirectType.getType();
}
@Override
public String getMainEntityName() {
return getIncomingURL();
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_url_domain_URLHandlerImpl.java
|
576 |
public class OptimizeRequest extends BroadcastOperationRequest<OptimizeRequest> {
public static final class Defaults {
public static final boolean WAIT_FOR_MERGE = true;
public static final int MAX_NUM_SEGMENTS = -1;
public static final boolean ONLY_EXPUNGE_DELETES = false;
public static final boolean FLUSH = true;
}
private boolean waitForMerge = Defaults.WAIT_FOR_MERGE;
private int maxNumSegments = Defaults.MAX_NUM_SEGMENTS;
private boolean onlyExpungeDeletes = Defaults.ONLY_EXPUNGE_DELETES;
private boolean flush = Defaults.FLUSH;
/**
* Constructs an optimization request over one or more indices.
*
* @param indices The indices to optimize, no indices passed means all indices will be optimized.
*/
public OptimizeRequest(String... indices) {
super(indices);
}
public OptimizeRequest() {
}
/**
* Should the call block until the optimize completes. Defaults to <tt>true</tt>.
*/
public boolean waitForMerge() {
return waitForMerge;
}
/**
* Should the call block until the optimize completes. Defaults to <tt>true</tt>.
*/
public OptimizeRequest waitForMerge(boolean waitForMerge) {
this.waitForMerge = waitForMerge;
return this;
}
/**
* Will optimize the index down to <= maxNumSegments. By default, will cause the optimize
* process to optimize down to half the configured number of segments.
*/
public int maxNumSegments() {
return maxNumSegments;
}
/**
* Will optimize the index down to <= maxNumSegments. By default, will cause the optimize
* process to optimize down to half the configured number of segments.
*/
public OptimizeRequest maxNumSegments(int maxNumSegments) {
this.maxNumSegments = maxNumSegments;
return this;
}
/**
* Should the optimization only expunge deletes from the index, without full optimization.
* Defaults to full optimization (<tt>false</tt>).
*/
public boolean onlyExpungeDeletes() {
return onlyExpungeDeletes;
}
/**
* Should the optimization only expunge deletes from the index, without full optimization.
* Defaults to full optimization (<tt>false</tt>).
*/
public OptimizeRequest onlyExpungeDeletes(boolean onlyExpungeDeletes) {
this.onlyExpungeDeletes = onlyExpungeDeletes;
return this;
}
/**
* Should flush be performed after the optimization. Defaults to <tt>true</tt>.
*/
public boolean flush() {
return flush;
}
/**
* Should flush be performed after the optimization. Defaults to <tt>true</tt>.
*/
public OptimizeRequest flush(boolean flush) {
this.flush = flush;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
waitForMerge = in.readBoolean();
maxNumSegments = in.readInt();
onlyExpungeDeletes = in.readBoolean();
flush = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(waitForMerge);
out.writeInt(maxNumSegments);
out.writeBoolean(onlyExpungeDeletes);
out.writeBoolean(flush);
}
}
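/*
 * Added usage sketch (illustrative; the index name is an assumption): each fluent setter
 * returns this, so a force-merge down to one segment can be configured in a single chain.
 */
class OptimizeRequestSketch {
static OptimizeRequest singleSegment() {
return new OptimizeRequest("my-index")
.maxNumSegments(1) // merge down to one segment
.onlyExpungeDeletes(false)
.waitForMerge(true)
.flush(true); // flush once the merge completes
}
}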
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_optimize_OptimizeRequest.java
|
208 |
private class ClusterAuthenticator implements Authenticator {
@Override
public void auth(ClientConnection connection) throws AuthenticationException, IOException {
authenticate(connection, credentials, principal, false, false);
}
}
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_connection_nio_ClientConnectionManagerImpl.java
|
81 |
@SuppressWarnings("serial")
static final class MapReduceValuesTask<K,V,U>
extends BulkTask<K,V,U> {
final Fun<? super V, ? extends U> transformer;
final BiFun<? super U, ? super U, ? extends U> reducer;
U result;
MapReduceValuesTask<K,V,U> rights, nextRight;
MapReduceValuesTask
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
MapReduceValuesTask<K,V,U> nextRight,
Fun<? super V, ? extends U> transformer,
BiFun<? super U, ? super U, ? extends U> reducer) {
super(p, b, i, f, t); this.nextRight = nextRight;
this.transformer = transformer;
this.reducer = reducer;
}
public final U getRawResult() { return result; }
public final void compute() {
final Fun<? super V, ? extends U> transformer;
final BiFun<? super U, ? super U, ? extends U> reducer;
if ((transformer = this.transformer) != null &&
(reducer = this.reducer) != null) {
for (int i = baseIndex, f, h; batch > 0 &&
(h = ((f = baseLimit) + i) >>> 1) > i;) {
addToPendingCount(1);
(rights = new MapReduceValuesTask<K,V,U>
(this, batch >>>= 1, baseLimit = h, f, tab,
rights, transformer, reducer)).fork();
}
U r = null;
for (Node<K,V> p; (p = advance()) != null; ) {
U u;
if ((u = transformer.apply(p.val)) != null)
r = (r == null) ? u : reducer.apply(r, u);
}
result = r;
CountedCompleter<?> c;
for (c = firstComplete(); c != null; c = c.nextComplete()) {
@SuppressWarnings("unchecked") MapReduceValuesTask<K,V,U>
t = (MapReduceValuesTask<K,V,U>)c,
s = t.rights;
while (s != null) {
U tr, sr;
if ((sr = s.result) != null)
t.result = (((tr = t.result) == null) ? sr :
reducer.apply(tr, sr));
s = t.rights = s.nextRight;
}
}
}
}
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
531 |
@Deprecated
public class GatewaySnapshotAction extends IndicesAction<GatewaySnapshotRequest, GatewaySnapshotResponse, GatewaySnapshotRequestBuilder> {
public static final GatewaySnapshotAction INSTANCE = new GatewaySnapshotAction();
public static final String NAME = "indices/gateway/snapshot";
private GatewaySnapshotAction() {
super(NAME);
}
@Override
public GatewaySnapshotResponse newResponse() {
return new GatewaySnapshotResponse();
}
@Override
public GatewaySnapshotRequestBuilder newRequestBuilder(IndicesAdminClient client) {
return new GatewaySnapshotRequestBuilder(client);
}
}
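/*
 * Added usage sketch (hedged): deprecated actions like this one are normally invoked
 * through their request builder; the client instance and builder setters are assumptions.
 */
// GatewaySnapshotResponse response = GatewaySnapshotAction.INSTANCE
// .newRequestBuilder(client) // client: an IndicesAdminClient
// .setIndices("my-index")
// .execute()
// .actionGet();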
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_gateway_snapshot_GatewaySnapshotAction.java
|
1,369 |
public class OTransactionIndexChanges {
public static enum OPERATION {
PUT, REMOVE, CLEAR
}
public NavigableMap<Object, OTransactionIndexChangesPerKey> changesPerKey = new TreeMap<Object, OTransactionIndexChangesPerKey>(
ODefaultComparator.INSTANCE);
public boolean cleared = false;
public OTransactionIndexChangesPerKey getChangesPerKey(final Object iKey) {
OTransactionIndexChangesPerKey changes = changesPerKey.get(iKey);
if (changes == null) {
changes = new OTransactionIndexChangesPerKey(iKey);
changesPerKey.put(iKey, changes);
}
return changes;
}
public Collection<OTransactionIndexChangesPerKey> getChangesForKeys(final Object firstKey, final Object lastKey) {
return changesPerKey.subMap(firstKey, lastKey).values();
}
public void setCleared() {
changesPerKey.clear();
cleared = true;
}
public boolean containsChangesPerKey(final Object iKey) {
return changesPerKey.containsKey(iKey);
}
}
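/*
 * Added usage sketch (illustrative): fetches the lazily created per-key change container
 * for one key and then clears the whole change set; only methods declared above are used.
 */
class OTransactionIndexChangesSketch {
static void demo() {
OTransactionIndexChanges changes = new OTransactionIndexChanges();
OTransactionIndexChangesPerKey perKey = changes.getChangesPerKey("name"); // created on first access
// ... record PUT/REMOVE entries on perKey within the transaction ...
changes.setCleared(); // drops all per-key changes and flags the index as cleared
}
}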
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_tx_OTransactionIndexChanges.java
|
1,077 |
public class CacheEdge extends AbstractEdge {
public CacheEdge(long id, EdgeLabel label, InternalVertex start, InternalVertex end, Entry data) {
super(id, label, start.it(), end.it());
assert data != null;
this.data = data;
}
public Direction getVertexCentricDirection() {
return data.getCache().direction;
}
//############## Similar code as CacheProperty but be careful when copying #############################
private final Entry data;
@Override
public InternalRelation it() {
InternalRelation it = null;
InternalVertex startVertex = getVertex(0);
if (startVertex.hasAddedRelations() && startVertex.hasRemovedRelations()) {
//Test whether this relation has been replaced
final long id = super.getLongId();
Iterable<InternalRelation> previous = startVertex.getAddedRelations(new Predicate<InternalRelation>() {
@Override
public boolean apply(@Nullable InternalRelation internalRelation) {
return (internalRelation instanceof StandardEdge) && ((StandardEdge) internalRelation).getPreviousID() == id;
}
});
assert Iterables.size(previous) <= 1 || (isLoop() && Iterables.size(previous) == 2);
it = Iterables.getFirst(previous, null);
}
if (it != null)
return it;
return super.it();
}
private void copyProperties(InternalRelation to) {
for (LongObjectCursor<Object> entry : getPropertyMap()) {
to.setPropertyDirect(tx().getExistingRelationType(entry.key), entry.value);
}
}
private synchronized InternalRelation update() {
StandardEdge copy = new StandardEdge(super.getLongId(), getEdgeLabel(), getVertex(0), getVertex(1), ElementLifeCycle.Loaded);
copyProperties(copy);
copy.remove();
StandardEdge u = (StandardEdge) tx().addEdge(getVertex(0), getVertex(1), getLabel());
if (type.getConsistencyModifier()!=ConsistencyModifier.FORK) u.setId(super.getLongId());
u.setPreviousID(super.getLongId());
copyProperties(u);
setId(u.getLongId());
return u;
}
private RelationCache getPropertyMap() {
RelationCache map = data.getCache();
if (map == null || !map.hasProperties()) {
map = RelationConstructor.readRelationCache(data, tx());
}
return map;
}
@Override
public <O> O getPropertyDirect(RelationType type) {
return getPropertyMap().get(type.getLongId());
}
@Override
public Iterable<RelationType> getPropertyKeysDirect() {
RelationCache map = getPropertyMap();
List<RelationType> types = new ArrayList<RelationType>(map.numProperties());
for (LongObjectCursor<Object> entry : map) {
types.add(tx().getExistingRelationType(entry.key));
}
return types;
}
@Override
public void setPropertyDirect(RelationType type, Object value) {
update().setPropertyDirect(type, value);
}
@Override
public <O> O removePropertyDirect(RelationType type) {
return update().removePropertyDirect(type);
}
@Override
public byte getLifeCycle() {
InternalVertex startVertex = getVertex(0);
return ((startVertex.hasRemovedRelations() || startVertex.isRemoved()) && tx().isRemovedRelation(super.getLongId()))
? ElementLifeCycle.Removed : ElementLifeCycle.Loaded;
}
@Override
public void remove() {
if (!tx().isRemovedRelation(super.getLongId())) {
tx().removeRelation(this);
}// else throw InvalidElementException.removedException(this);
}
}
| 1no label
|
titan-core_src_main_java_com_thinkaurelius_titan_graphdb_relations_CacheEdge.java
|
2,907 |
public final class SampleObjects {
public static class ValueType implements Serializable {
String typeName;
public ValueType(String typeName) {
this.typeName = typeName;
}
public ValueType() {
}
public String getTypeName() {
return typeName;
}
}
public static class Value implements Serializable {
String name;
ValueType type;
State state;
int index;
public Value(String name, ValueType type, int index) {
this.name = name;
this.type = type;
this.index = index;
}
public Value(State state, ValueType type, int index) {
this.state = state;
this.type = type;
this.index = index;
}
public Value(String name, int index) {
this.name = name;
this.index = index;
}
public Value(String name) {
this(name, null, 0);
}
public State getState() {
return state;
}
public void setState(State state) {
this.state = state;
}
public String getName() {
return name;
}
public ValueType getType() {
return type;
}
public int getIndex() {
return index;
}
public void setIndex(final int index) {
this.index = index;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Value value = (Value) o;
if (index != value.index) return false;
if (name != null ? !name.equals(value.name) : value.name != null) return false;
if (type != null ? !type.equals(value.type) : value.type != null) return false;
return true;
}
@Override
public int hashCode() {
int result = name != null ? name.hashCode() : 0;
result = 31 * result + (type != null ? type.hashCode() : 0);
result = 31 * result + index;
return result;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("Value");
sb.append("{name=").append(name);
sb.append(", index=").append(index);
sb.append(", type=").append(type);
sb.append('}');
return sb.toString();
}
}
public static enum State {
STATE1, STATE2
}
public static class Employee implements Serializable {
long id;
String name;
String city;
int age;
boolean active;
double salary;
Timestamp date;
Date createDate;
java.sql.Date sqlDate;
State state;
BigDecimal bigDecimal = new BigDecimal("1.23E3");
public Employee(long id, String name, int age, boolean live, double salary, State state) {
this(id, name, age, live, salary);
this.state = state;
}
public Employee(long id, String name, int age, boolean live, double salary) {
this(id, name, null, age, live, salary);
}
public Employee(String name, int age, boolean live, double salary) {
this(-1, name, age, live, salary);
}
public Employee(String name, String city, int age, boolean live, double salary) {
this(-1, name, city, age, live, salary);
}
public Employee(long id, String name, String city, int age, boolean live, double salary) {
this.id = id;
this.name = name;
this.city = city;
this.age = age;
this.active = live;
this.salary = salary;
this.createDate = new Date();
this.date = new Timestamp(createDate.getTime());
this.sqlDate = new java.sql.Date(createDate.getTime());
}
public Employee() {
}
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
public Date getCreateDate() {
return createDate;
}
public void setCreateDate(Date createDate) {
this.createDate = createDate;
}
public java.sql.Date getSqlDate() {
return sqlDate;
}
public void setSqlDate(java.sql.Date sqlDate) {
this.sqlDate = sqlDate;
}
public void setName(String name) {
this.name = name;
}
public void setCity(String city) {
this.city = city;
}
public void setAge(int age) {
this.age = age;
}
public void setActive(boolean active) {
this.active = active;
}
public void setSalary(double salary) {
this.salary = salary;
}
public void setDate(Timestamp date) {
this.date = date;
}
public void setBigDecimal(BigDecimal bigDecimal) {
this.bigDecimal = bigDecimal;
}
public BigDecimal getBigDecimal() {
return bigDecimal;
}
public Timestamp getDate() {
return date;
}
public String getName() {
return name;
}
public String getCity() {
return city;
}
public int getAge() {
return age;
}
public double getSalary() {
return salary;
}
public boolean isActive() {
return active;
}
public State getState() {
return state;
}
public void setState(State state) {
this.state = state;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Employee employee = (Employee) o;
if (active != employee.active) return false;
if (age != employee.age) return false;
if (Double.compare(employee.salary, salary) != 0) return false;
if (name != null ? !name.equals(employee.name) : employee.name != null) return false;
return true;
}
@Override
public int hashCode() {
int result;
long temp;
result = name != null ? name.hashCode() : 0;
result = 31 * result + age;
result = 31 * result + (active ? 1 : 0);
temp = salary != +0.0d ? Double.doubleToLongBits(salary) : 0L;
result = 31 * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("Employee");
sb.append("{name='").append(name).append('\'');
sb.append(", city=").append(city);
sb.append(", age=").append(age);
sb.append(", active=").append(active);
sb.append(", salary=").append(salary);
sb.append('}');
return sb.toString();
}
}
}
| 1no label
|
hazelcast_src_test_java_com_hazelcast_query_SampleObjects.java
|
3,104 |
public final class EngineSearcherTotalHitsMatcher extends TypeSafeMatcher<Engine.Searcher> {
private final Query query;
private final int totalHits;
public EngineSearcherTotalHitsMatcher(Query query, int totalHits) {
this.query = query;
this.totalHits = totalHits;
}
@Override
public boolean matchesSafely(Engine.Searcher searcher) {
try {
long count = Lucene.count(searcher.searcher(), query);
return count == totalHits;
} catch (IOException e) {
return false;
}
}
@Override
public void describeTo(Description description) {
description.appendText("total hits of size ").appendValue(totalHits).appendText(" with query ").appendValue(query);
}
public static Matcher<Engine.Searcher> engineSearcherTotalHits(Query query, int totalHits) {
return new EngineSearcherTotalHitsMatcher(query, totalHits);
}
public static Matcher<Engine.Searcher> engineSearcherTotalHits(int totalHits) {
return new EngineSearcherTotalHitsMatcher(Queries.newMatchAllQuery(), totalHits);
}
}
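/*
 * Added usage sketch (hedged; the searcher is assumed to come from the engine under test):
 * the static factories let the matcher read naturally inside assertThat.
 */
// assertThat(searcher,
// EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("id", "1")), 1));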
| 1no label
|
src_test_java_org_elasticsearch_index_engine_EngineSearcherTotalHitsMatcher.java
|
59 |
public interface FieldDefinition extends Serializable {
public Long getId();
public void setId(Long id);
public String getName();
public void setName(String name);
public SupportedFieldType getFieldType();
public void setFieldType(SupportedFieldType fieldType);
public String getSecurityLevel();
public void setSecurityLevel(String securityLevel);
public Boolean getHiddenFlag();
public void setHiddenFlag(Boolean hiddenFlag);
public String getValidationRegEx();
public void setValidationRegEx(String validationRegEx);
public Integer getMaxLength();
public void setMaxLength(Integer maxLength);
public String getColumnWidth();
public void setColumnWidth(String columnWidth);
public Boolean getTextAreaFlag();
public void setTextAreaFlag(Boolean textAreaFlag);
public FieldEnumeration getFieldEnumeration();
public void setFieldEnumeration(FieldEnumeration fieldEnumeration);
public Boolean getAllowMultiples();
public void setAllowMultiples(Boolean allowMultiples);
public String getFriendlyName();
public void setFriendlyName(String friendlyName);
public String getValidationErrorMesageKey();
public void setValidationErrorMesageKey(String validationErrorMesageKey);
public FieldGroup getFieldGroup();
public void setFieldGroup(FieldGroup fieldGroup);
public int getFieldOrder();
public void setFieldOrder(int fieldOrder);
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_field_domain_FieldDefinition.java
|
215 |
public class ClientInSelectorImpl extends ClientAbstractIOSelector {
public ClientInSelectorImpl(ThreadGroup threadGroup) {
super(threadGroup, "InSelector");
}
protected void handleSelectionKey(SelectionKey sk) {
if (sk.isValid() && sk.isReadable()) {
final SelectionHandler handler = (SelectionHandler) sk.attachment();
handler.handle();
}
}
}
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_connection_nio_ClientInSelectorImpl.java
|
609 |
public class OIndexManagerShared extends OIndexManagerAbstract implements OIndexManager {
private static final boolean useSBTree = OGlobalConfiguration.INDEX_USE_SBTREE_BY_DEFAULT.getValueAsBoolean();
private static final long serialVersionUID = 1L;
protected volatile Thread recreateIndexesThread = null;
private volatile boolean rebuildCompleted = false;
public OIndexManagerShared(final ODatabaseRecord iDatabase) {
super(iDatabase);
}
public OIndex<?> getIndexInternal(final String name) {
acquireSharedLock();
try {
return indexes.get(name.toLowerCase());
} finally {
releaseSharedLock();
}
}
/**
* Creates a new index with the given name and type over the specified clusters.
*
* @param iName
*          name of the index
* @param iType
*          index type, one of {@link OClass.INDEX_TYPE}
* @param indexDefinition
*          definition of the indexed fields, or null for a manual index
* @param clusterIdsToIndex
*          ids of the clusters whose records will be indexed
* @param iProgressListener
*          optional progress listener; a default one is assigned when null
*/
public OIndex<?> createIndex(final String iName, final String iType, final OIndexDefinition indexDefinition,
final int[] clusterIdsToIndex, OProgressListener iProgressListener) {
if (getDatabase().getTransaction().isActive())
throw new IllegalStateException("Cannot create a new index inside a transaction");
final Character c = OSchemaShared.checkNameIfValid(iName);
if (c != null)
throw new IllegalArgumentException("Invalid index name '" + iName + "'. Character '" + c + "' is invalid");
ODatabase database = getDatabase();
OStorage storage = database.getStorage();
final String algorithm;
if ((storage.getType().equals(OEngineLocal.NAME) || storage.getType().equals(OEngineLocalPaginated.NAME)) && useSBTree)
algorithm = ODefaultIndexFactory.SBTREE_ALGORITHM;
else
algorithm = ODefaultIndexFactory.MVRBTREE_ALGORITHM;
final String valueContainerAlgorithm;
if (OClass.INDEX_TYPE.NOTUNIQUE.toString().equals(iType) || OClass.INDEX_TYPE.NOTUNIQUE_HASH_INDEX.toString().equals(iType)
|| OClass.INDEX_TYPE.FULLTEXT_HASH_INDEX.toString().equals(iType) || OClass.INDEX_TYPE.FULLTEXT.toString().equals(iType)) {
if ((storage.getType().equals(OEngineLocalPaginated.NAME) || storage.getType().equals(OEngineLocal.NAME))
&& OGlobalConfiguration.INDEX_NOTUNIQUE_USE_SBTREE_CONTAINER_BY_DEFAULT.getValueAsBoolean()) {
valueContainerAlgorithm = ODefaultIndexFactory.SBTREEBONSAI_VALUE_CONTAINER;
} else {
valueContainerAlgorithm = ODefaultIndexFactory.MVRBTREE_VALUE_CONTAINER;
}
} else {
valueContainerAlgorithm = ODefaultIndexFactory.NONE_VALUE_CONTAINER;
}
acquireExclusiveLock();
try {
final OIndexInternal<?> index = OIndexes.createIndex(getDatabase(), iType, algorithm, valueContainerAlgorithm);
// decide which cluster to use ("index" - for automatic and "manindex" for manual)
final String clusterName = indexDefinition != null && indexDefinition.getClassName() != null ? defaultClusterName
: manualClusterName;
if (iProgressListener == null)
// ASSIGN DEFAULT PROGRESS LISTENER
iProgressListener = new OIndexRebuildOutputListener(index);
Set<String> clustersToIndex = new HashSet<String>();
if (clusterIdsToIndex != null) {
for (int clusterId : clusterIdsToIndex) {
final String clusterNameToIndex = database.getClusterNameById(clusterId);
if (clusterNameToIndex == null)
throw new OIndexException("Cluster with id " + clusterId + " does not exist.");
clustersToIndex.add(clusterNameToIndex);
}
}
index.create(iName, indexDefinition, clusterName, clustersToIndex, true, iProgressListener);
addIndexInternal(index);
setDirty();
save();
return index;
} finally {
releaseExclusiveLock();
}
}
public OIndexManager dropIndex(final String iIndexName) {
if (getDatabase().getTransaction().isActive())
throw new IllegalStateException("Cannot drop an index inside a transaction");
acquireExclusiveLock();
try {
final OIndex<?> idx = indexes.remove(iIndexName.toLowerCase());
if (idx != null) {
removeClassPropertyIndex(idx);
getDatabase().unregisterListener(idx.getInternal());
idx.delete();
setDirty();
save();
}
return this;
} finally {
releaseExclusiveLock();
}
}
private void removeClassPropertyIndex(final OIndex<?> idx) {
final OIndexDefinition indexDefinition = idx.getDefinition();
if (indexDefinition == null || indexDefinition.getClassName() == null)
return;
final Map<OMultiKey, Set<OIndex<?>>> map = classPropertyIndex.get(indexDefinition.getClassName().toLowerCase());
if (map == null) {
return;
}
final int paramCount = indexDefinition.getParamCount();
for (int i = 1; i <= paramCount; i++) {
final List<String> fields = normalizeFieldNames(indexDefinition.getFields().subList(0, i));
final OMultiKey multiKey = new OMultiKey(fields);
final Set<OIndex<?>> indexSet = map.get(multiKey);
if (indexSet == null)
continue;
indexSet.remove(idx);
if (indexSet.isEmpty()) {
map.remove(multiKey);
}
}
if (map.isEmpty())
classPropertyIndex.remove(indexDefinition.getClassName().toLowerCase());
}
@Override
protected void fromStream() {
acquireExclusiveLock();
try {
final Map<String, OIndex<?>> oldIndexes = new HashMap<String, OIndex<?>>(indexes);
clearMetadata();
final Collection<ODocument> idxs = document.field(CONFIG_INDEXES);
if (idxs != null) {
OIndexInternal<?> index;
boolean configUpdated = false;
Iterator<ODocument> indexConfigurationIterator = idxs.iterator();
while (indexConfigurationIterator.hasNext()) {
final ODocument d = indexConfigurationIterator.next();
try {
index = OIndexes.createIndex(getDatabase(), (String) d.field(OIndexInternal.CONFIG_TYPE),
(String) d.field(OIndexInternal.ALGORITHM), d.<String> field(OIndexInternal.VALUE_CONTAINER_ALGORITHM));
OIndexInternal.IndexMetadata newIndexMetadata = index.loadMetadata(d);
final String normalizedName = newIndexMetadata.getName().toLowerCase();
OIndex<?> oldIndex = oldIndexes.get(normalizedName);
if (oldIndex != null) {
OIndexInternal.IndexMetadata oldIndexMetadata = oldIndex.getInternal().loadMetadata(oldIndex.getConfiguration());
if (oldIndexMetadata.equals(newIndexMetadata)) {
addIndexInternal(oldIndex.getInternal());
oldIndexes.remove(normalizedName);
} else if (newIndexMetadata.getIndexDefinition() == null
&& d.field(OIndexAbstract.CONFIG_MAP_RID)
.equals(oldIndex.getConfiguration().field(OIndexAbstract.CONFIG_MAP_RID))) {
// index is manual and index definition was just detected
addIndexInternal(oldIndex.getInternal());
oldIndexes.remove(normalizedName);
}
} else {
if (((OIndexInternal<?>) index).loadFromConfiguration(d)) {
addIndexInternal(index);
} else {
indexConfigurationIterator.remove();
configUpdated = true;
}
}
} catch (Exception e) {
indexConfigurationIterator.remove();
configUpdated = true;
OLogManager.instance().error(this, "Error on loading index by configuration: %s", e, d);
}
}
for (OIndex<?> oldIndex : oldIndexes.values())
try {
OLogManager.instance().warn(this, "Index %s was not found after reload and will be removed", oldIndex.getName());
getDatabase().unregisterListener(oldIndex.getInternal());
oldIndex.delete();
} catch (Exception e) {
OLogManager.instance().error(this, "Error on deletion of index %s", e, oldIndex.getName());
}
if (configUpdated) {
document.field(CONFIG_INDEXES, idxs);
save();
}
}
} finally {
releaseExclusiveLock();
}
}
/**
* Binds POJO to ODocument.
*/
@Override
public ODocument toStream() {
acquireExclusiveLock();
try {
document.setInternalStatus(ORecordElement.STATUS.UNMARSHALLING);
try {
final ORecordTrackedSet idxs = new ORecordTrackedSet(document);
for (final OIndex<?> i : indexes.values()) {
idxs.add(((OIndexInternal<?>) i).updateConfiguration());
}
document.field(CONFIG_INDEXES, idxs, OType.EMBEDDEDSET);
} finally {
document.setInternalStatus(ORecordElement.STATUS.LOADED);
}
document.setDirty();
return document;
} finally {
releaseExclusiveLock();
}
}
@Override
public void recreateIndexes() {
acquireExclusiveLock();
try {
if (recreateIndexesThread != null && recreateIndexesThread.isAlive())
// BUILDING ALREADY IN PROGRESS
return;
final ODatabaseRecord db = getDatabase();
document = db.load(new ORecordId(getDatabase().getStorage().getConfiguration().indexMgrRecordId));
final ODocument doc = new ODocument();
document.copyTo(doc);
// USE A NEW DB INSTANCE
final ODatabaseDocumentTx newDb = new ODatabaseDocumentTx(db.getURL());
Runnable recreateIndexesTask = new Runnable() {
@Override
public void run() {
try {
// START IT IN BACKGROUND
newDb.setProperty(ODatabase.OPTIONS.SECURITY.toString(), Boolean.FALSE);
newDb.open("admin", "nopass");
ODatabaseRecordThreadLocal.INSTANCE.set(newDb);
try {
// DROP AND RE-CREATE 'INDEX' DATA-SEGMENT AND CLUSTER IF ANY
final int dataId = newDb.getStorage().getDataSegmentIdByName(OMetadataDefault.DATASEGMENT_INDEX_NAME);
if (dataId > -1)
newDb.getStorage().dropDataSegment(OMetadataDefault.DATASEGMENT_INDEX_NAME);
final int clusterId = newDb.getStorage().getClusterIdByName(OMetadataDefault.CLUSTER_INDEX_NAME);
if (clusterId > -1)
newDb.dropCluster(clusterId, false);
newDb.addDataSegment(OMetadataDefault.DATASEGMENT_INDEX_NAME, null);
newDb.getStorage().addCluster(OClusterLocal.TYPE, OMetadataDefault.CLUSTER_INDEX_NAME, null,
OMetadataDefault.DATASEGMENT_INDEX_NAME, true);
} catch (IllegalArgumentException ex) {
// OLD DATABASE: CREATE A SEPARATE DATASEGMENT AND LET THE INDEX CLUSTER POINT TO IT
OLogManager.instance().info(this, "Creating 'index' data-segment to store all the index content...");
newDb.addDataSegment(OMetadataDefault.DATASEGMENT_INDEX_NAME, null);
final OCluster indexCluster = newDb.getStorage().getClusterById(
newDb.getStorage().getClusterIdByName(OMetadataDefault.CLUSTER_INDEX_NAME));
try {
indexCluster.set(ATTRIBUTES.DATASEGMENT, OMetadataDefault.DATASEGMENT_INDEX_NAME);
OLogManager.instance().info(this,
"Data-segment 'index' create correctly. Indexes will store content into this data-segment");
} catch (IOException e) {
OLogManager.instance().error(this, "Error changing data segment for cluster 'index'", e);
}
}
final Collection<ODocument> idxs = doc.field(CONFIG_INDEXES);
if (idxs == null) {
OLogManager.instance().warn(this, "List of indexes is empty.");
return;
}
int ok = 0;
int errors = 0;
for (ODocument idx : idxs) {
try {
String indexType = idx.field(OIndexInternal.CONFIG_TYPE);
String algorithm = idx.field(OIndexInternal.ALGORITHM);
String valueContainerAlgorithm = idx.field(OIndexInternal.VALUE_CONTAINER_ALGORITHM);
if (indexType == null) {
OLogManager.instance().error(this, "Index type is null, will process other record.");
errors++;
continue;
}
final OIndexInternal<?> index = OIndexes.createIndex(newDb, indexType, algorithm, valueContainerAlgorithm);
OIndexInternal.IndexMetadata indexMetadata = index.loadMetadata(idx);
OIndexDefinition indexDefinition = indexMetadata.getIndexDefinition();
if (indexDefinition == null || !indexDefinition.isAutomatic()) {
OLogManager.instance().info(this, "Index %s is not automatic index and will be added as is.",
indexMetadata.getName());
if (index.loadFromConfiguration(idx)) {
addIndexInternal(index);
setDirty();
save();
ok++;
} else {
getDatabase().unregisterListener(index.getInternal());
index.delete();
errors++;
}
OLogManager.instance().info(this, "Index %s was added in DB index list.", index.getName());
} else {
String indexName = indexMetadata.getName();
Set<String> clusters = indexMetadata.getClustersToIndex();
String type = indexMetadata.getType();
if (indexName != null && indexDefinition != null && clusters != null && !clusters.isEmpty() && type != null) {
OLogManager.instance().info(this, "Start creation of index %s", indexName);
if (algorithm.equals(ODefaultIndexFactory.SBTREE_ALGORITHM) || indexType.endsWith("HASH_INDEX"))
index.deleteWithoutIndexLoad(indexName);
index.create(indexName, indexDefinition, defaultClusterName, clusters, false, new OIndexRebuildOutputListener(
index));
index.setRebuildingFlag();
addIndexInternal(index);
OLogManager.instance().info(this, "Index %s was successfully created and rebuild is going to be started.",
indexName);
index.rebuild(new OIndexRebuildOutputListener(index));
index.flush();
setDirty();
save();
ok++;
OLogManager.instance().info(this, "Rebuild of %s index was successfully finished.", indexName);
} else {
errors++;
OLogManager.instance().error(
this,
"Index metadata was restored incorrectly; the following data was loaded: "
+ "index name - %s, index definition %s, clusters %s, type %s.", indexName, indexDefinition, clusters,
type);
}
}
} catch (Exception e) {
OLogManager.instance().error(this, "Error during addition of index %s", e, idx);
errors++;
}
}
rebuildCompleted = true;
newDb.close();
OLogManager.instance().info(this, "%d indexes were restored successfully, %d errors", ok, errors);
} catch (Exception e) {
OLogManager.instance().error(this, "Error when attempt to restore indexes after crash was performed.", e);
}
}
};
recreateIndexesThread = new Thread(recreateIndexesTask);
recreateIndexesThread.start();
} finally {
releaseExclusiveLock();
}
if (OGlobalConfiguration.INDEX_SYNCHRONOUS_AUTO_REBUILD.getValueAsBoolean())
waitTillIndexRestore();
}
@Override
public void waitTillIndexRestore() {
if (recreateIndexesThread != null && recreateIndexesThread.isAlive()) {
if (Thread.currentThread().equals(recreateIndexesThread))
return;
OLogManager.instance().info(this, "Wait till indexes restore after crash was finished.");
while (recreateIndexesThread.isAlive())
try {
recreateIndexesThread.join();
OLogManager.instance().info(this, "Indexes restore after crash was finished.");
} catch (InterruptedException e) {
OLogManager.instance().info(this, "Index rebuild task was interrupted.");
}
}
}
public boolean autoRecreateIndexesAfterCrash() {
if (rebuildCompleted)
return false;
final ODatabaseRecord database = ODatabaseRecordThreadLocal.INSTANCE.get();
if (!OGlobalConfiguration.INDEX_AUTO_REBUILD_AFTER_NOTSOFTCLOSE.getValueAsBoolean())
return false;
OStorage storage = database.getStorage().getUnderlying();
if (storage instanceof OStorageLocal)
return !((OStorageLocal) storage).wasClusterSoftlyClosed(OMetadataDefault.CLUSTER_INDEX_NAME);
else if (storage instanceof OLocalPaginatedStorage) {
return ((OLocalPaginatedStorage) storage).wereDataRestoredAfterOpen();
}
return false;
}
}
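/*
 * Added usage sketch (illustrative; the definition and cluster id are assumptions): creates
 * a unique index outside of a transaction, as required by the guard in createIndex().
 */
class OIndexManagerSharedSketch {
static void demo(OIndexManagerShared manager, OIndexDefinition definition, int clusterId) {
manager.createIndex("Person.name", OClass.INDEX_TYPE.UNIQUE.toString(), definition,
new int[] { clusterId }, null); // null listener -> default OIndexRebuildOutputListener
}
}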
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_index_OIndexManagerShared.java
|
416 |
static final class Fields {
static final XContentBuilderString SNAPSHOTS = new XContentBuilderString("snapshots");
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_snapshots_get_GetSnapshotsResponse.java
|
444 |
public static class OsStats implements ToXContent, Streamable {
int availableProcessors;
long availableMemory;
ObjectIntOpenHashMap<OsInfo.Cpu> cpus;
public OsStats() {
cpus = new ObjectIntOpenHashMap<org.elasticsearch.monitor.os.OsInfo.Cpu>();
}
public void addNodeInfo(NodeInfo nodeInfo) {
if (nodeInfo.getOs() == null) {
return;
}
availableProcessors += nodeInfo.getOs().availableProcessors();
if (nodeInfo.getOs().cpu() != null) {
cpus.addTo(nodeInfo.getOs().cpu(), 1);
}
if (nodeInfo.getOs().getMem() != null && nodeInfo.getOs().getMem().getTotal().bytes() != -1) {
availableMemory += nodeInfo.getOs().getMem().getTotal().bytes();
}
}
public int getAvailableProcessors() {
return availableProcessors;
}
public ByteSizeValue getAvailableMemory() {
return new ByteSizeValue(availableMemory);
}
public ObjectIntOpenHashMap<OsInfo.Cpu> getCpus() {
return cpus;
}
@Override
public void readFrom(StreamInput in) throws IOException {
availableProcessors = in.readVInt();
availableMemory = in.readLong();
int size = in.readVInt();
cpus = new ObjectIntOpenHashMap<OsInfo.Cpu>(size);
for (; size > 0; size--) {
cpus.addTo(OsInfo.Cpu.readCpu(in), in.readVInt());
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(availableProcessors);
out.writeLong(availableMemory);
out.writeVInt(cpus.size());
for (ObjectIntCursor<OsInfo.Cpu> c : cpus) {
c.key.writeTo(out);
out.writeVInt(c.value);
}
}
public static OsStats readOsStats(StreamInput in) throws IOException {
OsStats os = new OsStats();
os.readFrom(in);
return os;
}
static final class Fields {
static final XContentBuilderString AVAILABLE_PROCESSORS = new XContentBuilderString("available_processors");
static final XContentBuilderString MEM = new XContentBuilderString("mem");
static final XContentBuilderString TOTAL = new XContentBuilderString("total");
static final XContentBuilderString TOTAL_IN_BYTES = new XContentBuilderString("total_in_bytes");
static final XContentBuilderString CPU = new XContentBuilderString("cpu");
static final XContentBuilderString COUNT = new XContentBuilderString("count");
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.AVAILABLE_PROCESSORS, availableProcessors);
builder.startObject(Fields.MEM);
builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, availableMemory);
builder.endObject();
builder.startArray(Fields.CPU);
for (ObjectIntCursor<OsInfo.Cpu> cpu : cpus) {
builder.startObject();
cpu.key.toXContent(builder, params);
builder.field(Fields.COUNT, cpu.value);
builder.endObject();
}
builder.endArray();
return builder;
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_stats_ClusterStatsNodes.java
|
5,670 |
public class TermsFacetParser extends AbstractComponent implements FacetParser {
private final int ordinalsCacheAbove;
@Inject
public TermsFacetParser(Settings settings) {
super(settings);
InternalTermsFacet.registerStreams();
this.ordinalsCacheAbove = componentSettings.getAsInt("ordinals_cache_above", 10000); // above 10k we want to cache
}
@Override
public String[] types() {
return new String[]{TermsFacet.TYPE};
}
@Override
public FacetExecutor.Mode defaultMainMode() {
return FacetExecutor.Mode.COLLECTOR;
}
@Override
public FacetExecutor.Mode defaultGlobalMode() {
return FacetExecutor.Mode.COLLECTOR;
}
@Override
public FacetExecutor parse(String facetName, XContentParser parser, SearchContext context) throws IOException {
String field = null;
int size = 10;
int shardSize = -1;
String[] fieldsNames = null;
ImmutableSet<BytesRef> excluded = ImmutableSet.of();
String regex = null;
String regexFlags = null;
TermsFacet.ComparatorType comparatorType = TermsFacet.ComparatorType.COUNT;
String scriptLang = null;
String script = null;
Map<String, Object> params = null;
boolean allTerms = false;
String executionHint = null;
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("params".equals(currentFieldName)) {
params = parser.map();
} else {
throw new ElasticsearchParseException("unknown parameter [" + currentFieldName + "] while parsing terms facet [" + facetName + "]");
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("exclude".equals(currentFieldName)) {
ImmutableSet.Builder<BytesRef> builder = ImmutableSet.builder();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
builder.add(parser.bytes());
}
excluded = builder.build();
} else if ("fields".equals(currentFieldName)) {
List<String> fields = Lists.newArrayListWithCapacity(4);
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
fields.add(parser.text());
}
fieldsNames = fields.toArray(new String[fields.size()]);
} else {
throw new ElasticsearchParseException("unknown parameter [" + currentFieldName + "] while parsing terms facet [" + facetName + "]");
}
} else if (token.isValue()) {
if ("field".equals(currentFieldName)) {
field = parser.text();
} else if ("script_field".equals(currentFieldName) || "scriptField".equals(currentFieldName)) {
script = parser.text();
} else if ("size".equals(currentFieldName)) {
size = parser.intValue();
} else if ("shard_size".equals(currentFieldName) || "shardSize".equals(currentFieldName)) {
shardSize = parser.intValue();
} else if ("all_terms".equals(currentFieldName) || "allTerms".equals(currentFieldName)) {
allTerms = parser.booleanValue();
} else if ("regex".equals(currentFieldName)) {
regex = parser.text();
} else if ("regex_flags".equals(currentFieldName) || "regexFlags".equals(currentFieldName)) {
regexFlags = parser.text();
} else if ("order".equals(currentFieldName) || "comparator".equals(currentFieldName)) {
comparatorType = TermsFacet.ComparatorType.fromString(parser.text());
} else if ("script".equals(currentFieldName)) {
script = parser.text();
} else if ("lang".equals(currentFieldName)) {
scriptLang = parser.text();
} else if ("execution_hint".equals(currentFieldName) || "executionHint".equals(currentFieldName)) {
executionHint = parser.textOrNull();
} else {
throw new ElasticsearchParseException("unknown parameter [" + currentFieldName + "] while parsing terms facet [" + facetName + "]");
}
}
}
if ("_index".equals(field)) {
return new IndexNameFacetExecutor(context.shardTarget().index(), comparatorType, size);
}
if (fieldsNames != null && fieldsNames.length == 1) {
field = fieldsNames[0];
fieldsNames = null;
}
Pattern pattern = null;
if (regex != null) {
pattern = Regex.compile(regex, regexFlags);
}
SearchScript searchScript = null;
if (script != null) {
searchScript = context.scriptService().search(context.lookup(), scriptLang, script, params);
}
// shard_size cannot be smaller than size as we need to fetch at least <size> entries from every shard in order to return <size>
if (shardSize < size) {
shardSize = size;
}
if (fieldsNames != null) {
// in case of multiple fields, we only collect the fields that are mapped and facet on them.
ArrayList<FieldMapper> mappers = new ArrayList<FieldMapper>(fieldsNames.length);
for (int i = 0; i < fieldsNames.length; i++) {
FieldMapper mapper = context.smartNameFieldMapper(fieldsNames[i]);
if (mapper != null) {
mappers.add(mapper);
}
}
if (mappers.isEmpty()) {
// none of the fields is mapped
return new UnmappedFieldExecutor(size, comparatorType);
}
return new FieldsTermsStringFacetExecutor(mappers.toArray(new FieldMapper[mappers.size()]), size, shardSize, comparatorType, allTerms, context, excluded, pattern, searchScript);
}
if (field == null && script != null) {
return new ScriptTermsStringFieldFacetExecutor(size, shardSize, comparatorType, context, excluded, pattern, scriptLang, script, params, context.cacheRecycler());
}
if (field == null) {
throw new ElasticsearchParseException("terms facet [" + facetName + "] must have a field, fields or script parameter");
}
FieldMapper fieldMapper = context.smartNameFieldMapper(field);
if (fieldMapper == null) {
return new UnmappedFieldExecutor(size, comparatorType);
}
IndexFieldData indexFieldData = context.fieldData().getForField(fieldMapper);
if (indexFieldData instanceof IndexNumericFieldData) {
IndexNumericFieldData indexNumericFieldData = (IndexNumericFieldData) indexFieldData;
if (indexNumericFieldData.getNumericType().isFloatingPoint()) {
return new TermsDoubleFacetExecutor(indexNumericFieldData, size, shardSize, comparatorType, allTerms, context, excluded, searchScript, context.cacheRecycler());
} else {
return new TermsLongFacetExecutor(indexNumericFieldData, size, shardSize, comparatorType, allTerms, context, excluded, searchScript, context.cacheRecycler());
}
} else {
if (script != null || "map".equals(executionHint)) {
return new TermsStringFacetExecutor(indexFieldData, size, shardSize, comparatorType, allTerms, context, excluded, pattern, searchScript);
} else if (indexFieldData instanceof IndexFieldData.WithOrdinals) {
return new TermsStringOrdinalsFacetExecutor((IndexFieldData.WithOrdinals) indexFieldData, size, shardSize, comparatorType, allTerms, context, excluded, pattern, ordinalsCacheAbove);
} else {
return new TermsStringFacetExecutor(indexFieldData, size, shardSize, comparatorType, allTerms, context, excluded, pattern, searchScript);
}
}
}
}
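/*
 * Added reference sketch (reconstructed from the parameter names handled above; verify
 * against the documentation of the Elasticsearch version in use): a request-body fragment
 * this parser accepts for a "terms" facet.
 */
class TermsFacetRequestSketch {
static final String EXAMPLE =
"{ \"terms\": { \"field\": \"tag\", \"size\": 10, \"shard_size\": 50, "
+ "\"order\": \"count\", \"exclude\": [\"debug\"] } }";
}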
| 1no label
|
src_main_java_org_elasticsearch_search_facet_terms_TermsFacetParser.java
|
2,544 |
public final class Address implements IdentifiedDataSerializable {
public static final int ID = 1;
private static final byte IPV4 = 4;
private static final byte IPV6 = 6;
private int port = -1;
private String host;
private byte type;
private String scopeId;
private boolean hostSet;
public Address() {
}
public Address(String host, int port) throws UnknownHostException {
this(host, InetAddress.getByName(host), port);
}
public Address(InetAddress inetAddress, int port) {
this(null, inetAddress, port);
hostSet = false;
}
public Address(InetSocketAddress inetSocketAddress) {
this(inetSocketAddress.getAddress(), inetSocketAddress.getPort());
}
public Address(String hostname, InetAddress inetAddress, int port) {
type = (inetAddress instanceof Inet4Address) ? IPV4 : IPV6;
String[] addressArgs = inetAddress.getHostAddress().split("\\%");
host = hostname != null ? hostname : addressArgs[0];
if (addressArgs.length == 2) {
scopeId = addressArgs[1];
}
this.port = port;
hostSet = !AddressUtil.isIpAddress(host);
}
public Address(Address address) {
this.host = address.host;
this.port = address.port;
this.type = address.type;
this.scopeId = address.scopeId;
this.hostSet = address.hostSet;
}
public void writeData(ObjectDataOutput out) throws IOException {
out.writeInt(port);
out.write(type);
if (host != null) {
byte[] address = stringToBytes(host);
out.writeInt(address.length);
out.write(address);
} else {
out.writeInt(0);
}
}
public void readData(ObjectDataInput in) throws IOException {
port = in.readInt();
type = in.readByte();
int len = in.readInt();
if (len > 0) {
byte[] address = new byte[len];
in.readFully(address);
host = bytesToString(address);
}
}
public String getHost() {
return host;
}
@Override
public String toString() {
return "Address[" + getHost() + "]:" + port;
}
public int getPort() {
return port;
}
public InetAddress getInetAddress() throws UnknownHostException {
return InetAddress.getByName(getScopedHost());
}
public InetSocketAddress getInetSocketAddress() throws UnknownHostException {
return new InetSocketAddress(getInetAddress(), port);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Address)) {
return false;
}
final Address address = (Address) o;
return port == address.port && this.type == address.type && this.host.equals(address.host);
}
@Override
public int hashCode() {
int result = port;
result = 31 * result + host.hashCode();
return result;
}
public boolean isIPv4() {
return type == IPV4;
}
public boolean isIPv6() {
return type == IPV6;
}
public String getScopeId() {
return isIPv6() ? scopeId : null;
}
public void setScopeId(final String scopeId) {
if (isIPv6()) {
this.scopeId = scopeId;
}
}
public String getScopedHost() {
return (isIPv4() || hostSet || scopeId == null) ? getHost()
: getHost() + "%" + scopeId;
}
@Override
public int getFactoryId() {
return Data.FACTORY_ID;
}
@Override
public int getId() {
return ID;
}
}
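/*
 * Added usage sketch (hedged; "localhost" and the port are assumptions): Address values
 * compare by host, port and type, so they are safe to use as hash-map keys.
 */
class AddressSketch {
static void demo() throws java.net.UnknownHostException {
Address a = new Address("localhost", 5701);
System.out.println(a.getScopedHost() + ":" + a.getPort()); // IPv6 scope id appended when present
}
}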
| 1no label
|
hazelcast_src_main_java_com_hazelcast_nio_Address.java
|
667 |
public class ValidateQueryResponse extends BroadcastOperationResponse {
private boolean valid;
private List<QueryExplanation> queryExplanations;
ValidateQueryResponse() {
}
ValidateQueryResponse(boolean valid, List<QueryExplanation> queryExplanations, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
super(totalShards, successfulShards, failedShards, shardFailures);
this.valid = valid;
this.queryExplanations = queryExplanations;
if (queryExplanations == null) {
this.queryExplanations = ImmutableList.of();
}
}
/**
* A boolean denoting whether the query is valid.
*/
public boolean isValid() {
return valid;
}
/**
* The list of query explanations.
*/
public List<? extends QueryExplanation> getQueryExplanation() {
if (queryExplanations == null) {
return ImmutableList.of();
}
return queryExplanations;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
valid = in.readBoolean();
int size = in.readVInt();
if (size > 0) {
queryExplanations = new ArrayList<QueryExplanation>(size);
for (int i = 0; i < size; i++) {
queryExplanations.add(readQueryExplanation(in));
}
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(valid);
out.writeVInt(queryExplanations == null ? 0 : queryExplanations.size());
if (queryExplanations != null) {
for (QueryExplanation exp : queryExplanations) {
exp.writeTo(out);
}
}
}
}
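/*
 * Added usage sketch (illustrative; the printing is an assumption): consumers typically
 * check isValid() and fall back to the per-shard explanations when validation failed.
 */
class ValidateQueryResponseSketch {
static void report(ValidateQueryResponse response) {
if (!response.isValid()) {
for (QueryExplanation explanation : response.getQueryExplanation()) {
System.out.println(explanation); // QueryExplanation details are defined elsewhere
}
}
}
}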
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_validate_query_ValidateQueryResponse.java
|
131 |
assertTrueEventually(new AssertTask() {
@Override
public void run() throws Exception {
assertEquals(1, clientService.getConnectedClients().size());
}
}, 4);
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_ClientServiceTest.java
|
29 |
@Service("blFulfillmentGroupFieldService")
public class FulfillmentGroupFieldServiceImpl extends AbstractRuleBuilderFieldService {
@Override
public void init() {
fields.add(new FieldData.Builder()
.label("rule_fulfillmentGroupFirstName")
.name("address.firstName")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_fulfillmentGroupLastName")
.name("address.lastName")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_fulfillmentGroupAddresLine1")
.name("address.addressLine1")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_fulfillmentGroupAddressLine2")
.name("address.addressLine2")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_fulfillmentGroupCity")
.name("address.city")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_fulfillmentGroupCounty")
.name("address.county")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_fulfillmentGroupState")
.name("address.state.name")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_fulfillmentGroupPostalCode")
.name("address.postalCode")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_fulfillmentGroupCountry")
.name("address.country.name")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_fulfillmentGroupPrimaryPhone")
.name("address.phonePrimary.phoneNumber")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_fulfillmentGroupSecondaryPhone")
.name("address.phoneSecondary.phoneNumber")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_fulfillmentGroupFax")
.name("address.phoneFax.phoneNumber")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_fulfillmentGroupTotal")
.name("total")
.operators("blcOperators_Numeric")
.options("[]")
.type(SupportedFieldType.MONEY)
.build());
fields.add(new FieldData.Builder()
.label("rule_fulfillmentGroupPrice")
.name("fulfillmentPrice")
.operators("blcOperators_Numeric")
.options("[]")
.type(SupportedFieldType.MONEY)
.build());
fields.add(new FieldData.Builder()
.label("rule_fulfillmentGroupRetailPrice")
.name("retailFulfillmentPrice")
.operators("blcOperators_Numeric")
.options("[]")
.type(SupportedFieldType.MONEY)
.build());
fields.add(new FieldData.Builder()
.label("rule_fulfillmentGroupSalePrice")
.name("saleFulfillmentPrice")
.operators("blcOperators_Numeric")
.options("[]")
.type(SupportedFieldType.MONEY)
.build());
fields.add(new FieldData.Builder()
.label("rule_fulfillmentGroupType")
.name("type")
.operators("blcOperators_Enumeration")
.options("blcOptions_FulfillmentType")
.type(SupportedFieldType.BROADLEAF_ENUMERATION)
.build());
fields.add(new FieldData.Builder()
.label("rule_fulfillmentGroupMerchandiseTotal")
.name("merchandiseTotal")
.operators("blcOperators_Numeric")
.options("[]")
.type(SupportedFieldType.MONEY)
.build());
fields.add(new FieldData.Builder()
.label("rule_fulfillmentGroupFulfillmentOption")
.name("fulfillmentOption.name")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
}
@Override
public String getName() {
return RuleIdentifier.FULFILLMENTGROUP;
}
@Override
public String getDtoClassName() {
return "org.broadleafcommerce.core.order.domain.FulfillmentGroupImpl";
}
}
| 0true
|
admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_web_rulebuilder_service_FulfillmentGroupFieldServiceImpl.java
|
777 |
public class StandardTitanGraph extends TitanBlueprintsGraph {
private static final Logger log =
LoggerFactory.getLogger(StandardTitanGraph.class);
private final GraphDatabaseConfiguration config;
private final Backend backend;
private final IDManager idManager;
private final VertexIDAssigner idAssigner;
private final TimestampProvider times;
//Serializers
protected final IndexSerializer indexSerializer;
protected final EdgeSerializer edgeSerializer;
protected final Serializer serializer;
//Caches
public final SliceQuery vertexExistenceQuery;
private final RelationQueryCache queryCache;
private final SchemaCache schemaCache;
//Log
private final ManagementLogger mgmtLogger;
//Shutdown hook
private final ShutdownThread shutdownHook;
private volatile boolean isOpen = true;
private AtomicLong txCounter;
private Set<StandardTitanTx> openTransactions;
public StandardTitanGraph(GraphDatabaseConfiguration configuration) {
this.config = configuration;
this.backend = configuration.getBackend();
this.idAssigner = config.getIDAssigner(backend);
this.idManager = idAssigner.getIDManager();
this.serializer = config.getSerializer();
StoreFeatures storeFeatures = backend.getStoreFeatures();
this.indexSerializer = new IndexSerializer(configuration.getConfiguration(), this.serializer,
this.backend.getIndexInformation(),storeFeatures.isDistributed() && storeFeatures.isKeyOrdered());
this.edgeSerializer = new EdgeSerializer(this.serializer);
this.vertexExistenceQuery = edgeSerializer.getQuery(BaseKey.VertexExists, Direction.OUT, new EdgeSerializer.TypedInterval[0]).setLimit(1);
this.queryCache = new RelationQueryCache(this.edgeSerializer);
this.schemaCache = configuration.getTypeCache(typeCacheRetrieval);
this.times = configuration.getTimestampProvider();
isOpen = true;
txCounter = new AtomicLong(0);
openTransactions = Collections.newSetFromMap(new ConcurrentHashMap<StandardTitanTx, Boolean>(100,0.75f,1));
//Register instance and ensure uniqueness
String uniqueInstanceId = configuration.getUniqueGraphId();
ModifiableConfiguration globalConfig = GraphDatabaseConfiguration.getGlobalSystemConfig(backend);
if (globalConfig.has(REGISTRATION_TIME,uniqueInstanceId)) {
throw new TitanException(String.format("A Titan graph with the same instance id [%s] is already open. A forced shutdown may be required.",uniqueInstanceId));
}
globalConfig.set(REGISTRATION_TIME, times.getTime(), uniqueInstanceId);
Log mgmtLog = backend.getSystemMgmtLog();
mgmtLogger = new ManagementLogger(this,mgmtLog,schemaCache,this.times);
mgmtLog.registerReader(ReadMarker.fromNow(),mgmtLogger);
shutdownHook = new ShutdownThread(this);
Runtime.getRuntime().addShutdownHook(shutdownHook);
}
@Override
public boolean isOpen() {
return isOpen;
}
@Override
public boolean isClosed() {
return !isOpen();
}
@Override
public synchronized void shutdown() throws TitanException {
if (!isOpen) return;
try {
//Unregister instance
ModifiableConfiguration globalConfig = GraphDatabaseConfiguration.getGlobalSystemConfig(backend);
globalConfig.remove(REGISTRATION_TIME,config.getUniqueGraphId());
super.shutdown();
idAssigner.close();
backend.close();
queryCache.close();
// Remove shutdown hook to avoid reference retention
Runtime.getRuntime().removeShutdownHook(shutdownHook);
} catch (BackendException e) {
throw new TitanException("Could not close storage backend", e);
} finally {
isOpen = false;
}
}
// ################### Simple Getters #########################
@Override
public Features getFeatures() {
return TitanFeatures.getFeatures(getConfiguration(), backend.getStoreFeatures());
}
public IndexSerializer getIndexSerializer() {
return indexSerializer;
}
public Backend getBackend() {
return backend;
}
public IDInspector getIDInspector() {
return idManager.getIdInspector();
}
public IDManager getIDManager() {
return idManager;
}
public EdgeSerializer getEdgeSerializer() {
return edgeSerializer;
}
public Serializer getDataSerializer() {
return serializer;
}
//TODO: premature optimization, re-evaluate later
// public RelationQueryCache getQueryCache() {
// return queryCache;
// }
public SchemaCache getSchemaCache() {
return schemaCache;
}
public GraphDatabaseConfiguration getConfiguration() {
return config;
}
@Override
public TitanManagement getManagementSystem() {
return new ManagementSystem(this,backend.getGlobalSystemConfig(),backend.getSystemMgmtLog(), mgmtLogger, schemaCache);
}
public Set<? extends TitanTransaction> getOpenTransactions() {
return Sets.newHashSet(openTransactions);
}
// ################### TRANSACTIONS #########################
@Override
public TitanTransaction newTransaction() {
return buildTransaction().start();
}
@Override
public StandardTransactionBuilder buildTransaction() {
return new StandardTransactionBuilder(getConfiguration(), this);
}
@Override
public TitanTransaction newThreadBoundTransaction() {
return buildTransaction().threadBound().start();
}
public StandardTitanTx newTransaction(final TransactionConfiguration configuration) {
if (!isOpen) ExceptionFactory.graphShutdown();
try {
StandardTitanTx tx = new StandardTitanTx(this, configuration);
tx.setBackendTransaction(openBackendTransaction(tx));
openTransactions.add(tx);
return tx;
} catch (BackendException e) {
throw new TitanException("Could not start new transaction", e);
}
}
private BackendTransaction openBackendTransaction(StandardTitanTx tx) throws BackendException {
IndexSerializer.IndexInfoRetriever retriever = indexSerializer.getIndexInfoRetriever(tx);
return backend.beginTransaction(tx.getConfiguration(),retriever);
}
public void closeTransaction(StandardTitanTx tx) {
openTransactions.remove(tx);
}
// ################### READ #########################
private final SchemaCache.StoreRetrieval typeCacheRetrieval = new SchemaCache.StoreRetrieval() {
@Override
public Long retrieveSchemaByName(String typeName, StandardTitanTx tx) {
tx.getTxHandle().disableCache(); //Disable cache to make sure that schema is only cached once and cache eviction works!
TitanVertex v = Iterables.getOnlyElement(tx.getVertices(BaseKey.SchemaName, typeName),null);
tx.getTxHandle().enableCache();
return v!=null?v.getLongId():null;
}
@Override
public EntryList retrieveSchemaRelations(final long schemaId, final BaseRelationType type, final Direction dir, final StandardTitanTx tx) {
SliceQuery query = queryCache.getQuery(type,dir);
tx.getTxHandle().disableCache(); //Disable cache to make sure that schema is only cached once!
EntryList result = edgeQuery(schemaId, query, tx.getTxHandle());
tx.getTxHandle().enableCache();
return result;
}
};
public RecordIterator<Long> getVertexIDs(final BackendTransaction tx) {
Preconditions.checkArgument(backend.getStoreFeatures().hasOrderedScan() ||
backend.getStoreFeatures().hasUnorderedScan(),
"The configured storage backend does not support global graph operations - use Faunus instead");
final KeyIterator keyiter;
if (backend.getStoreFeatures().hasUnorderedScan()) {
keyiter = tx.edgeStoreKeys(vertexExistenceQuery);
} else {
keyiter = tx.edgeStoreKeys(new KeyRangeQuery(IDHandler.MIN_KEY, IDHandler.MAX_KEY, vertexExistenceQuery));
}
return new RecordIterator<Long>() {
@Override
public boolean hasNext() {
return keyiter.hasNext();
}
@Override
public Long next() {
return idManager.getKeyID(keyiter.next());
}
@Override
public void close() throws IOException {
keyiter.close();
}
@Override
public void remove() {
throw new UnsupportedOperationException("Removal not supported");
}
};
}
public EntryList edgeQuery(long vid, SliceQuery query, BackendTransaction tx) {
Preconditions.checkArgument(vid > 0);
return tx.edgeStoreQuery(new KeySliceQuery(idManager.getKey(vid), query));
}
public List<EntryList> edgeMultiQuery(LongArrayList vids, SliceQuery query, BackendTransaction tx) {
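// results are returned in the same order as the supplied vertex ids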
Preconditions.checkArgument(vids != null && !vids.isEmpty());
List<StaticBuffer> vertexIds = new ArrayList<StaticBuffer>(vids.size());
for (int i = 0; i < vids.size(); i++) {
Preconditions.checkArgument(vids.get(i) > 0);
vertexIds.add(idManager.getKey(vids.get(i)));
}
Map<StaticBuffer,EntryList> result = tx.edgeStoreMultiQuery(vertexIds, query);
List<EntryList> resultList = new ArrayList<EntryList>(result.size());
for (StaticBuffer v : vertexIds) resultList.add(result.get(v));
return resultList;
}
// ################### WRITE #########################
public void assignID(InternalRelation relation) {
idAssigner.assignID(relation);
}
public void assignID(InternalVertex vertex, VertexLabel label) {
idAssigner.assignID(vertex,label);
}
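// A lock is needed only when the relation type mandates LOCK consistency and either the
// multiplicity is unique in the given direction, or this is the out-position (pos==0) of a
// SIMPLE edge, where suppressing duplicates requires locking.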
public static boolean acquireLock(InternalRelation relation, int pos, boolean acquireLocksConfig) {
InternalRelationType type = (InternalRelationType)relation.getType();
return acquireLocksConfig && type.getConsistencyModifier()== ConsistencyModifier.LOCK &&
( type.getMultiplicity().isUnique(EdgeDirection.fromPosition(pos))
|| pos==0 && type.getMultiplicity()== Multiplicity.SIMPLE);
}
public static boolean acquireLock(CompositeIndexType index, boolean acquireLocksConfig) {
return acquireLocksConfig && index.getConsistencyModifier()==ConsistencyModifier.LOCK
&& index.getCardinality()!= Cardinality.LIST;
}
/**
* The TTL of a relation (edge or property) is the minimum of:
* 1) The TTL configured for the relation type (if one exists)
* 2) The TTL configured for the vertex label of any of the relation's endpoint vertices (if one exists)
*
* @param rel relation to determine the TTL for
* @return the effective TTL, or 0 if no TTL is configured
*/
public static int getTTL(InternalRelation rel) {
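// illustrative example: a relation type TTL of 86400 and an endpoint vertex label TTL of 3600
// yield an effective TTL of 3600, the smallest positive value among those configured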
assert rel.isNew();
InternalRelationType baseType = (InternalRelationType) rel.getType();
assert baseType.getBaseType()==null;
int ttl = 0;
Integer ettl = baseType.getTTL();
if (ettl>0) ttl = ettl;
for (int i=0;i<rel.getArity();i++) {
int vttl = getTTL(rel.getVertex(i));
if (vttl>0 && (vttl<ttl || ttl<=0)) ttl = vttl;
}
return ttl;
}
public static int getTTL(InternalVertex v) {
assert v.hasId();
if (IDManager.VertexIDType.UnmodifiableVertex.is(v.getLongId())) {
assert v.isNew() : "Should not be able to add relations to existing static vertices: " + v;
return ((InternalVertexLabel)v.getVertexLabel()).getTTL();
} else return 0;
}
private static class ModificationSummary {
final boolean hasModifications;
final boolean has2iModifications;
private ModificationSummary(boolean hasModifications, boolean has2iModifications) {
this.hasModifications = hasModifications;
this.has2iModifications = has2iModifications;
}
}
public ModificationSummary prepareCommit(final Collection<InternalRelation> addedRelations,
final Collection<InternalRelation> deletedRelations,
final Predicate<InternalRelation> filter,
final BackendTransaction mutator, final StandardTitanTx tx,
final boolean acquireLocks) throws BackendException {
ListMultimap<Long, InternalRelation> mutations = ArrayListMultimap.create();
ListMultimap<InternalVertex, InternalRelation> mutatedProperties = ArrayListMultimap.create();
List<IndexSerializer.IndexUpdate> indexUpdates = Lists.newArrayList();
//1) Collect deleted edges and their index updates and acquire edge locks
for (InternalRelation del : Iterables.filter(deletedRelations,filter)) {
Preconditions.checkArgument(del.isRemoved());
for (int pos = 0; pos < del.getLen(); pos++) {
InternalVertex vertex = del.getVertex(pos);
if (pos == 0 || !del.isLoop()) {
if (del.isProperty()) mutatedProperties.put(vertex,del);
mutations.put(vertex.getLongId(), del);
}
if (acquireLock(del,pos,acquireLocks)) {
Entry entry = edgeSerializer.writeRelation(del, pos, tx);
mutator.acquireEdgeLock(idManager.getKey(vertex.getLongId()), entry);
}
}
indexUpdates.addAll(indexSerializer.getIndexUpdates(del));
}
//2) Collect added edges and their index updates and acquire edge locks
for (InternalRelation add : Iterables.filter(addedRelations,filter)) {
Preconditions.checkArgument(add.isNew());
for (int pos = 0; pos < add.getLen(); pos++) {
InternalVertex vertex = add.getVertex(pos);
if (pos == 0 || !add.isLoop()) {
if (add.isProperty()) mutatedProperties.put(vertex,add);
mutations.put(vertex.getLongId(), add);
}
if (!vertex.isNew() && acquireLock(add,pos,acquireLocks)) {
Entry entry = edgeSerializer.writeRelation(add, pos, tx);
mutator.acquireEdgeLock(idManager.getKey(vertex.getLongId()), entry.getColumn());
}
}
indexUpdates.addAll(indexSerializer.getIndexUpdates(add));
}
//3) Collect all index update for vertices
for (InternalVertex v : mutatedProperties.keySet()) {
indexUpdates.addAll(indexSerializer.getIndexUpdates(v,mutatedProperties.get(v)));
}
//4) Acquire index locks (deletions first)
for (IndexSerializer.IndexUpdate update : indexUpdates) {
if (!update.isCompositeIndex() || !update.isDeletion()) continue;
CompositeIndexType iIndex = (CompositeIndexType) update.getIndex();
if (acquireLock(iIndex,acquireLocks)) {
mutator.acquireIndexLock((StaticBuffer)update.getKey(), (Entry)update.getEntry());
}
}
for (IndexSerializer.IndexUpdate update : indexUpdates) {
if (!update.isCompositeIndex() || !update.isAddition()) continue;
CompositeIndexType iIndex = (CompositeIndexType) update.getIndex();
if (acquireLock(iIndex,acquireLocks)) {
mutator.acquireIndexLock((StaticBuffer)update.getKey(), ((Entry)update.getEntry()).getColumn());
}
}
//5) Add relation mutations
for (Long vertexid : mutations.keySet()) {
Preconditions.checkArgument(vertexid > 0, "Vertex has no id: %s", vertexid);
List<InternalRelation> edges = mutations.get(vertexid);
List<Entry> additions = new ArrayList<Entry>(edges.size());
List<Entry> deletions = new ArrayList<Entry>(Math.max(10, edges.size() / 10));
for (InternalRelation edge : edges) {
InternalRelationType baseType = (InternalRelationType) edge.getType();
assert baseType.getBaseType()==null;
for (InternalRelationType type : baseType.getRelationIndexes()) {
if (type.getStatus()== SchemaStatus.DISABLED) continue;
for (int pos = 0; pos < edge.getArity(); pos++) {
if (!type.isUnidirected(Direction.BOTH) && !type.isUnidirected(EdgeDirection.fromPosition(pos)))
continue; //Directionality is not covered
if (edge.getVertex(pos).getLongId()==vertexid) {
StaticArrayEntry entry = edgeSerializer.writeRelation(edge, type, pos, tx);
if (edge.isRemoved()) {
deletions.add(entry);
} else {
Preconditions.checkArgument(edge.isNew());
int ttl = getTTL(edge);
if (ttl > 0) {
entry.setMetaData(EntryMetaData.TTL, ttl);
}
additions.add(entry);
}
}
}
}
}
StaticBuffer vertexKey = idManager.getKey(vertexid);
mutator.mutateEdges(vertexKey, additions, deletions);
}
//6) Add index updates
boolean has2iMods = false;
for (IndexSerializer.IndexUpdate indexUpdate : indexUpdates) {
assert indexUpdate.isAddition() || indexUpdate.isDeletion();
if (indexUpdate.isCompositeIndex()) {
IndexSerializer.IndexUpdate<StaticBuffer,Entry> update = indexUpdate;
if (update.isAddition())
mutator.mutateIndex(update.getKey(), Lists.newArrayList(update.getEntry()), KCVSCache.NO_DELETIONS);
else
mutator.mutateIndex(update.getKey(), KeyColumnValueStore.NO_ADDITIONS, Lists.newArrayList(update.getEntry()));
} else {
IndexSerializer.IndexUpdate<String,IndexEntry> update = indexUpdate;
has2iMods = true;
IndexTransaction itx = mutator.getIndexTransaction(update.getIndex().getBackingIndexName());
String indexStore = ((MixedIndexType)update.getIndex()).getStoreName();
if (update.isAddition())
itx.add(indexStore, update.getKey(), update.getEntry(), update.getElement().isNew());
else
itx.delete(indexStore,update.getKey(),update.getEntry().field,update.getEntry().value,update.getElement().isRemoved());
}
}
return new ModificationSummary(!mutations.isEmpty(),has2iMods);
}
private static final Predicate<InternalRelation> SCHEMA_FILTER = new Predicate<InternalRelation>() {
@Override
public boolean apply(@Nullable InternalRelation internalRelation) {
return internalRelation.getType() instanceof BaseRelationType && internalRelation.getVertex(0) instanceof TitanSchemaVertex;
}
};
private static final Predicate<InternalRelation> NO_SCHEMA_FILTER = new Predicate<InternalRelation>() {
@Override
public boolean apply(@Nullable InternalRelation internalRelation) {
return !SCHEMA_FILTER.apply(internalRelation);
}
};
private static final Predicate<InternalRelation> NO_FILTER = Predicates.alwaysTrue();
public void commit(final Collection<InternalRelation> addedRelations,
final Collection<InternalRelation> deletedRelations, final StandardTitanTx tx) {
if (addedRelations.isEmpty() && deletedRelations.isEmpty()) return;
//1. Finalize transaction
log.debug("Saving transaction. Added {}, removed {}", addedRelations.size(), deletedRelations.size());
if (!tx.getConfiguration().hasCommitTime()) tx.getConfiguration().setCommitTime(times.getTime());
final Timepoint txTimestamp = tx.getConfiguration().getCommitTime();
final long transactionId = txCounter.incrementAndGet();
//2. Assign TitanVertex IDs
if (!tx.getConfiguration().hasAssignIDsImmediately())
idAssigner.assignIDs(addedRelations);
//3. Commit
BackendTransaction mutator = tx.getTxHandle();
final boolean acquireLocks = tx.getConfiguration().hasAcquireLocks();
final boolean hasTxIsolation = backend.getStoreFeatures().hasTxIsolation();
final boolean logTransaction = config.hasLogTransactions() && !tx.getConfiguration().hasEnabledBatchLoading();
final KCVSLog txLog = logTransaction?backend.getSystemTxLog():null;
final TransactionLogHeader txLogHeader = new TransactionLogHeader(transactionId,txTimestamp);
ModificationSummary commitSummary;
try {
//3.1 Log transaction (write-ahead log) if enabled
if (logTransaction) {
//[FAILURE] Inability to log transaction fails the transaction by escalation since it's likely due to unavailability of primary
//storage backend.
txLog.add(txLogHeader.serializeModifications(serializer, LogTxStatus.PRECOMMIT, tx, addedRelations, deletedRelations),txLogHeader.getLogKey());
}
//3.2 Commit schema elements and their associated relations in a separate transaction if backend does not support
// transactional isolation
boolean hasSchemaElements = !Iterables.isEmpty(Iterables.filter(deletedRelations,SCHEMA_FILTER))
|| !Iterables.isEmpty(Iterables.filter(addedRelations,SCHEMA_FILTER));
Preconditions.checkArgument(!hasSchemaElements || (!tx.getConfiguration().hasEnabledBatchLoading() && acquireLocks),
"Attempting to create schema elements in inconsistent state");
if (hasSchemaElements && !hasTxIsolation) {
/*
* On storage without transactional isolation, create separate
* backend transaction for schema aspects to make sure that
* those are persisted prior to and independently of other
* mutations in the tx. If the storage supports transactional
* isolation, then don't create a separate tx.
*/
final BackendTransaction schemaMutator = openBackendTransaction(tx);
try {
//[FAILURE] If the preparation throws an exception abort directly - nothing persisted since batch-loading cannot be enabled for schema elements
commitSummary = prepareCommit(addedRelations,deletedRelations, SCHEMA_FILTER, schemaMutator, tx, acquireLocks);
assert commitSummary.hasModifications && !commitSummary.has2iModifications;
} catch (Throwable e) {
//Roll back schema tx and escalate exception
schemaMutator.rollback();
throw e;
}
try {
schemaMutator.commit();
} catch (Throwable e) {
//[FAILURE] Primary persistence failed => abort and escalate exception, nothing should have been persisted
log.error("Could not commit transaction ["+transactionId+"] due to storage exception in system-commit",e);
throw e;
}
}
//[FAILURE] Exceptions during preparation here cause the entire transaction to fail on transactional systems
//or just the non-system part on others. Nothing has been persisted unless batch-loading
commitSummary = prepareCommit(addedRelations,deletedRelations, hasTxIsolation? NO_FILTER : NO_SCHEMA_FILTER, mutator, tx, acquireLocks);
if (commitSummary.hasModifications) {
String logTxIdentifier = tx.getConfiguration().getLogIdentifier();
boolean hasSecondaryPersistence = logTxIdentifier!=null || commitSummary.has2iModifications;
//1. Commit storage - failures lead to immediate abort
//1a. Add success message to tx log which will be committed atomically with all transactional changes so that we can recover secondary failures
// This should not throw an exception since the mutations are just cached. If it does, it will be escalated since its critical
if (logTransaction) {
txLog.add(txLogHeader.serializePrimary(serializer,
hasSecondaryPersistence?LogTxStatus.PRIMARY_SUCCESS:LogTxStatus.COMPLETE_SUCCESS),
txLogHeader.getLogKey(),mutator.getTxLogPersistor());
}
try {
mutator.commitStorage();
} catch (Throwable e) {
//[FAILURE] If primary storage persistence fails abort directly (only schema could have been persisted)
log.error("Could not commit transaction ["+transactionId+"] due to storage exception in commit",e);
throw e;
}
if (hasSecondaryPersistence) {
LogTxStatus status = LogTxStatus.SECONDARY_SUCCESS;
Map<String,Throwable> indexFailures = ImmutableMap.of();
boolean userlogSuccess = true;
try {
//2. Commit indexes - [FAILURE] all exceptions are collected and logged but nothing is aborted
indexFailures = mutator.commitIndexes();
if (!indexFailures.isEmpty()) {
status = LogTxStatus.SECONDARY_FAILURE;
for (Map.Entry<String,Throwable> entry : indexFailures.entrySet()) {
log.error("Error while committing index mutations for transaction ["+transactionId+"] on index: " +entry.getKey(),entry.getValue());
}
}
//3. Log transaction if configured - [FAILURE] is recorded but does not cause exception
if (logTxIdentifier!=null) {
try {
userlogSuccess = false;
final Log userLog = backend.getUserLog(logTxIdentifier);
Future<Message> env = userLog.add(txLogHeader.serializeModifications(serializer, LogTxStatus.USER_LOG, tx, addedRelations, deletedRelations));
if (env.isDone()) {
try {
env.get();
} catch (ExecutionException ex) {
throw ex.getCause();
}
}
userlogSuccess=true;
} catch (Throwable e) {
status = LogTxStatus.SECONDARY_FAILURE;
log.error("Could not user-log committed transaction ["+transactionId+"] to " + logTxIdentifier, e);
}
}
} finally {
if (logTransaction) {
//[FAILURE] An exception here will be logged and not escalated; tx considered success and
// needs to be cleaned up later
try {
txLog.add(txLogHeader.serializeSecondary(serializer,status,indexFailures,userlogSuccess),txLogHeader.getLogKey());
} catch (Throwable e) {
log.error("Could not tx-log secondary persistence status on transaction ["+transactionId+"]",e);
}
}
}
} else {
//This just closes the transaction since there are no modifications
mutator.commitIndexes();
}
} else { //Just commit everything at once
//[FAILURE] This case only happens when there are no non-system mutations in which case all changes
//are already flushed. Hence, an exception here is unlikely and should abort
mutator.commit();
}
} catch (Throwable e) {
log.error("Could not commit transaction ["+transactionId+"] due to exception",e);
try {
//Clean up any left-over transaction handles
mutator.rollback();
} catch (Throwable e2) {
log.error("Could not roll-back transaction ["+transactionId+"] after failure due to exception",e2);
}
if (e instanceof RuntimeException) throw (RuntimeException)e;
else throw new TitanException("Unexpected exception",e);
}
}
private static class ShutdownThread extends Thread {
private final StandardTitanGraph graph;
public ShutdownThread(StandardTitanGraph graph) {
this.graph = graph;
}
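// Note: start() rather than run() is overridden, so the shutdown executes synchronously
// on the thread the JVM uses to invoke registered shutdown hooks.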
@Override
public void start() {
if (graph.isOpen && log.isDebugEnabled())
log.debug("Shutting down graph {} using built-in shutdown hook.", graph);
graph.shutdown();
}
}
}
| 1no label
|
titan-core_src_main_java_com_thinkaurelius_titan_graphdb_database_StandardTitanGraph.java
|
724 |
indexDeleteAction.execute(new IndexDeleteRequest(request), new ActionListener<IndexDeleteResponse>() {
@Override
public void onResponse(IndexDeleteResponse indexDeleteResponse) {
// go over the response, see if we have found one, and the version if found
long version = Versions.MATCH_ANY;
boolean found = false;
for (ShardDeleteResponse deleteResponse : indexDeleteResponse.getResponses()) {
if (deleteResponse.isFound()) {
version = deleteResponse.getVersion();
found = true;
break;
}
}
listener.onResponse(new DeleteResponse(request.index(), request.type(), request.id(), version, found));
}
@Override
public void onFailure(Throwable e) {
listener.onFailure(e);
}
});
| 0true
|
src_main_java_org_elasticsearch_action_delete_TransportDeleteAction.java
|
4,189 |
public class BlobStoreIndexShardRepository extends AbstractComponent implements IndexShardRepository {
private BlobStore blobStore;
private BlobPath basePath;
private final String repositoryName;
private ByteSizeValue chunkSize;
private final IndicesService indicesService;
private RateLimiter snapshotRateLimiter;
private RateLimiter restoreRateLimiter;
private RateLimiterListener rateLimiterListener;
private RateLimitingInputStream.Listener snapshotThrottleListener;
private static final String SNAPSHOT_PREFIX = "snapshot-";
@Inject
BlobStoreIndexShardRepository(Settings settings, RepositoryName repositoryName, IndicesService indicesService) {
super(settings);
this.repositoryName = repositoryName.name();
this.indicesService = indicesService;
}
/**
* Called by {@link org.elasticsearch.repositories.blobstore.BlobStoreRepository} on repository startup
*
* @param blobStore blob store
* @param basePath base path to blob store
* @param chunkSize chunk size
*/
public void initialize(BlobStore blobStore, BlobPath basePath, ByteSizeValue chunkSize,
RateLimiter snapshotRateLimiter, RateLimiter restoreRateLimiter,
final RateLimiterListener rateLimiterListener) {
this.blobStore = blobStore;
this.basePath = basePath;
this.chunkSize = chunkSize;
this.snapshotRateLimiter = snapshotRateLimiter;
this.restoreRateLimiter = restoreRateLimiter;
this.rateLimiterListener = rateLimiterListener;
this.snapshotThrottleListener = new RateLimitingInputStream.Listener() {
@Override
public void onPause(long nanos) {
rateLimiterListener.onSnapshotPause(nanos);
}
};
}
/**
* {@inheritDoc}
*/
@Override
public void snapshot(SnapshotId snapshotId, ShardId shardId, SnapshotIndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) {
SnapshotContext snapshotContext = new SnapshotContext(snapshotId, shardId, snapshotStatus);
snapshotStatus.startTime(System.currentTimeMillis());
try {
snapshotContext.snapshot(snapshotIndexCommit);
snapshotStatus.time(System.currentTimeMillis() - snapshotStatus.startTime());
snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.DONE);
} catch (Throwable e) {
snapshotStatus.time(System.currentTimeMillis() - snapshotStatus.startTime());
snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.FAILURE);
if (e instanceof IndexShardSnapshotFailedException) {
throw (IndexShardSnapshotFailedException) e;
} else {
throw new IndexShardSnapshotFailedException(shardId, e.getMessage(), e);
}
}
}
/**
* {@inheritDoc}
*/
@Override
public void restore(SnapshotId snapshotId, ShardId shardId, ShardId snapshotShardId, RecoveryStatus recoveryStatus) {
RestoreContext snapshotContext = new RestoreContext(snapshotId, shardId, snapshotShardId, recoveryStatus);
try {
recoveryStatus.index().startTime(System.currentTimeMillis());
snapshotContext.restore();
recoveryStatus.index().time(System.currentTimeMillis() - recoveryStatus.index().startTime());
} catch (Throwable e) {
throw new IndexShardRestoreFailedException(shardId, "failed to restore snapshot [" + snapshotId.getSnapshot() + "]", e);
}
}
/**
* Delete shard snapshot
*
* @param snapshotId snapshot id
* @param shardId shard id
*/
public void delete(SnapshotId snapshotId, ShardId shardId) {
Context context = new Context(snapshotId, shardId, shardId);
context.delete();
}
@Override
public String toString() {
return "BlobStoreIndexShardRepository[" +
"[" + repositoryName +
"], [" + blobStore + ']' +
']';
}
/**
* Returns shard snapshot metadata file name
*
* @param snapshotId snapshot id
* @return shard snapshot metadata file name
*/
private String snapshotBlobName(SnapshotId snapshotId) {
return SNAPSHOT_PREFIX + snapshotId.getSnapshot();
}
/**
* Serializes snapshot to JSON
*
* @param snapshot snapshot
* @return JSON representation of the snapshot
* @throws IOException
*/
public static byte[] writeSnapshot(BlobStoreIndexShardSnapshot snapshot) throws IOException {
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).prettyPrint();
BlobStoreIndexShardSnapshot.toXContent(snapshot, builder, ToXContent.EMPTY_PARAMS);
return builder.bytes().toBytes();
}
/**
* Parses JSON representation of a snapshot
*
* @param data JSON
* @return snapshot
* @throws IOException
*/
public static BlobStoreIndexShardSnapshot readSnapshot(byte[] data) throws IOException {
XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(data);
try {
parser.nextToken();
return BlobStoreIndexShardSnapshot.fromXContent(parser);
} finally {
parser.close();
}
}
/**
* Context for snapshot/restore operations
*/
private class Context {
protected final SnapshotId snapshotId;
protected final ShardId shardId;
protected final ImmutableBlobContainer blobContainer;
public Context(SnapshotId snapshotId, ShardId shardId) {
this(snapshotId, shardId, shardId);
}
public Context(SnapshotId snapshotId, ShardId shardId, ShardId snapshotShardId) {
this.snapshotId = snapshotId;
this.shardId = shardId;
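// shard blobs live under "<basePath>/indices/<index-name>/<shard-id>" in the repository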
blobContainer = blobStore.immutableBlobContainer(basePath.add("indices").add(snapshotShardId.getIndex()).add(Integer.toString(snapshotShardId.getId())));
}
/**
* Delete shard snapshot
*/
public void delete() {
final ImmutableMap<String, BlobMetaData> blobs;
try {
blobs = blobContainer.listBlobs();
} catch (IOException e) {
throw new IndexShardSnapshotException(shardId, "Failed to list content of gateway", e);
}
BlobStoreIndexShardSnapshots snapshots = buildBlobStoreIndexShardSnapshots(blobs);
String commitPointName = snapshotBlobName(snapshotId);
try {
blobContainer.deleteBlob(commitPointName);
} catch (IOException e) {
logger.debug("[{}] [{}] failed to delete shard snapshot file", shardId, snapshotId);
}
// delete all files that are not referenced by any commit point
// build a new list of commit points that excludes the snapshot being deleted
List<BlobStoreIndexShardSnapshot> newSnapshotsList = Lists.newArrayList();
for (BlobStoreIndexShardSnapshot point : snapshots) {
if (!point.snapshot().equals(snapshotId.getSnapshot())) {
newSnapshotsList.add(point);
}
}
cleanup(newSnapshotsList, blobs);
}
/**
* Removes all unreferenced files from the repository
*
* @param snapshots list of active snapshots in the container
* @param blobs list of blobs in the container
*/
protected void cleanup(List<BlobStoreIndexShardSnapshot> snapshots, ImmutableMap<String, BlobMetaData> blobs) {
BlobStoreIndexShardSnapshots newSnapshots = new BlobStoreIndexShardSnapshots(snapshots);
// now go over all the blobs, and if they don't exist in a snapshot, delete them
for (String blobName : blobs.keySet()) {
if (!blobName.startsWith("__")) {
continue;
}
if (newSnapshots.findNameFile(FileInfo.canonicalName(blobName)) == null) {
try {
blobContainer.deleteBlob(blobName);
} catch (IOException e) {
logger.debug("[{}] [{}] error deleting blob [{}] during cleanup", e, snapshotId, shardId, blobName);
}
}
}
}
/**
* Generates blob name
*
* @param generation the blob number
* @return the blob name
*/
protected String fileNameFromGeneration(long generation) {
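// e.g. generation 42 becomes "__16", since Character.MAX_RADIX is 36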
return "__" + Long.toString(generation, Character.MAX_RADIX);
}
/**
* Finds the next available blob number
*
* @param blobs list of blobs in the repository
* @return next available blob number
*/
protected long findLatestFileNameGeneration(ImmutableMap<String, BlobMetaData> blobs) {
long generation = -1;
for (String name : blobs.keySet()) {
if (!name.startsWith("__")) {
continue;
}
name = FileInfo.canonicalName(name);
try {
long currentGen = Long.parseLong(name.substring(2) /*__*/, Character.MAX_RADIX);
if (currentGen > generation) {
generation = currentGen;
}
} catch (NumberFormatException e) {
logger.warn("file [{}] does not conform to the '__' schema", name);
}
}
return generation;
}
/**
* Loads all available snapshots in the repository
*
* @param blobs list of blobs in repository
* @return BlobStoreIndexShardSnapshots
*/
protected BlobStoreIndexShardSnapshots buildBlobStoreIndexShardSnapshots(ImmutableMap<String, BlobMetaData> blobs) {
List<BlobStoreIndexShardSnapshot> snapshots = Lists.newArrayList();
for (String name : blobs.keySet()) {
if (name.startsWith(SNAPSHOT_PREFIX)) {
try {
snapshots.add(readSnapshot(blobContainer.readBlobFully(name)));
} catch (IOException e) {
logger.warn("failed to read commit point [{}]", e, name);
}
}
}
return new BlobStoreIndexShardSnapshots(snapshots);
}
}
/**
* Context for snapshot operations
*/
private class SnapshotContext extends Context {
private final Store store;
private final IndexShardSnapshotStatus snapshotStatus;
/**
* Constructs new context
*
* @param snapshotId snapshot id
* @param shardId shard to be snapshotted
* @param snapshotStatus snapshot status to report progress
*/
public SnapshotContext(SnapshotId snapshotId, ShardId shardId, IndexShardSnapshotStatus snapshotStatus) {
super(snapshotId, shardId);
store = indicesService.indexServiceSafe(shardId.getIndex()).shardInjectorSafe(shardId.id()).getInstance(Store.class);
this.snapshotStatus = snapshotStatus;
}
/**
* Create snapshot from index commit point
*
* @param snapshotIndexCommit
*/
public void snapshot(SnapshotIndexCommit snapshotIndexCommit) {
logger.debug("[{}] [{}] snapshot to [{}] ...", shardId, snapshotId, repositoryName);
final ImmutableMap<String, BlobMetaData> blobs;
try {
blobs = blobContainer.listBlobs();
} catch (IOException e) {
throw new IndexShardSnapshotFailedException(shardId, "failed to list blobs", e);
}
long generation = findLatestFileNameGeneration(blobs);
BlobStoreIndexShardSnapshots snapshots = buildBlobStoreIndexShardSnapshots(blobs);
snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.STARTED);
final CountDownLatch indexLatch = new CountDownLatch(snapshotIndexCommit.getFiles().length);
final CopyOnWriteArrayList<Throwable> failures = new CopyOnWriteArrayList<Throwable>();
final List<BlobStoreIndexShardSnapshot.FileInfo> indexCommitPointFiles = newArrayList();
int indexNumberOfFiles = 0;
long indexTotalFilesSize = 0;
for (String fileName : snapshotIndexCommit.getFiles()) {
if (snapshotStatus.aborted()) {
logger.debug("[{}] [{}] Aborted on the file [{}], exiting", shardId, snapshotId, fileName);
throw new IndexShardSnapshotFailedException(shardId, "Aborted");
}
logger.trace("[{}] [{}] Processing [{}]", shardId, snapshotId, fileName);
final StoreFileMetaData md;
try {
md = store.metaData(fileName);
} catch (IOException e) {
throw new IndexShardSnapshotFailedException(shardId, "Failed to get store file metadata", e);
}
boolean snapshotRequired = false;
// TODO: For now segment files are copied on each commit because segment files don't have checksum
// if (snapshot.indexChanged() && fileName.equals(snapshotIndexCommit.getSegmentsFileName())) {
// snapshotRequired = true; // we want to always snapshot the segment file if the index changed
// }
BlobStoreIndexShardSnapshot.FileInfo fileInfo = snapshots.findPhysicalIndexFile(fileName);
if (fileInfo == null || !fileInfo.isSame(md) || !snapshotFileExistsInBlobs(fileInfo, blobs)) {
// commit point file does not exist in any commit point, or has a different length, or does not fully exist in the listed blobs
snapshotRequired = true;
}
if (snapshotRequired) {
indexNumberOfFiles++;
indexTotalFilesSize += md.length();
// create a new FileInfo
try {
BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = new BlobStoreIndexShardSnapshot.FileInfo(fileNameFromGeneration(++generation), fileName, md.length(), chunkSize, md.checksum());
indexCommitPointFiles.add(snapshotFileInfo);
snapshotFile(snapshotFileInfo, indexLatch, failures);
} catch (IOException e) {
failures.add(e);
}
} else {
indexCommitPointFiles.add(fileInfo);
indexLatch.countDown();
}
}
snapshotStatus.files(indexNumberOfFiles, indexTotalFilesSize);
snapshotStatus.indexVersion(snapshotIndexCommit.getGeneration());
try {
indexLatch.await();
} catch (InterruptedException e) {
failures.add(e);
Thread.currentThread().interrupt();
}
if (!failures.isEmpty()) {
throw new IndexShardSnapshotFailedException(shardId, "Failed to perform snapshot (index files)", failures.get(0));
}
// now create and write the commit point
snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.FINALIZE);
String commitPointName = snapshotBlobName(snapshotId);
BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getSnapshot(), snapshotIndexCommit.getGeneration(), indexCommitPointFiles);
try {
byte[] snapshotData = writeSnapshot(snapshot);
logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId);
blobContainer.writeBlob(commitPointName, new BytesStreamInput(snapshotData, false), snapshotData.length);
} catch (IOException e) {
throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e);
}
// delete all files that are not referenced by any commit point
// build a new BlobStoreIndexShardSnapshot, that includes this one and all the saved ones
List<BlobStoreIndexShardSnapshot> newSnapshotsList = Lists.newArrayList();
newSnapshotsList.add(snapshot);
for (BlobStoreIndexShardSnapshot point : snapshots) {
newSnapshotsList.add(point);
}
cleanup(newSnapshotsList, blobs);
snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.DONE);
}
/**
* Snapshots an individual file
* <p/>
* This is an asynchronous method. Upon completion of the operation the latch is counted down and any failures are
* added to the {@code failures} list
*
* @param fileInfo file to be snapshotted
* @param latch latch that should be counted down once the file is snapshotted
* @param failures thread-safe list of failures
* @throws IOException
*/
private void snapshotFile(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, final CountDownLatch latch, final List<Throwable> failures) throws IOException {
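// a multi-part file is uploaded as numberOfParts() blobs; the counter tracks outstanding
// part writes so the latch for this file is only counted down after the last one finishes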
final AtomicLong counter = new AtomicLong(fileInfo.numberOfParts());
for (long i = 0; i < fileInfo.numberOfParts(); i++) {
IndexInput indexInput = null;
try {
indexInput = store.openInputRaw(fileInfo.physicalName(), IOContext.READONCE);
indexInput.seek(i * fileInfo.partBytes());
InputStreamIndexInput inputStreamIndexInput = new ThreadSafeInputStreamIndexInput(indexInput, fileInfo.partBytes());
final IndexInput fIndexInput = indexInput;
long size = inputStreamIndexInput.actualSizeToRead();
InputStream inputStream;
if (snapshotRateLimiter != null) {
inputStream = new RateLimitingInputStream(inputStreamIndexInput, snapshotRateLimiter, snapshotThrottleListener);
} else {
inputStream = inputStreamIndexInput;
}
blobContainer.writeBlob(fileInfo.partName(i), inputStream, size, new ImmutableBlobContainer.WriterListener() {
@Override
public void onCompleted() {
IOUtils.closeWhileHandlingException(fIndexInput);
if (counter.decrementAndGet() == 0) {
latch.countDown();
}
}
@Override
public void onFailure(Throwable t) {
IOUtils.closeWhileHandlingException(fIndexInput);
failures.add(t);
if (counter.decrementAndGet() == 0) {
latch.countDown();
}
}
});
} catch (Throwable e) {
IOUtils.closeWhileHandlingException(indexInput);
failures.add(e);
latch.countDown();
}
}
}
/**
* Checks if snapshot file already exists in the list of blobs
*
* @param fileInfo file to check
* @param blobs list of blobs
* @return true if file exists in the list of blobs
*/
private boolean snapshotFileExistsInBlobs(BlobStoreIndexShardSnapshot.FileInfo fileInfo, ImmutableMap<String, BlobMetaData> blobs) {
BlobMetaData blobMetaData = blobs.get(fileInfo.name());
if (blobMetaData != null) {
return blobMetaData.length() == fileInfo.length();
} else if (blobs.containsKey(fileInfo.partName(0))) {
// multi part file sum up the size and check
int part = 0;
long totalSize = 0;
while (true) {
blobMetaData = blobs.get(fileInfo.partName(part++));
if (blobMetaData == null) {
break;
}
totalSize += blobMetaData.length();
}
return totalSize == fileInfo.length();
}
// no file, not exact and not multipart
return false;
}
}
/**
* Context for restore operations
*/
private class RestoreContext extends Context {
private final Store store;
private final RecoveryStatus recoveryStatus;
/**
* Constructs new restore context
*
* @param snapshotId snapshot id
* @param shardId shard to be restored
* @param snapshotShardId shard in the snapshot that data should be restored from
* @param recoveryStatus recovery status to report progress
*/
public RestoreContext(SnapshotId snapshotId, ShardId shardId, ShardId snapshotShardId, RecoveryStatus recoveryStatus) {
super(snapshotId, shardId, snapshotShardId);
store = indicesService.indexServiceSafe(shardId.getIndex()).shardInjectorSafe(shardId.id()).getInstance(Store.class);
this.recoveryStatus = recoveryStatus;
}
/**
* Performs restore operation
*/
public void restore() {
logger.debug("[{}] [{}] restoring to [{}] ...", snapshotId, repositoryName, shardId);
BlobStoreIndexShardSnapshot snapshot;
try {
snapshot = readSnapshot(blobContainer.readBlobFully(snapshotBlobName(snapshotId)));
} catch (IOException ex) {
throw new IndexShardRestoreFailedException(shardId, "failed to read shard snapshot file", ex);
}
recoveryStatus.updateStage(RecoveryStatus.Stage.INDEX);
int numberOfFiles = 0;
long totalSize = 0;
int numberOfReusedFiles = 0;
long reusedTotalSize = 0;
List<FileInfo> filesToRecover = Lists.newArrayList();
for (FileInfo fileInfo : snapshot.indexFiles()) {
String fileName = fileInfo.physicalName();
StoreFileMetaData md = null;
try {
md = store.metaData(fileName);
} catch (IOException e) {
// no file
}
numberOfFiles++;
// we don't compute checksum for segments, so always recover them
if (!fileName.startsWith("segments") && md != null && fileInfo.isSame(md)) {
totalSize += md.length();
numberOfReusedFiles++;
reusedTotalSize += md.length();
if (logger.isTraceEnabled()) {
logger.trace("not_recovering [{}], exists in local store and is the same", fileInfo.physicalName());
}
} else {
totalSize += fileInfo.length();
filesToRecover.add(fileInfo);
if (logger.isTraceEnabled()) {
if (md == null) {
logger.trace("recovering [{}], does not exist in local store", fileInfo.physicalName());
} else {
logger.trace("recovering [{}], exists in local store but is different", fileInfo.physicalName());
}
}
}
}
recoveryStatus.index().files(numberOfFiles, totalSize, numberOfReusedFiles, reusedTotalSize);
if (filesToRecover.isEmpty()) {
logger.trace("no files to recover, all exist within the local store");
}
if (logger.isTraceEnabled()) {
logger.trace("[{}] [{}] recovering_files [{}] with total_size [{}], reusing_files [{}] with reused_size [{}]", shardId, snapshotId, numberOfFiles, new ByteSizeValue(totalSize), numberOfReusedFiles, new ByteSizeValue(reusedTotalSize));
}
final CountDownLatch latch = new CountDownLatch(filesToRecover.size());
final CopyOnWriteArrayList<Throwable> failures = new CopyOnWriteArrayList<Throwable>();
for (final FileInfo fileToRecover : filesToRecover) {
logger.trace("[{}] [{}] restoring file [{}]", shardId, snapshotId, fileToRecover.name());
restoreFile(fileToRecover, latch, failures);
}
try {
latch.await();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
if (!failures.isEmpty()) {
throw new IndexShardRestoreFailedException(shardId, "Failed to recover index", failures.get(0));
}
// read the snapshot data persisted
long version = -1;
try {
if (Lucene.indexExists(store.directory())) {
version = Lucene.readSegmentInfos(store.directory()).getVersion();
}
} catch (IOException e) {
throw new IndexShardRestoreFailedException(shardId, "Failed to fetch index version after copying it over", e);
}
recoveryStatus.index().updateVersion(version);
/// now, go over and clean files that are in the store, but were not in the snapshot
try {
for (String storeFile : store.directory().listAll()) {
if (!snapshot.containPhysicalIndexFile(storeFile)) {
try {
store.directory().deleteFile(storeFile);
} catch (IOException e) {
// ignore
}
}
}
} catch (IOException e) {
// ignore
}
}
/**
* Restores a file
* This is an asynchronous method. Upon completion of the operation the latch is counted down and any failures are
* added to the {@code failures} list
*
* @param fileInfo file to be restored
* @param latch latch that should be counted down once the file is restored
* @param failures thread-safe list of failures
*/
private void restoreFile(final FileInfo fileInfo, final CountDownLatch latch, final List<Throwable> failures) {
final IndexOutput indexOutput;
try {
// we create an output with no checksum, this is because the pure binary data of the file is not
// the checksum (because of seek). We will create the checksum file once copying is done
indexOutput = store.createOutputRaw(fileInfo.physicalName());
} catch (IOException e) {
failures.add(e);
latch.countDown();
return;
}
String firstFileToRecover = fileInfo.partName(0);
final AtomicInteger partIndex = new AtomicInteger();
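// multi-part blobs are restored sequentially: onCompleted() chains a read of the next
// part until partIndex reaches numberOfParts()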
blobContainer.readBlob(firstFileToRecover, new BlobContainer.ReadBlobListener() {
@Override
public synchronized void onPartial(byte[] data, int offset, int size) throws IOException {
recoveryStatus.index().addCurrentFilesSize(size);
indexOutput.writeBytes(data, offset, size);
if (restoreRateLimiter != null) {
rateLimiterListener.onRestorePause(restoreRateLimiter.pause(size));
}
}
@Override
public synchronized void onCompleted() {
int part = partIndex.incrementAndGet();
if (part < fileInfo.numberOfParts()) {
String partName = fileInfo.partName(part);
// continue with the new part
blobContainer.readBlob(partName, this);
return;
} else {
// we are done...
try {
indexOutput.close();
// write the checksum
if (fileInfo.checksum() != null) {
store.writeChecksum(fileInfo.physicalName(), fileInfo.checksum());
}
store.directory().sync(Collections.singleton(fileInfo.physicalName()));
} catch (IOException e) {
onFailure(e);
return;
}
}
latch.countDown();
}
@Override
public void onFailure(Throwable t) {
failures.add(t);
latch.countDown();
}
});
}
}
public interface RateLimiterListener {
void onRestorePause(long nanos);
void onSnapshotPause(long nanos);
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_snapshots_blobstore_BlobStoreIndexShardRepository.java
|
326 |
public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder<NodesInfoRequest, NodesInfoResponse, NodesInfoRequestBuilder> {
public NodesInfoRequestBuilder(ClusterAdminClient clusterClient) {
super((InternalClusterAdminClient) clusterClient, new NodesInfoRequest());
}
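// illustrative usage, assuming the standard cluster-admin client API:
// client.admin().cluster().prepareNodesInfo()
//     .clear().setOs(true).setJvm(true).execute().actionGet();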
/**
* Clears all info flags.
*/
public NodesInfoRequestBuilder clear() {
request.clear();
return this;
}
/**
* Sets the request to return all the data.
*/
public NodesInfoRequestBuilder all() {
request.all();
return this;
}
/**
* Should the node settings be returned.
*/
public NodesInfoRequestBuilder setSettings(boolean settings) {
request.settings(settings);
return this;
}
/**
* Should the node OS info be returned.
*/
public NodesInfoRequestBuilder setOs(boolean os) {
request.os(os);
return this;
}
/**
* Should the node process info be returned.
*/
public NodesInfoRequestBuilder setProcess(boolean process) {
request.process(process);
return this;
}
/**
* Should the node JVM info be returned.
*/
public NodesInfoRequestBuilder setJvm(boolean jvm) {
request.jvm(jvm);
return this;
}
/**
* Should the node thread pool info be returned.
*/
public NodesInfoRequestBuilder setThreadPool(boolean threadPool) {
request.threadPool(threadPool);
return this;
}
/**
* Should the node Network info be returned.
*/
public NodesInfoRequestBuilder setNetwork(boolean network) {
request.network(network);
return this;
}
/**
* Should the node Transport info be returned.
*/
public NodesInfoRequestBuilder setTransport(boolean transport) {
request.transport(transport);
return this;
}
/**
* Should the node HTTP info be returned.
*/
public NodesInfoRequestBuilder setHttp(boolean http) {
request.http(http);
return this;
}
public NodesInfoRequestBuilder setPlugin(boolean plugin) {
request().plugin(plugin);
return this;
}
@Override
protected void doExecute(ActionListener<NodesInfoResponse> listener) {
((ClusterAdminClient) client).nodesInfo(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_info_NodesInfoRequestBuilder.java
|
33 |
static final class ThenCopy<T> extends Completion {
final CompletableFuture<?> src;
final CompletableFuture<T> dst;
ThenCopy(CompletableFuture<?> src,
CompletableFuture<T> dst) {
this.src = src; this.dst = dst;
}
public final void run() {
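// copy src's outcome into dst at most once: the CAS from 0 to 1 guards against duplicate
// triggers, and AltResult distinguishes an exceptional or null completion from a normal value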
final CompletableFuture<?> a;
final CompletableFuture<T> dst;
Object r; T t; Throwable ex;
if ((dst = this.dst) != null &&
(a = this.src) != null &&
(r = a.result) != null &&
compareAndSet(0, 1)) {
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
dst.internalComplete(t, ex);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
| 0true
|
src_main_java_jsr166e_CompletableFuture.java
|
107 |
class CreateInNewUnitProposal implements ICompletionProposal,
ICompletionProposalExtension6 {
private final IFile file;
private final DefinitionGenerator dg;
CreateInNewUnitProposal(IFile file, DefinitionGenerator dg) {
this.file = file;
this.dg = dg;
}
@Override
public Point getSelection(IDocument doc) {
return null;
}
@Override
public Image getImage() {
return dg.getImage();
}
@Override
public String getDisplayString() {
return "Create toplevel " + dg.getDescription() +
" in a new source file";
}
@Override
public IContextInformation getContextInformation() {
return null;
}
@Override
public String getAdditionalProposalInfo() {
return null;
}
@Override
public void apply(IDocument doc) {
SelectNewUnitWizard w = new SelectNewUnitWizard("Create in New Source File",
"Create a new Ceylon source file with the missing declaration.",
dg.getBrokenName());
if (w.open(file)) {
CreateUnitChange change = new CreateUnitChange(w.getFile(),
w.includePreamble(), getText(doc), w.getProject(),
"Create in New Source File");
try {
performChange(getCurrentEditor(), doc, change,
"Create in New Source File");
gotoLocation(w.getFile().getFullPath(), 0);
}
catch (CoreException e) {
e.printStackTrace();
}
}
}
private String getText(IDocument doc) {
String delim = getDefaultLineDelimiter(doc);
String definition = dg.generate("", delim);
List<Declaration> imports = new ArrayList<Declaration>();
resolveImports(imports, dg.getReturnType());
if (dg.getParameters()!=null) {
resolveImports(imports, dg.getParameters().values());
}
String imps = imports(imports, doc);
if (!imps.isEmpty()) {
definition = imps + delim + delim + definition;
}
return definition;
}
static void addCreateInNewUnitProposal(Collection<ICompletionProposal> proposals,
DefinitionGenerator dg, IFile file) {
proposals.add(new CreateInNewUnitProposal(file, dg));
}
private static void resolveImports(List<Declaration> imports,
Collection<ProducedType> producedTypes) {
if (producedTypes!=null) {
for (ProducedType pt : producedTypes) {
resolveImports(imports, pt);
}
}
}
private static void resolveImports(List<Declaration> imports, ProducedType pt) {
if (pt != null) {
if (pt.getDeclaration() instanceof UnionType) {
resolveImports(imports, pt.getCaseTypes());
}
else if (pt.getDeclaration() instanceof IntersectionType) {
resolveImports(imports, pt.getSatisfiedTypes());
}
else if (pt.getDeclaration() instanceof TypeParameter) {
TypeParameter typeParam = (TypeParameter) pt.getDeclaration();
if (typeParam.isConstrained()) {
resolveImports(imports, typeParam.getCaseTypes());
resolveImports(imports, typeParam.getSatisfiedTypes());
}
if (typeParam.isDefaulted()) {
resolveImports(imports, typeParam.getDefaultTypeArgument());
}
} else {
resolveImports(imports, pt.getTypeArgumentList());
Package p = pt.getDeclaration().getUnit().getPackage();
if (!p.getQualifiedNameString().isEmpty() &&
!p.getQualifiedNameString().equals(Module.LANGUAGE_MODULE_NAME)) {
if (!imports.contains(pt.getDeclaration())) {
imports.add(pt.getDeclaration());
}
}
}
}
}
@Override
public StyledString getStyledDisplayString() {
return Highlights.styleProposal(getDisplayString(), false);
}
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_CreateInNewUnitProposal.java
|
2,934 |
public static final class Factory implements TokenFilterFactory {
private final int maxShingleSize;
private final boolean outputUnigrams;
private final boolean outputUnigramsIfNoShingles;
private final String tokenSeparator;
private int minShingleSize;
private final String name;
public Factory(String name) {
this(name, ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, true, false, ShingleFilter.TOKEN_SEPARATOR);
}
Factory(String name, int minShingleSize, int maxShingleSize, boolean outputUnigrams, boolean outputUnigramsIfNoShingles, String tokenSeparator) {
this.maxShingleSize = maxShingleSize;
this.outputUnigrams = outputUnigrams;
this.outputUnigramsIfNoShingles = outputUnigramsIfNoShingles;
this.tokenSeparator = tokenSeparator;
this.minShingleSize = minShingleSize;
this.name = name;
}
public TokenStream create(TokenStream tokenStream) {
ShingleFilter filter = new ShingleFilter(tokenStream, minShingleSize, maxShingleSize);
filter.setOutputUnigrams(outputUnigrams);
filter.setOutputUnigramsIfNoShingles(outputUnigramsIfNoShingles);
filter.setTokenSeparator(tokenSeparator);
return filter;
}
public int getMaxShingleSize() {
return maxShingleSize;
}
public int getMinShingleSize() {
return minShingleSize;
}
public boolean getOutputUnigrams() {
return outputUnigrams;
}
public boolean getOutputUnigramsIfNoShingles() {
return outputUnigramsIfNoShingles;
}
@Override
public String name() {
return name;
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_analysis_ShingleTokenFilterFactory.java
|
1,377 |
private static class CassandraMapIterable implements Iterable<Entry> {
private final SortedMap<ByteBuffer, Column> columnValues;
public CassandraMapIterable(final SortedMap<ByteBuffer, Column> columnValues) {
Preconditions.checkNotNull(columnValues);
this.columnValues = columnValues;
}
@Override
public Iterator<Entry> iterator() {
return new CassandraMapIterator(columnValues.entrySet().iterator());
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_formats_cassandra_TitanCassandraHadoopGraph.java
|
42 |
public interface BiAction<A,B> { void apply(A a, B b); }
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
271 |
public class MapPutRunnable implements Runnable, DataSerializable, HazelcastInstanceAware {
private HazelcastInstance instance;
public String mapName;
public MapPutRunnable(){}
public MapPutRunnable(String mapName) {
this.mapName = mapName;
}
public void writeData(ObjectDataOutput out) throws IOException {
out.writeUTF(mapName);
}
public void readData(ObjectDataInput in) throws IOException {
mapName = in.readUTF();
}
public void run() {
Member member = instance.getCluster().getLocalMember();
IMap map = instance.getMap(mapName);
map.put(member.getUuid(), member.getUuid()+"value");
}
@Override
public void setHazelcastInstance(HazelcastInstance hazelcastInstance) {
instance = hazelcastInstance;
}
public String getMapName() {
return mapName;
}
public void setMapName(String mapName) {
this.mapName = mapName;
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_executor_tasks_MapPutRunnable.java
|
4,490 |
class FileChunkTransportRequestHandler extends BaseTransportRequestHandler<RecoveryFileChunkRequest> {
@Override
public RecoveryFileChunkRequest newInstance() {
return new RecoveryFileChunkRequest();
}
@Override
public String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel) throws Exception {
RecoveryStatus onGoingRecovery = onGoingRecoveries.get(request.recoveryId());
if (onGoingRecovery == null) {
// shard is getting closed on us
throw new IndexShardClosedException(request.shardId());
}
if (onGoingRecovery.isCanceled()) {
onGoingRecovery.sentCanceledToSource = true;
throw new IndexShardClosedException(request.shardId());
}
Store store = onGoingRecovery.indexShard.store();
IndexOutput indexOutput;
if (request.position() == 0) {
// first request
onGoingRecovery.checksums.remove(request.name());
indexOutput = onGoingRecovery.removeOpenIndexOutputs(request.name());
IOUtils.closeWhileHandlingException(indexOutput);
// we create an output with no checksum, because the raw bytes being copied are not
// the checksum itself (due to seeks); the checksum file is created once copying is done.
// also, if the file already exists, we write to a name derived from the current recovery
// "id" and switch it in later, so that index files are only overwritten once everything
// has been copied over and the index is never left half moved
String fileName = request.name();
if (store.directory().fileExists(fileName)) {
fileName = "recovery." + onGoingRecovery.startTime + "." + fileName;
}
indexOutput = onGoingRecovery.openAndPutIndexOutput(request.name(), fileName, store);
} else {
indexOutput = onGoingRecovery.getOpenIndexOutput(request.name());
}
if (indexOutput == null) {
// shard is getting closed on us
throw new IndexShardClosedException(request.shardId());
}
boolean success = false;
synchronized (indexOutput) {
try {
if (recoverySettings.rateLimiter() != null) {
recoverySettings.rateLimiter().pause(request.content().length());
}
BytesReference content = request.content();
if (!content.hasArray()) {
content = content.toBytesArray();
}
indexOutput.writeBytes(content.array(), content.arrayOffset(), content.length());
onGoingRecovery.currentFilesSize.addAndGet(request.length());
if (indexOutput.getFilePointer() == request.length()) {
// we are done
indexOutput.close();
// write the checksum
if (request.checksum() != null) {
onGoingRecovery.checksums.put(request.name(), request.checksum());
}
store.directory().sync(Collections.singleton(request.name()));
IndexOutput remove = onGoingRecovery.removeOpenIndexOutputs(request.name());
assert remove == indexOutput;
}
success = true;
} finally {
if (!success || onGoingRecovery.isCanceled()) {
IndexOutput remove = onGoingRecovery.removeOpenIndexOutputs(request.name());
assert remove == indexOutput;
IOUtils.closeWhileHandlingException(indexOutput);
}
}
}
if (onGoingRecovery.isCanceled()) {
onGoingRecovery.sentCanceledToSource = true;
throw new IndexShardClosedException(request.shardId());
}
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
| 1no label
|
src_main_java_org_elasticsearch_indices_recovery_RecoveryTarget.java
|
306 |
public class ClusterHealthRequest extends MasterNodeReadOperationRequest<ClusterHealthRequest> {
private String[] indices;
private TimeValue timeout = new TimeValue(30, TimeUnit.SECONDS);
private ClusterHealthStatus waitForStatus;
private int waitForRelocatingShards = -1;
private int waitForActiveShards = -1;
private String waitForNodes = "";
private Priority waitForEvents = null;
ClusterHealthRequest() {
}
public ClusterHealthRequest(String... indices) {
this.indices = indices;
}
public String[] indices() {
return indices;
}
public ClusterHealthRequest indices(String[] indices) {
this.indices = indices;
return this;
}
public TimeValue timeout() {
return timeout;
}
public ClusterHealthRequest timeout(TimeValue timeout) {
this.timeout = timeout;
if (masterNodeTimeout == DEFAULT_MASTER_NODE_TIMEOUT) {
masterNodeTimeout = timeout;
}
return this;
}
public ClusterHealthRequest timeout(String timeout) {
return this.timeout(TimeValue.parseTimeValue(timeout, null));
}
public ClusterHealthStatus waitForStatus() {
return waitForStatus;
}
public ClusterHealthRequest waitForStatus(ClusterHealthStatus waitForStatus) {
this.waitForStatus = waitForStatus;
return this;
}
public ClusterHealthRequest waitForGreenStatus() {
return waitForStatus(ClusterHealthStatus.GREEN);
}
public ClusterHealthRequest waitForYellowStatus() {
return waitForStatus(ClusterHealthStatus.YELLOW);
}
public int waitForRelocatingShards() {
return waitForRelocatingShards;
}
public ClusterHealthRequest waitForRelocatingShards(int waitForRelocatingShards) {
this.waitForRelocatingShards = waitForRelocatingShards;
return this;
}
public int waitForActiveShards() {
return waitForActiveShards;
}
public ClusterHealthRequest waitForActiveShards(int waitForActiveShards) {
this.waitForActiveShards = waitForActiveShards;
return this;
}
public String waitForNodes() {
return waitForNodes;
}
/**
 * Waits for N nodes. Use "12" for an exact match, ">12" or "<12" for a range.
 */
public ClusterHealthRequest waitForNodes(String waitForNodes) {
this.waitForNodes = waitForNodes;
return this;
}
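// Illustrative usage only (not part of the original class; the index name and
// values below are assumptions for demonstration):
//   new ClusterHealthRequest("my-index").waitForNodes(">2").timeout("30s");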
public ClusterHealthRequest waitForEvents(Priority waitForEvents) {
this.waitForEvents = waitForEvents;
return this;
}
public Priority waitForEvents() {
return this.waitForEvents;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
if (size == 0) {
indices = Strings.EMPTY_ARRAY;
} else {
indices = new String[size];
for (int i = 0; i < indices.length; i++) {
indices[i] = in.readString();
}
}
timeout = readTimeValue(in);
if (in.readBoolean()) {
waitForStatus = ClusterHealthStatus.fromValue(in.readByte());
}
waitForRelocatingShards = in.readInt();
waitForActiveShards = in.readInt();
waitForNodes = in.readString();
readLocal(in);
if (in.readBoolean()) {
waitForEvents = Priority.fromByte(in.readByte());
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
if (indices == null) {
out.writeVInt(0);
} else {
out.writeVInt(indices.length);
for (String index : indices) {
out.writeString(index);
}
}
timeout.writeTo(out);
if (waitForStatus == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeByte(waitForStatus.value());
}
out.writeInt(waitForRelocatingShards);
out.writeInt(waitForActiveShards);
out.writeString(waitForNodes);
writeLocal(out);
if (waitForEvents == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeByte(waitForEvents.value());
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_health_ClusterHealthRequest.java
|
618 |
public class IndicesStatsRequest extends BroadcastOperationRequest<IndicesStatsRequest> {
private CommonStatsFlags flags = new CommonStatsFlags();
/**
* Sets all flags to return all stats.
*/
public IndicesStatsRequest all() {
flags.all();
return this;
}
/**
* Clears all stats.
*/
public IndicesStatsRequest clear() {
flags.clear();
return this;
}
/**
* Document types to return stats for. Mainly affects {@link #indexing(boolean)} when
* enabled, returning specific indexing stats for those types.
*/
public IndicesStatsRequest types(String... types) {
flags.types(types);
return this;
}
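// Illustrative usage only (assumed, for demonstration): narrowing the stats to
// indexing stats for one document type, using methods defined in this class:
//   new IndicesStatsRequest().clear().indexing(true).types("my_type");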
/**
* Document types to return stats for. Mainly affects {@link #indexing(boolean)} when
* enabled, returning specific indexing stats for those types.
*/
public String[] types() {
return this.flags.types();
}
/**
* Sets specific search group stats to retrieve the stats for. Mainly affects search
* when enabled.
*/
public IndicesStatsRequest groups(String... groups) {
flags.groups(groups);
return this;
}
public String[] groups() {
return this.flags.groups();
}
public IndicesStatsRequest docs(boolean docs) {
flags.set(Flag.Docs, docs);
return this;
}
public boolean docs() {
return flags.isSet(Flag.Docs);
}
public IndicesStatsRequest store(boolean store) {
flags.set(Flag.Store, store);
return this;
}
public boolean store() {
return flags.isSet(Flag.Store);
}
public IndicesStatsRequest indexing(boolean indexing) {
flags.set(Flag.Indexing, indexing);
return this;
}
public boolean indexing() {
return flags.isSet(Flag.Indexing);
}
public IndicesStatsRequest get(boolean get) {
flags.set(Flag.Get, get);
return this;
}
public boolean get() {
return flags.isSet(Flag.Get);
}
public IndicesStatsRequest search(boolean search) {
flags.set(Flag.Search, search);
return this;
}
public boolean search() {
return flags.isSet(Flag.Search);
}
public IndicesStatsRequest merge(boolean merge) {
flags.set(Flag.Merge, merge);
return this;
}
public boolean merge() {
return flags.isSet(Flag.Merge);
}
public IndicesStatsRequest refresh(boolean refresh) {
flags.set(Flag.Refresh, refresh);
return this;
}
public boolean refresh() {
return flags.isSet(Flag.Refresh);
}
public IndicesStatsRequest flush(boolean flush) {
flags.set(Flag.Flush, flush);
return this;
}
public boolean flush() {
return flags.isSet(Flag.Flush);
}
public IndicesStatsRequest warmer(boolean warmer) {
flags.set(Flag.Warmer, warmer);
return this;
}
public boolean warmer() {
return flags.isSet(Flag.Warmer);
}
public IndicesStatsRequest filterCache(boolean filterCache) {
flags.set(Flag.FilterCache, filterCache);
return this;
}
public boolean filterCache() {
return flags.isSet(Flag.FilterCache);
}
public IndicesStatsRequest idCache(boolean idCache) {
flags.set(Flag.IdCache, idCache);
return this;
}
public boolean idCache() {
return flags.isSet(Flag.IdCache);
}
public IndicesStatsRequest fieldData(boolean fieldData) {
flags.set(Flag.FieldData, fieldData);
return this;
}
public boolean fieldData() {
return flags.isSet(Flag.FieldData);
}
public IndicesStatsRequest percolate(boolean percolate) {
flags.set(Flag.Percolate, percolate);
return this;
}
public boolean percolate() {
return flags.isSet(Flag.Percolate);
}
public IndicesStatsRequest segments(boolean segments) {
flags.set(Flag.Segments, segments);
return this;
}
public boolean segments() {
return flags.isSet(Flag.Segments);
}
public IndicesStatsRequest fieldDataFields(String... fieldDataFields) {
flags.fieldDataFields(fieldDataFields);
return this;
}
public String[] fieldDataFields() {
return flags.fieldDataFields();
}
public IndicesStatsRequest completion(boolean completion) {
flags.set(Flag.Completion, completion);
return this;
}
public boolean completion() {
return flags.isSet(Flag.Completion);
}
public IndicesStatsRequest completionFields(String... completionDataFields) {
flags.completionDataFields(completionDataFields);
return this;
}
public String[] completionFields() {
return flags.completionDataFields();
}
public IndicesStatsRequest translog(boolean translog) {
flags.set(Flag.Translog, translog);
return this;
}
public boolean translog() {
return flags.isSet(Flag.Translog);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
flags.writeTo(out);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
flags = CommonStatsFlags.readCommonStatsFlags(in);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_stats_IndicesStatsRequest.java
|
5 |
public class AbbreviationsManager {
/** A regular expression used to separate alternative abbreviations. (\s == any whitespace) */
private static final Pattern ABBREVIATION_SEPARATOR = Pattern.compile("\\s*\\|\\s*");
/** A regular expression used to separate words. */
private static final Pattern WORD_SEPARATOR = Pattern.compile("\\s+");
private Map<String, List<String>> abbreviations = new HashMap<String, List<String>>();
/**
* Creates a new abbreviations manager configured with a set of abbreviation
* properties. Abbreviation properties are of the form:
* <pre>
* phrase = alt1 | alt2 | ...
* </pre>
* Whitespace around the "=" and "|" separators is removed. The phrase is
* converted to lower case, but the alternatives are used verbatim.
*
* @param abbreviationProperties the abbreviation properties
*/
public AbbreviationsManager(Properties abbreviationProperties) {
@SuppressWarnings("unchecked")
Enumeration<String> e = (Enumeration<String>) abbreviationProperties.propertyNames();
while (e.hasMoreElements()) {
String phrase = e.nextElement();
String lcPhrase = phrase.toLowerCase();
String[] alternatives = ABBREVIATION_SEPARATOR.split(abbreviationProperties.getProperty(phrase).trim());
List<String> abbreviationsForPhrase = new ArrayList<String>(Arrays.asList(alternatives));
Collections.sort(abbreviationsForPhrase, new Comparator<String>() {
@Override
public int compare(String o1, String o2) {
return o1.length() - o2.length();
}
});
abbreviations.put(lcPhrase, abbreviationsForPhrase);
}
}
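// Illustrative sketch (not part of the original file) exercising the property
// format documented above; the phrase and alternatives are assumptions:
//   Properties props = new Properties();
//   props.setProperty("Temperature", "Temp | T");
//   AbbreviationsManager m = new AbbreviationsManager(props);
//   m.getAlternatives("Temperature"); // -> [Temperature, T, Temp] (shortest alternative first)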
/**
* Gets the alternative abbreviations for a phrase. The original phrase is always
* the first alternative returned. If no abbreviations are found for the phrase, returns
* a list with one element, the original phrase. The phrase is converted to lower case
* before looking up its alternatives.
*
* @param phrase the phrase to abbreviate
* @return a list of alternative abbreviations, with the original phrase as the first element
*/
public List<String> getAlternatives(String phrase) {
List<String> result = new ArrayList<String>();
result.add(phrase);
List<String> alternatives = abbreviations.get(phrase.toLowerCase());
if (alternatives != null) {
result.addAll(alternatives);
}
return result;
}
/**
* Finds the phrases within a string that can be abbreviated, and returns
* a structure with those phrases and the alternatives for each phrase.
* A phrase is a sequence of one or more words in the original string, where
* words are delimited by whitespace. At each point in the original string,
* the longest phrase for which there are abbreviations is found.
*
* @param s the string to find abbreviations for
* @return a structure describing the available abbreviations
*/
public Abbreviations getAbbreviations(String s) {
AbbreviationsImpl abbrev = new AbbreviationsImpl(s);
List<String> phrases = getPhrasesWithAbbreviations(s);
for (String phrase : phrases) {
abbrev.addPhrase(phrase, getAlternatives(phrase));
}
return abbrev;
}
/**
* Constructs a partition of a string into phrases, along word boundaries,
* where each phrase has one or more alternative abbreviations, and each
* phrase is the longest match against the abbreviations at that position
* in the original string.
*
* @param s the original string to partition into phrases
* @return a list of phrases
*/
private List<String> getPhrasesWithAbbreviations(String s) {
int phraseStart = 0;
List<String> phrasesWithAbbreviations = new ArrayList<String>();
Matcher wordBoundary = WORD_SEPARATOR.matcher(s);
while (phraseStart < s.length()) {
int phraseLength = getLongestPhraseLength(s.substring(phraseStart));
phrasesWithAbbreviations.add(s.substring(phraseStart, phraseStart + phraseLength));
if (wordBoundary.find(phraseStart + phraseLength)) {
phraseStart = wordBoundary.end();
} else {
phraseStart = s.length();
}
}
return phrasesWithAbbreviations;
}
/**
* Finds the longest phrase within a string that has abbreviations. The first word
* is always a possibility, even if no alternatives exist for that word.
*
* @param s the string for which to find the longest phrase with alternatives
* @return the length of the longest phrase with alternative abbreviations
*/
private int getLongestPhraseLength(String s) {
// If the entire string matches, then it is obviously the longest matching phrase.
if (abbreviations.containsKey(s.toLowerCase())) {
return s.length();
}
Matcher wordBoundary = WORD_SEPARATOR.matcher(s);
if (!wordBoundary.find()) {
// No word boundaries found. Entire string is the only possible phrase.
return s.length();
}
// First word is always an abbreviation candidate, perhaps with no
// alternatives but itself.
int longestMatchLength = wordBoundary.start();
while (wordBoundary.find()) {
if (abbreviations.containsKey(s.substring(0, wordBoundary.start()).toLowerCase())) {
longestMatchLength = wordBoundary.start();
}
}
return longestMatchLength;
}
}
| 0true
|
tableViews_src_main_java_gov_nasa_arc_mct_abbreviation_impl_AbbreviationsManager.java
|
1,729 |
operation.setResponseHandler(new ResponseHandler() {
@Override
public void sendResponse(Object obj) {
if (checkIfMapLoaded.decrementAndGet() == 0) {
loaded.set(true);
}
}
public boolean isLocal() {
return true;
}
});
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_DefaultRecordStore.java
|
58 |
public class TestXaFramework extends AbstractNeo4jTestCase
{
private TransactionManager tm;
private XaDataSourceManager xaDsMgr;
private final TransactionStateFactory stateFactory = new TransactionStateFactory( new DevNullLoggingService() )
{
@Override
public TransactionState create( Transaction tx )
{
return new NoTransactionState()
{
@Override
@SuppressWarnings("deprecation")
public TxIdGenerator getTxIdGenerator()
{
return TxIdGenerator.DEFAULT;
}
};
}
};
private File path()
{
String path = getStorePath( "xafrmwrk" );
File file = new File( path );
try
{
FileUtils.deleteRecursively( file );
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
assertTrue( "create directory: " + file, file.mkdirs() );
return file;
}
private File file( String name )
{
return new File( path(), name);
}
private File resourceFile()
{
return file( "dummy_resource" );
}
@Before
public void setUpFramework()
{
getTransaction().finish();
tm = getGraphDbAPI().getDependencyResolver().resolveDependency( TransactionManager.class );
xaDsMgr = getGraphDbAPI().getDependencyResolver().resolveDependency( XaDataSourceManager.class );
}
@Test
public void testCreateXaResource() throws Exception
{
Map<String, String> config = new HashMap<String, String>();
config.put( "store_dir", "target/var" );
FileSystemAbstraction fileSystem = new DefaultFileSystemAbstraction();
KernelHealth kernelHealth = mock( KernelHealth.class );
xaDsMgr.registerDataSource( new DummyXaDataSource(
UTF8.encode( "DDDDDD" ), "dummy_datasource",
new XaFactory(
new Config( config, GraphDatabaseSettings.class ), TxIdGenerator.DEFAULT,
new PlaceboTm( null, getGraphDbAPI().getDependencyResolver()
.resolveDependency( TxIdGenerator.class ) ),
fileSystem, new Monitors(), new DevNullLoggingService(),
RecoveryVerifier.ALWAYS_VALID, LogPruneStrategies.NO_PRUNING, kernelHealth ), stateFactory,
resourceFile() ) );
XaDataSource xaDs = xaDsMgr.getXaDataSource( "dummy_datasource" );
DummyXaConnection xaC = null;
try
{
xaC = (DummyXaConnection) xaDs.getXaConnection();
try
{
xaC.doStuff1();
fail( "Non enlisted resource should throw exception" );
}
catch ( XAException e )
{ // good
}
Xid xid = new XidImpl( new byte[0], new byte[0] );
xaC.getXaResource().start( xid, XAResource.TMNOFLAGS );
try
{
xaC.doStuff1();
xaC.doStuff2();
}
catch ( XAException e )
{
fail( "Enlisted resource should not throw exception" );
}
xaC.getXaResource().end( xid, XAResource.TMSUCCESS );
xaC.getXaResource().prepare( xid );
xaC.getXaResource().commit( xid, false );
}
finally
{
xaDsMgr.unregisterDataSource( "dummy_datasource" );
if ( xaC != null )
{
xaC.destroy();
}
}
// cleanup dummy resource log
deleteAllResourceFiles();
}
@Test
public void testTxIdGeneration() throws Exception
{
DummyXaConnection xaC1 = null;
try
{
Map<String, String> config = new HashMap<String, String>();
config.put( "store_dir", "target/var" );
FileSystemAbstraction fileSystem = new DefaultFileSystemAbstraction();
KernelHealth kernelHealth = mock( KernelHealth.class );
xaDsMgr.registerDataSource( new DummyXaDataSource( UTF8.encode( "DDDDDD" ), "dummy_datasource1",
new XaFactory( new Config( config, GraphDatabaseSettings.class ), TxIdGenerator.DEFAULT,
(AbstractTransactionManager)tm, fileSystem, new Monitors(), new DevNullLoggingService(),
RecoveryVerifier.ALWAYS_VALID, LogPruneStrategies.NO_PRUNING, kernelHealth ),
stateFactory, resourceFile() ) );
DummyXaDataSource xaDs1 = (DummyXaDataSource) xaDsMgr.getXaDataSource( "dummy_datasource1" );
xaC1 = (DummyXaConnection) xaDs1.getXaConnection();
tm.begin(); // get
xaC1.enlistWithTx( tm );
int currentTxId = xaC1.getTransactionId();
xaC1.doStuff1();
xaC1.delistFromTx( tm );
tm.commit();
// xaC2 = ( DummyXaConnection ) xaDs2.getXaConnection();
tm.begin();
Node node = getGraphDb().createNode(); // get resource in tx
xaC1.enlistWithTx( tm );
assertEquals( ++currentTxId, xaC1.getTransactionId() );
xaC1.doStuff1();
xaC1.delistFromTx( tm );
tm.commit();
tm.begin();
node = getGraphDb().getNodeById( node.getId() );
xaC1.enlistWithTx( tm );
assertEquals( ++currentTxId, xaC1.getTransactionId() );
xaC1.doStuff2();
xaC1.delistFromTx( tm );
node.delete();
tm.commit();
}
finally
{
xaDsMgr.unregisterDataSource( "dummy_datasource1" );
// xaDsMgr.unregisterDataSource( "dummy_datasource1" );
if ( xaC1 != null )
{
xaC1.destroy();
}
}
// cleanup dummy resource log
deleteAllResourceFiles();
}
private void deleteAllResourceFiles()
{
File dir = new File( "." );
final String prefix = resourceFile().getPath();
File files[] = dir.listFiles( new FilenameFilter()
{
@Override
public boolean accept( File dir, String fileName )
{
return fileName.startsWith( prefix );
}
} );
boolean allDeleted = true;
for ( File file : files )
{
if ( !file.delete() )
{
allDeleted = false;
}
}
assertTrue( "delete all files starting with " + prefix, allDeleted );
}
}
| 0true
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestXaFramework.java
|
1,069 |
public class OIndexSearchResult {
final Map<String, Object> fieldValuePairs = new HashMap<String, Object>(8);
final OQueryOperator lastOperator;
final OSQLFilterItemField.FieldChain lastField;
final Object lastValue;
OIndexSearchResult(final OQueryOperator lastOperator, final OSQLFilterItemField.FieldChain field, final Object value) {
this.lastOperator = lastOperator;
lastField = field;
lastValue = value;
}
/**
* Combines two query subsets into one. This operation is valid only if {@link #canBeMerged(OIndexSearchResult)}
* returns <code>true</code> for the same parameter.
*
* @param searchResult
* Query subset to merge.
* @return New instance that represents the merged query.
*/
OIndexSearchResult merge(final OIndexSearchResult searchResult) {
final OQueryOperator operator;
final OIndexSearchResult result;
if (searchResult.lastOperator instanceof OQueryOperatorEquals) {
result = new OIndexSearchResult(this.lastOperator, lastField, lastValue);
result.fieldValuePairs.putAll(searchResult.fieldValuePairs);
result.fieldValuePairs.putAll(fieldValuePairs);
result.fieldValuePairs.put(searchResult.lastField.getItemName(0), searchResult.lastValue);
} else {
operator = searchResult.lastOperator;
result = new OIndexSearchResult(operator, searchResult.lastField, searchResult.lastValue);
result.fieldValuePairs.putAll(searchResult.fieldValuePairs);
result.fieldValuePairs.putAll(fieldValuePairs);
result.fieldValuePairs.put(lastField.getItemName(0), lastValue);
}
return result;
}
/**
* @param searchResult
* Query subset to be merged with this one.
* @return <code>true</code> if two query subsets can be merged.
*/
boolean canBeMerged(final OIndexSearchResult searchResult) {
if (lastField.isLong() || searchResult.lastField.isLong()) {
return false;
}
return isIndexEqualityOperator(lastOperator) || isIndexEqualityOperator(searchResult.lastOperator);
}
List<String> fields() {
final List<String> result = new ArrayList<String>(fieldValuePairs.size() + 1);
result.addAll(fieldValuePairs.keySet());
result.add(lastField.getItemName(0));
return result;
}
int getFieldCount() {
return fieldValuePairs.size() + 1;
}
public static boolean isIndexEqualityOperator(OQueryOperator queryOperator) {
return queryOperator instanceof OQueryOperatorEquals || queryOperator instanceof OQueryOperatorContains
|| queryOperator instanceof OQueryOperatorContainsKey || queryOperator instanceof OQueryOperatorContainsValue;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_OIndexSearchResult.java
|
1,785 |
public abstract class ShapeBuilder implements ToXContent {
protected static final ESLogger LOGGER = ESLoggerFactory.getLogger(ShapeBuilder.class.getName());
private static final boolean DEBUG;
static {
// if asserts are enabled we run the debug statements even if they are not logged,
// so that exceptions that only occur when debug is enabled are caught early
boolean debug = false;
assert debug = true;
DEBUG = debug;
}
public static final double DATELINE = 180;
public static final GeometryFactory FACTORY = new GeometryFactory();
public static final JtsSpatialContext SPATIAL_CONTEXT = new JtsSpatialContext(true);
protected final boolean wrapdateline = true;
protected ShapeBuilder() {
}
protected static Coordinate coordinate(double longitude, double latitude) {
return new Coordinate(longitude, latitude);
}
/**
* Create a new point
*
* @param longitude longitude of the point
* @param latitude latitude of the point
* @return a new {@link PointBuilder}
*/
public static PointBuilder newPoint(double longitude, double latitude) {
return newPoint(new Coordinate(longitude, latitude));
}
/**
* Create a new {@link PointBuilder} from a {@link Coordinate}
* @param coordinate coordinate defining the position of the point
* @return a new {@link PointBuilder}
*/
public static PointBuilder newPoint(Coordinate coordinate) {
return new PointBuilder().coordinate(coordinate);
}
/**
* Create a new set of points
* @return new {@link MultiPointBuilder}
*/
public static MultiPointBuilder newMultiPoint() {
return new MultiPointBuilder();
}
/**
* Create a new lineString
* @return a new {@link LineStringBuilder}
*/
public static LineStringBuilder newLineString() {
return new LineStringBuilder();
}
/**
* Create a new Collection of lineStrings
* @return a new {@link MultiLineStringBuilder}
*/
public static MultiLineStringBuilder newMultiLinestring() {
return new MultiLineStringBuilder();
}
/**
* Create a new Polygon
* @return a new {@link PolygonBuilder}
*/
public static PolygonBuilder newPolygon() {
return new PolygonBuilder();
}
/**
* Create a new Collection of polygons
* @return a new {@link MultiPolygonBuilder}
*/
public static MultiPolygonBuilder newMultiPolygon() {
return new MultiPolygonBuilder();
}
/**
* create a new Circle
* @return a new {@link CircleBuilder}
*/
public static CircleBuilder newCircleBuilder() {
return new CircleBuilder();
}
/**
* create a new rectangle
* @return a new {@link EnvelopeBuilder}
*/
public static EnvelopeBuilder newEnvelope() {
return new EnvelopeBuilder();
}
@Override
public String toString() {
try {
XContentBuilder xcontent = JsonXContent.contentBuilder();
return toXContent(xcontent, EMPTY_PARAMS).prettyPrint().string();
} catch (IOException e) {
return super.toString();
}
}
/**
* Create a new Shape from this builder. Since calling this method can change the
* defined shape (by inserting new coordinates or changing the position of points),
* the builder loses its validity, so this method should only be called once per builder.
* @return new {@link Shape} defined by the builder
*/
public abstract Shape build();
/**
* Recursive method which parses the arrays of coordinates used to define
* Shapes
*
* @param parser
* Parser that will be read from
* @return CoordinateNode representing the start of the coordinate tree
* @throws IOException
* Thrown if an error occurs while reading from the
* XContentParser
*/
private static CoordinateNode parseCoordinates(XContentParser parser) throws IOException {
XContentParser.Token token = parser.nextToken();
// Base case
if (token != XContentParser.Token.START_ARRAY) {
double lon = parser.doubleValue();
token = parser.nextToken();
double lat = parser.doubleValue();
token = parser.nextToken();
return new CoordinateNode(new Coordinate(lon, lat));
}
List<CoordinateNode> nodes = new ArrayList<CoordinateNode>();
while (token != XContentParser.Token.END_ARRAY) {
nodes.add(parseCoordinates(parser));
token = parser.nextToken();
}
return new CoordinateNode(nodes);
}
/**
* Create a new {@link ShapeBuilder} from {@link XContent}
* @param parser parser to read the GeoShape from
* @return {@link ShapeBuilder} read from the parser or null
* if the parser's current token is <code>null</code>
* @throws IOException if the input could not be read
*/
public static ShapeBuilder parse(XContentParser parser) throws IOException {
return GeoShapeType.parse(parser);
}
protected static XContentBuilder toXContent(XContentBuilder builder, Coordinate coordinate) throws IOException {
return builder.startArray().value(coordinate.x).value(coordinate.y).endArray();
}
protected static Coordinate shift(Coordinate coordinate, double dateline) {
if (dateline == 0) {
return coordinate;
} else {
return new Coordinate(-2 * dateline + coordinate.x, coordinate.y);
}
}
/**
* get the shapes type
* @return type of the shape
*/
public abstract GeoShapeType type();
/**
* Calculate the intersection of a line segment and a vertical dateline.
*
* @param p1
* start-point of the line segment
* @param p2
* end-point of the line segment
* @param dateline
* x-coordinate of the vertical dateline
* @return position of the intersection in the half-open range (0..1] if the line
* segment intersects with the dateline. Otherwise this method
* returns {@link Double#NaN}
*/
protected static final double intersection(Coordinate p1, Coordinate p2, double dateline) {
if (p1.x == p2.x) {
return Double.NaN;
} else {
final double t = (dateline - p1.x) / (p2.x - p1.x);
if (t > 1 || t <= 0) {
return Double.NaN;
} else {
return t;
}
}
}
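// Worked example (illustrative; the coordinates are assumptions): for p1=(170, 0),
// p2=(190, 5) and dateline=180, t = (180 - 170) / (190 - 170) = 0.5, which lies in
// (0..1], so 0.5 is returned; a segment entirely on one side of the dateline yields NaN.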
/**
* Calculate all intersections of line segments and a vertical line. The
* Array of edges will be ordered asc by the y-coordinate of the
* intersections of edges.
*
* @param dateline
* x-coordinate of the dateline
* @param edges
* set of edges that may intersect with the dateline
* @return number of intersecting edges
*/
protected static int intersections(double dateline, Edge[] edges) {
int numIntersections = 0;
assert !Double.isNaN(dateline);
for (int i = 0; i < edges.length; i++) {
Coordinate p1 = edges[i].coordinate;
Coordinate p2 = edges[i].next.coordinate;
assert !Double.isNaN(p2.x) && !Double.isNaN(p1.x);
edges[i].intersect = IntersectionOrder.SENTINEL;
double position = intersection(p1, p2, dateline);
if (!Double.isNaN(position)) {
if (position == 1) {
if (Double.compare(p1.x, dateline) == Double.compare(edges[i].next.next.coordinate.x, dateline)) {
// Ignore the ear
continue;
} else if (p2.x == dateline) {
// Ignore Linesegment on dateline
continue;
}
}
edges[i].intersection(position);
numIntersections++;
}
}
Arrays.sort(edges, INTERSECTION_ORDER);
return numIntersections;
}
/**
* Node used to represent a tree of coordinates.
* <p/>
* Can either be a leaf node consisting of a Coordinate, or a parent with
* children
*/
protected static class CoordinateNode implements ToXContent {
protected final Coordinate coordinate;
protected final List<CoordinateNode> children;
/**
* Creates a new leaf CoordinateNode
*
* @param coordinate
* Coordinate for the Node
*/
protected CoordinateNode(Coordinate coordinate) {
this.coordinate = coordinate;
this.children = null;
}
/**
* Creates a new parent CoordinateNode
*
* @param children
* Children of the Node
*/
protected CoordinateNode(List<CoordinateNode> children) {
this.children = children;
this.coordinate = null;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (children == null) {
builder.startArray().value(coordinate.x).value(coordinate.y).endArray();
} else {
builder.startArray();
for (CoordinateNode child : children) {
child.toXContent(builder, params);
}
builder.endArray();
}
return builder;
}
}
/**
* This helper class implements a linked list for {@link Coordinate}. It contains
* fields for a dateline intersection and component id
*/
protected static final class Edge {
Coordinate coordinate; // coordinate of the start point
Edge next; // next segment
Coordinate intersect; // potential intersection with dateline
int component = -1; // id of the component this edge belongs to
protected Edge(Coordinate coordinate, Edge next, Coordinate intersection) {
this.coordinate = coordinate;
this.next = next;
this.intersect = intersection;
if (next != null) {
this.component = next.component;
}
}
protected Edge(Coordinate coordinate, Edge next) {
this(coordinate, next, IntersectionOrder.SENTINEL);
}
private static final int top(Coordinate[] points, int offset, int length) {
int top = 0; // the loop below starts at 1 since top already points to index 0
for (int i = 1; i < length; i++) {
if (points[offset + i].y < points[offset + top].y) {
top = i;
} else if (points[offset + i].y == points[offset + top].y) {
if (points[offset + i].x < points[offset + top].x) {
top = i;
}
}
}
return top;
}
/**
* Concatenate a set of points to a polygon
*
* @param component
* component id of the polygon
* @param direction
* direction of the ring
* @param points
* list of points to concatenate
* @param pointOffset
* index of the first point
* @param edges
* Array of edges to write the result to
* @param edgeOffset
* index of the first edge in the result
* @param length
* number of points to use
* @return the edges created
*/
private static Edge[] concat(int component, boolean direction, Coordinate[] points, final int pointOffset, Edge[] edges, final int edgeOffset,
int length) {
assert edges.length >= length+edgeOffset;
assert points.length >= length+pointOffset;
edges[edgeOffset] = new Edge(points[pointOffset], null);
for (int i = 1; i < length; i++) {
if (direction) {
edges[edgeOffset + i] = new Edge(points[pointOffset + i], edges[edgeOffset + i - 1]);
edges[edgeOffset + i].component = component;
} else {
edges[edgeOffset + i - 1].next = edges[edgeOffset + i] = new Edge(points[pointOffset + i], null);
edges[edgeOffset + i - 1].component = component;
}
}
if (direction) {
edges[edgeOffset].next = edges[edgeOffset + length - 1];
edges[edgeOffset].component = component;
} else {
edges[edgeOffset + length - 1].next = edges[edgeOffset];
edges[edgeOffset + length - 1].component = component;
}
return edges;
}
/**
* Create a connected edge list from a list of coordinates
*
* @param points
* array of point
* @param offset
* index of the first point
* @param length
* number of points
* @return Array of edges
*/
protected static Edge[] ring(int component, boolean direction, Coordinate[] points, int offset, Edge[] edges, int toffset,
int length) {
// calculate the direction of the points:
// find the point at the top of the set and check the
// orientation of its neighbors. So direction is equivalent
// to clockwise/counterclockwise
final int top = top(points, offset, length);
final int prev = (offset + ((top + length - 1) % length));
final int next = (offset + ((top + 1) % length));
final boolean orientation = points[offset + prev].x > points[offset + next].x;
return concat(component, direction ^ orientation, points, offset, edges, toffset, length);
}
/**
* Set the intersection of this line segment to the given position
*
* @param position
* position of the intersection [0..1]
* @return the {@link Coordinate} of the intersection
*/
protected Coordinate intersection(double position) {
return intersect = position(coordinate, next.coordinate, position);
}
public static Coordinate position(Coordinate p1, Coordinate p2, double position) {
if (position == 0) {
return p1;
} else if (position == 1) {
return p2;
} else {
final double x = p1.x + position * (p2.x - p1.x);
final double y = p1.y + position * (p2.y - p1.y);
return new Coordinate(x, y);
}
}
@Override
public String toString() {
return "Edge[Component=" + component + "; start=" + coordinate + " " + "; intersection=" + intersect + "]";
}
}
protected static final IntersectionOrder INTERSECTION_ORDER = new IntersectionOrder();
private static final class IntersectionOrder implements Comparator<Edge> {
private static final Coordinate SENTINEL = new Coordinate(Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY);
@Override
public int compare(Edge o1, Edge o2) {
return Double.compare(o1.intersect.y, o2.intersect.y);
}
}
public static final String FIELD_TYPE = "type";
public static final String FIELD_COORDINATES = "coordinates";
protected static final boolean debugEnabled() {
return LOGGER.isDebugEnabled() || DEBUG;
}
/**
* Enumeration that lists all {@link GeoShapeType}s that can be handled
*/
public static enum GeoShapeType {
POINT("point"),
MULTIPOINT("multipoint"),
LINESTRING("linestring"),
MULTILINESTRING("multilinestring"),
POLYGON("polygon"),
MULTIPOLYGON("multipolygon"),
ENVELOPE("envelope"),
CIRCLE("circle");
protected final String shapename;
private GeoShapeType(String shapename) {
this.shapename = shapename;
}
public static GeoShapeType forName(String geoshapename) {
String typename = geoshapename.toLowerCase(Locale.ROOT);
for (GeoShapeType type : values()) {
if(type.shapename.equals(typename)) {
return type;
}
}
throw new ElasticsearchIllegalArgumentException("unknown geo_shape ["+geoshapename+"]");
}
public static ShapeBuilder parse(XContentParser parser) throws IOException {
if (parser.currentToken() == XContentParser.Token.VALUE_NULL) {
return null;
} else if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
throw new ElasticsearchParseException("Shape must be an object consisting of type and coordinates");
}
GeoShapeType shapeType = null;
Distance radius = null;
CoordinateNode node = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
String fieldName = parser.currentName();
if (FIELD_TYPE.equals(fieldName)) {
parser.nextToken();
shapeType = GeoShapeType.forName(parser.text());
} else if (FIELD_COORDINATES.equals(fieldName)) {
parser.nextToken();
node = parseCoordinates(parser);
} else if (CircleBuilder.FIELD_RADIUS.equals(fieldName)) {
parser.nextToken();
radius = Distance.parseDistance(parser.text());
} else {
parser.nextToken();
parser.skipChildren();
}
}
}
if (shapeType == null) {
throw new ElasticsearchParseException("Shape type not included");
} else if (node == null) {
throw new ElasticsearchParseException("Coordinates not included");
} else if (radius != null && GeoShapeType.CIRCLE != shapeType) {
throw new ElasticsearchParseException("Field [" + CircleBuilder.FIELD_RADIUS + "] is supported for [" + CircleBuilder.TYPE
+ "] only");
}
switch (shapeType) {
case POINT: return parsePoint(node);
case MULTIPOINT: return parseMultiPoint(node);
case LINESTRING: return parseLineString(node);
case MULTILINESTRING: return parseMultiLine(node);
case POLYGON: return parsePolygon(node);
case MULTIPOLYGON: return parseMultiPolygon(node);
case CIRCLE: return parseCircle(node, radius);
case ENVELOPE: return parseEnvelope(node);
default:
throw new ElasticsearchParseException("Shape type [" + shapeType + "] not included");
}
}
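// Illustrative example of input this parser accepts (a GeoJSON-style object built
// from FIELD_TYPE and FIELD_COORDINATES; the concrete values are assumptions):
//   { "type": "point", "coordinates": [13.4, 52.5] }
// where the first coordinate is read as the longitude and the second as the latitude.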
protected static PointBuilder parsePoint(CoordinateNode node) {
return newPoint(node.coordinate);
}
protected static CircleBuilder parseCircle(CoordinateNode coordinates, Distance radius) {
return newCircleBuilder().center(coordinates.coordinate).radius(radius);
}
protected static EnvelopeBuilder parseEnvelope(CoordinateNode coordinates) {
return newEnvelope().topLeft(coordinates.children.get(0).coordinate).bottomRight(coordinates.children.get(1).coordinate);
}
protected static MultiPointBuilder parseMultiPoint(CoordinateNode coordinates) {
MultiPointBuilder points = new MultiPointBuilder();
for (CoordinateNode node : coordinates.children) {
points.point(node.coordinate);
}
return points;
}
protected static LineStringBuilder parseLineString(CoordinateNode coordinates) {
LineStringBuilder line = newLineString();
for (CoordinateNode node : coordinates.children) {
line.point(node.coordinate);
}
return line;
}
protected static MultiLineStringBuilder parseMultiLine(CoordinateNode coordinates) {
MultiLineStringBuilder multiline = newMultiLinestring();
for (CoordinateNode node : coordinates.children) {
multiline.linestring(parseLineString(node));
}
return multiline;
}
protected static PolygonBuilder parsePolygon(CoordinateNode coordinates) {
LineStringBuilder shell = parseLineString(coordinates.children.get(0));
PolygonBuilder polygon = new PolygonBuilder(shell.points);
for (int i = 1; i < coordinates.children.size(); i++) {
polygon.hole(parseLineString(coordinates.children.get(i)));
}
return polygon;
}
protected static MultiPolygonBuilder parseMultiPolygon(CoordinateNode coordinates) {
MultiPolygonBuilder polygons = newMultiPolygon();
for (CoordinateNode node : coordinates.children) {
polygons.polygon(parsePolygon(node));
}
return polygons;
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_common_geo_builders_ShapeBuilder.java
|
2,495 |
public class AddEntryListenerRequest extends CallableClientRequest implements RetryableRequest {
String name;
Data key;
boolean includeValue;
public AddEntryListenerRequest() {
}
public AddEntryListenerRequest(String name, Data key, boolean includeValue) {
this.name = name;
this.key = key;
this.includeValue = includeValue;
}
public Object call() throws Exception {
final ClientEndpoint endpoint = getEndpoint();
final ClientEngine clientEngine = getClientEngine();
final MultiMapService service = getService();
EntryListener listener = new EntryAdapter() {
@Override
public void onEntryEvent(EntryEvent event) {
send(event);
}
private void send(EntryEvent event) {
if (endpoint.live()) {
Data key = clientEngine.toData(event.getKey());
Data value = clientEngine.toData(event.getValue());
Data oldValue = clientEngine.toData(event.getOldValue());
final EntryEventType type = event.getEventType();
final String uuid = event.getMember().getUuid();
PortableEntryEvent portableEntryEvent = new PortableEntryEvent(key, value, oldValue, type, uuid);
endpoint.sendEvent(portableEntryEvent, getCallId());
}
}
};
String registrationId = service.addListener(name, listener, key, includeValue, false);
endpoint.setListenerRegistration(MultiMapService.SERVICE_NAME, name, registrationId);
return registrationId;
}
public String getServiceName() {
return MultiMapService.SERVICE_NAME;
}
public int getFactoryId() {
return MultiMapPortableHook.F_ID;
}
public int getClassId() {
return MultiMapPortableHook.ADD_ENTRY_LISTENER;
}
public void write(PortableWriter writer) throws IOException {
writer.writeBoolean("i", includeValue);
writer.writeUTF("n", name);
final ObjectDataOutput out = writer.getRawDataOutput();
IOUtil.writeNullableData(out, key);
}
public void read(PortableReader reader) throws IOException {
includeValue = reader.readBoolean("i");
name = reader.readUTF("n");
final ObjectDataInput in = reader.getRawDataInput();
key = IOUtil.readNullableData(in);
}
public Permission getRequiredPermission() {
return new MultiMapPermission(name, ActionConstants.ACTION_LISTEN);
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_multimap_operations_client_AddEntryListenerRequest.java
|
245 |
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_CURRENCY")
@Cache(usage = CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region = "blCMSElements")
@AdminPresentationClass(friendlyName = "BroadleafCurrencyImpl_baseCurrency")
public class BroadleafCurrencyImpl implements BroadleafCurrency {
private static final long serialVersionUID = 1L;
@Id
@Column(name = "CURRENCY_CODE")
@AdminPresentation(friendlyName = "BroadleafCurrencyImpl_Currency_Code", order = 1, group = "BroadleafCurrencyImpl_Details", prominent = true)
protected String currencyCode;
@Column(name = "FRIENDLY_NAME")
@AdminPresentation(friendlyName = "BroadleafCurrencyImpl_Name", order = 2, group = "BroadleafCurrencyImpl_Details", prominent = true)
protected String friendlyName;
@Column(name = "DEFAULT_FLAG")
@AdminPresentation(friendlyName = "BroadleafCurrencyImpl_Is_Default", group = "BroadleafCurrencyImpl_Details", excluded = true)
protected Boolean defaultFlag = false;
@Override
public String getCurrencyCode() {
return currencyCode;
}
@Override
public void setCurrencyCode(String code) {
this.currencyCode = code;
}
@Override
public String getFriendlyName() {
return friendlyName;
}
@Override
public void setFriendlyName(String friendlyName) {
this.friendlyName = friendlyName;
}
@Override
public boolean getDefaultFlag() {
if (defaultFlag == null) {
return false;
}
return defaultFlag.booleanValue();
}
@Override
public void setDefaultFlag(boolean defaultFlag) {
this.defaultFlag = Boolean.valueOf(defaultFlag);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof BroadleafCurrency)) {
return false;
}
BroadleafCurrencyImpl currency = (BroadleafCurrencyImpl) o;
if (currencyCode != null ? !currencyCode.equals(currency.currencyCode) : currency.currencyCode != null) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = currencyCode != null ? currencyCode.hashCode() : 0;
return result;
}
}
| 1no label
|
common_src_main_java_org_broadleafcommerce_common_currency_domain_BroadleafCurrencyImpl.java
|
5,262 |
public class RangeParser implements Aggregator.Parser {
@Override
public String type() {
return InternalRange.TYPE.name();
}
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
ValuesSourceConfig<NumericValuesSource> config = new ValuesSourceConfig<NumericValuesSource>(NumericValuesSource.class);
String field = null;
List<RangeAggregator.Range> ranges = null;
String script = null;
String scriptLang = null;
Map<String, Object> scriptParams = null;
boolean keyed = false;
boolean assumeSorted = false;
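// Illustrative shape of the aggregation body this parser consumes (assumed,
// inferred from the field names handled below):
//   { "field": "price",
//     "ranges": [ { "to": 50 }, { "from": 50, "to": 100 }, { "from": 100 } ],
//     "keyed": true }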
XContentParser.Token token;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.VALUE_STRING) {
if ("field".equals(currentFieldName)) {
field = parser.text();
} else if ("script".equals(currentFieldName)) {
script = parser.text();
} else if ("lang".equals(currentFieldName)) {
scriptLang = parser.text();
} else {
throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("ranges".equals(currentFieldName)) {
ranges = new ArrayList<RangeAggregator.Range>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
double from = Double.NEGATIVE_INFINITY;
String fromAsStr = null;
double to = Double.POSITIVE_INFINITY;
String toAsStr = null;
String key = null;
String toOrFromOrKey = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
toOrFromOrKey = parser.currentName();
} else if (token == XContentParser.Token.VALUE_NUMBER) {
if ("from".equals(toOrFromOrKey)) {
from = parser.doubleValue();
} else if ("to".equals(toOrFromOrKey)) {
to = parser.doubleValue();
}
} else if (token == XContentParser.Token.VALUE_STRING) {
if ("from".equals(toOrFromOrKey)) {
fromAsStr = parser.text();
} else if ("to".equals(toOrFromOrKey)) {
toAsStr = parser.text();
} else if ("key".equals(toOrFromOrKey)) {
key = parser.text();
}
}
}
ranges.add(new RangeAggregator.Range(key, from, fromAsStr, to, toAsStr));
}
} else {
throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
}
} else if (token == XContentParser.Token.START_OBJECT) {
if ("params".equals(currentFieldName)) {
scriptParams = parser.map();
} else {
throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
}
} else if (token == XContentParser.Token.VALUE_BOOLEAN) {
if ("keyed".equals(currentFieldName)) {
keyed = parser.booleanValue();
} else if ("script_values_sorted".equals(currentFieldName)) {
assumeSorted = parser.booleanValue();
} else {
throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
}
} else {
throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
}
}
if (ranges == null) {
throw new SearchParseException(context, "Missing [ranges] in ranges aggregator [" + aggregationName + "]");
}
if (script != null) {
config.script(context.scriptService().search(context.lookup(), scriptLang, script, scriptParams));
}
if (!assumeSorted) {
// we need values to be sorted and unique for efficiency
config.ensureSorted(true);
}
if (field == null) {
return new RangeAggregator.Factory(aggregationName, config, InternalRange.FACTORY, ranges, keyed);
}
FieldMapper<?> mapper = context.smartNameFieldMapper(field);
if (mapper == null) {
config.unmapped(true);
return new RangeAggregator.Factory(aggregationName, config, InternalRange.FACTORY, ranges, keyed);
}
IndexFieldData<?> indexFieldData = context.fieldData().getForField(mapper);
config.fieldContext(new FieldContext(field, indexFieldData));
return new RangeAggregator.Factory(aggregationName, config, InternalRange.FACTORY, ranges, keyed);
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_bucket_range_RangeParser.java
|
937 |
public class OfferTimeZoneType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, OfferTimeZoneType> TYPES = new LinkedHashMap<String, OfferTimeZoneType>();
public static final OfferTimeZoneType SERVER = new OfferTimeZoneType("SERVER", "Server");
public static final OfferTimeZoneType APPLICATION = new OfferTimeZoneType("APPLICATION", "Application Supplied");
public static final OfferTimeZoneType CST = new OfferTimeZoneType("CST", "CST", true);
public static final OfferTimeZoneType UTC = new OfferTimeZoneType("UTC", "UTC", true);
public static OfferTimeZoneType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
private Boolean javaStandardTimeZone;
public OfferTimeZoneType() {
//do nothing
}
public OfferTimeZoneType(final String type, final String friendlyType) {
this(type, friendlyType, false);
}
public OfferTimeZoneType(final String type, final String friendlyType, Boolean javaStandardTimeZone) {
this.friendlyType = friendlyType;
setType(type);
setJavaStandardTimeZone(javaStandardTimeZone);
}
public void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
public Boolean getJavaStandardTimeZone() {
return javaStandardTimeZone;
}
public void setJavaStandardTimeZone(Boolean javaStandardTimeZone) {
this.javaStandardTimeZone = javaStandardTimeZone;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
OfferTimeZoneType other = (OfferTimeZoneType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_service_type_OfferTimeZoneType.java
|
357 |
future.andThen(new ExecutionCallback<Map<String, Integer>>() {
@Override
public void onResponse(Map<String, Integer> response) {
listenerResults.putAll(response);
semaphore.release();
}
@Override
public void onFailure(Throwable t) {
semaphore.release();
}
});
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_mapreduce_ClientMapReduceTest.java
|
1,175 |
public class ItemEvent<E> extends EventObject {
private final E item;
private final ItemEventType eventType;
private final Member member;
public ItemEvent(String name, int eventType, E item, Member member) {
this(name, ItemEventType.getByType(eventType), item, member);
}
public ItemEvent(String name, ItemEventType itemEventType, E item, Member member) {
super(name);
this.item = item;
this.eventType = itemEventType;
this.member = member;
}
/**
* Returns the event type.
*
* @return the event type.
*/
public ItemEventType getEventType() {
return eventType;
}
/**
* Returns the item related to the event.
*
* @return the item.
*/
public E getItem() {
return item;
}
/**
* Returns the member fired this event.
*
* @return the member fired this event.
*/
public Member getMember() {
return member;
}
@Override
public String toString() {
return "ItemEvent{" +
"event=" + eventType +
", item=" + getItem() +
", member=" + getMember() +
"} ";
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_core_ItemEvent.java
|
325 |
new Thread() {
public void run() {
map.forceUnlock("key1");
latch.countDown();
}
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTest.java
|
72 |
public final class AuthenticationRequest extends CallableClientRequest {
private Credentials credentials;
private ClientPrincipal principal;
private boolean reAuth;
private boolean firstConnection;
public AuthenticationRequest() {
}
public AuthenticationRequest(Credentials credentials) {
this.credentials = credentials;
}
public AuthenticationRequest(Credentials credentials, ClientPrincipal principal) {
this.credentials = credentials;
this.principal = principal;
}
public Object call() throws Exception {
boolean authenticated = authenticate();
if (authenticated) {
return handleAuthenticated();
} else {
return handleUnauthenticated();
}
}
private boolean authenticate() {
ClientEngineImpl clientEngine = getService();
Connection connection = endpoint.getConnection();
ILogger logger = clientEngine.getLogger(getClass());
boolean authenticated;
if (credentials == null) {
authenticated = false;
logger.severe("Could not retrieve Credentials object!");
} else if (clientEngine.getSecurityContext() != null) {
authenticated = authenticate(clientEngine.getSecurityContext());
} else if (credentials instanceof UsernamePasswordCredentials) {
UsernamePasswordCredentials usernamePasswordCredentials = (UsernamePasswordCredentials) credentials;
authenticated = authenticate(usernamePasswordCredentials);
} else {
authenticated = false;
logger.severe("Hazelcast security is disabled.\nUsernamePasswordCredentials or cluster "
+ "group-name and group-password should be used for authentication!\n"
+ "Current credentials type is: " + credentials.getClass().getName());
}
logger.log((authenticated ? Level.INFO : Level.WARNING), "Received auth from " + connection
+ ", " + (authenticated ? "successfully authenticated" : "authentication failed"));
return authenticated;
}
private boolean authenticate(UsernamePasswordCredentials credentials) {
ClientEngineImpl clientEngine = getService();
GroupConfig groupConfig = clientEngine.getConfig().getGroupConfig();
String nodeGroupName = groupConfig.getName();
String nodeGroupPassword = groupConfig.getPassword();
boolean usernameMatch = nodeGroupName.equals(credentials.getUsername());
boolean passwordMatch = nodeGroupPassword.equals(credentials.getPassword());
return usernameMatch && passwordMatch;
}
private boolean authenticate(SecurityContext securityContext) {
Connection connection = endpoint.getConnection();
credentials.setEndpoint(connection.getInetAddress().getHostAddress());
try {
LoginContext lc = securityContext.createClientLoginContext(credentials);
lc.login();
endpoint.setLoginContext(lc);
return true;
} catch (LoginException e) {
ILogger logger = clientEngine.getLogger(getClass());
logger.warning(e);
return false;
}
}
private Object handleUnauthenticated() {
ClientEngineImpl clientEngine = getService();
clientEngine.removeEndpoint(endpoint.getConnection());
return new AuthenticationException("Invalid credentials!");
}
private Object handleAuthenticated() {
ClientEngineImpl clientEngine = getService();
if (principal != null && reAuth) {
principal = new ClientPrincipal(principal.getUuid(), clientEngine.getLocalMember().getUuid());
reAuthLocal();
Collection<MemberImpl> members = clientEngine.getClusterService().getMemberList();
for (MemberImpl member : members) {
if (!member.localMember()) {
ClientReAuthOperation op = new ClientReAuthOperation(principal.getUuid(), firstConnection);
clientEngine.sendOperation(op, member.getAddress());
}
}
}
if (principal == null) {
principal = new ClientPrincipal(endpoint.getUuid(), clientEngine.getLocalMember().getUuid());
}
endpoint.authenticated(principal, firstConnection);
clientEngine.bind(endpoint);
return new SerializableCollection(clientEngine.toData(clientEngine.getThisAddress()), clientEngine.toData(principal));
}
private void reAuthLocal() {
final Set<ClientEndpoint> endpoints = clientEngine.getEndpoints(principal.getUuid());
for (ClientEndpoint endpoint : endpoints) {
endpoint.authenticated(principal, firstConnection);
}
}
public String getServiceName() {
return ClientEngineImpl.SERVICE_NAME;
}
@Override
public int getFactoryId() {
return ClientPortableHook.ID;
}
@Override
public int getClassId() {
return ClientPortableHook.AUTH;
}
public void setReAuth(boolean reAuth) {
this.reAuth = reAuth;
}
public boolean isFirstConnection() {
return firstConnection;
}
public void setFirstConnection(boolean firstConnection) {
this.firstConnection = firstConnection;
}
@Override
public void write(PortableWriter writer) throws IOException {
writer.writePortable("credentials", (Portable) credentials);
if (principal != null) {
writer.writePortable("principal", principal);
} else {
writer.writeNullPortable("principal", ClientPortableHook.ID, ClientPortableHook.PRINCIPAL);
}
writer.writeBoolean("reAuth", reAuth);
writer.writeBoolean("firstConnection", firstConnection);
}
@Override
public void read(PortableReader reader) throws IOException {
credentials = (Credentials) reader.readPortable("credentials");
principal = reader.readPortable("principal");
reAuth = reader.readBoolean("reAuth");
firstConnection = reader.readBoolean("firstConnection");
}
@Override
public Permission getRequiredPermission() {
return null;
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_client_AuthenticationRequest.java
|
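A hedged sketch of the client-side counterpart of the group-name/password check above; the group values are illustrative and must match the cluster's GroupConfig for authenticate(UsernamePasswordCredentials) to succeed.

import com.hazelcast.client.HazelcastClient;
import com.hazelcast.client.config.ClientConfig;
import com.hazelcast.core.HazelcastInstance;

public class GroupAuthUsage {
    public static void main(String[] args) {
        ClientConfig config = new ClientConfig();
        // These values are compared against GroupConfig#getName()/getPassword()
        // on the member side in authenticate(UsernamePasswordCredentials).
        config.getGroupConfig().setName("dev").setPassword("dev-pass");
        HazelcastInstance client = HazelcastClient.newHazelcastClient(config);
        System.out.println("connected: " + client.getCluster().getMembers());
        client.shutdown();
    }
}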
266 |
public class CancellationAwareTask implements Callable<Boolean>, Serializable {
long sleepTime;
public CancellationAwareTask(long sleepTime) {
this.sleepTime = sleepTime;
}
public Boolean call() throws InterruptedException {
Thread.sleep(sleepTime);
return Boolean.TRUE;
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_executor_tasks_CancellationAwareTask.java
|
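A small sketch of how a task like this is exercised; the sleep length is arbitrary.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class CancellationUsage {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<Boolean> future = executor.submit(new CancellationAwareTask(60000L));
        // cancel(true) interrupts the sleeping thread, so call() never returns TRUE
        boolean canceled = future.cancel(true);
        System.out.println("canceled=" + canceled + " done=" + future.isDone());
        executor.shutdown();
    }
}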
280 |
public interface ActionListener<Response> {
/**
* A response handler.
*/
void onResponse(Response response);
/**
* A failure handler.
*/
void onFailure(Throwable e);
}
| 0true
|
src_main_java_org_elasticsearch_action_ActionListener.java
|
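A minimal sketch of the callback contract; in practice the client invokes exactly one of the two methods per request, asynchronously.

ActionListener<String> listener = new ActionListener<String>() {
    @Override
    public void onResponse(String response) {
        System.out.println("response: " + response);
    }
    @Override
    public void onFailure(Throwable e) {
        e.printStackTrace();
    }
};
// a transport layer would normally drive this; called directly here for illustration
listener.onResponse("ok");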
77 |
public interface AttributeSerializer<V> extends AttributeHandler<V> {
/**
* Reads an attribute from the given ScanBuffer.
* <p/>
* It is expected that this read operation adjusts the position in the ScanBuffer to after the attribute value.
*
* @param buffer ScanBuffer to read attribute from
* @return Read attribute
*/
public V read(ScanBuffer buffer);
/**
* Writes the attribute value to the given WriteBuffer.
* <p/>
* It is expected that this write operation adjusts the position in the WriteBuffer to after the attribute value.
*
* @param buffer WriteBuffer to write attribute to
* @param attribute Attribute to write to WriteBuffer
*/
public void write(WriteBuffer buffer, V attribute);
}
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_attribute_AttributeSerializer.java
|
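A sketch of a fixed-width implementation of the interface above. It assumes ScanBuffer#getInt and WriteBuffer#putInt exist and advance the buffer position as the contract requires, and that the inherited AttributeHandler contributes verifyAttribute/convert.

import com.thinkaurelius.titan.core.attribute.AttributeSerializer;
import com.thinkaurelius.titan.diskstorage.ScanBuffer;
import com.thinkaurelius.titan.diskstorage.WriteBuffer;

public class IntegerSerializer implements AttributeSerializer<Integer> {
    @Override
    public Integer read(ScanBuffer buffer) {
        return buffer.getInt(); // advances the read position past the value
    }
    @Override
    public void write(WriteBuffer buffer, Integer attribute) {
        buffer.putInt(attribute); // advances the write position past the value
    }
    @Override
    public void verifyAttribute(Integer value) {
        // every int is a legal value for this sketch
    }
    @Override
    public Integer convert(Object value) {
        return value instanceof Number ? ((Number) value).intValue() : null;
    }
}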
113 |
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_PAGE_RULE")
public class PageRuleImpl implements PageRule {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator= "PageRuleId")
@GenericGenerator(
name="PageRuleId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="PageRuleImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.cms.page.domain.PageRuleImpl")
}
)
@Column(name = "PAGE_RULE_ID")
protected Long id;
@Lob
@Type(type = "org.hibernate.type.StringClobType")
@Column(name = "MATCH_RULE", length = Integer.MAX_VALUE - 1)
protected String matchRule;
/* (non-Javadoc)
* @see org.broadleafcommerce.core.offer.domain.StructuredContentRule#getId()
*/
@Override
public Long getId() {
return id;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.offer.domain.StructuredContentRule#setId(java.lang.Long)
*/
@Override
public void setId(Long id) {
this.id = id;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.offer.domain.StructuredContentRule#getMatchRule()
*/
@Override
public String getMatchRule() {
return matchRule;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.offer.domain.StructuredContentRule#setMatchRule(java.lang.String)
*/
@Override
public void setMatchRule(String matchRule) {
this.matchRule = matchRule;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((id == null) ? 0 : id.hashCode());
result = prime * result + ((matchRule == null) ? 0 : matchRule.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
PageRuleImpl other = (PageRuleImpl) obj;
if (id != null && other.id != null) {
return id.equals(other.id);
}
if (matchRule == null) {
if (other.matchRule != null)
return false;
} else if (!matchRule.equals(other.matchRule))
return false;
return true;
}
@Override
public PageRule cloneEntity() {
PageRuleImpl newField = new PageRuleImpl();
newField.matchRule = matchRule;
return newField;
}
}
| 1no label
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_domain_PageRuleImpl.java
|
650 |
public class GetIndexTemplatesRequest extends MasterNodeReadOperationRequest<GetIndexTemplatesRequest> {
private String[] names;
public GetIndexTemplatesRequest() {
}
public GetIndexTemplatesRequest(String... names) {
this.names = names;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (names == null) {
validationException = addValidationError("names is null or empty", validationException);
} else {
for (String name : names) {
if (name == null || !Strings.hasText(name)) {
validationException = addValidationError("name is missing", validationException);
}
}
}
return validationException;
}
/**
* Sets the names of the index templates.
*/
public GetIndexTemplatesRequest names(String... names) {
this.names = names;
return this;
}
/**
* The names of the index templates.
*/
public String[] names() {
return this.names;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
names = in.readStringArray();
readLocal(in, Version.V_1_0_0_RC2);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArray(names);
writeLocal(out, Version.V_1_0_0_RC2);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_template_get_GetIndexTemplatesRequest.java
|
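A short sketch of the construction and validation paths above; template names are illustrative.

GetIndexTemplatesRequest all = new GetIndexTemplatesRequest();
// names was never set, so validate() reports "names is null or empty"
assert all.validate() != null;

GetIndexTemplatesRequest one = new GetIndexTemplatesRequest("logs-template");
assert one.validate() == null;

GetIndexTemplatesRequest wildcard = new GetIndexTemplatesRequest().names("logs-*");
assert wildcard.validate() == null;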
3,116 |
ItemListener listener = new ItemListener() {
@Override
public void itemAdded(ItemEvent item) {
send(item);
}
@Override
public void itemRemoved(ItemEvent item) {
send(item);
}
private void send(ItemEvent event) {
if (endpoint.live()) {
Data item = clientEngine.toData(event.getItem());
PortableItemEvent portableItemEvent = new PortableItemEvent(
item, event.getEventType(), event.getMember().getUuid());
endpoint.sendEvent(portableItemEvent, getCallId());
}
}
};
| 1no label
|
hazelcast_src_main_java_com_hazelcast_queue_client_AddListenerRequest.java
|
258 |
@Entity
@Table(name = "BLC_EMAIL_TRACKING_CLICKS")
public class EmailTrackingClicksImpl implements EmailTrackingClicks {
/** The Constant serialVersionUID. */
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "ClickId")
@GenericGenerator(
name="ClickId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="EmailTrackingClicksImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.common.email.domain.EmailTrackingClicksImpl")
}
)
@Column(name = "CLICK_ID")
protected Long id;
@ManyToOne(optional=false, targetEntity = EmailTrackingImpl.class)
@JoinColumn(name = "EMAIL_TRACKING_ID")
@Index(name="TRACKINGCLICKS_TRACKING_INDEX", columnNames={"EMAIL_TRACKING_ID"})
protected EmailTracking emailTracking;
@Column(nullable=false, name = "DATE_CLICKED")
protected Date dateClicked;
@Column(name = "CUSTOMER_ID")
@Index(name="TRACKINGCLICKS_CUSTOMER_INDEX", columnNames={"CUSTOMER_ID"})
protected String customerId;
@Column(name = "DESTINATION_URI")
protected String destinationUri;
@Column(name = "QUERY_STRING")
protected String queryString;
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTrackingClicks#getId()
*/
@Override
public Long getId() {
return id;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTrackingClicks#setId(java.lang.Long)
*/
@Override
public void setId(Long id) {
this.id = id;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTrackingClicks#getDateClicked()
*/
@Override
public Date getDateClicked() {
return dateClicked;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTrackingClicks#setDateClicked(java.util.Date)
*/
@Override
public void setDateClicked(Date dateClicked) {
this.dateClicked = dateClicked;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTrackingClicks#getDestinationUri()
*/
@Override
public String getDestinationUri() {
return destinationUri;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTrackingClicks#setDestinationUri(java.lang.String)
*/
@Override
public void setDestinationUri(String destinationUri) {
this.destinationUri = destinationUri;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTrackingClicks#getQueryString()
*/
@Override
public String getQueryString() {
return queryString;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTrackingClicks#setQueryString(java.lang.String)
*/
@Override
public void setQueryString(String queryString) {
this.queryString = queryString;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTrackingClicks#getEmailTracking()
*/
@Override
public EmailTracking getEmailTracking() {
return emailTracking;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTrackingClicks#setEmailTracking(org.broadleafcommerce.common.email.domain.EmailTrackingImpl)
*/
@Override
public void setEmailTracking(EmailTracking emailTracking) {
this.emailTracking = emailTracking;
}
/**
* @return the customer
*/
@Override
public String getCustomerId() {
return customerId;
}
/**
* @param customerId the customer to set
*/
@Override
public void setCustomerId(String customerId) {
this.customerId = customerId;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((customerId == null) ? 0 : customerId.hashCode());
result = prime * result + ((dateClicked == null) ? 0 : dateClicked.hashCode());
result = prime * result + ((destinationUri == null) ? 0 : destinationUri.hashCode());
result = prime * result + ((emailTracking == null) ? 0 : emailTracking.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
EmailTrackingClicksImpl other = (EmailTrackingClicksImpl) obj;
if (id != null && other.id != null) {
return id.equals(other.id);
}
if (customerId == null) {
if (other.customerId != null)
return false;
} else if (!customerId.equals(other.customerId))
return false;
if (dateClicked == null) {
if (other.dateClicked != null)
return false;
} else if (!dateClicked.equals(other.dateClicked))
return false;
if (destinationUri == null) {
if (other.destinationUri != null)
return false;
} else if (!destinationUri.equals(other.destinationUri))
return false;
if (emailTracking == null) {
if (other.emailTracking != null)
return false;
} else if (!emailTracking.equals(other.emailTracking))
return false;
return true;
}
}
| 1no label
|
common_src_main_java_org_broadleafcommerce_common_email_domain_EmailTrackingClicksImpl.java
|
1,520 |
public static class Map extends Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex> {
private Closure closure;
private boolean isVertex;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
this.isVertex = context.getConfiguration().getClass(CLASS, Element.class, Element.class).equals(Vertex.class);
try {
this.closure = (Closure) engine.eval(context.getConfiguration().get(CLOSURE));
} catch (final ScriptException e) {
throw new IOException(e.getMessage(), e);
}
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex>.Context context) throws IOException, InterruptedException {
if (this.isVertex) {
if (value.hasPaths()) {
//for (int i = 0; i < value.pathCount(); i++) {
this.closure.call(value);
//}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_PROCESSED, 1L);
}
} else {
long edgesProcessed = 0;
for (final Edge e : value.getEdges(Direction.IN)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
edgesProcessed++;
//for (int i = 0; i < edge.pathCount(); i++) {
this.closure.call(edge);
//}
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.IN_EDGES_PROCESSED, edgesProcessed);
edgesProcessed = 0;
for (final Edge e : value.getEdges(Direction.OUT)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
edgesProcessed++;
//for (int i = 0; i < edge.pathCount(); i++) {
this.closure.call(edge);
//}
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.OUT_EDGES_PROCESSED, edgesProcessed);
}
context.write(NullWritable.get(), value);
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_sideeffect_SideEffectMap.java
|
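A hedged sketch of how the CLOSURE configuration value becomes a callable object, mirroring the engine.eval call in setup(); it assumes a Groovy ScriptEngine is on the classpath and that evaluating a closure literal returns the Closure itself.

import javax.script.ScriptEngine;
import javax.script.ScriptEngineManager;
import groovy.lang.Closure;

public class ClosureEvalSketch {
    public static void main(String[] args) throws Exception {
        ScriptEngine engine = new ScriptEngineManager().getEngineByName("groovy");
        // evaluating a closure literal yields the Closure object
        Closure closure = (Closure) engine.eval("{ it -> println('side effect on ' + it) }");
        closure.call("vertex-1"); // map() calls this once per element that has paths
    }
}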
21 |
class FindScopeVisitor extends Visitor {
Scope scope;
public void visit(Tree.Declaration that) {
super.visit(that);
AnnotationList al = that.getAnnotationList();
if (al!=null) {
for (Tree.Annotation a: al.getAnnotations()) {
Integer i = a.getPrimary().getStartIndex();
Integer j = node.getStartIndex();
if (i.intValue()==j.intValue()) {
scope = that.getDeclarationModel().getScope();
}
}
}
}
public void visit(Tree.DocLink that) {
super.visit(that);
scope = ((Tree.DocLink)node).getPkg();
}
};
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_CompletionUtil.java
|
1,118 |
public class OSQLFunctionCoalesce extends OSQLFunctionAbstract {
public static final String NAME = "coalesce";
public OSQLFunctionCoalesce() {
super(NAME, 1, 1000);
}
@Override
public Object execute(OIdentifiable iCurrentRecord, Object iCurrentResult, final Object[] iParameters, OCommandContext iContext) {
int length = iParameters.length;
for (int i = 0; i < length; i++) {
if (iParameters[i] != null)
return iParameters[i];
}
return null;
}
@Override
public String getSyntax() {
return "Returns the first not-null parameter or null if all parameters are null. Syntax: coalesce(<field|value> [,<field|value>]*)";
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_functions_misc_OSQLFunctionCoalesce.java
|
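A worked example of the first-non-null semantics above; the record and context arguments are unused by this function, so null suffices for a standalone call.

OSQLFunctionCoalesce coalesce = new OSQLFunctionCoalesce();
Object result = coalesce.execute(null, null, new Object[]{null, null, "fallback"}, null);
System.out.println(result); // prints "fallback"
// equivalent SQL usage: SELECT coalesce(nickname, name, 'anonymous') FROM Person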
1,348 |
private final class LogSegment implements Comparable<LogSegment> {
private final RandomAccessFile rndFile;
private final File file;
private long filledUpTo;
private final long order;
private final int maxPagesCacheSize;
private boolean closed;
private OWALPage currentPage;
private final ConcurrentLinkedQueue<OWALPage> pagesCache = new ConcurrentLinkedQueue<OWALPage>();
private long nextPositionToFlush;
private long flushId;
private final ScheduledExecutorService commitExecutor = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
@Override
public Thread newThread(Runnable r) {
Thread thread = new Thread(r);
thread.setDaemon(true);
thread.setName("WAL Flush Task");
return thread;
}
});
private OLogSequenceNumber last = null;
private volatile boolean flushNewData = true;
private LogSegment(File file, int maxPagesCacheSize) throws IOException {
this.file = file;
this.maxPagesCacheSize = maxPagesCacheSize;
order = extractOrder(file.getName());
closed = false;
rndFile = new RandomAccessFile(file, "rw");
}
public void startFlush() {
if (commitDelay > 0)
commitExecutor.scheduleAtFixedRate(new FlushTask(), commitDelay, commitDelay, TimeUnit.MILLISECONDS);
}
public void stopFlush(boolean flush) {
if (flush)
flush();
if (!commitExecutor.isShutdown()) {
commitExecutor.shutdown();
try {
if (!commitExecutor
.awaitTermination(OGlobalConfiguration.WAL_SHUTDOWN_TIMEOUT.getValueAsInteger(), TimeUnit.MILLISECONDS))
throw new OStorageException("WAL flush task for " + getPath() + " segment can not be stopped.");
} catch (InterruptedException e) {
OLogManager.instance().error(this, "Can not shutdown background WAL commit thread.");
}
}
}
public long getOrder() {
return order;
}
public void init() throws IOException {
selfCheck();
initPageCache();
initLastPage();
}
private void initLastPage() throws IOException {
synchronized (rndFile) {
long pagesCount = rndFile.length() / OWALPage.PAGE_SIZE;
long currentPage = pagesCount - 1;
if (currentPage < 0)
return;
do {
rndFile.seek(currentPage * OWALPage.PAGE_SIZE);
byte[] content = new byte[OWALPage.PAGE_SIZE];
rndFile.readFully(content);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(content);
try {
final OWALPage page = new OWALPage(pointer, false);
int lastPosition = findLastRecord(page, true);
if (lastPosition > -1) {
last = new OLogSequenceNumber(order, currentPage * OWALPage.PAGE_SIZE + lastPosition);
return;
}
currentPage--;
} finally {
pointer.free();
}
} while (currentPage >= 0);
}
}
private void initPageCache() throws IOException {
synchronized (rndFile) {
long pagesCount = rndFile.length() / OWALPage.PAGE_SIZE;
if (pagesCount == 0)
return;
rndFile.seek((pagesCount - 1) * OWALPage.PAGE_SIZE);
byte[] content = new byte[OWALPage.PAGE_SIZE];
rndFile.readFully(content);
flushId = OLongSerializer.INSTANCE.deserializeNative(content, OWALPage.FLUSH_ID_OFFSET);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(content);
currentPage = new OWALPage(pointer, false);
filledUpTo = (pagesCount - 1) * OWALPage.PAGE_SIZE + currentPage.getFilledUpTo();
nextPositionToFlush = (pagesCount - 1) * OWALPage.PAGE_SIZE;
pagesCache.add(currentPage);
}
}
private long extractOrder(String name) {
int walOrderStartIndex = name.indexOf('.') + 1;
int walOrderEndIndex = name.indexOf('.', walOrderStartIndex);
String walOrder = name.substring(walOrderStartIndex, walOrderEndIndex);
try {
return Long.parseLong(walOrder);
} catch (NumberFormatException e) {
// should never happen: WAL file names always embed a numeric order
throw new IllegalStateException(e);
}
}
@Override
public int compareTo(LogSegment other) {
final long otherOrder = other.order;
if (order > otherOrder)
return 1;
else if (order < otherOrder)
return -1;
return 0;
}
public long filledUpTo() throws IOException {
return filledUpTo;
}
public OLogSequenceNumber begin() throws IOException {
if (!pagesCache.isEmpty())
return new OLogSequenceNumber(order, OWALPage.RECORDS_OFFSET);
if (rndFile.length() > 0)
return new OLogSequenceNumber(order, OWALPage.RECORDS_OFFSET);
return null;
}
public OLogSequenceNumber end() {
return last;
}
private int findLastRecord(OWALPage page, boolean skipTailRecords) {
int prevOffset = OWALPage.RECORDS_OFFSET;
int pageOffset = OWALPage.RECORDS_OFFSET;
int maxOffset = page.getFilledUpTo();
while (pageOffset < maxOffset) {
prevOffset = pageOffset;
pageOffset += page.getSerializedRecordSize(pageOffset);
}
if (skipTailRecords && page.recordTail(prevOffset))
return -1;
return prevOffset;
}
public void delete(boolean flush) throws IOException {
close(flush);
boolean deleted = file.delete();
while (!deleted) {
OMemoryWatchDog.freeMemoryForResourceCleanup(100);
deleted = !file.exists() || file.delete();
}
}
public String getPath() {
return file.getAbsolutePath();
}
public OLogSequenceNumber logRecord(byte[] record) throws IOException {
flushNewData = true;
int pageOffset = (int) (filledUpTo % OWALPage.PAGE_SIZE);
long pageIndex = filledUpTo / OWALPage.PAGE_SIZE;
if (pageOffset == 0 && pageIndex > 0)
pageIndex--;
int pos = 0;
boolean firstChunk = true;
OLogSequenceNumber lsn = null;
while (pos < record.length) {
if (currentPage == null) {
ODirectMemoryPointer pointer = new ODirectMemoryPointer(OWALPage.PAGE_SIZE);
currentPage = new OWALPage(pointer, true);
pagesCache.add(currentPage);
filledUpTo += OWALPage.RECORDS_OFFSET;
}
int freeSpace = currentPage.getFreeSpace();
if (freeSpace < OWALPage.MIN_RECORD_SIZE) {
filledUpTo += freeSpace + OWALPage.RECORDS_OFFSET;
ODirectMemoryPointer pointer = new ODirectMemoryPointer(OWALPage.PAGE_SIZE);
currentPage = new OWALPage(pointer, true);
pagesCache.add(currentPage);
pageIndex++;
freeSpace = currentPage.getFreeSpace();
}
final OWALPage walPage = currentPage;
synchronized (walPage) {
final int entrySize = OWALPage.calculateSerializedSize(record.length - pos);
int addedChunkOffset;
if (entrySize <= freeSpace) {
if (pos == 0)
addedChunkOffset = walPage.appendRecord(record, false, !firstChunk);
else
addedChunkOffset = walPage.appendRecord(Arrays.copyOfRange(record, pos, record.length), false, !firstChunk);
pos = record.length;
} else {
int chunkSize = OWALPage.calculateRecordSize(freeSpace);
if (chunkSize > record.length - pos)
chunkSize = record.length - pos;
addedChunkOffset = walPage.appendRecord(Arrays.copyOfRange(record, pos, pos + chunkSize), true, !firstChunk);
pos += chunkSize;
}
if (firstChunk) {
lsn = new OLogSequenceNumber(order, pageIndex * OWALPage.PAGE_SIZE + addedChunkOffset);
}
int spaceDiff = freeSpace - walPage.getFreeSpace();
filledUpTo += spaceDiff;
firstChunk = false;
}
}
if (pagesCache.size() > maxPagesCacheSize) {
OLogManager.instance().info(this, "Max cache limit is reached (%d vs. %d), sync flush is performed.", maxPagesCacheSize,
pagesCache.size());
flush();
}
last = lsn;
return last;
}
public byte[] readRecord(OLogSequenceNumber lsn) throws IOException {
assert lsn.getSegment() == order;
if (lsn.getPosition() >= filledUpTo)
return null;
if (flushedLsn == null || flushedLsn.compareTo(lsn) < 0)
flush();
byte[] record = null;
long pageIndex = lsn.getPosition() / OWALPage.PAGE_SIZE;
int pageOffset = (int) (lsn.getPosition() % OWALPage.PAGE_SIZE);
long pageCount = (filledUpTo + OWALPage.PAGE_SIZE - 1) / OWALPage.PAGE_SIZE;
while (pageIndex < pageCount) {
synchronized (rndFile) {
byte[] pageContent = new byte[OWALPage.PAGE_SIZE];
rndFile.seek(pageIndex * OWALPage.PAGE_SIZE);
rndFile.readFully(pageContent);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(pageContent);
try {
OWALPage page = new OWALPage(pointer, false);
byte[] content = page.getRecord(pageOffset);
if (record == null)
record = content;
else {
byte[] oldRecord = record;
record = new byte[record.length + content.length];
System.arraycopy(oldRecord, 0, record, 0, oldRecord.length);
System.arraycopy(content, 0, record, oldRecord.length, record.length - oldRecord.length);
}
if (page.mergeWithNextPage(pageOffset)) {
pageOffset = OWALPage.RECORDS_OFFSET;
pageIndex++;
} else
break;
} finally {
pointer.free();
}
}
}
return record;
}
public OLogSequenceNumber getNextLSN(OLogSequenceNumber lsn) throws IOException {
final byte[] record = readRecord(lsn);
if (record == null)
return null;
long pos = lsn.getPosition();
long pageIndex = pos / OWALPage.PAGE_SIZE;
int pageOffset = (int) (pos - pageIndex * OWALPage.PAGE_SIZE);
int restOfRecord = record.length;
while (restOfRecord > 0) {
int entrySize = OWALPage.calculateSerializedSize(restOfRecord);
if (entrySize + pageOffset < OWALPage.PAGE_SIZE) {
if (entrySize + pageOffset <= OWALPage.PAGE_SIZE - OWALPage.MIN_RECORD_SIZE)
pos += entrySize;
else
pos += OWALPage.PAGE_SIZE - pageOffset + OWALPage.RECORDS_OFFSET;
break;
} else if (entrySize + pageOffset == OWALPage.PAGE_SIZE) {
pos += entrySize + OWALPage.RECORDS_OFFSET;
break;
} else {
int chunkSize = OWALPage.calculateRecordSize(OWALPage.PAGE_SIZE - pageOffset);
restOfRecord -= chunkSize;
pos += OWALPage.PAGE_SIZE - pageOffset + OWALPage.RECORDS_OFFSET;
pageOffset = OWALPage.RECORDS_OFFSET;
}
}
if (pos >= filledUpTo)
return null;
return new OLogSequenceNumber(order, pos);
}
public void close(boolean flush) throws IOException {
if (!closed) {
stopFlush(flush);
rndFile.close();
closed = true;
if (!pagesCache.isEmpty()) {
for (OWALPage page : pagesCache)
page.getPagePointer().free();
}
currentPage = null;
}
}
private void selfCheck() throws IOException {
if (!pagesCache.isEmpty())
throw new IllegalStateException("WAL cache is not empty, we can not verify WAL after it was started to be used");
synchronized (rndFile) {
long pagesCount = rndFile.length() / OWALPage.PAGE_SIZE;
if (rndFile.length() % OWALPage.PAGE_SIZE > 0) {
OLogManager.instance().error(this, "Last WAL page was written partially, auto fix.");
rndFile.setLength(OWALPage.PAGE_SIZE * pagesCount);
}
long currentPage = pagesCount - 1;
CRC32 crc32 = new CRC32();
while (currentPage >= 0) {
crc32.reset();
byte[] content = new byte[OWALPage.PAGE_SIZE];
rndFile.seek(currentPage * OWALPage.PAGE_SIZE);
rndFile.readFully(content);
int pageCRC = OIntegerSerializer.INSTANCE.deserializeNative(content, 0);
crc32.update(content, OIntegerSerializer.INT_SIZE, OWALPage.PAGE_SIZE - OIntegerSerializer.INT_SIZE);
int calculatedCRC = (int) crc32.getValue();
if (pageCRC != calculatedCRC) {
OLogManager.instance().error(this, "%d WAL page has been broken and will be truncated.", currentPage);
currentPage--;
pagesCount = currentPage + 1;
rndFile.setLength(pagesCount * OWALPage.PAGE_SIZE);
} else
break;
}
if (currentPage < 0)
return;
byte[] content = new byte[OWALPage.PAGE_SIZE];
rndFile.seek(currentPage * OWALPage.PAGE_SIZE);
rndFile.readFully(content);
currentPage--;
long initialFlushId = OLongSerializer.INSTANCE.deserializeNative(content, OWALPage.FLUSH_ID_OFFSET);
long loadedFlushId = initialFlushId;
int flushedPagesCount = 1;
while (currentPage >= 0) {
content = new byte[OWALPage.PAGE_SIZE];
rndFile.seek(currentPage * OWALPage.PAGE_SIZE);
rndFile.readFully(content);
crc32.reset();
crc32.update(content, OIntegerSerializer.INT_SIZE, OWALPage.PAGE_SIZE - OIntegerSerializer.INT_SIZE);
int calculatedCRC = (int) crc32.getValue();
int pageCRC = OIntegerSerializer.INSTANCE.deserializeNative(content, 0);
if (pageCRC != calculatedCRC) {
OLogManager.instance().error(this, "%d WAL page has been broken and will be truncated.", currentPage);
currentPage--;
pagesCount = currentPage + 1;
rndFile.setLength(pagesCount * OWALPage.PAGE_SIZE);
flushedPagesCount = 0;
} else {
loadedFlushId = OLongSerializer.INSTANCE.deserializeNative(content, OWALPage.FLUSH_ID_OFFSET);
if (loadedFlushId == initialFlushId) {
flushedPagesCount++;
currentPage--;
} else
break;
}
}
if (flushedPagesCount != 0) {
content = new byte[OWALPage.PAGE_SIZE];
rndFile.seek((currentPage + 1) * OWALPage.PAGE_SIZE);
rndFile.readFully(content);
final int firstFlushIndex = OIntegerSerializer.INSTANCE.deserializeNative(content, OWALPage.FLUSH_INDEX_OFFSET);
if (firstFlushIndex != 0) {
OLogManager.instance().error(this, "%d WAL page has been broken and will be truncated.", currentPage + 1);
pagesCount = currentPage + 1;
rndFile.setLength(pagesCount * OWALPage.PAGE_SIZE);
flushedPagesCount = 0;
}
}
currentPage += flushedPagesCount;
while (currentPage >= 0) {
rndFile.seek(currentPage * OWALPage.PAGE_SIZE);
rndFile.readFully(content);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(content);
try {
OWALPage page = new OWALPage(pointer, false);
int pageOffset = findLastRecord(page, false);
if (pageOffset >= 0) {
if (page.mergeWithNextPage(pageOffset)) {
page.truncateTill(pageOffset);
rndFile.seek(currentPage * OWALPage.PAGE_SIZE);
content = pointer.get(0, OWALPage.PAGE_SIZE);
rndFile.write(content);
if (page.isEmpty()) {
OLogManager.instance().error(this, "%d WAL page has been broken and will be truncated.", currentPage);
currentPage--;
pagesCount = currentPage + 1;
rndFile.setLength(pagesCount * OWALPage.PAGE_SIZE);
} else
break;
} else
break;
} else
break;
} finally {
pointer.free();
}
}
rndFile.getFD().sync();
}
}
public OLogSequenceNumber readFlushedLSN() throws IOException {
long pages = rndFile.length() / OWALPage.PAGE_SIZE;
if (pages == 0)
return null;
long pageIndex = pages - 1;
while (true) {
rndFile.seek(pageIndex * OWALPage.PAGE_SIZE);
byte[] pageContent = new byte[OWALPage.PAGE_SIZE];
rndFile.readFully(pageContent);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(pageContent);
try {
OWALPage page = new OWALPage(pointer, false);
int pageOffset = findLastRecord(page, true);
if (pageOffset < 0) {
pageIndex--;
if (pageIndex < 0)
return null;
continue;
}
return new OLogSequenceNumber(order, pageIndex * OWALPage.PAGE_SIZE + pageOffset);
} finally {
pointer.free();
}
}
}
public void flush() {
if (!commitExecutor.isShutdown()) {
try {
commitExecutor.submit(new FlushTask()).get();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new OStorageException("Thread was interrupted during flush", e);
} catch (ExecutionException e) {
throw new OStorageException("Error during WAL segment " + getPath() + " flush.");
}
} else {
new FlushTask().run();
}
}
private final class FlushTask implements Runnable {
private FlushTask() {
}
@Override
public void run() {
try {
commit();
} catch (Throwable e) {
OLogManager.instance().error(this, "Error during WAL background flush", e);
}
}
private void commit() throws IOException {
if (pagesCache.isEmpty())
return;
if (!flushNewData)
return;
flushNewData = false;
final int maxSize = pagesCache.size();
ODirectMemoryPointer[] pagesToFlush = new ODirectMemoryPointer[maxSize];
long filePointer = nextPositionToFlush;
int lastRecordOffset = -1;
long lastPageIndex = -1;
int flushedPages = 0;
Iterator<OWALPage> pageIterator = pagesCache.iterator();
while (flushedPages < maxSize) {
final OWALPage page = pageIterator.next();
synchronized (page) {
ODirectMemoryPointer dataPointer;
if (flushedPages == maxSize - 1) {
dataPointer = new ODirectMemoryPointer(OWALPage.PAGE_SIZE);
page.getPagePointer().moveData(0, dataPointer, 0, OWALPage.PAGE_SIZE);
} else {
dataPointer = page.getPagePointer();
}
pagesToFlush[flushedPages] = dataPointer;
int recordOffset = findLastRecord(page, true);
if (recordOffset >= 0) {
lastRecordOffset = recordOffset;
lastPageIndex = flushedPages;
}
}
flushedPages++;
}
flushId++;
synchronized (rndFile) {
rndFile.seek(filePointer);
for (int i = 0; i < pagesToFlush.length; i++) {
ODirectMemoryPointer dataPointer = pagesToFlush[i];
byte[] pageContent = dataPointer.get(0, OWALPage.PAGE_SIZE);
if (i == pagesToFlush.length - 1)
dataPointer.free();
OLongSerializer.INSTANCE.serializeNative(flushId, pageContent, OWALPage.FLUSH_ID_OFFSET);
OIntegerSerializer.INSTANCE.serializeNative(i, pageContent, OWALPage.FLUSH_INDEX_OFFSET);
flushPage(pageContent);
filePointer += OWALPage.PAGE_SIZE;
}
rndFile.getFD().sync();
}
long oldPositionToFlush = nextPositionToFlush;
nextPositionToFlush = filePointer - OWALPage.PAGE_SIZE;
if (lastRecordOffset >= 0)
flushedLsn = new OLogSequenceNumber(order, oldPositionToFlush + lastPageIndex * OWALPage.PAGE_SIZE + lastRecordOffset);
for (int i = 0; i < flushedPages - 1; i++) {
OWALPage page = pagesCache.poll();
page.getPagePointer().free();
}
assert !pagesCache.isEmpty();
}
private void flushPage(byte[] content) throws IOException {
CRC32 crc32 = new CRC32();
crc32.update(content, OIntegerSerializer.INT_SIZE, OWALPage.PAGE_SIZE - OIntegerSerializer.INT_SIZE);
OIntegerSerializer.INSTANCE.serializeNative((int) crc32.getValue(), content, 0);
rndFile.write(content);
}
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_paginated_wal_OWriteAheadLog.java
|
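A self-contained sketch of the page checksum scheme used by flushPage() and selfCheck() above: the CRC is computed over everything after the first four bytes and stored in those bytes. The page size and the little-endian layout of the stored CRC are assumptions, since the real code delegates both to OWALPage and OIntegerSerializer.

import java.util.zip.CRC32;

public class PageCrcSketch {
    static final int PAGE_SIZE = 65536; // assumed; stands in for OWALPage.PAGE_SIZE
    static final int INT_SIZE = 4;      // the CRC occupies the first four bytes

    // Mirrors flushPage(): checksum everything after the CRC slot, store it first.
    static void seal(byte[] page) {
        CRC32 crc = new CRC32();
        crc.update(page, INT_SIZE, PAGE_SIZE - INT_SIZE);
        int value = (int) crc.getValue();
        page[0] = (byte) value; page[1] = (byte) (value >>> 8);
        page[2] = (byte) (value >>> 16); page[3] = (byte) (value >>> 24);
    }

    // Mirrors selfCheck(): recompute and compare against the stored value.
    static boolean verify(byte[] page) {
        CRC32 crc = new CRC32();
        crc.update(page, INT_SIZE, PAGE_SIZE - INT_SIZE);
        int stored = (page[0] & 0xFF) | (page[1] & 0xFF) << 8
                | (page[2] & 0xFF) << 16 | (page[3] & 0xFF) << 24;
        return stored == (int) crc.getValue();
    }

    public static void main(String[] args) {
        byte[] page = new byte[PAGE_SIZE];
        page[100] = 42;
        seal(page);
        System.out.println(verify(page)); // true
        page[200] = 7;                    // simulate a torn write
        System.out.println(verify(page)); // false -> the segment would truncate here
    }
}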
4,896 |
public class RestRecoveryAction extends AbstractCatAction {
@Inject
protected RestRecoveryAction(Settings settings, Client client, RestController restController) {
super(settings, client);
restController.registerHandler(GET, "/_cat/recovery", this);
restController.registerHandler(GET, "/_cat/recovery/{index}", this);
}
@Override
void documentation(StringBuilder sb) {
sb.append("/_cat/recovery\n");
sb.append("/_cat/recovery/{index}\n");
}
@Override
public void doRequest(final RestRequest request, final RestChannel channel) {
final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
clusterStateRequest.clear().nodes(true);
clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
client.admin().cluster().state(clusterStateRequest, new ActionListener<ClusterStateResponse>() {
@Override
public void onResponse(final ClusterStateResponse clusterStateResponse) {
IndicesStatusRequest indicesStatusRequest = new IndicesStatusRequest(indices);
indicesStatusRequest.recovery(true);
indicesStatusRequest.operationThreading(BroadcastOperationThreading.SINGLE_THREAD);
client.admin().indices().status(indicesStatusRequest, new ActionListener<IndicesStatusResponse>() {
@Override
public void onResponse(IndicesStatusResponse indicesStatusResponse) {
Map<String, Long> primarySizes = new HashMap<String, Long>();
Set<ShardStatus> replicas = new HashSet<ShardStatus>();
// Loop through all the shards in the index status, keeping
// track of the primary shard size with a Map and the
// recovering shards in a Set of ShardStatus objects
for (ShardStatus shardStatus : indicesStatusResponse.getShards()) {
if (shardStatus.getShardRouting().primary()) {
primarySizes.put(shardStatus.getShardRouting().getIndex() + shardStatus.getShardRouting().getId(),
shardStatus.getStoreSize().bytes());
} else if (shardStatus.getState() == IndexShardState.RECOVERING) {
replicas.add(shardStatus);
}
}
try {
channel.sendResponse(RestTable.buildResponse(buildRecoveryTable(request, clusterStateResponse, primarySizes, replicas), request, channel));
} catch (Throwable e) {
try {
channel.sendResponse(new XContentThrowableRestResponse(request, e));
} catch (IOException e2) {
logger.error("Unable to send recovery status response", e2);
}
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(new XContentThrowableRestResponse(request, e));
} catch (IOException e1) {
logger.error("Failed to send failure response", e1);
}
}
});
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(new XContentThrowableRestResponse(request, e));
} catch (IOException e1) {
logger.error("Failed to send failure response", e1);
}
}
});
}
@Override
Table getTableWithHeader(RestRequest request) {
Table t = new Table();
t.startHeaders().addCell("index", "alias:i,idx;desc:index name")
.addCell("shard", "alias:s,sh;desc:shard name")
.addCell("target", "alias:t;text-align:right;desc:bytes of source shard")
.addCell("recovered", "alias:r;text-align:right;desc:bytes recovered so far")
.addCell("percent", "alias:per,ratio;text-align:right;desc:percent recovered so far")
.addCell("host", "alias:h;desc:node host where source shard lives")
.addCell("ip", "desc:node ip where source shard lives")
.addCell("node", "alias:n;desc:node name where source shard lives")
.endHeaders();
return t;
}
/**
* buildRecoveryTable will build a table of recovery information suitable
* for displaying at the command line.
*
* @param request The incoming REST request, used to build the table header.
* @param state Current cluster state.
* @param primarySizes A Map of {@code index + shardId} strings to store size for all primary shards.
* @param recoveringReplicas A Set of {@link org.elasticsearch.action.admin.indices.status.ShardStatus} objects for each recovering replica to be displayed.
* @return A table containing index, shardId, node, target size, recovered size and percentage for each recovering replica
*/
public Table buildRecoveryTable(RestRequest request, ClusterStateResponse state, Map<String, Long> primarySizes, Set<ShardStatus> recoveringReplicas) {
Table t = getTableWithHeader(request);
for (ShardStatus status : recoveringReplicas) {
DiscoveryNode node = state.getState().nodes().get(status.getShardRouting().currentNodeId());
String index = status.getShardRouting().getIndex();
int id = status.getShardId();
long replicaSize = status.getStoreSize().bytes();
Long primarySize = primarySizes.get(index + id);
t.startRow();
t.addCell(index);
t.addCell(id);
t.addCell(primarySize);
t.addCell(replicaSize);
t.addCell(primarySize == null ? null : String.format(Locale.ROOT, "%1.1f%%", 100.0 * (float) replicaSize / primarySize));
t.addCell(node == null ? null : node.getHostName());
t.addCell(node == null ? null : node.getHostAddress());
t.addCell(node == null ? null : node.name());
t.endRow();
}
return t;
}
}
| 1no label
|
src_main_java_org_elasticsearch_rest_action_cat_RestRecoveryAction.java
|
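A worked instance of the percent column computed in buildRecoveryTable(); the sizes are illustrative, and the null guard mirrors the case where the primary shard size is unknown.

Long primarySize = 2048L;
long replicaSize = 512L;
String percent = primarySize == null
        ? null
        : String.format(java.util.Locale.ROOT, "%1.1f%%", 100.0 * (float) replicaSize / primarySize);
System.out.println(percent); // prints "25.0%"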
223 |
private class PropertyPlaceholderConfigurerResolver implements PropertyPlaceholderHelper.PlaceholderResolver {
private final Properties props;
private PropertyPlaceholderConfigurerResolver(Properties props) {
this.props = props;
}
public String resolvePlaceholder(String placeholderName) {
return RuntimeEnvironmentPropertiesConfigurer.this.resolvePlaceholder(placeholderName, props, 1);
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_config_RuntimeEnvironmentPropertiesConfigurer.java
|
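A small sketch of how a PlaceholderResolver like the one above plugs into Spring's PropertyPlaceholderHelper; the prefix/suffix pair and the property values are illustrative.

import java.util.Properties;
import org.springframework.util.PropertyPlaceholderHelper;

public class PlaceholderSketch {
    public static void main(String[] args) {
        final Properties props = new Properties();
        props.setProperty("db.host", "localhost");
        PropertyPlaceholderHelper helper = new PropertyPlaceholderHelper("${", "}");
        // Like the resolver above, this simply delegates lookups to Properties.
        PropertyPlaceholderHelper.PlaceholderResolver resolver =
                new PropertyPlaceholderHelper.PlaceholderResolver() {
                    public String resolvePlaceholder(String placeholderName) {
                        return props.getProperty(placeholderName);
                    }
                };
        System.out.println(helper.replacePlaceholders("jdbc://${db.host}/app", resolver));
    }
}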
541 |
public class DeleteMappingRequestBuilder extends AcknowledgedRequestBuilder<DeleteMappingRequest, DeleteMappingResponse, DeleteMappingRequestBuilder> {
public DeleteMappingRequestBuilder(IndicesAdminClient indicesClient) {
super((InternalIndicesAdminClient) indicesClient, new DeleteMappingRequest());
}
/**
* Sets the indices the delete mapping will execute on
*/
public DeleteMappingRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}
/**
* Sets the type of the mapping to remove
*/
public DeleteMappingRequestBuilder setType(String... types) {
request.types(types);
return this;
}
/**
* Specifies which categories of requested indices to ignore and how wildcard indices
* expressions are expanded, for example whether to ignore indices that don't exist.
*/
public DeleteMappingRequestBuilder setIndicesOptions(IndicesOptions options) {
request.indicesOptions(options);
return this;
}
@Override
protected void doExecute(ActionListener<DeleteMappingResponse> listener) {
((IndicesAdminClient) client).deleteMapping(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_mapping_delete_DeleteMappingRequestBuilder.java
|
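A hedged usage sketch of the builder above; it assumes an IndicesAdminClient named indicesClient is in scope, and the index/type names are illustrative.

new DeleteMappingRequestBuilder(indicesClient)
        .setIndices("logs-2014-*")
        .setType("event")
        .execute(new ActionListener<DeleteMappingResponse>() {
            @Override
            public void onResponse(DeleteMappingResponse response) {
                System.out.println("acknowledged=" + response.isAcknowledged());
            }
            @Override
            public void onFailure(Throwable e) {
                e.printStackTrace();
            }
        });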