| Unnamed: 0 (int64, 0–6.45k) | func (string, length 37–161k) | target (class label, 2 classes) | project (string, length 33–167) |
---|---|---|---|
4,664 |
private final PercolatorType matchPercolator = new PercolatorType() {
@Override
public byte id() {
return 0x03;
}
@Override
public ReduceResult reduce(List<PercolateShardResponse> shardResults) {
long foundMatches = 0;
int numMatches = 0;
for (PercolateShardResponse response : shardResults) {
foundMatches += response.count();
numMatches += response.matches().length;
}
int requestedSize = shardResults.get(0).requestedSize();
// Use a custom impl of AbstractBigArray for Object[]?
List<PercolateResponse.Match> finalMatches = new ArrayList<PercolateResponse.Match>(requestedSize == 0 ? numMatches : requestedSize);
outer:
for (PercolateShardResponse response : shardResults) {
Text index = new StringText(response.getIndex());
for (int i = 0; i < response.matches().length; i++) {
float score = response.scores().length == 0 ? NO_SCORE : response.scores()[i];
Text match = new BytesText(new BytesArray(response.matches()[i]));
Map<String, HighlightField> hl = response.hls().isEmpty() ? null : response.hls().get(i);
finalMatches.add(new PercolateResponse.Match(index, match, score, hl));
if (requestedSize != 0 && finalMatches.size() == requestedSize) {
break outer;
}
}
}
assert !shardResults.isEmpty();
InternalFacets reducedFacets = reduceFacets(shardResults);
InternalAggregations reducedAggregations = reduceAggregations(shardResults);
return new ReduceResult(foundMatches, finalMatches.toArray(new PercolateResponse.Match[finalMatches.size()]), reducedFacets, reducedAggregations);
}
@Override
public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context) {
long count = 0;
List<BytesRef> matches = new ArrayList<BytesRef>();
List<Map<String, HighlightField>> hls = new ArrayList<Map<String, HighlightField>>();
Lucene.ExistsCollector collector = new Lucene.ExistsCollector();
for (Map.Entry<HashedBytesRef, Query> entry : context.percolateQueries().entrySet()) {
collector.reset();
if (context.highlight() != null) {
context.parsedQuery(new ParsedQuery(entry.getValue(), ImmutableMap.<String, Filter>of()));
context.hitContext().cache().clear();
}
try {
context.docSearcher().search(entry.getValue(), collector);
} catch (Throwable e) {
logger.warn("[" + entry.getKey() + "] failed to execute query", e);
}
if (collector.exists()) {
if (!context.limit || count < context.size) {
matches.add(entry.getKey().bytes);
if (context.highlight() != null) {
highlightPhase.hitExecute(context, context.hitContext());
hls.add(context.hitContext().hit().getHighlightFields());
}
}
count++;
}
}
BytesRef[] finalMatches = matches.toArray(new BytesRef[matches.size()]);
return new PercolateShardResponse(finalMatches, hls, count, context, request.index(), request.shardId());
}
};
| 1 (no label)
|
src_main_java_org_elasticsearch_percolator_PercolatorService.java
|
984 |
public class UnlockOperation extends BaseLockOperation implements Notifier, BackupAwareOperation {
private boolean force;
private boolean shouldNotify;
public UnlockOperation() {
}
public UnlockOperation(ObjectNamespace namespace, Data key, long threadId) {
super(namespace, key, threadId);
}
public UnlockOperation(ObjectNamespace namespace, Data key, long threadId, boolean force) {
super(namespace, key, threadId);
this.force = force;
}
@Override
public void run() throws Exception {
if (force) {
forceUnlock();
} else {
unlock();
}
}
private void unlock() {
LockStoreImpl lockStore = getLockStore();
boolean unlocked = lockStore.unlock(key, getCallerUuid(), threadId);
response = unlocked;
ensureUnlocked(lockStore, unlocked);
}
private void ensureUnlocked(LockStoreImpl lockStore, boolean unlocked) {
if (!unlocked) {
String ownerInfo = lockStore.getOwnerInfo(key);
throw new IllegalMonitorStateException("Current thread is not owner of the lock! -> " + ownerInfo);
}
}
private void forceUnlock() {
LockStoreImpl lockStore = getLockStore();
response = lockStore.forceUnlock(key);
}
@Override
public void afterRun() throws Exception {
LockStoreImpl lockStore = getLockStore();
AwaitOperation awaitResponse = lockStore.pollExpiredAwaitOp(key);
if (awaitResponse != null) {
OperationService operationService = getNodeEngine().getOperationService();
operationService.runOperationOnCallingThread(awaitResponse);
}
shouldNotify = awaitResponse == null;
}
@Override
public Operation getBackupOperation() {
return new UnlockBackupOperation(namespace, key, threadId, getCallerUuid(), force);
}
@Override
public boolean shouldBackup() {
return Boolean.TRUE.equals(response);
}
@Override
public boolean shouldNotify() {
return shouldNotify;
}
@Override
public final WaitNotifyKey getNotifiedKey() {
LockStoreImpl lockStore = getLockStore();
ConditionKey conditionKey = lockStore.getSignalKey(key);
if (conditionKey == null) {
return new LockWaitNotifyKey(namespace, key);
} else {
return conditionKey;
}
}
@Override
public int getId() {
return LockDataSerializerHook.UNLOCK;
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeBoolean(force);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
force = in.readBoolean();
}
}
| 1 (no label)
|
hazelcast_src_main_java_com_hazelcast_concurrent_lock_operations_UnlockOperation.java
|
600 |
ex.execute(new Runnable() {
public void run() {
if (sleep) {
try {
Thread.sleep((int) (1000 * Math.random()));
} catch (InterruptedException ignored) {
}
}
HazelcastInstance h = nodeFactory.newHazelcastInstance(config);
map.put(index, h);
latch.countDown();
}
});
| 0 (true)
|
hazelcast_src_test_java_com_hazelcast_cluster_JoinStressTest.java
|
1,184 |
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
node.close();
}
});
| 0 (true)
|
src_main_java_org_elasticsearch_bootstrap_Bootstrap.java
|
1,220 |
public class PageCacheRecycler extends AbstractComponent {
public static final String TYPE = "page.type";
public static final String LIMIT_HEAP = "page.limit.heap";
public static final String LIMIT_PER_THREAD = "page.limit.per_thread";
public static final String WEIGHT = "page.weight";
private final Recycler<byte[]> bytePage;
private final Recycler<int[]> intPage;
private final Recycler<long[]> longPage;
private final Recycler<double[]> doublePage;
private final Recycler<Object[]> objectPage;
public void close() {
bytePage.close();
intPage.close();
longPage.close();
doublePage.close();
objectPage.close();
}
private static int maximumSearchThreadPoolSize(ThreadPool threadPool, Settings settings) {
ThreadPool.Info searchThreadPool = threadPool.info(ThreadPool.Names.SEARCH);
assert searchThreadPool != null;
final int maxSize = searchThreadPool.getMax();
if (maxSize <= 0) {
// happens with cached thread pools, let's assume there are at most 3x ${number of processors} threads
return 3 * EsExecutors.boundedNumberOfProcessors(settings);
} else {
return maxSize;
}
}
// return the maximum number of pages that may be cached depending on
// - limit: the total amount of memory available
// - pageSize: the size of a single page
// - weight: the weight for this data type
// - totalWeight: the sum of all weights
private static int maxCount(long limit, long pageSize, double weight, double totalWeight) {
return (int) (weight / totalWeight * limit / pageSize);
}
@Inject
public PageCacheRecycler(Settings settings, ThreadPool threadPool) {
super(settings);
final Type type = Type.parse(componentSettings.get(TYPE));
final long limit = componentSettings.getAsMemory(LIMIT_HEAP, "10%").bytes();
final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
final int searchThreadPoolSize = maximumSearchThreadPoolSize(threadPool, settings);
// We have a global amount of memory that we need to divide across data types.
// Since some types are more useful than other ones we give them different weights.
// Trying to store all of them in a single stack would be problematic because eg.
// a work load could fill the recycler with only byte[] pages and then another
// workload that would work with double[] pages couldn't recycle them because there
// is no space left in the stack/queue. LRU/LFU policies are not an option either
// because they would make obtain/release too costly: we really need constant-time
// operations.
// Ultimately a better solution would be to only store one kind of data and have the
// ability to interpret it either as a source of bytes, doubles, longs, etc. eg. thanks
// to direct ByteBuffers or sun.misc.Unsafe on a byte[] but this would have other issues
// that would need to be addressed such as garbage collection of native memory or safety
// of Unsafe writes.
final double bytesWeight = componentSettings.getAsDouble(WEIGHT + ".bytes", 1d);
final double intsWeight = componentSettings.getAsDouble(WEIGHT + ".ints", 1d);
final double longsWeight = componentSettings.getAsDouble(WEIGHT + ".longs", 1d);
final double doublesWeight = componentSettings.getAsDouble(WEIGHT + ".doubles", 1d);
// object pages are less useful to us so we give them a lower weight by default
final double objectsWeight = componentSettings.getAsDouble(WEIGHT + ".objects", 0.1d);
final double totalWeight = bytesWeight + intsWeight + longsWeight + doublesWeight + objectsWeight;
bytePage = build(type, maxCount(limit, BigArrays.BYTE_PAGE_SIZE, bytesWeight, totalWeight), searchThreadPoolSize, availableProcessors, new Recycler.C<byte[]>() {
@Override
public byte[] newInstance(int sizing) {
return new byte[BigArrays.BYTE_PAGE_SIZE];
}
@Override
public void clear(byte[] value) {}
});
intPage = build(type, maxCount(limit, BigArrays.INT_PAGE_SIZE, intsWeight, totalWeight), searchThreadPoolSize, availableProcessors, new Recycler.C<int[]>() {
@Override
public int[] newInstance(int sizing) {
return new int[BigArrays.INT_PAGE_SIZE];
}
@Override
public void clear(int[] value) {}
});
longPage = build(type, maxCount(limit, BigArrays.LONG_PAGE_SIZE, longsWeight, totalWeight), searchThreadPoolSize, availableProcessors, new Recycler.C<long[]>() {
@Override
public long[] newInstance(int sizing) {
return new long[BigArrays.LONG_PAGE_SIZE];
}
@Override
public void clear(long[] value) {}
});
doublePage = build(type, maxCount(limit, BigArrays.DOUBLE_PAGE_SIZE, doublesWeight, totalWeight), searchThreadPoolSize, availableProcessors, new Recycler.C<double[]>() {
@Override
public double[] newInstance(int sizing) {
return new double[BigArrays.DOUBLE_PAGE_SIZE];
}
@Override
public void clear(double[] value) {}
});
objectPage = build(type, maxCount(limit, BigArrays.OBJECT_PAGE_SIZE, objectsWeight, totalWeight), searchThreadPoolSize, availableProcessors, new Recycler.C<Object[]>() {
@Override
public Object[] newInstance(int sizing) {
return new Object[BigArrays.OBJECT_PAGE_SIZE];
}
@Override
public void clear(Object[] value) {
Arrays.fill(value, null); // we need to remove the strong refs on the objects stored in the array
}
});
}
public Recycler.V<byte[]> bytePage(boolean clear) {
final Recycler.V<byte[]> v = bytePage.obtain();
if (v.isRecycled() && clear) {
Arrays.fill(v.v(), (byte) 0);
}
return v;
}
public Recycler.V<int[]> intPage(boolean clear) {
final Recycler.V<int[]> v = intPage.obtain();
if (v.isRecycled() && clear) {
Arrays.fill(v.v(), 0);
}
return v;
}
public Recycler.V<long[]> longPage(boolean clear) {
final Recycler.V<long[]> v = longPage.obtain();
if (v.isRecycled() && clear) {
Arrays.fill(v.v(), 0L);
}
return v;
}
public Recycler.V<double[]> doublePage(boolean clear) {
final Recycler.V<double[]> v = doublePage.obtain();
if (v.isRecycled() && clear) {
Arrays.fill(v.v(), 0d);
}
return v;
}
public Recycler.V<Object[]> objectPage() {
// object pages are cleared on release anyway
return objectPage.obtain();
}
private static <T> Recycler<T> build(Type type, int limit, int estimatedThreadPoolSize, int availableProcessors, Recycler.C<T> c) {
final Recycler<T> recycler;
if (limit == 0) {
recycler = none(c);
} else {
recycler = type.build(c, limit, estimatedThreadPoolSize, availableProcessors);
}
return recycler;
}
public static enum Type {
SOFT_THREAD_LOCAL {
@Override
<T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
return threadLocal(softFactory(dequeFactory(c, limit / estimatedThreadPoolSize)));
}
},
THREAD_LOCAL {
@Override
<T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
return threadLocal(dequeFactory(c, limit / estimatedThreadPoolSize));
}
},
QUEUE {
@Override
<T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
return concurrentDeque(c, limit);
}
},
SOFT_CONCURRENT {
@Override
<T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
return concurrent(softFactory(dequeFactory(c, limit / availableProcessors)), availableProcessors);
}
},
CONCURRENT {
@Override
<T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
return concurrent(dequeFactory(c, limit / availableProcessors), availableProcessors);
}
},
NONE {
@Override
<T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
return none(c);
}
};
public static Type parse(String type) {
if (Strings.isNullOrEmpty(type)) {
return SOFT_CONCURRENT;
}
try {
return Type.valueOf(type.toUpperCase(Locale.ROOT));
} catch (IllegalArgumentException e) {
throw new ElasticsearchIllegalArgumentException("no type support [" + type + "]");
}
}
abstract <T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors);
}
}
| 0 (true)
|
src_main_java_org_elasticsearch_cache_recycler_PageCacheRecycler.java
|
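The weighting scheme described in the comments of the row above is easier to see with concrete numbers. The following is a minimal worked sketch of the maxCount arithmetic; the 1 GB heap, the 10% default limit, and the 16 KB byte-page size are illustrative assumptions, not values taken from this dataset.
public class MaxCountExample {
    // Same formula as PageCacheRecycler.maxCount above: each data type gets
    // weight/totalWeight of the overall byte budget, expressed in pages.
    static int maxCount(long limit, long pageSize, double weight, double totalWeight) {
        return (int) (weight / totalWeight * limit / pageSize);
    }
    public static void main(String[] args) {
        long limit = (long) (1024L * 1024 * 1024 * 0.10); // assumed: 10% of a 1 GB heap
        long bytePageSize = 16 * 1024;                    // assumed byte-page size
        double totalWeight = 1 + 1 + 1 + 1 + 0.1;         // bytes, ints, longs, doubles, objects
        // byte[] pages get 1/4.1 of ~102 MB, i.e. roughly 1598 pages of 16 KB each
        System.out.println(maxCount(limit, bytePageSize, 1d, totalWeight));
    }
}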
689 |
public class BulkItemResponse implements Streamable {
/**
* Represents a failure.
*/
public static class Failure {
private final String index;
private final String type;
private final String id;
private final String message;
private final RestStatus status;
public Failure(String index, String type, String id, Throwable t) {
this.index = index;
this.type = type;
this.id = id;
this.message = ExceptionsHelper.detailedMessage(t);
this.status = ExceptionsHelper.status(t);
}
public Failure(String index, String type, String id, String message, RestStatus status) {
this.index = index;
this.type = type;
this.id = id;
this.message = message;
this.status = status;
}
/**
* The index name of the action.
*/
public String getIndex() {
return this.index;
}
/**
* The type of the action.
*/
public String getType() {
return type;
}
/**
* The id of the action.
*/
public String getId() {
return id;
}
/**
* The failure message.
*/
public String getMessage() {
return this.message;
}
/**
* The rest status.
*/
public RestStatus getStatus() {
return this.status;
}
}
private int id;
private String opType;
private ActionResponse response;
private Failure failure;
BulkItemResponse() {
}
public BulkItemResponse(int id, String opType, ActionResponse response) {
this.id = id;
this.opType = opType;
this.response = response;
}
public BulkItemResponse(int id, String opType, Failure failure) {
this.id = id;
this.opType = opType;
this.failure = failure;
}
/**
* The numeric order of the item matching the same request order in the bulk request.
*/
public int getItemId() {
return id;
}
/**
* The operation type ("index", "create" or "delete").
*/
public String getOpType() {
return this.opType;
}
/**
* The index name of the action.
*/
public String getIndex() {
if (failure != null) {
return failure.getIndex();
}
if (response instanceof IndexResponse) {
return ((IndexResponse) response).getIndex();
} else if (response instanceof DeleteResponse) {
return ((DeleteResponse) response).getIndex();
} else if (response instanceof UpdateResponse) {
return ((UpdateResponse) response).getIndex();
}
return null;
}
/**
* The type of the action.
*/
public String getType() {
if (failure != null) {
return failure.getType();
}
if (response instanceof IndexResponse) {
return ((IndexResponse) response).getType();
} else if (response instanceof DeleteResponse) {
return ((DeleteResponse) response).getType();
} else if (response instanceof UpdateResponse) {
return ((UpdateResponse) response).getType();
}
return null;
}
/**
* The id of the action.
*/
public String getId() {
if (failure != null) {
return failure.getId();
}
if (response instanceof IndexResponse) {
return ((IndexResponse) response).getId();
} else if (response instanceof DeleteResponse) {
return ((DeleteResponse) response).getId();
} else if (response instanceof UpdateResponse) {
return ((UpdateResponse) response).getId();
}
return null;
}
/**
* The version of the action.
*/
public long getVersion() {
if (failure != null) {
return -1;
}
if (response instanceof IndexResponse) {
return ((IndexResponse) response).getVersion();
} else if (response instanceof DeleteResponse) {
return ((DeleteResponse) response).getVersion();
} else if (response instanceof UpdateResponse) {
return ((UpdateResponse) response).getVersion();
}
return -1;
}
/**
* The actual response ({@link IndexResponse} or {@link DeleteResponse}). <tt>null</tt> in
* case of failure.
*/
public <T extends ActionResponse> T getResponse() {
return (T) response;
}
/**
* Is this a failed execution of an operation.
*/
public boolean isFailed() {
return failure != null;
}
/**
* The failure message, <tt>null</tt> if it did not fail.
*/
public String getFailureMessage() {
if (failure != null) {
return failure.getMessage();
}
return null;
}
/**
* The actual failure object if there was a failure.
*/
public Failure getFailure() {
return this.failure;
}
public static BulkItemResponse readBulkItem(StreamInput in) throws IOException {
BulkItemResponse response = new BulkItemResponse();
response.readFrom(in);
return response;
}
@Override
public void readFrom(StreamInput in) throws IOException {
id = in.readVInt();
opType = in.readSharedString();
byte type = in.readByte();
if (type == 0) {
response = new IndexResponse();
response.readFrom(in);
} else if (type == 1) {
response = new DeleteResponse();
response.readFrom(in);
} else if (type == 3) { // make 3 instead of 2, because 2 is already in use for 'no responses'
response = new UpdateResponse();
response.readFrom(in);
}
if (in.readBoolean()) {
String fIndex = in.readSharedString();
String fType = in.readSharedString();
String fId = in.readOptionalString();
String fMessage = in.readString();
RestStatus status = RestStatus.readFrom(in);
failure = new Failure(fIndex, fType, fId, fMessage, status);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(id);
out.writeSharedString(opType);
if (response == null) {
out.writeByte((byte) 2);
} else {
if (response instanceof IndexResponse) {
out.writeByte((byte) 0);
} else if (response instanceof DeleteResponse) {
out.writeByte((byte) 1);
} else if (response instanceof UpdateResponse) {
out.writeByte((byte) 3); // make 3 instead of 2, because 2 is already in use for 'no responses'
}
response.writeTo(out);
}
if (failure == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeSharedString(failure.getIndex());
out.writeSharedString(failure.getType());
out.writeOptionalString(failure.getId());
out.writeString(failure.getMessage());
RestStatus.writeTo(out, failure.getStatus());
}
}
}
| 0 (true)
|
src_main_java_org_elasticsearch_action_bulk_BulkItemResponse.java
|
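A minimal consumption sketch for the class above. The helper name and the assumption that successful items come from index operations are illustrative; only the accessors themselves are taken from the row.
// Hypothetical helper: separate failed bulk items from successful ones.
static void logBulkResult(BulkResponse bulkResponse) {
    for (BulkItemResponse item : bulkResponse.getItems()) {
        if (item.isFailed()) {
            System.err.println(item.getOpType() + " [" + item.getIndex() + "/"
                    + item.getType() + "/" + item.getId() + "] failed: "
                    + item.getFailureMessage());
        } else if (item.getResponse() instanceof IndexResponse) {
            // getResponse() is an unchecked generic cast, so guard with instanceof
            IndexResponse indexed = (IndexResponse) item.getResponse();
            System.out.println("indexed, version " + indexed.getVersion());
        }
    }
}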
63 |
public interface IntByIntToInt { int apply(int a, int b); }
| 0 (true)
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
124 |
class FindInvocationVisitor extends Visitor {
Node node;
Tree.InvocationExpression result;
Tree.InvocationExpression current;
TypedDeclaration parameter;
FindInvocationVisitor(Node node) {
this.node=node;
}
@Override
public void visit(Tree.ListedArgument that) {
Expression e = that.getExpression();
if (e!=null && node==e.getTerm()) {
result=current;
Parameter p = that.getParameter();
if (p!=null) {
parameter=p.getModel();
}
}
super.visit(that);
}
@Override
public void visit(Tree.SpreadArgument that) {
Expression e = that.getExpression();
if (e!=null && node==e.getTerm()) {
result=current;
Parameter p = that.getParameter();
if (p!=null) {
parameter = p.getModel();
}
}
super.visit(that);
}
@Override
public void visit(Tree.NamedArgument that) {
if (node==that) {
result=current;
Parameter p = that.getParameter();
if (p!=null) {
parameter = p.getModel();
}
}
super.visit(that);
}
@Override
public void visit(Tree.Return that) {
Expression e = that.getExpression();
if (e!=null && node==e.getTerm()) {
//result=current;
parameter = (TypedDeclaration) that.getDeclaration();
}
super.visit(that);
}
@Override
public void visit(Tree.AssignOp that) {
if (node==that.getRightTerm()) {
//result=current;
Term lt = that.getLeftTerm();
if (lt instanceof Tree.BaseMemberExpression) {
Declaration d = ((Tree.BaseMemberExpression) lt).getDeclaration();
if (d instanceof TypedDeclaration) {
parameter = (TypedDeclaration) d;
}
}
}
super.visit(that);
}
@Override
public void visit(Tree.SpecifierStatement that) {
Expression e = that.getSpecifierExpression().getExpression();
if (e!=null && node==e.getTerm()) {
//result=current;
Term bme = that.getBaseMemberExpression();
if (bme instanceof Tree.BaseMemberExpression) {
Declaration d =
((Tree.BaseMemberExpression) bme).getDeclaration();
if (d instanceof TypedDeclaration) {
parameter = (TypedDeclaration) d;
}
}
}
super.visit(that);
}
@Override
public void visit(Tree.AttributeDeclaration that) {
Tree.SpecifierOrInitializerExpression sie =
that.getSpecifierOrInitializerExpression();
if (sie!=null) {
Expression e = sie.getExpression();
if (e!=null && node==e.getTerm()) {
//result=current;
parameter = that.getDeclarationModel();
}
}
super.visit(that);
}
@Override
public void visit(Tree.MethodDeclaration that) {
Tree.SpecifierOrInitializerExpression sie =
that.getSpecifierExpression();
if (sie!=null) {
Expression e = sie.getExpression();
if (e!=null && node==e.getTerm()) {
//result=current;
parameter = that.getDeclarationModel();
}
}
super.visit(that);
}
@Override
public void visit(Tree.InitializerParameter that) {
Tree.SpecifierExpression se = that.getSpecifierExpression();
if (se!=null) {
Tree.Expression e = se.getExpression();
if (e!=null && node==e.getTerm()) {
//result=current;
parameter = that.getParameterModel().getModel();
}
}
super.visit(that);
}
@Override
public void visit(Tree.InvocationExpression that) {
Tree.InvocationExpression oc=current;
current = that;
super.visit(that);
current=oc;
}
@Override
public void visit(Tree.BaseMemberExpression that) {
if (that == node) {
result = current;
}
super.visit(that);
}
}
| 0 (true)
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_FindInvocationVisitor.java
|
178 |
static final class ExceptionNode extends WeakReference<ForkJoinTask<?>> {
final Throwable ex;
ExceptionNode next;
final long thrower; // use id not ref to avoid weak cycles
final int hashCode; // store task hashCode before weak ref disappears
ExceptionNode(ForkJoinTask<?> task, Throwable ex, ExceptionNode next) {
super(task, exceptionTableRefQueue);
this.ex = ex;
this.next = next;
this.thrower = Thread.currentThread().getId();
this.hashCode = System.identityHashCode(task);
}
}
| 0 (true)
|
src_main_java_jsr166y_ForkJoinTask.java
|
550 |
public static final class OClusterPositionFactoryLong extends OClusterPositionFactory {
@Override
public OClusterPosition generateUniqueClusterPosition() {
throw new UnsupportedOperationException();
}
@Override
public OClusterPosition valueOf(long value) {
return new OClusterPositionLong(value);
}
@Override
public OClusterPosition valueOf(String value) {
return new OClusterPositionLong(Long.valueOf(value));
}
@Override
public OClusterPosition fromStream(byte[] content, int start) {
return new OClusterPositionLong(OLongSerializer.INSTANCE.deserialize(content, start));
}
@Override
public int getSerializedSize() {
return OLongSerializer.LONG_SIZE;
}
@Override
public OClusterPosition getMaxValue() {
return new OClusterPositionLong(Long.MAX_VALUE);
}
}
| 0 (true)
|
core_src_main_java_com_orientechnologies_orient_core_id_OClusterPositionFactory.java
|
777 |
public class CollectionTransactionLog implements KeyAwareTransactionLog {
String transactionId;
private long itemId;
private String name;
private Operation op;
private int partitionId;
private String serviceName;
public CollectionTransactionLog() {
}
public CollectionTransactionLog(long itemId,
String name,
int partitionId,
String serviceName,
String transactionId,
Operation op) {
this.itemId = itemId;
this.name = name;
this.op = op;
this.partitionId = partitionId;
this.serviceName = serviceName;
this.transactionId = transactionId;
}
@Override
public Object getKey() {
return new TransactionLogKey(name, itemId, serviceName);
}
@Override
public Future prepare(NodeEngine nodeEngine) {
boolean removeOperation = op instanceof CollectionTxnRemoveOperation;
CollectionPrepareOperation operation = new CollectionPrepareOperation(name, itemId, transactionId, removeOperation);
try {
return nodeEngine.getOperationService().invokeOnPartition(serviceName, operation, partitionId);
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
@Override
public Future commit(NodeEngine nodeEngine) {
try {
return nodeEngine.getOperationService().invokeOnPartition(serviceName, op, partitionId);
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
@Override
public Future rollback(NodeEngine nodeEngine) {
boolean removeOperation = op instanceof CollectionTxnRemoveOperation;
CollectionRollbackOperation operation = new CollectionRollbackOperation(name, itemId, removeOperation);
try {
return nodeEngine.getOperationService().invokeOnPartition(serviceName, operation, partitionId);
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeLong(itemId);
out.writeUTF(name);
out.writeInt(partitionId);
out.writeUTF(serviceName);
out.writeObject(op);
out.writeUTF(transactionId);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
itemId = in.readLong();
name = in.readUTF();
partitionId = in.readInt();
serviceName = in.readUTF();
op = in.readObject();
transactionId = in.readUTF();
}
}
| 0 (true)
|
hazelcast_src_main_java_com_hazelcast_collection_txn_CollectionTransactionLog.java
|
396 |
public class ClusterSearchShardsResponse extends ActionResponse implements ToXContent {
private ClusterSearchShardsGroup[] groups;
private DiscoveryNode[] nodes;
ClusterSearchShardsResponse() {
}
public ClusterSearchShardsGroup[] getGroups() {
return groups;
}
public DiscoveryNode[] getNodes() {
return nodes;
}
public ClusterSearchShardsResponse(ClusterSearchShardsGroup[] groups, DiscoveryNode[] nodes) {
this.groups = groups;
this.nodes = nodes;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
groups = new ClusterSearchShardsGroup[in.readVInt()];
for (int i = 0; i < groups.length; i++) {
groups[i] = ClusterSearchShardsGroup.readSearchShardsGroupResponse(in);
}
nodes = new DiscoveryNode[in.readVInt()];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = DiscoveryNode.readNode(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(groups.length);
for (ClusterSearchShardsGroup response : groups) {
response.writeTo(out);
}
out.writeVInt(nodes.length);
for (DiscoveryNode node : nodes) {
node.writeTo(out);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject("nodes");
for (DiscoveryNode node : nodes) {
builder.startObject(node.getId(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("name", node.name());
builder.field("transport_address", node.getAddress());
if (!node.attributes().isEmpty()) {
builder.startObject("attributes");
for (Map.Entry<String, String> attr : node.attributes().entrySet()) {
builder.field(attr.getKey(), attr.getValue());
}
builder.endObject();
}
builder.endObject();
}
builder.endObject();
builder.startArray("shards");
for (ClusterSearchShardsGroup group : groups) {
group.toXContent(builder, params);
}
builder.endArray();
return builder;
}
}
| 0 (true)
|
src_main_java_org_elasticsearch_action_admin_cluster_shards_ClusterSearchShardsResponse.java
|
651 |
constructors[LIST_SET] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
public IdentifiedDataSerializable createNew(Integer arg) {
return new ListSetOperation();
}
};
| 0 (true)
|
hazelcast_src_main_java_com_hazelcast_collection_CollectionDataSerializerHook.java
|
3,193 |
static abstract class FieldBased implements IndexFieldDataCache, SegmentReader.CoreClosedListener, RemovalListener<FieldBased.Key, AtomicFieldData> {
@Nullable
private final IndexService indexService;
private final FieldMapper.Names fieldNames;
private final FieldDataType fieldDataType;
private final Cache<Key, AtomicFieldData> cache;
protected FieldBased(@Nullable IndexService indexService, FieldMapper.Names fieldNames, FieldDataType fieldDataType, CacheBuilder cache) {
this.indexService = indexService;
this.fieldNames = fieldNames;
this.fieldDataType = fieldDataType;
cache.removalListener(this);
//noinspection unchecked
this.cache = cache.build();
}
@Override
public void onRemoval(RemovalNotification<Key, AtomicFieldData> notification) {
Key key = notification.getKey();
if (key == null || key.listener == null) {
return; // we can't do anything here...
}
AtomicFieldData value = notification.getValue();
long sizeInBytes = key.sizeInBytes;
if (sizeInBytes == -1 && value != null) {
sizeInBytes = value.getMemorySizeInBytes();
}
key.listener.onUnload(fieldNames, fieldDataType, notification.wasEvicted(), sizeInBytes, value);
}
@Override
public <FD extends AtomicFieldData, IFD extends IndexFieldData<FD>> FD load(final AtomicReaderContext context, final IFD indexFieldData) throws Exception {
final Key key = new Key(context.reader().getCoreCacheKey());
//noinspection unchecked
return (FD) cache.get(key, new Callable<AtomicFieldData>() {
@Override
public AtomicFieldData call() throws Exception {
SegmentReaderUtils.registerCoreListener(context.reader(), FieldBased.this);
AtomicFieldData fieldData = indexFieldData.loadDirect(context);
key.sizeInBytes = fieldData.getMemorySizeInBytes();
if (indexService != null) {
ShardId shardId = ShardUtils.extractShardId(context.reader());
if (shardId != null) {
IndexShard shard = indexService.shard(shardId.id());
if (shard != null) {
key.listener = shard.fieldData();
}
}
}
if (key.listener != null) {
key.listener.onLoad(fieldNames, fieldDataType, fieldData);
}
return fieldData;
}
});
}
@Override
public void clear() {
cache.invalidateAll();
}
@Override
public void clear(String fieldName) {
cache.invalidateAll();
}
@Override
public void clear(Object coreCacheKey) {
cache.invalidate(new Key(coreCacheKey));
}
@Override
public void onClose(Object coreCacheKey) {
cache.invalidate(new Key(coreCacheKey));
}
static class Key {
final Object readerKey;
@Nullable
Listener listener; // optional stats listener
long sizeInBytes = -1; // optional size in bytes (we keep it here in case the values are soft references)
Key(Object readerKey) {
this.readerKey = readerKey;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
Key key = (Key) o;
if (!readerKey.equals(key.readerKey)) return false;
return true;
}
@Override
public int hashCode() {
return readerKey.hashCode();
}
}
}
| 1 (no label)
|
src_main_java_org_elasticsearch_index_fielddata_IndexFieldDataCache.java
|
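The FieldBased cache above is built on a common Guava caching pattern: load-on-miss via Cache.get(key, Callable) plus a removal listener for unload accounting. Below is a minimal, self-contained sketch of just that pattern; all names and sizes are illustrative and nothing here is taken from the Elasticsearch row.
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
public class LoadOnMissSketch {
    public static void main(String[] args) throws ExecutionException {
        Cache<String, byte[]> cache = CacheBuilder.newBuilder()
                .maximumSize(100)
                // stands in for FieldBased.onRemoval(...) statistics bookkeeping
                .removalListener(n -> System.out.println("unloaded " + n.getKey()))
                .build();
        // stands in for FieldBased.load(...): the Callable runs only on a cache miss
        byte[] page = cache.get("core-cache-key", new Callable<byte[]>() {
            @Override
            public byte[] call() {
                return new byte[1024]; // expensive load, analogous to loadDirect
            }
        });
        System.out.println("got " + page.length + " bytes");
    }
}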
434 |
map.addChangeListener(new OMultiValueChangeListener<Object, String>() {
public void onAfterRecordChanged(final OMultiValueChangeEvent<Object, String> event) {
changed.value = true;
}
});
| 0 (true)
|
core_src_test_java_com_orientechnologies_orient_core_db_record_TrackedMapTest.java
|
743 |
public class ListIndexOfRequest extends CollectionRequest {
Data value;
boolean last;
public ListIndexOfRequest() {
}
public ListIndexOfRequest(String name, Data value, boolean last) {
super(name);
this.value = value;
this.last = last;
}
@Override
protected Operation prepareOperation() {
return new ListIndexOfOperation(name, last, value);
}
@Override
public int getClassId() {
return CollectionPortableHook.LIST_INDEX_OF;
}
public void write(PortableWriter writer) throws IOException {
super.write(writer);
writer.writeBoolean("l", last);
value.writeData(writer.getRawDataOutput());
}
public void read(PortableReader reader) throws IOException {
super.read(reader);
last = reader.readBoolean("l");
value = new Data();
value.readData(reader.getRawDataInput());
}
@Override
public String getRequiredAction() {
return ActionConstants.ACTION_READ;
}
}
| 0 (true)
|
hazelcast_src_main_java_com_hazelcast_collection_client_ListIndexOfRequest.java
|
1,305 |
public interface ClusterStateListener {
/**
* Called when cluster state changes.
*/
void clusterChanged(ClusterChangedEvent event);
}
| 0 (true)
|
src_main_java_org_elasticsearch_cluster_ClusterStateListener.java
|
137 |
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_SC")
@EntityListeners(value = { AdminAuditableListener.class })
@AdminPresentationOverrides(
{
@AdminPresentationOverride(name = "auditable.createdBy.id", value = @AdminPresentation(readOnly = true, visibility = VisibilityEnum.HIDDEN_ALL)),
@AdminPresentationOverride(name = "auditable.updatedBy.id", value = @AdminPresentation(readOnly = true, visibility = VisibilityEnum.HIDDEN_ALL)),
@AdminPresentationOverride(name = "auditable.createdBy.name", value = @AdminPresentation(readOnly = true, visibility = VisibilityEnum.HIDDEN_ALL)),
@AdminPresentationOverride(name = "auditable.updatedBy.name", value = @AdminPresentation(readOnly = true, visibility = VisibilityEnum.HIDDEN_ALL)),
@AdminPresentationOverride(name = "auditable.dateCreated", value = @AdminPresentation(readOnly = true, visibility = VisibilityEnum.HIDDEN_ALL)),
@AdminPresentationOverride(name = "auditable.dateUpdated", value = @AdminPresentation(readOnly = true, visibility = VisibilityEnum.HIDDEN_ALL)),
@AdminPresentationOverride(name = "structuredContentType.name", value = @AdminPresentation(readOnly = true, visibility = VisibilityEnum.HIDDEN_ALL)),
@AdminPresentationOverride(name = "structuredContentType.structuredContentFieldTemplate.name", value = @AdminPresentation(readOnly = true, visibility = VisibilityEnum.HIDDEN_ALL))
}
)
@AdminPresentationClass(populateToOneFields = PopulateToOneFieldsEnum.TRUE, friendlyName = "StructuredContentImpl_baseStructuredContent")
public class StructuredContentImpl implements StructuredContent {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "StructuredContentId")
@GenericGenerator(
name="StructuredContentId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="StructuredContentImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.cms.structure.domain.StructuredContentImpl")
}
)
@Column(name = "SC_ID")
protected Long id;
@Embedded
@AdminPresentation(excluded = true)
protected AdminAuditable auditable = new AdminAuditable();
@AdminPresentation(friendlyName = "StructuredContentImpl_Content_Name", order = 1,
group = Presentation.Group.Name.Description, groupOrder = Presentation.Group.Order.Description,
prominent = true, gridOrder = 1)
@Column(name = "CONTENT_NAME", nullable = false)
@Index(name="CONTENT_NAME_INDEX", columnNames={"CONTENT_NAME", "ARCHIVED_FLAG", "SC_TYPE_ID"})
protected String contentName;
@ManyToOne(targetEntity = LocaleImpl.class, optional = false)
@JoinColumn(name = "LOCALE_CODE")
@AdminPresentation(friendlyName = "StructuredContentImpl_Locale", order = 2,
group = Presentation.Group.Name.Description, groupOrder = Presentation.Group.Order.Description,
prominent = true, gridOrder = 2)
@AdminPresentationToOneLookup(lookupDisplayProperty = "friendlyName", lookupType = LookupType.DROPDOWN)
protected Locale locale;
@Column(name = "PRIORITY", nullable = false)
@AdminPresentation(friendlyName = "StructuredContentImpl_Priority", order = 3,
group = Presentation.Group.Name.Description, groupOrder = Presentation.Group.Order.Description)
@Index(name="CONTENT_PRIORITY_INDEX", columnNames={"PRIORITY"})
protected Integer priority;
@ManyToMany(targetEntity = StructuredContentRuleImpl.class, cascade = {CascadeType.ALL})
@JoinTable(name = "BLC_SC_RULE_MAP", inverseJoinColumns = @JoinColumn(name = "SC_RULE_ID", referencedColumnName = "SC_RULE_ID"))
@Cascade(value={org.hibernate.annotations.CascadeType.ALL, org.hibernate.annotations.CascadeType.DELETE_ORPHAN})
@MapKeyColumn(name = "MAP_KEY", nullable = false)
@AdminPresentationMapFields(
mapDisplayFields = {
@AdminPresentationMapField(
fieldName = RuleIdentifier.CUSTOMER_FIELD_KEY,
fieldPresentation = @AdminPresentation(fieldType = SupportedFieldType.RULE_SIMPLE, order = 1,
tab = Presentation.Tab.Name.Rules, tabOrder = Presentation.Tab.Order.Rules,
group = Presentation.Group.Name.Rules, groupOrder = Presentation.Group.Order.Rules,
ruleIdentifier = RuleIdentifier.CUSTOMER, friendlyName = "Generic_Customer_Rule")
),
@AdminPresentationMapField(
fieldName = RuleIdentifier.TIME_FIELD_KEY,
fieldPresentation = @AdminPresentation(fieldType = SupportedFieldType.RULE_SIMPLE, order = 2,
tab = Presentation.Tab.Name.Rules, tabOrder = Presentation.Tab.Order.Rules,
group = Presentation.Group.Name.Rules, groupOrder = Presentation.Group.Order.Rules,
ruleIdentifier = RuleIdentifier.TIME, friendlyName = "Generic_Time_Rule")
),
@AdminPresentationMapField(
fieldName = RuleIdentifier.REQUEST_FIELD_KEY,
fieldPresentation = @AdminPresentation(fieldType = SupportedFieldType.RULE_SIMPLE, order = 3,
tab = Presentation.Tab.Name.Rules, tabOrder = Presentation.Tab.Order.Rules,
group = Presentation.Group.Name.Rules, groupOrder = Presentation.Group.Order.Rules,
ruleIdentifier = RuleIdentifier.REQUEST, friendlyName = "Generic_Request_Rule")
),
@AdminPresentationMapField(
fieldName = RuleIdentifier.PRODUCT_FIELD_KEY,
fieldPresentation = @AdminPresentation(fieldType = SupportedFieldType.RULE_SIMPLE, order = 4,
tab = Presentation.Tab.Name.Rules, tabOrder = Presentation.Tab.Order.Rules,
group = Presentation.Group.Name.Rules, groupOrder = Presentation.Group.Order.Rules,
ruleIdentifier = RuleIdentifier.PRODUCT, friendlyName = "Generic_Product_Rule")
),
@AdminPresentationMapField(
fieldName = RuleIdentifier.ORDER_FIELD_KEY,
fieldPresentation = @AdminPresentation(fieldType = SupportedFieldType.RULE_SIMPLE, order = 5,
tab = Presentation.Tab.Name.Rules, tabOrder = Presentation.Tab.Order.Rules,
group = Presentation.Group.Name.Rules, groupOrder = Presentation.Group.Order.Rules,
ruleIdentifier = RuleIdentifier.ORDER, friendlyName = "Generic_Order_Rule")
),
@AdminPresentationMapField(
fieldName = RuleIdentifier.CATEGORY,
fieldPresentation = @AdminPresentation(fieldType = SupportedFieldType.RULE_SIMPLE, order = 6,
tab = Presentation.Tab.Name.Rules, tabOrder = Presentation.Tab.Order.Rules,
group = Presentation.Group.Name.Rules, groupOrder = Presentation.Group.Order.Rules,
ruleIdentifier = RuleIdentifier.CATEGORY, friendlyName = "Generic_Category_Rule")
)
}
)
Map<String, StructuredContentRule> structuredContentMatchRules = new HashMap<String, StructuredContentRule>();
@OneToMany(fetch = FetchType.LAZY, targetEntity = StructuredContentItemCriteriaImpl.class, cascade={CascadeType.ALL})
@JoinTable(name = "BLC_QUAL_CRIT_SC_XREF", joinColumns = @JoinColumn(name = "SC_ID"), inverseJoinColumns = @JoinColumn(name = "SC_ITEM_CRITERIA_ID"))
@Cascade(value={org.hibernate.annotations.CascadeType.ALL, org.hibernate.annotations.CascadeType.DELETE_ORPHAN})
@AdminPresentation(friendlyName = "Generic_Item_Rule", order = 5,
tab = Presentation.Tab.Name.Rules, tabOrder = Presentation.Tab.Order.Rules,
group = Presentation.Group.Name.Rules, groupOrder = Presentation.Group.Order.Rules,
fieldType = SupportedFieldType.RULE_WITH_QUANTITY,
ruleIdentifier = RuleIdentifier.ORDERITEM)
protected Set<StructuredContentItemCriteria> qualifyingItemCriteria = new HashSet<StructuredContentItemCriteria>();
@Column(name = "ORIG_ITEM_ID")
@Index(name="SC_ORIG_ITEM_ID_INDEX", columnNames={"ORIG_ITEM_ID"})
@AdminPresentation(friendlyName = "StructuredContentImpl_Original_Item_Id", order = 1,
group = Presentation.Group.Name.Internal, groupOrder = Presentation.Group.Order.Internal,
visibility = VisibilityEnum.HIDDEN_ALL)
protected Long originalItemId;
@ManyToOne (targetEntity = SandBoxImpl.class)
@JoinColumn(name="SANDBOX_ID")
@AdminPresentation(friendlyName = "StructuredContentImpl_Content_SandBox", order = 1,
group = Presentation.Group.Name.Internal, groupOrder = Presentation.Group.Order.Internal,
excluded = true)
protected SandBox sandbox;
@ManyToOne(targetEntity = SandBoxImpl.class)
@JoinColumn(name = "ORIG_SANDBOX_ID")
@AdminPresentation(excluded = true)
protected SandBox originalSandBox;
@ManyToOne(targetEntity = StructuredContentTypeImpl.class)
@JoinColumn(name="SC_TYPE_ID")
@AdminPresentation(friendlyName = "StructuredContentImpl_Content_Type", order = 2, prominent = true,
group = Presentation.Group.Name.Description, groupOrder = Presentation.Group.Order.Description,
requiredOverride = RequiredOverride.REQUIRED)
@AdminPresentationToOneLookup(lookupDisplayProperty = "name", forcePopulateChildProperties = true)
protected StructuredContentType structuredContentType;
@ManyToMany(targetEntity = StructuredContentFieldImpl.class, cascade = CascadeType.ALL)
@JoinTable(name = "BLC_SC_FLD_MAP", joinColumns = @JoinColumn(name = "SC_ID", referencedColumnName = "SC_ID"), inverseJoinColumns = @JoinColumn(name = "SC_FLD_ID", referencedColumnName = "SC_FLD_ID"))
@MapKeyColumn(name = "MAP_KEY")
@Cascade(value={org.hibernate.annotations.CascadeType.ALL, org.hibernate.annotations.CascadeType.DELETE_ORPHAN})
@BatchSize(size = 20)
protected Map<String,StructuredContentField> structuredContentFields = new HashMap<String,StructuredContentField>();
@Column(name = "DELETED_FLAG")
@Index(name="SC_DLTD_FLG_INDX", columnNames={"DELETED_FLAG"})
@AdminPresentation(friendlyName = "StructuredContentImpl_Deleted", order = 2,
group = Presentation.Group.Name.Internal, groupOrder = Presentation.Group.Order.Internal,
visibility = VisibilityEnum.HIDDEN_ALL)
protected Boolean deletedFlag = false;
@Column(name = "ARCHIVED_FLAG")
@Index(name="SC_ARCHVD_FLG_INDX", columnNames={"ARCHIVED_FLAG"})
@AdminPresentation(friendlyName = "StructuredContentImpl_Archived", order = 3,
group = Presentation.Group.Name.Internal, groupOrder = Presentation.Group.Order.Internal,
visibility = VisibilityEnum.HIDDEN_ALL)
protected Boolean archivedFlag = false;
@AdminPresentation(friendlyName = "StructuredContentImpl_Offline", order = 4,
group = Presentation.Group.Name.Description, groupOrder = Presentation.Group.Order.Description)
@Column(name = "OFFLINE_FLAG")
@Index(name="SC_OFFLN_FLG_INDX", columnNames={"OFFLINE_FLAG"})
protected Boolean offlineFlag = false;
@Column (name = "LOCKED_FLAG")
@AdminPresentation(friendlyName = "StructuredContentImpl_Is_Locked",
visibility = VisibilityEnum.HIDDEN_ALL)
@Index(name="SC_LCKD_FLG_INDX", columnNames={"LOCKED_FLAG"})
protected Boolean lockedFlag = false;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public String getContentName() {
return contentName;
}
@Override
public void setContentName(String contentName) {
this.contentName = contentName;
}
@Override
public Locale getLocale() {
return locale;
}
@Override
public void setLocale(Locale locale) {
this.locale = locale;
}
@Override
public SandBox getSandbox() {
return sandbox;
}
@Override
public void setSandbox(SandBox sandbox) {
this.sandbox = sandbox;
}
@Override
public StructuredContentType getStructuredContentType() {
return structuredContentType;
}
@Override
public void setStructuredContentType(StructuredContentType structuredContentType) {
this.structuredContentType = structuredContentType;
}
@Override
public Map<String, StructuredContentField> getStructuredContentFields() {
return structuredContentFields;
}
@Override
public void setStructuredContentFields(Map<String, StructuredContentField> structuredContentFields) {
this.structuredContentFields = structuredContentFields;
}
@Override
public Boolean getDeletedFlag() {
if (deletedFlag == null) {
return Boolean.FALSE;
} else {
return deletedFlag;
}
}
@Override
public void setDeletedFlag(Boolean deletedFlag) {
this.deletedFlag = deletedFlag;
}
@Override
public Boolean getOfflineFlag() {
if (offlineFlag == null) {
return Boolean.FALSE;
} else {
return offlineFlag;
}
}
@Override
public void setOfflineFlag(Boolean offlineFlag) {
this.offlineFlag = offlineFlag;
}
@Override
public Integer getPriority() {
return priority;
}
@Override
public void setPriority(Integer priority) {
this.priority = priority;
}
@Override
public Long getOriginalItemId() {
return originalItemId;
}
@Override
public void setOriginalItemId(Long originalItemId) {
this.originalItemId = originalItemId;
}
@Override
public Boolean getArchivedFlag() {
if (archivedFlag == null) {
return Boolean.FALSE;
} else {
return archivedFlag;
}
}
@Override
public void setArchivedFlag(Boolean archivedFlag) {
this.archivedFlag = archivedFlag;
}
@Override
public AdminAuditable getAuditable() {
return auditable;
}
@Override
public void setAuditable(AdminAuditable auditable) {
this.auditable = auditable;
}
@Override
public Boolean getLockedFlag() {
if (lockedFlag == null) {
return Boolean.FALSE;
} else {
return lockedFlag;
}
}
@Override
public void setLockedFlag(Boolean lockedFlag) {
this.lockedFlag = lockedFlag;
}
@Override
public SandBox getOriginalSandBox() {
return originalSandBox;
}
@Override
public void setOriginalSandBox(SandBox originalSandBox) {
this.originalSandBox = originalSandBox;
}
@Override
public Map<String, StructuredContentRule> getStructuredContentMatchRules() {
return structuredContentMatchRules;
}
@Override
public void setStructuredContentMatchRules(Map<String, StructuredContentRule> structuredContentMatchRules) {
this.structuredContentMatchRules = structuredContentMatchRules;
}
@Override
public Set<StructuredContentItemCriteria> getQualifyingItemCriteria() {
return qualifyingItemCriteria;
}
@Override
public void setQualifyingItemCriteria(Set<StructuredContentItemCriteria> qualifyingItemCriteria) {
this.qualifyingItemCriteria = qualifyingItemCriteria;
}
public String getMainEntityName() {
return getContentName();
}
@Override
public StructuredContent cloneEntity() {
StructuredContentImpl newContent = new StructuredContentImpl();
newContent.archivedFlag = archivedFlag;
newContent.contentName = contentName;
newContent.deletedFlag = deletedFlag;
newContent.locale = locale;
newContent.offlineFlag = offlineFlag;
newContent.originalItemId = originalItemId;
newContent.priority = priority;
newContent.structuredContentType = structuredContentType;
Map<String, StructuredContentRule> ruleMap = newContent.getStructuredContentMatchRules();
for (String key : structuredContentMatchRules.keySet()) {
StructuredContentRule newField = structuredContentMatchRules.get(key).cloneEntity();
ruleMap.put(key, newField);
}
Set<StructuredContentItemCriteria> criteriaList = newContent.getQualifyingItemCriteria();
for (StructuredContentItemCriteria structuredContentItemCriteria : qualifyingItemCriteria) {
StructuredContentItemCriteria newField = structuredContentItemCriteria.cloneEntity();
criteriaList.add(newField);
}
Map<String, StructuredContentField> fieldMap = newContent.getStructuredContentFields();
for (StructuredContentField field : structuredContentFields.values()) {
StructuredContentField newField = field.cloneEntity();
fieldMap.put(newField.getFieldKey(), newField);
}
return newContent;
}
public static class Presentation {
public static class Tab {
public static class Name {
public static final String Rules = "StructuredContentImpl_Rules_Tab";
}
public static class Order {
public static final int Rules = 1000;
}
}
public static class Group {
public static class Name {
public static final String Description = "StructuredContentImpl_Description";
public static final String Internal = "StructuredContentImpl_Internal";
public static final String Rules = "StructuredContentImpl_Rules";
}
public static class Order {
public static final int Description = 1000;
public static final int Internal = 2000;
public static final int Rules = 1000;
}
}
}
}
| 0 (true)
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_domain_StructuredContentImpl.java
|
585 |
getEntriesBetween(iRangeFrom, iRangeTo, iInclusive, new IndexEntriesResultListener() {
@Override
public boolean addResult(ODocument entry) {
result.add(entry);
return true;
}
});
| 0 (true)
|
core_src_main_java_com_orientechnologies_orient_core_index_OIndexAbstract.java
|
99 |
public interface Page extends Serializable {
public Long getId();
public void setId(Long id);
public String getFullUrl();
public void setFullUrl(String fullUrl);
public String getDescription();
public void setDescription(String description);
public PageTemplate getPageTemplate();
public void setPageTemplate(PageTemplate pageTemplate);
public Map<String, PageField> getPageFields();
public void setPageFields(Map<String, PageField> pageFields);
public Boolean getDeletedFlag();
public void setDeletedFlag(Boolean deletedFlag);
public Boolean getArchivedFlag();
public void setArchivedFlag(Boolean archivedFlag);
public SandBox getSandbox();
public void setSandbox(SandBox sandbox);
public Boolean getLockedFlag();
public void setLockedFlag(Boolean lockedFlag);
public Long getOriginalPageId();
public void setOriginalPageId(Long originalPageId);
public SandBox getOriginalSandBox();
public void setOriginalSandBox(SandBox originalSandBox);
public AdminAuditable getAuditable();
public void setAuditable(AdminAuditable auditable);
/**
* Returns the offlineFlag. True indicates that the page should no longer appear on the site.
* The item will still appear within the content administration program but no longer
* be returned as part of the client facing APIs.
*
* @return true if this item is offline
*/
@Nullable
public Boolean getOfflineFlag();
/**
* Sets the offline flag.
*
* @param offlineFlag
*/
public void setOfflineFlag(@Nullable Boolean offlineFlag);
/**
* Gets the integer priority of this content item. Items with a lower priority should
* be displayed before items with a higher priority.
*
* @return the priority as a numeric value
*/
@Nullable
public Integer getPriority();
/**
* Sets the display priority of this item. Lower priorities should be displayed first.
*
* @param priority
*/
public void setPriority(@Nullable Integer priority);
/**
* Returns a map of the targeting rules associated with this page.
*
* Targeting rules are defined in the content management system and used to
* enforce which page is returned to the client.
*
* @return
*/
@Nullable
public Map<String, PageRule> getPageMatchRules();
/**
* Sets the targeting rules for this content item.
*
* @param pageRules
*/
public void setPageMatchRules(@Nullable Map<String, PageRule> pageRules);
/**
* Returns the item (or cart) based rules associated with this content item.
*
* @return
*/
@Nullable
public Set<PageItemCriteria> getQualifyingItemCriteria();
/**
* Sets the item (e.g. cart) based rules associated with this content item.
*
* @param qualifyingItemCriteria
*/
public void setQualifyingItemCriteria(@Nullable Set<PageItemCriteria> qualifyingItemCriteria);
public Page cloneEntity();
}
| 0 (true)
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_domain_Page.java
|
474 |
public abstract class BaseAliasesRequestBuilder<Response extends ActionResponse, Builder extends BaseAliasesRequestBuilder<Response, Builder>> extends MasterNodeReadOperationRequestBuilder<GetAliasesRequest, Response, Builder> {
public BaseAliasesRequestBuilder(IndicesAdminClient client, String... aliases) {
super((InternalIndicesAdminClient) client, new GetAliasesRequest(aliases));
}
@SuppressWarnings("unchecked")
public Builder setAliases(String... aliases) {
request.aliases(aliases);
return (Builder) this;
}
@SuppressWarnings("unchecked")
public Builder addAliases(String... aliases) {
request.aliases(ObjectArrays.concat(request.aliases(), aliases, String.class));
return (Builder) this;
}
@SuppressWarnings("unchecked")
public Builder setIndices(String... indices) {
request.indices(indices);
return (Builder) this;
}
@SuppressWarnings("unchecked")
public Builder addIndices(String... indices) {
request.indices(ObjectArrays.concat(request.indices(), indices, String.class));
return (Builder) this;
}
/**
* Specifies which types of requested indices to ignore (for example, indices
* that don't exist) and how wildcard index expressions are expanded.
*/
@SuppressWarnings("unchecked")
public Builder setIndicesOptions(IndicesOptions options) {
request.indicesOptions(options);
return (Builder) this;
}
}
| 0 (true)
|
src_main_java_org_elasticsearch_action_admin_indices_alias_get_BaseAliasesRequestBuilder.java
|
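A short usage sketch of the fluent builder above via the 1.x admin client; the alias and index names are assumptions for illustration.
// Hypothetical: fetch alias mappings through the builder defined above.
static GetAliasesResponse fetchAliases(Client client) {
    return client.admin().indices()
            .prepareGetAliases("my_alias")               // seeds GetAliasesRequest(aliases)
            .addIndices("logs-2014-01", "logs-2014-02")  // narrow the lookup to these indices
            .execute().actionGet();
}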
1,510 |
public class LinkMapReduce {
// public static final String DIRECTION = Tokens.makeNamespace(LinkMapReduce.class) + ".direction";
// public static final String LABEL = Tokens.makeNamespace(LinkMapReduce.class) + ".label";
// public static final String STEP = Tokens.makeNamespace(LinkMapReduce.class) + ".step";
// public static final String MERGE_DUPLICATES = Tokens.makeNamespace(LinkMapReduce.class) + ".mergeDuplicates";
// public static final String MERGE_WEIGHT_KEY = Tokens.makeNamespace(LinkMapReduce.class) + ".mergeWeightKey";
public static final String NO_WEIGHT_KEY = "_";
public enum Counters {
IN_EDGES_CREATED,
OUT_EDGES_CREATED
}
public static org.apache.hadoop.conf.Configuration createConfiguration(final Direction direction, final String label, final int step, final String mergeWeightKey) {
ModifiableHadoopConfiguration c = ModifiableHadoopConfiguration.withoutResources();
c.set(LINK_STEP, step);
c.set(LINK_DIRECTION, direction);
c.set(LINK_LABEL, label);
if (null == mergeWeightKey) {
c.set(LINK_MERGE_DUPLICATES, false);
c.set(LINK_MERGE_WEIGHT_KEY, NO_WEIGHT_KEY);
} else {
c.set(LINK_MERGE_DUPLICATES, true);
c.set(LINK_MERGE_WEIGHT_KEY, mergeWeightKey);
}
c.set(PIPELINE_TRACK_PATHS, true);
return c.getHadoopConfiguration();
}
public static class Map extends Mapper<NullWritable, FaunusVertex, LongWritable, Holder> {
private Direction direction;
private String label;
private int step;
private final Holder<FaunusPathElement> holder = new Holder<FaunusPathElement>();
private final LongWritable longWritable = new LongWritable();
private boolean mergeDuplicates;
private String mergeWeightKey;
private Configuration faunusConf;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
faunusConf = ModifiableHadoopConfiguration.of(DEFAULT_COMPAT.getContextConfiguration(context));
if (!faunusConf.get(PIPELINE_TRACK_PATHS))
throw new IllegalStateException(LinkMapReduce.class.getSimpleName() + " requires that paths be enabled");
step = faunusConf.get(LINK_STEP);
direction = faunusConf.get(LINK_DIRECTION);
label = faunusConf.get(LINK_LABEL);
mergeDuplicates = faunusConf.get(LINK_MERGE_DUPLICATES);
mergeWeightKey = faunusConf.get(LINK_MERGE_WEIGHT_KEY);
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, LongWritable, Holder>.Context context) throws IOException, InterruptedException {
final long valueId = value.getLongId();
if (value.hasPaths()) {
long edgesCreated = 0;
if (mergeDuplicates) {
final CounterMap<Long> map = new CounterMap<Long>();
for (final List<FaunusPathElement.MicroElement> path : value.getPaths()) {
map.incr(path.get(step).getId(), 1);
}
for (java.util.Map.Entry<Long, Long> entry : map.entrySet()) {
final long linkElementId = entry.getKey();
final StandardFaunusEdge edge;
if (direction.equals(IN))
edge = new StandardFaunusEdge(faunusConf, linkElementId, valueId, label);
else
edge = new StandardFaunusEdge(faunusConf, valueId, linkElementId, label);
if (!mergeWeightKey.equals(NO_WEIGHT_KEY))
edge.setProperty(mergeWeightKey, entry.getValue());
value.addEdge(direction, edge);
edgesCreated++;
longWritable.set(linkElementId);
context.write(longWritable, holder.set('e', edge));
}
} else {
for (final List<FaunusPathElement.MicroElement> path : value.getPaths()) {
final long linkElementId = path.get(step).getId();
final StandardFaunusEdge edge;
if (direction.equals(IN))
edge = new StandardFaunusEdge(faunusConf, linkElementId, valueId, label);
else
edge = new StandardFaunusEdge(faunusConf, valueId, linkElementId, label);
value.addEdge(direction, edge);
edgesCreated++;
longWritable.set(linkElementId);
context.write(longWritable, holder.set('e', edge));
}
}
if (direction.equals(OUT)) {
DEFAULT_COMPAT.incrementContextCounter(context, Counters.OUT_EDGES_CREATED, edgesCreated);
} else {
DEFAULT_COMPAT.incrementContextCounter(context, Counters.IN_EDGES_CREATED, edgesCreated);
}
}
longWritable.set(valueId);
context.write(longWritable, holder.set('v', value));
}
}
public static class Combiner extends Reducer<LongWritable, Holder, LongWritable, Holder> {
private Direction direction;
private Configuration faunusConf;
private static final Logger log =
LoggerFactory.getLogger(Combiner.class);
@Override
public void setup(final Reducer.Context context) throws IOException, InterruptedException {
faunusConf = ModifiableHadoopConfiguration.of(DEFAULT_COMPAT.getContextConfiguration(context));
if (!faunusConf.has(LINK_DIRECTION)) {
Iterator<Entry<String, String>> it = context.getConfiguration().iterator();
log.error("Broken configuration missing {}", LINK_DIRECTION);
log.error("---- Start config dump ----");
while (it.hasNext()) {
Entry<String,String> ent = it.next();
log.error("k:{} -> v:{}", ent.getKey(), ent.getValue());
}
log.error("---- End config dump ----");
throw new NullPointerException("Missing required configuration: " + LINK_DIRECTION);
}
direction = faunusConf.get(LINK_DIRECTION).opposite();
}
private final Holder<FaunusVertex> holder = new Holder<FaunusVertex>();
@Override
public void reduce(final LongWritable key, final Iterable<Holder> values, final Reducer<LongWritable, Holder, LongWritable, Holder>.Context context) throws IOException, InterruptedException {
long edgesCreated = 0;
final FaunusVertex vertex = new FaunusVertex(faunusConf, key.get());
char outTag = 'x';
for (final Holder holder : values) {
final char tag = holder.getTag();
if (tag == 'v') {
vertex.addAll((FaunusVertex) holder.get());
outTag = 'v';
} else if (tag == 'e') {
vertex.addEdge(direction, (StandardFaunusEdge) holder.get());
edgesCreated++;
} else {
vertex.addEdges(Direction.BOTH, (FaunusVertex) holder.get());
}
}
context.write(key, holder.set(outTag, vertex));
if (direction.equals(OUT)) {
DEFAULT_COMPAT.incrementContextCounter(context, Counters.OUT_EDGES_CREATED, edgesCreated);
} else {
DEFAULT_COMPAT.incrementContextCounter(context, Counters.IN_EDGES_CREATED, edgesCreated);
}
}
}
public static class Reduce extends Reducer<LongWritable, Holder, NullWritable, FaunusVertex> {
private Direction direction;
private Configuration faunusConf;
@Override
public void setup(final Reducer.Context context) throws IOException, InterruptedException {
faunusConf = ModifiableHadoopConfiguration.of(DEFAULT_COMPAT.getContextConfiguration(context));
direction = faunusConf.get(LINK_DIRECTION).opposite();
}
@Override
public void reduce(final LongWritable key, final Iterable<Holder> values, final Reducer<LongWritable, Holder, NullWritable, FaunusVertex>.Context context) throws IOException, InterruptedException {
long edgesCreated = 0;
final FaunusVertex vertex = new FaunusVertex(faunusConf, key.get());
for (final Holder holder : values) {
final char tag = holder.getTag();
if (tag == 'v') {
vertex.addAll((FaunusVertex) holder.get());
} else if (tag == 'e') {
vertex.addEdge(direction, (StandardFaunusEdge) holder.get());
edgesCreated++;
} else {
vertex.addEdges(Direction.BOTH, (FaunusVertex) holder.get());
}
}
context.write(NullWritable.get(), vertex);
if (direction.equals(OUT)) {
DEFAULT_COMPAT.incrementContextCounter(context, Counters.OUT_EDGES_CREATED, edgesCreated);
} else {
DEFAULT_COMPAT.incrementContextCounter(context, Counters.IN_EDGES_CREATED, edgesCreated);
}
}
}
}
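A minimal driver sketch of the static factory above (label "knows" and property key "weight" are made-up values); the resulting Configuration is handed to a Hadoop Job together with LinkMapReduce.Map, Combiner and Reduce:
org.apache.hadoop.conf.Configuration conf =
        LinkMapReduce.createConfiguration(Direction.IN, "knows", 0, "weight");
// passing null as the mergeWeightKey disables duplicate merging entirely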
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_sideeffect_LinkMapReduce.java
|
1,494 |
public class CommitEdgesMap {
public static final String ACTION = Tokens.makeNamespace(CommitEdgesMap.class) + ".action";
public enum Counters {
OUT_EDGES_DROPPED,
OUT_EDGES_KEPT,
IN_EDGES_DROPPED,
IN_EDGES_KEPT
}
public static Configuration createConfiguration(final Tokens.Action action) {
final Configuration configuration = new EmptyConfiguration();
configuration.set(ACTION, action.name());
return configuration;
}
public static class Map extends Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex> {
private boolean drop;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
this.drop = Tokens.Action.valueOf(context.getConfiguration().get(ACTION)).equals(Tokens.Action.DROP);
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex>.Context context) throws IOException, InterruptedException {
Iterator<Edge> itty = value.getEdges(Direction.IN).iterator();
long edgesKept = 0;
long edgesDropped = 0;
while (itty.hasNext()) {
if (this.drop) {
if ((((StandardFaunusEdge) itty.next()).hasPaths())) {
itty.remove();
edgesDropped++;
} else
edgesKept++;
} else {
if (!(((StandardFaunusEdge) itty.next()).hasPaths())) {
itty.remove();
edgesDropped++;
} else
edgesKept++;
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.IN_EDGES_DROPPED, edgesDropped);
DEFAULT_COMPAT.incrementContextCounter(context, Counters.IN_EDGES_KEPT, edgesKept);
// same bookkeeping pass over the OUT edges
itty = value.getEdges(Direction.OUT).iterator();
edgesKept = 0;
edgesDropped = 0;
while (itty.hasNext()) {
if (this.drop) {
if ((((StandardFaunusEdge) itty.next()).hasPaths())) {
itty.remove();
edgesDropped++;
} else
edgesKept++;
} else {
if (!(((StandardFaunusEdge) itty.next()).hasPaths())) {
itty.remove();
edgesDropped++;
} else
edgesKept++;
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.OUT_EDGES_DROPPED, edgesDropped);
DEFAULT_COMPAT.incrementContextCounter(context, Counters.OUT_EDGES_KEPT, edgesKept);
context.write(NullWritable.get(), value);
}
}
}
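A hedged usage sketch of the factory above, assuming the enum's other value is KEEP as the DROP comparison in setup() implies; DROP removes edges whose paths were touched by the traversal, KEEP removes the rest:
Configuration dropConf = CommitEdgesMap.createConfiguration(Tokens.Action.DROP);
Configuration keepConf = CommitEdgesMap.createConfiguration(Tokens.Action.KEEP);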
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_sideeffect_CommitEdgesMap.java
|
1,539 |
public class ResourceAdapterImpl implements ResourceAdapter, Serializable {
/**
* Identity generator
*/
private static final AtomicInteger ID_GEN = new AtomicInteger();
private static final long serialVersionUID = -1727994229521767306L;
/**
* The hazelcast instance itself
*/
private HazelcastInstance hazelcast;
/**
* The configured hazelcast configuration location
*/
private String configurationLocation;
/**
* Identity
*/
private final transient int id;
public ResourceAdapterImpl() {
id = ID_GEN.incrementAndGet();
}
/* (non-Javadoc)
* @see javax.resource.spi.ResourceAdapter
* #endpointActivation(javax.resource.spi.endpoint.MessageEndpointFactory, javax.resource.spi.ActivationSpec)
*/
public void endpointActivation(MessageEndpointFactory endpointFactory, ActivationSpec spec)
throws ResourceException {
}
/* (non-Javadoc)
* @see javax.resource.spi.ResourceAdapter
* #endpointDeactivation(javax.resource.spi.endpoint.MessageEndpointFactory, javax.resource.spi.ActivationSpec)
*/
public void endpointDeactivation(MessageEndpointFactory endpointFactory, ActivationSpec spec) {
}
/* (non-Javadoc)
* @see javax.resource.spi.ResourceAdapter
* #getXAResources(javax.resource.spi.ActivationSpec[])
*/
public XAResource[] getXAResources(ActivationSpec[] specs) throws ResourceException {
//JBoss is fine with null, weblogic requires an empty array
return new XAResource[0];
}
/* (non-Javadoc)
* @see javax.resource.spi.ResourceAdapter#start(javax.resource.spi.BootstrapContext)
*/
public void start(BootstrapContext ctx) throws ResourceAdapterInternalException {
// Gets/creates the hazelcast instance
ConfigBuilder config = buildConfiguration();
setHazelcast(Hazelcast.newHazelcastInstance(config.build()));
}
/**
* Creates a hazelcast configuration based on the {@link #getConfigLocation()}
*
* @return the created hazelcast configuration
* @throws ResourceAdapterInternalException If there was a problem with the configuration creation
*/
private ConfigBuilder buildConfiguration()
throws ResourceAdapterInternalException {
XmlConfigBuilder config;
if (configurationLocation == null || configurationLocation.length() == 0) {
config = new XmlConfigBuilder();
} else {
try {
config = new XmlConfigBuilder(configurationLocation);
} catch (FileNotFoundException e) {
throw new ResourceAdapterInternalException(e.getMessage(), e);
}
}
return config;
}
/* (non-Javadoc)
* @see javax.resource.spi.ResourceAdapter#stop()
*/
public void stop() {
if (getHazelcast() != null) {
getHazelcast().getLifecycleService().shutdown();
}
}
/**
* Sets the underlying hazelcast instance
*/
private void setHazelcast(HazelcastInstance hazelcast) {
this.hazelcast = hazelcast;
}
/**
* Provides access to the underlying hazelcast instance
*/
HazelcastInstance getHazelcast() {
return hazelcast;
}
/**
* Called by the container
*
* @param configLocation Hazelcast's configuration location
*/
public void setConfigLocation(String configLocation) {
this.configurationLocation = configLocation;
}
/**
* @return The configured hazelcast configuration location via RAR deployment descriptor
*/
public String getConfigLocation() {
return configurationLocation;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + id;
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
ResourceAdapterImpl other = (ResourceAdapterImpl) obj;
if (id != other.id) {
return false;
}
return true;
}
}
| 1no label
|
hazelcast-ra_hazelcast-jca_src_main_java_com_hazelcast_jca_ResourceAdapterImpl.java
|
5,852 |
public abstract class SearchContext implements Releasable {
private static ThreadLocal<SearchContext> current = new ThreadLocal<SearchContext>();
public static void setCurrent(SearchContext value) {
current.set(value);
QueryParseContext.setTypes(value.types());
}
public static void removeCurrent() {
current.remove();
QueryParseContext.removeTypes();
}
public static SearchContext current() {
return current.get();
}
public abstract boolean clearAndRelease();
/**
* Should be called before executing the main query and after all other parameters have been set.
*/
public abstract void preProcess();
public abstract Filter searchFilter(String[] types);
public abstract long id();
public abstract String source();
public abstract ShardSearchRequest request();
public abstract SearchType searchType();
public abstract SearchContext searchType(SearchType searchType);
public abstract SearchShardTarget shardTarget();
public abstract int numberOfShards();
public abstract boolean hasTypes();
public abstract String[] types();
public abstract float queryBoost();
public abstract SearchContext queryBoost(float queryBoost);
public abstract long nowInMillis();
public abstract Scroll scroll();
public abstract SearchContext scroll(Scroll scroll);
public abstract SearchContextAggregations aggregations();
public abstract SearchContext aggregations(SearchContextAggregations aggregations);
public abstract SearchContextFacets facets();
public abstract SearchContext facets(SearchContextFacets facets);
public abstract SearchContextHighlight highlight();
public abstract void highlight(SearchContextHighlight highlight);
public abstract SuggestionSearchContext suggest();
public abstract void suggest(SuggestionSearchContext suggest);
/**
* @return list of all rescore contexts. empty if there aren't any.
*/
public abstract List<RescoreSearchContext> rescore();
public abstract void addRescore(RescoreSearchContext rescore);
public abstract boolean hasFieldDataFields();
public abstract FieldDataFieldsContext fieldDataFields();
public abstract boolean hasScriptFields();
public abstract ScriptFieldsContext scriptFields();
public abstract boolean hasPartialFields();
public abstract PartialFieldsContext partialFields();
/**
* A shortcut function to see whether there is a fetchSourceContext and it says the source is requested.
*
* @return
*/
public abstract boolean sourceRequested();
public abstract boolean hasFetchSourceContext();
public abstract FetchSourceContext fetchSourceContext();
public abstract SearchContext fetchSourceContext(FetchSourceContext fetchSourceContext);
public abstract ContextIndexSearcher searcher();
public abstract IndexShard indexShard();
public abstract MapperService mapperService();
public abstract AnalysisService analysisService();
public abstract IndexQueryParserService queryParserService();
public abstract SimilarityService similarityService();
public abstract ScriptService scriptService();
public abstract CacheRecycler cacheRecycler();
public abstract PageCacheRecycler pageCacheRecycler();
public abstract FilterCache filterCache();
public abstract DocSetCache docSetCache();
public abstract IndexFieldDataService fieldData();
public abstract IdCache idCache();
public abstract long timeoutInMillis();
public abstract void timeoutInMillis(long timeoutInMillis);
public abstract SearchContext minimumScore(float minimumScore);
public abstract Float minimumScore();
public abstract SearchContext sort(Sort sort);
public abstract Sort sort();
public abstract SearchContext trackScores(boolean trackScores);
public abstract boolean trackScores();
public abstract SearchContext parsedPostFilter(ParsedFilter postFilter);
public abstract ParsedFilter parsedPostFilter();
public abstract Filter aliasFilter();
public abstract SearchContext parsedQuery(ParsedQuery query);
public abstract ParsedQuery parsedQuery();
/**
* The query to execute, might be rewritten.
*/
public abstract Query query();
/**
* Has the query been rewritten already?
*/
public abstract boolean queryRewritten();
/**
* Rewrites the query and updates it. Only happens once.
*/
public abstract SearchContext updateRewriteQuery(Query rewriteQuery);
public abstract int from();
public abstract SearchContext from(int from);
public abstract int size();
public abstract SearchContext size(int size);
public abstract boolean hasFieldNames();
public abstract List<String> fieldNames();
public abstract void emptyFieldNames();
public abstract boolean explain();
public abstract void explain(boolean explain);
@Nullable
public abstract List<String> groupStats();
public abstract void groupStats(List<String> groupStats);
public abstract boolean version();
public abstract void version(boolean version);
public abstract int[] docIdsToLoad();
public abstract int docIdsToLoadFrom();
public abstract int docIdsToLoadSize();
public abstract SearchContext docIdsToLoad(int[] docIdsToLoad, int docsIdsToLoadFrom, int docsIdsToLoadSize);
public abstract void accessed(long accessTime);
public abstract long lastAccessTime();
public abstract long keepAlive();
public abstract void keepAlive(long keepAlive);
public abstract SearchLookup lookup();
public abstract DfsSearchResult dfsResult();
public abstract QuerySearchResult queryResult();
public abstract FetchSearchResult fetchResult();
public abstract void addReleasable(Releasable releasable);
public abstract void clearReleasables();
public abstract ScanContext scanContext();
public abstract MapperService.SmartNameFieldMappers smartFieldMappers(String name);
public abstract FieldMappers smartNameFieldMappers(String name);
public abstract FieldMapper smartNameFieldMapper(String name);
public abstract MapperService.SmartNameObjectMapper smartNameObjectMapper(String name);
}
| 1no label
|
src_main_java_org_elasticsearch_search_internal_SearchContext.java
|
110 |
public class OUtils {
public static boolean equals(final Object a, final Object b) {
if (a == b)
return true;
if (a != null)
return a.equals(b);
// here a is null and b is not (the a == b check covers both-null): false per the equals contract
return b.equals(a);
}
public static String camelCase(final String iText) {
return Character.toUpperCase(iText.charAt(0)) + iText.substring(1);
}
}
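A few illustrative calls (all values hypothetical):
OUtils.equals(null, null);   // true: the a == b check short-circuits
OUtils.equals("a", null);    // false
OUtils.equals(null, "a");    // false, via b.equals(a)
OUtils.camelCase("orange");  // "Orange"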
| 0true
|
commons_src_main_java_com_orientechnologies_common_io_OUtils.java
|
658 |
constructors[LIST_ADD_ALL] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
public IdentifiedDataSerializable createNew(Integer arg) {
return new ListAddAllOperation();
}
};
| 0true
|
hazelcast_src_main_java_com_hazelcast_collection_CollectionDataSerializerHook.java
|
299 |
public class OTraverseMultiValueDepthFirstProcess extends OTraverseAbstractProcess<Iterator<Object>> {
protected Object value;
protected int index = -1;
public OTraverseMultiValueDepthFirstProcess(final OTraverse iCommand, final Iterator<Object> iTarget) {
super(iCommand, iTarget);
}
public OIdentifiable process() {
while (target.hasNext()) {
value = target.next();
index++;
if (value instanceof OIdentifiable) {
final ORecord<?> rec = ((OIdentifiable) value).getRecord();
if (rec instanceof ODocument) {
final OTraverseRecordProcess subProcess = new OTraverseRecordProcess(command, (ODocument) rec);
final OIdentifiable subValue = subProcess.process();
if (subValue != null)
return subValue;
}
}
}
return drop();
}
@Override
public String getStatus() {
return toString();
}
@Override
public String toString() {
return "[idx:" + index + "]";
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_command_traverse_OTraverseMultiValueDepthFirstProcess.java
|
526 |
public static class CBAuthorisation implements Serializable {
private int amount;
public void setAmount(int amount) {
this.amount = amount;
}
public int getAmount() {
return amount;
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_txn_ClientTxnMapTest.java
|
1,624 |
public class TimedMemberStateFactory {
private final HazelcastInstanceImpl instance;
private final int maxVisibleInstanceCount;
public TimedMemberStateFactory(HazelcastInstanceImpl instance) {
this.instance = instance;
maxVisibleInstanceCount = instance.node.groupProperties.MC_MAX_INSTANCE_COUNT.getInteger();
}
public TimedMemberState createTimedMemberState() {
MemberStateImpl memberState = new MemberStateImpl();
createMemberState(memberState);
GroupConfig groupConfig = instance.getConfig().getGroupConfig();
TimedMemberState timedMemberState = new TimedMemberState();
timedMemberState.setMaster(instance.node.isMaster());
if (timedMemberState.getMaster()) {
timedMemberState.setMemberList(new ArrayList<String>());
Set<Member> memberSet = instance.getCluster().getMembers();
for (Member member : memberSet) {
MemberImpl memberImpl = (MemberImpl) member;
Address address = memberImpl.getAddress();
timedMemberState.getMemberList().add(address.getHost() + ":" + address.getPort());
}
}
timedMemberState.setMemberState(memberState);
timedMemberState.setClusterName(groupConfig.getName());
timedMemberState.setInstanceNames(getLongInstanceNames());
return timedMemberState;
}
private void createMemberState(MemberStateImpl memberState) {
final Node node = instance.node;
memberState.setAddress(node.getThisAddress());
PartitionService partitionService = instance.getPartitionService();
Set<Partition> partitions = partitionService.getPartitions();
memberState.clearPartitions();
for (Partition partition : partitions) {
if (partition.getOwner() != null && partition.getOwner().localMember()) {
memberState.addPartition(partition.getPartitionId());
}
}
Collection<DistributedObject> proxyObjects = new ArrayList<DistributedObject>(instance.getDistributedObjects());
createRuntimeProps(memberState);
createMemState(memberState, proxyObjects);
}
private void createRuntimeProps(MemberStateImpl memberState) {
Runtime runtime = Runtime.getRuntime();
ThreadMXBean threadMxBean = ManagementFactory.getThreadMXBean();
RuntimeMXBean runtimeMxBean = ManagementFactory.getRuntimeMXBean();
ClassLoadingMXBean clMxBean = ManagementFactory.getClassLoadingMXBean();
MemoryMXBean memoryMxBean = ManagementFactory.getMemoryMXBean();
MemoryUsage heapMemory = memoryMxBean.getHeapMemoryUsage();
MemoryUsage nonHeapMemory = memoryMxBean.getNonHeapMemoryUsage();
Map<String, Long> map = new HashMap<String, Long>();
map.put("runtime.availableProcessors", Integer.valueOf(runtime.availableProcessors()).longValue());
map.put("date.startTime", runtimeMxBean.getStartTime());
map.put("seconds.upTime", runtimeMxBean.getUptime());
map.put("memory.maxMemory", runtime.maxMemory());
map.put("memory.freeMemory", runtime.freeMemory());
map.put("memory.totalMemory", runtime.totalMemory());
map.put("memory.heapMemoryMax", heapMemory.getMax());
map.put("memory.heapMemoryUsed", heapMemory.getUsed());
map.put("memory.nonHeapMemoryMax", nonHeapMemory.getMax());
map.put("memory.nonHeapMemoryUsed", nonHeapMemory.getUsed());
map.put("runtime.totalLoadedClassCount", clMxBean.getTotalLoadedClassCount());
map.put("runtime.loadedClassCount", Integer.valueOf(clMxBean.getLoadedClassCount()).longValue());
map.put("runtime.unloadedClassCount", clMxBean.getUnloadedClassCount());
map.put("runtime.totalStartedThreadCount", threadMxBean.getTotalStartedThreadCount());
map.put("runtime.threadCount", Integer.valueOf(threadMxBean.getThreadCount()).longValue());
map.put("runtime.peakThreadCount", Integer.valueOf(threadMxBean.getPeakThreadCount()).longValue());
map.put("runtime.daemonThreadCount", Integer.valueOf(threadMxBean.getDaemonThreadCount()).longValue());
OperatingSystemMXBean osMxBean = ManagementFactory.getOperatingSystemMXBean();
map.put("osMemory.freePhysicalMemory", get(osMxBean, "getFreePhysicalMemorySize", 0L));
map.put("osMemory.committedVirtualMemory", get(osMxBean, "getCommittedVirtualMemorySize", 0L));
map.put("osMemory.totalPhysicalMemory", get(osMxBean, "getTotalPhysicalMemorySize", 0L));
map.put("osSwap.freeSwapSpace", get(osMxBean, "getFreeSwapSpaceSize", 0L));
map.put("osSwap.totalSwapSpace", get(osMxBean, "getTotalSwapSpaceSize", 0L));
map.put("os.maxFileDescriptorCount", get(osMxBean, "getMaxFileDescriptorCount", 0L));
map.put("os.openFileDescriptorCount", get(osMxBean, "getOpenFileDescriptorCount", 0L));
map.put("os.processCpuLoad", get(osMxBean, "getProcessCpuLoad", -1L));
map.put("os.systemLoadAverage", get(osMxBean, "getSystemLoadAverage", -1L));
map.put("os.systemCpuLoad", get(osMxBean, "getSystemCpuLoad", -1L));
map.put("os.processCpuTime", get(osMxBean, "getProcessCpuTime", 0L));
map.put("os.availableProcessors", get(osMxBean, "getAvailableProcessors", 0L));
memberState.setRuntimeProps(map);
}
private static Long get(OperatingSystemMXBean mbean, String methodName, Long defaultValue) {
try {
Method method = mbean.getClass().getMethod(methodName);
method.setAccessible(true);
Object value = method.invoke(mbean);
if (value == null) {
return defaultValue;
}
if (value instanceof Integer) {
return (long) (Integer) value;
}
if (value instanceof Double) {
double v = (Double) value;
return Math.round(v * 100);
}
if (value instanceof Long) {
return (Long) value;
}
return defaultValue;
} catch (RuntimeException re) {
throw re;
} catch (Exception e) {
return defaultValue;
}
}
private void createMemState(MemberStateImpl memberState,
Collection<DistributedObject> distributedObjects) {
int count = 0;
final Config config = instance.getConfig();
for (DistributedObject distributedObject : distributedObjects) {
if (count < maxVisibleInstanceCount) {
if (distributedObject instanceof IMap) {
IMap map = (IMap) distributedObject;
if (config.findMapConfig(map.getName()).isStatisticsEnabled()) {
memberState.putLocalMapStats(map.getName(), (LocalMapStatsImpl) map.getLocalMapStats());
count++;
}
} else if (distributedObject instanceof IQueue) {
IQueue queue = (IQueue) distributedObject;
if (config.findQueueConfig(queue.getName()).isStatisticsEnabled()) {
LocalQueueStatsImpl stats = (LocalQueueStatsImpl) queue.getLocalQueueStats();
memberState.putLocalQueueStats(queue.getName(), stats);
count++;
}
} else if (distributedObject instanceof ITopic) {
ITopic topic = (ITopic) distributedObject;
if (config.findTopicConfig(topic.getName()).isStatisticsEnabled()) {
LocalTopicStatsImpl stats = (LocalTopicStatsImpl) topic.getLocalTopicStats();
memberState.putLocalTopicStats(topic.getName(), stats);
count++;
}
} else if (distributedObject instanceof MultiMap) {
MultiMap multiMap = (MultiMap) distributedObject;
if (config.findMultiMapConfig(multiMap.getName()).isStatisticsEnabled()) {
LocalMultiMapStatsImpl stats = (LocalMultiMapStatsImpl) multiMap.getLocalMultiMapStats();
memberState.putLocalMultiMapStats(multiMap.getName(), stats);
count++;
}
} else if (distributedObject instanceof IExecutorService) {
IExecutorService executorService = (IExecutorService) distributedObject;
if (config.findExecutorConfig(executorService.getName()).isStatisticsEnabled()) {
LocalExecutorStatsImpl stats = (LocalExecutorStatsImpl) executorService.getLocalExecutorStats();
memberState.putLocalExecutorStats(executorService.getName(), stats);
count++;
}
}
}
}
}
private Set<String> getLongInstanceNames() {
Set<String> setLongInstanceNames = new HashSet<String>(maxVisibleInstanceCount);
Collection<DistributedObject> proxyObjects = new ArrayList<DistributedObject>(instance.getDistributedObjects());
collectInstanceNames(setLongInstanceNames, proxyObjects);
return setLongInstanceNames;
}
private void collectInstanceNames(Set<String> setLongInstanceNames,
Collection<DistributedObject> distributedObjects) {
int count = 0;
final Config config = instance.getConfig();
for (DistributedObject distributedObject : distributedObjects) {
if (count < maxVisibleInstanceCount) {
if (distributedObject instanceof MultiMap) {
MultiMap multiMap = (MultiMap) distributedObject;
if (config.findMultiMapConfig(multiMap.getName()).isStatisticsEnabled()) {
setLongInstanceNames.add("m:" + multiMap.getName());
count++;
}
} else if (distributedObject instanceof IMap) {
IMap map = (IMap) distributedObject;
if (config.findMapConfig(map.getName()).isStatisticsEnabled()) {
setLongInstanceNames.add("c:" + map.getName());
count++;
}
} else if (distributedObject instanceof IQueue) {
IQueue queue = (IQueue) distributedObject;
if (config.findQueueConfig(queue.getName()).isStatisticsEnabled()) {
setLongInstanceNames.add("q:" + queue.getName());
count++;
}
} else if (distributedObject instanceof ITopic) {
ITopic topic = (ITopic) distributedObject;
if (config.findTopicConfig(topic.getName()).isStatisticsEnabled()) {
setLongInstanceNames.add("t:" + topic.getName());
count++;
}
} else if (distributedObject instanceof IExecutorService) {
IExecutorService executorService = (IExecutorService) distributedObject;
if (config.findExecutorConfig(executorService.getName()).isStatisticsEnabled()) {
setLongInstanceNames.add("e:" + executorService.getName());
count++;
}
}
}
}
}
}
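A standalone sketch of the reflective probe used in createRuntimeProps(): optional com.sun.management accessors are invoked by name so there is no compile-time dependency on them, and absent methods simply fall back to a default:
import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
import java.lang.reflect.Method;

public class OsProbeDemo {
    public static void main(String[] args) {
        OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
        long free;
        try {
            Method m = os.getClass().getMethod("getFreePhysicalMemorySize");
            m.setAccessible(true);
            free = ((Number) m.invoke(os)).longValue();
        } catch (Exception e) {
            free = 0L; // accessor not available on this JVM
        }
        System.out.println("osMemory.freePhysicalMemory=" + free);
    }
}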
| 1no label
|
hazelcast_src_main_java_com_hazelcast_management_TimedMemberStateFactory.java
|
41 |
public class Stats {
public int waiting_requests;
public int threads;
public int uptime; //seconds
public long cmd_get;
public long cmd_set;
public long cmd_touch;
public long get_hits;
public long get_misses;
public long delete_hits;
public long delete_misses;
public long incr_hits;
public long incr_misses;
public long decr_hits;
public long decr_misses;
public long bytes;
public int curr_connections;
public int total_connections;
// public Stats(int uptime, int threads, long get_misses, long get_hits, long cmd_set, long cmd_get, long bytes) {
// this.uptime = uptime;
// this.threads = threads;
// this.get_misses = get_misses;
// this.get_hits = get_hits;
// this.cmd_set = cmd_set;
// this.cmd_get = cmd_get;
// this.bytes = bytes;
// }
public Stats() {
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_memcache_Stats.java
|
574 |
private static final class ODocumentWrapper {
private final ODocument document;
private ODocumentWrapper(ODocument document) {
this.document = document;
}
@Override
public int hashCode() {
int hashCode = document.getIdentity().hashCode();
for (Object field : document.fieldValues())
hashCode = 31 * hashCode + field.hashCode();
return hashCode;
}
@Override
public boolean equals(Object obj) {
if (obj == null)
return false;
if (obj == document)
return true;
if (obj.getClass() != document.getClass())
return false;
final ODocument anotherDocument = (ODocument) obj;
if (!document.getIdentity().equals(anotherDocument.getIdentity()))
return false;
final String[] fieldNames = document.fieldNames();
final String[] anotherFieldNames = anotherDocument.fieldNames();
if (fieldNames.length != anotherFieldNames.length)
return false;
for (final String fieldName : fieldNames) {
final Object fieldValue = document.field(fieldName);
final Object anotherFieldValue = anotherDocument.field(fieldName);
if (fieldValue == null && anotherFieldValue != null)
return false;
if (fieldValue != null && !fieldValue.equals(anotherFieldValue))
return false;
}
return true;
}
@Override
public String toString() {
return document.toString();
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_index_ODocumentFieldsHashSet.java
|
451 |
static final class Fields {
static final XContentBuilderString NODES = new XContentBuilderString("nodes");
static final XContentBuilderString INDICES = new XContentBuilderString("indices");
static final XContentBuilderString UUID = new XContentBuilderString("uuid");
static final XContentBuilderString CLUSTER_NAME = new XContentBuilderString("cluster_name");
static final XContentBuilderString STATUS = new XContentBuilderString("status");
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_stats_ClusterStatsResponse.java
|
1,350 |
private final class FlushTask implements Runnable {
private FlushTask() {
}
@Override
public void run() {
try {
commit();
} catch (Throwable e) {
OLogManager.instance().error(this, "Error during WAL background flush", e);
}
}
private void commit() throws IOException {
if (pagesCache.isEmpty())
return;
if (!flushNewData)
return;
flushNewData = false;
final int maxSize = pagesCache.size();
ODirectMemoryPointer[] pagesToFlush = new ODirectMemoryPointer[maxSize];
long filePointer = nextPositionToFlush;
int lastRecordOffset = -1;
long lastPageIndex = -1;
int flushedPages = 0;
Iterator<OWALPage> pageIterator = pagesCache.iterator();
while (flushedPages < maxSize) {
final OWALPage page = pageIterator.next();
synchronized (page) {
ODirectMemoryPointer dataPointer;
if (flushedPages == maxSize - 1) {
dataPointer = new ODirectMemoryPointer(OWALPage.PAGE_SIZE);
page.getPagePointer().moveData(0, dataPointer, 0, OWALPage.PAGE_SIZE);
} else {
dataPointer = page.getPagePointer();
}
pagesToFlush[flushedPages] = dataPointer;
int recordOffset = findLastRecord(page, true);
if (recordOffset >= 0) {
lastRecordOffset = recordOffset;
lastPageIndex = flushedPages;
}
}
flushedPages++;
}
flushId++;
synchronized (rndFile) {
rndFile.seek(filePointer);
for (int i = 0; i < pagesToFlush.length; i++) {
ODirectMemoryPointer dataPointer = pagesToFlush[i];
byte[] pageContent = dataPointer.get(0, OWALPage.PAGE_SIZE);
if (i == pagesToFlush.length - 1)
dataPointer.free();
OLongSerializer.INSTANCE.serializeNative(flushId, pageContent, OWALPage.FLUSH_ID_OFFSET);
OIntegerSerializer.INSTANCE.serializeNative(i, pageContent, OWALPage.FLUSH_INDEX_OFFSET);
flushPage(pageContent);
filePointer += OWALPage.PAGE_SIZE;
}
rndFile.getFD().sync();
}
long oldPositionToFlush = nextPositionToFlush;
nextPositionToFlush = filePointer - OWALPage.PAGE_SIZE;
if (lastRecordOffset >= 0)
flushedLsn = new OLogSequenceNumber(order, oldPositionToFlush + lastPageIndex * OWALPage.PAGE_SIZE + lastRecordOffset);
for (int i = 0; i < flushedPages - 1; i++) {
OWALPage page = pagesCache.poll();
page.getPagePointer().free();
}
assert !pagesCache.isEmpty();
}
private void flushPage(byte[] content) throws IOException {
CRC32 crc32 = new CRC32();
crc32.update(content, OIntegerSerializer.INT_SIZE, OWALPage.PAGE_SIZE - OIntegerSerializer.INT_SIZE);
OIntegerSerializer.INSTANCE.serializeNative((int) crc32.getValue(), content, 0);
rndFile.write(content);
}
}
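A standalone sketch of the checksum scheme in flushPage(): a CRC32 over everything past the leading int, written back into that 4-byte slot (the byte order here is illustrative; the original delegates to OIntegerSerializer's native layout):
import java.util.zip.CRC32;

static void stampPageCrc(byte[] page) {
    CRC32 crc32 = new CRC32();
    crc32.update(page, 4, page.length - 4); // skip the CRC slot itself
    int v = (int) crc32.getValue();
    page[0] = (byte) (v >>> 24);
    page[1] = (byte) (v >>> 16);
    page[2] = (byte) (v >>> 8);
    page[3] = (byte) v;
}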
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_paginated_wal_OWriteAheadLog.java
|
1,041 |
public class MultiTermVectorsItemResponse implements Streamable {
private TermVectorResponse response;
private MultiTermVectorsResponse.Failure failure;
MultiTermVectorsItemResponse() {
}
public MultiTermVectorsItemResponse(TermVectorResponse response, MultiTermVectorsResponse.Failure failure) {
assert (((response == null) && (failure != null)) || ((response != null) && (failure == null)));
this.response = response;
this.failure = failure;
}
/**
* The index name of the document.
*/
public String getIndex() {
if (failure != null) {
return failure.getIndex();
}
return response.getIndex();
}
/**
* The type of the document.
*/
public String getType() {
if (failure != null) {
return failure.getType();
}
return response.getType();
}
/**
* The id of the document.
*/
public String getId() {
if (failure != null) {
return failure.getId();
}
return response.getId();
}
/**
* Is this a failed execution?
*/
public boolean isFailed() {
return failure != null;
}
/**
* The actual get response, <tt>null</tt> if its a failure.
*/
public TermVectorResponse getResponse() {
return this.response;
}
/**
* The failure if relevant.
*/
public MultiTermVectorsResponse.Failure getFailure() {
return this.failure;
}
public static MultiTermVectorsItemResponse readItemResponse(StreamInput in) throws IOException {
MultiTermVectorsItemResponse response = new MultiTermVectorsItemResponse();
response.readFrom(in);
return response;
}
@Override
public void readFrom(StreamInput in) throws IOException {
if (in.readBoolean()) {
failure = MultiTermVectorsResponse.Failure.readFailure(in);
} else {
response = new TermVectorResponse();
response.readFrom(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
if (failure != null) {
out.writeBoolean(true);
failure.writeTo(out);
} else {
out.writeBoolean(false);
response.writeTo(out);
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_termvector_MultiTermVectorsItemResponse.java
|
568 |
public class DemoOracleSingleLineSqlCommandExtractor extends SingleLineSqlCommandExtractor {
private static final SupportLogger LOGGER = SupportLogManager.getLogger("UserOverride", DemoOracleSingleLineSqlCommandExtractor.class);
private static final String BOOLEANTRUEMATCH = "(?i)(true)";
private static final String BOOLEANFALSEMATCH = "(?i)(false)";
private static final String TIMESTAMPMATCH = "(?<!\\{ts\\s)('\\d{4}-\\d{2}-\\d{2}\\s\\d{2}:\\d{2}:\\d{2}')";
public static final String TRUE = "1";
public static final String FALSE = "0";
protected boolean alreadyRun = false;
@Override
public String[] extractCommands(Reader reader) {
if (!alreadyRun) {
alreadyRun = true;
LOGGER.support("Converting hibernate.hbm2ddl.import_files sql statements for compatibility with Oracle");
}
String[] statements = super.extractCommands(reader);
for (int j=0; j<statements.length; j++) {
//try start matches
statements[j] = statements[j].replaceAll(BOOLEANTRUEMATCH + "\\s*[,]", TRUE + ",");
statements[j] = statements[j].replaceAll(BOOLEANFALSEMATCH + "\\s*[,]", FALSE + ",");
//try middle matches
statements[j] = statements[j].replaceAll("[,]\\s*" + BOOLEANTRUEMATCH + "\\s*[,]", "," + TRUE + ",");
statements[j] = statements[j].replaceAll("[,]\\s*" + BOOLEANFALSEMATCH + "\\s*[,]", "," + FALSE + ",");
//try end matches
statements[j] = statements[j].replaceAll("[,]\\s*" + BOOLEANTRUEMATCH, "," + TRUE);
statements[j] = statements[j].replaceAll("[,]\\s*" + BOOLEANFALSEMATCH, "," + FALSE);
}
//split multi-row inserts, which Oracle does not accept
List<String> stringList = new ArrayList<String>(Arrays.asList(statements)); //Arrays.asList returns a fixed-size list
int j=0;
for (String statement : statements) {
if (statement.matches(".*[)]\\s*[,].*")) {
int pos = statement.toUpperCase().indexOf("VALUES ") + "VALUES ".length();
String prefix = statement.substring(0, pos);
stringList.remove(j);
String values = statement.substring(pos, statement.length());
String[] tokens = values.split("[)]\\s*[,]\\s*[(]");
String[] newStatements = new String[tokens.length];
for (int i=0; i<tokens.length; i++) {
String suffix = tokens[i];
if (!suffix.startsWith("(")) {
suffix = "(" + suffix;
}
if (!suffix.endsWith(")")) {
suffix += ")";
}
newStatements[i] = prefix + suffix;
}
stringList.addAll(j, Arrays.asList(newStatements));
j += tokens.length;
} else {
j++;
}
}
//Address raw string dates, if any, for Oracle
Pattern pattern = Pattern.compile(TIMESTAMPMATCH);
statements = stringList.toArray(new String[stringList.size()]);
for (int x=0; x<statements.length; x++) {
Matcher matcher = pattern.matcher(statements[x]);
while (matcher.find()) {
String date = matcher.group(1);
String temp = statements[x].substring(0, statements[x].indexOf(date)) + "{ts " + date + "}" +
statements[x].substring(statements[x].indexOf(date) + date.length(), statements[x].length());
statements[x] = temp;
}
}
return statements;
}
}
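A hedged illustration of the net effect (input statement hypothetical): booleans become 1/0, the multi-row VALUES list is split into per-row inserts, and bare timestamps gain the {ts ...} escape. The exact splitting of the raw text depends on the inherited extractCommands:
String sql = "INSERT INTO T (A, B) VALUES (true, '2010-01-01 12:00:00'), (false, '2010-01-02 12:00:00')";
String[] out = new DemoOracleSingleLineSqlCommandExtractor()
        .extractCommands(new java.io.StringReader(sql));
// roughly: out[0] = INSERT INTO T (A, B) VALUES (1, {ts '2010-01-01 12:00:00'})
//          out[1] = INSERT INTO T (A, B) VALUES (0, {ts '2010-01-02 12:00:00'})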
| 0true
|
common_src_main_java_org_broadleafcommerce_common_util_sql_importsql_DemoOracleSingleLineSqlCommandExtractor.java
|
380 |
public interface ODetachable {
/**
* Detaches the object.
*
* @return true if the object has been fully detached, otherwise false
*/
public boolean detach();
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_db_record_ODetachable.java
|
1,309 |
public class EmptyClusterInfoService extends AbstractComponent implements ClusterInfoService {
private final static class Holder {
private final static EmptyClusterInfoService instance = new EmptyClusterInfoService();
}
private final ClusterInfo emptyClusterInfo;
private EmptyClusterInfoService() {
super(ImmutableSettings.EMPTY);
emptyClusterInfo = new ClusterInfo(ImmutableMap.<String, DiskUsage>of(), ImmutableMap.<String, Long>of());
}
public static EmptyClusterInfoService getInstance() {
return Holder.instance;
}
@Override
public ClusterInfo getClusterInfo() {
return emptyClusterInfo;
}
}
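The nested Holder is the initialization-on-demand idiom: class loading guarantees the singleton is created lazily, exactly once, and without explicit locking. Callers simply do:
ClusterInfoService service = EmptyClusterInfoService.getInstance();
ClusterInfo info = service.getClusterInfo(); // always the same empty snapshot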
| 0true
|
src_main_java_org_elasticsearch_cluster_EmptyClusterInfoService.java
|
70 |
@Repository("blStaticAssetDao")
public class StaticAssetDaoImpl implements StaticAssetDao {
private static SandBox DUMMY_SANDBOX = new SandBoxImpl();
{
DUMMY_SANDBOX.setId(-1L);
}
@PersistenceContext(unitName = "blPU")
protected EntityManager em;
@Resource(name="blEntityConfiguration")
protected EntityConfiguration entityConfiguration;
@Override
public StaticAsset readStaticAssetById(Long id) {
return em.find(StaticAssetImpl.class, id);
}
public List<StaticAsset> readAllStaticAssets() {
CriteriaBuilder builder = em.getCriteriaBuilder();
CriteriaQuery<StaticAsset> criteria = builder.createQuery(StaticAsset.class);
Root<StaticAssetImpl> handler = criteria.from(StaticAssetImpl.class);
criteria.select(handler);
try {
return em.createQuery(criteria).getResultList();
} catch (NoResultException e) {
return new ArrayList<StaticAsset>();
}
}
@Override
public StaticAsset readStaticAssetByFullUrl(String fullUrl, SandBox targetSandBox) {
TypedQuery<StaticAsset> query;
if (targetSandBox == null) {
query = em.createNamedQuery("BC_READ_STATIC_ASSET_BY_FULL_URL_AND_TARGET_SANDBOX_NULL", StaticAsset.class);
query.setParameter("fullUrl", fullUrl);
} else {
query = em.createNamedQuery("BC_READ_STATIC_ASSET_BY_FULL_URL", StaticAsset.class);
query.setParameter("targetSandbox", targetSandBox);
query.setParameter("fullUrl", fullUrl);
}
query.setHint(QueryHints.HINT_CACHEABLE, true);
List<StaticAsset> results = query.getResultList();
if (CollectionUtils.isEmpty(results)) {
return null;
} else {
return results.iterator().next();
}
}
@Override
public StaticAsset addOrUpdateStaticAsset(StaticAsset asset, boolean clearLevel1Cache) {
if (clearLevel1Cache) {
em.detach(asset);
}
return em.merge(asset);
}
@Override
public void delete(StaticAsset asset) {
if (!em.contains(asset)) {
asset = readStaticAssetById(asset.getId());
}
em.remove(asset);
}
}
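A usage sketch (URL hypothetical, staticAssetDao an injected instance of the DAO above): passing a null sandbox selects the *_TARGET_SANDBOX_NULL named query, and the cacheable hint lets repeated lookups hit the query cache:
StaticAsset asset = staticAssetDao.readStaticAssetByFullUrl("/img/logo.png", null);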
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_file_dao_StaticAssetDaoImpl.java
|
1,421 |
public class OChannelBinaryAsynchClient extends OChannelBinary {
private final Condition readCondition = lockRead.getUnderlying().newCondition();
private volatile boolean channelRead = false;
private byte currentStatus;
private int currentSessionId;
private final int maxUnreadResponses;
protected final int socketTimeout; // IN MS
protected final short srvProtocolVersion;
private final String serverURL;
private OAsynchChannelServiceThread serviceThread;
public OChannelBinaryAsynchClient(final String remoteHost, final int remotePort, final OContextConfiguration iConfig,
final int iProtocolVersion) throws IOException {
this(remoteHost, remotePort, iConfig, iProtocolVersion, null);
}
public OChannelBinaryAsynchClient(final String remoteHost, final int remotePort, final OContextConfiguration iConfig,
final int protocolVersion, final ORemoteServerEventListener asynchEventListener) throws IOException {
super(new Socket(), iConfig);
maxUnreadResponses = OGlobalConfiguration.NETWORK_BINARY_READ_RESPONSE_MAX_TIMES.getValueAsInteger();
serverURL = remoteHost + ":" + remotePort;
socketTimeout = iConfig.getValueAsInteger(OGlobalConfiguration.NETWORK_SOCKET_TIMEOUT);
socket.setPerformancePreferences(0, 2, 1);
socket.setKeepAlive(true);
socket.setSendBufferSize(socketBufferSize);
socket.setReceiveBufferSize(socketBufferSize);
try {
socket.connect(new InetSocketAddress(remoteHost, remotePort), socketTimeout);
connected();
} catch (java.net.SocketTimeoutException e) {
throw new IOException("Cannot connect to host " + remoteHost + ":" + remotePort, e);
}
inStream = new BufferedInputStream(socket.getInputStream(), socketBufferSize);
outStream = new BufferedOutputStream(socket.getOutputStream(), socketBufferSize);
in = new DataInputStream(inStream);
out = new DataOutputStream(outStream);
try {
srvProtocolVersion = readShort();
} catch (IOException e) {
throw new ONetworkProtocolException("Cannot read protocol version from remote server " + socket.getRemoteSocketAddress()
+ ": " + e);
}
if (srvProtocolVersion != protocolVersion) {
OLogManager.instance().warn(
this,
"The Client driver version is different than Server version: client=" + protocolVersion + ", server="
+ srvProtocolVersion
+ ". You could not use the full features of the newer version. Assure to have the same versions on both");
}
if (asynchEventListener != null)
serviceThread = new OAsynchChannelServiceThread(asynchEventListener, this);
}
public void beginRequest() {
acquireWriteLock();
}
public void endRequest() throws IOException {
flush();
releaseWriteLock();
}
public void beginResponse(final int iRequesterId) throws IOException {
beginResponse(iRequesterId, timeout);
}
public void beginResponse(final int iRequesterId, final long iTimeout) throws IOException {
try {
int unreadResponse = 0;
final long startClock = iTimeout > 0 ? System.currentTimeMillis() : 0;
// WAIT FOR THE RESPONSE
do {
if (iTimeout <= 0)
acquireReadLock();
else if (!lockRead.tryAcquireLock(iTimeout, TimeUnit.MILLISECONDS))
throw new OTimeoutException("Cannot acquire read lock against channel: " + this);
if (!channelRead) {
channelRead = true;
try {
currentStatus = readByte();
currentSessionId = readInt();
if (debug)
OLogManager.instance().debug(this, "%s - Read response: %d-%d", socket.getLocalAddress(), (int) currentStatus,
currentSessionId);
} catch (IOException e) {
// UNLOCK THE RESOURCE AND PROPAGATES THE EXCEPTION
channelRead = false;
readCondition.signalAll();
releaseReadLock();
throw e;
}
}
if (currentSessionId == iRequesterId)
// IT'S FOR ME
break;
try {
if (debug)
OLogManager.instance().debug(this, "%s - Session %d skip response, it is for %d", socket.getLocalAddress(),
iRequesterId, currentSessionId);
if (iTimeout > 0 && (System.currentTimeMillis() - startClock) > iTimeout) {
// CLOSE THE SOCKET TO CHANNEL TO AVOID FURTHER DIRTY DATA
close();
throw new OTimeoutException("Timeout on reading response from the server "
+ (socket != null ? socket.getRemoteSocketAddress() : "") + " for the request " + iRequesterId);
}
if (unreadResponse > maxUnreadResponses) {
if (debug)
OLogManager.instance().info(this, "Unread responses %d > %d, consider the buffer as dirty: clean it", unreadResponse,
maxUnreadResponses);
close();
throw new IOException("Timeout on reading response");
}
readCondition.signalAll();
if (debug)
OLogManager.instance().debug(this, "Session %d is going to sleep...", iRequesterId);
final long start = System.currentTimeMillis();
// WAIT 1 SECOND AND RETRY
readCondition.await(1, TimeUnit.SECONDS);
final long now = System.currentTimeMillis();
if (debug)
OLogManager.instance().debug(this, "Waked up: slept %dms, checking again from %s for session %d", (now - start),
socket.getLocalAddress(), iRequesterId);
if (now - start >= 1000)
unreadResponse++;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} finally {
releaseReadLock();
}
} while (true);
if (debug)
OLogManager.instance().debug(this, "%s - Session %d handle response", socket.getLocalAddress(), iRequesterId);
handleStatus(currentStatus, currentSessionId);
} catch (OLockException e) {
Thread.currentThread().interrupt();
// NEVER HAPPENS?
e.printStackTrace();
}
}
protected int handleStatus(final byte iResult, final int iClientTxId) throws IOException {
if (iResult == OChannelBinaryProtocol.RESPONSE_STATUS_OK || iResult == OChannelBinaryProtocol.PUSH_DATA) {
} else if (iResult == OChannelBinaryProtocol.RESPONSE_STATUS_ERROR) {
StringBuilder buffer = new StringBuilder();
final List<OPair<String, String>> exceptions = new ArrayList<OPair<String, String>>();
// EXCEPTION
while (readByte() == 1) {
final String excClassName = readString();
final String excMessage = readString();
exceptions.add(new OPair<String, String>(excClassName, excMessage));
buffer.append(excClassName).append(": ").append(excMessage).append('\n');
}
byte[] serializedException = null;
if (srvProtocolVersion >= 19)
serializedException = readBytes();
Exception previous = null;
if (serializedException != null && serializedException.length > 0)
throwSerializedException(serializedException);
for (int i = exceptions.size() - 1; i > -1; --i) {
previous = createException(exceptions.get(i).getKey(), exceptions.get(i).getValue(), previous);
}
if (previous != null) {
throw new RuntimeException(previous);
} else
throw new ONetworkProtocolException("Network response error: " + buffer.toString());
} else {
// PROTOCOL ERROR
// close();
throw new ONetworkProtocolException("Error on reading response from the server");
}
return iClientTxId;
}
private void throwSerializedException(byte[] serializedException) throws IOException {
final OMemoryInputStream inputStream = new OMemoryInputStream(serializedException);
final ObjectInputStream objectInputStream = new ObjectInputStream(inputStream);
Object throwable = null;
try {
throwable = objectInputStream.readObject();
} catch (ClassNotFoundException e) {
OLogManager.instance().error(this, "Error during exception serialization.", e);
}
objectInputStream.close();
if (throwable instanceof Throwable)
throw new OResponseProcessingException("Exception during response processing.", (Throwable) throwable);
else
OLogManager.instance().error(
this,
"Error during exception serialization, serialized exception is not Throwable, exception type is "
+ (throwable != null ? throwable.getClass().getName() : "null"));
}
@SuppressWarnings("unchecked")
private static RuntimeException createException(final String iClassName, final String iMessage, final Exception iPrevious) {
RuntimeException rootException = null;
Constructor<?> c = null;
try {
final Class<RuntimeException> excClass = (Class<RuntimeException>) Class.forName(iClassName);
if (iPrevious != null) {
try {
c = excClass.getConstructor(String.class, Throwable.class);
} catch (NoSuchMethodException e) {
c = excClass.getConstructor(String.class, Exception.class);
}
}
if (c == null)
c = excClass.getConstructor(String.class);
} catch (Exception e) {
// UNABLE TO REPRODUCE THE SAME SERVER-SIZE EXCEPTION: THROW A STORAGE EXCEPTION
rootException = new OStorageException(iMessage, iPrevious);
}
if (c != null)
try {
final Throwable e;
if (c.getParameterTypes().length > 1)
e = (Throwable) c.newInstance(iMessage, iPrevious);
else
e = (Throwable) c.newInstance(iMessage);
if (e instanceof RuntimeException)
rootException = (RuntimeException) e;
else
rootException = new OException(e);
} catch (InstantiationException e) {
// ignored: return whatever exception could be built so far
} catch (IllegalAccessException e) {
// ignored
} catch (InvocationTargetException e) {
// ignored
}
return rootException;
}
public void endResponse() {
channelRead = false;
// WAKE UP ALL THE WAITING THREADS
try {
readCondition.signalAll();
} catch (IllegalMonitorStateException e) {
// IGNORE IT
OLogManager.instance().debug(this, "Error on signaling waiting clients after reading response");
}
try {
releaseReadLock();
} catch (IllegalMonitorStateException e) {
// IGNORE IT
OLogManager.instance().debug(this, "Error on unlocking network channel after reading response");
}
}
@Override
public void close() {
if (lockRead.tryAcquireLock())
try {
readCondition.signalAll();
} finally {
releaseReadLock();
}
super.close();
if (serviceThread != null) {
final OAsynchChannelServiceThread s = serviceThread;
serviceThread = null;
s.sendShutdown();
}
}
@Override
public void clearInput() throws IOException {
acquireReadLock();
try {
super.clearInput();
} finally {
releaseReadLock();
}
}
/**
* Tells if the channel is connected.
*
* @return true if it's connected, otherwise false.
*/
public boolean isConnected() {
if (socket != null && socket.isConnected() && !socket.isInputShutdown() && !socket.isOutputShutdown())
return true;
return false;
}
/**
* Gets the major supported protocol version
*
*/
public short getSrvProtocolVersion() {
return srvProtocolVersion;
}
public OAdaptiveLock getLockRead() {
return lockRead;
}
public OAdaptiveLock getLockWrite() {
return lockWrite;
}
public String getServerURL() {
return serverURL;
}
}
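A standalone sketch of the reconstruction strategy in createException(): resolve the server-side class by name, prefer a (String, Throwable) constructor, and fall back to a generic wrapper when nothing can be instantiated:
static RuntimeException rebuild(String className, String message, Exception previous) {
    try {
        Class<?> excClass = Class.forName(className);
        try {
            return (RuntimeException) excClass
                    .getConstructor(String.class, Throwable.class)
                    .newInstance(message, previous);
        } catch (NoSuchMethodException e) {
            return (RuntimeException) excClass
                    .getConstructor(String.class)
                    .newInstance(message);
        }
    } catch (Exception e) {
        return new RuntimeException(message, previous); // reconstruction failed
    }
}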
| 1no label
|
enterprise_src_main_java_com_orientechnologies_orient_enterprise_channel_binary_OChannelBinaryAsynchClient.java
|
168 |
public interface URLHandlerService {
/**
* Checks the passed in URL to determine if there is a matching URLHandler.
* Returns null if no handler was found.
*
* @param uri
* @return
*/
public URLHandler findURLHandlerByURI(String uri);
public List<URLHandler> findAllURLHandlers();
public URLHandler saveURLHandler(URLHandler handler);
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_url_service_URLHandlerService.java
|
306 |
public class ClusterHealthRequest extends MasterNodeReadOperationRequest<ClusterHealthRequest> {
private String[] indices;
private TimeValue timeout = new TimeValue(30, TimeUnit.SECONDS);
private ClusterHealthStatus waitForStatus;
private int waitForRelocatingShards = -1;
private int waitForActiveShards = -1;
private String waitForNodes = "";
private Priority waitForEvents = null;
ClusterHealthRequest() {
}
public ClusterHealthRequest(String... indices) {
this.indices = indices;
}
public String[] indices() {
return indices;
}
public ClusterHealthRequest indices(String[] indices) {
this.indices = indices;
return this;
}
public TimeValue timeout() {
return timeout;
}
public ClusterHealthRequest timeout(TimeValue timeout) {
this.timeout = timeout;
if (masterNodeTimeout == DEFAULT_MASTER_NODE_TIMEOUT) {
masterNodeTimeout = timeout;
}
return this;
}
public ClusterHealthRequest timeout(String timeout) {
return this.timeout(TimeValue.parseTimeValue(timeout, null));
}
public ClusterHealthStatus waitForStatus() {
return waitForStatus;
}
public ClusterHealthRequest waitForStatus(ClusterHealthStatus waitForStatus) {
this.waitForStatus = waitForStatus;
return this;
}
public ClusterHealthRequest waitForGreenStatus() {
return waitForStatus(ClusterHealthStatus.GREEN);
}
public ClusterHealthRequest waitForYellowStatus() {
return waitForStatus(ClusterHealthStatus.YELLOW);
}
public int waitForRelocatingShards() {
return waitForRelocatingShards;
}
public ClusterHealthRequest waitForRelocatingShards(int waitForRelocatingShards) {
this.waitForRelocatingShards = waitForRelocatingShards;
return this;
}
public int waitForActiveShards() {
return waitForActiveShards;
}
public ClusterHealthRequest waitForActiveShards(int waitForActiveShards) {
this.waitForActiveShards = waitForActiveShards;
return this;
}
public String waitForNodes() {
return waitForNodes;
}
/**
* Waits for N number of nodes. Use "12" for exact mapping, ">12" and "<12" for range.
*/
public ClusterHealthRequest waitForNodes(String waitForNodes) {
this.waitForNodes = waitForNodes;
return this;
}
public ClusterHealthRequest waitForEvents(Priority waitForEvents) {
this.waitForEvents = waitForEvents;
return this;
}
public Priority waitForEvents() {
return this.waitForEvents;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
if (size == 0) {
indices = Strings.EMPTY_ARRAY;
} else {
indices = new String[size];
for (int i = 0; i < indices.length; i++) {
indices[i] = in.readString();
}
}
timeout = readTimeValue(in);
if (in.readBoolean()) {
waitForStatus = ClusterHealthStatus.fromValue(in.readByte());
}
waitForRelocatingShards = in.readInt();
waitForActiveShards = in.readInt();
waitForNodes = in.readString();
readLocal(in);
if (in.readBoolean()) {
waitForEvents = Priority.fromByte(in.readByte());
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
if (indices == null) {
out.writeVInt(0);
} else {
out.writeVInt(indices.length);
for (String index : indices) {
out.writeString(index);
}
}
timeout.writeTo(out);
if (waitForStatus == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeByte(waitForStatus.value());
}
out.writeInt(waitForRelocatingShards);
out.writeInt(waitForActiveShards);
out.writeString(waitForNodes);
writeLocal(out);
if (waitForEvents == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeByte(waitForEvents.value());
}
}
}
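A fluent usage sketch of the setters above (index names hypothetical):
ClusterHealthRequest request = new ClusterHealthRequest("logs", "metrics")
        .timeout("10s")
        .waitForYellowStatus()
        .waitForRelocatingShards(0)
        .waitForNodes(">2");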
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_health_ClusterHealthRequest.java
|
119 |
public static class StressThread extends Thread
{
private static final Object READ = new Object();
private static final Object WRITE = new Object();
private static final ResourceObject[] resources = new ResourceObject[10];
private final Random rand = new Random( currentTimeMillis() );
static
{
for ( int i = 0; i < resources.length; i++ )
resources[i] = new ResourceObject( "RX" + i );
}
private final CountDownLatch startSignal;
private final String name;
private final int numberOfIterations;
private final int depthCount;
private final float readWriteRatio;
private final LockManager lm;
private volatile Exception error;
private final Transaction tx = mock( Transaction.class );
public volatile Long startedWaiting = null;
StressThread( String name, int numberOfIterations, int depthCount,
float readWriteRatio, LockManager lm, CountDownLatch startSignal )
{
super();
this.name = name;
this.numberOfIterations = numberOfIterations;
this.depthCount = depthCount;
this.readWriteRatio = readWriteRatio;
this.lm = lm;
this.startSignal = startSignal;
}
@Override
public void run()
{
try
{
startSignal.await();
java.util.Stack<Object> lockStack = new java.util.Stack<Object>();
java.util.Stack<ResourceObject> resourceStack = new java.util.Stack<ResourceObject>();
for ( int i = 0; i < numberOfIterations; i++ )
{
try
{
int depth = depthCount;
do
{
float f = rand.nextFloat();
int n = rand.nextInt( resources.length );
if ( f < readWriteRatio )
{
startedWaiting = currentTimeMillis();
lm.getReadLock( resources[n], tx );
startedWaiting = null;
lockStack.push( READ );
}
else
{
startedWaiting = currentTimeMillis();
lm.getWriteLock( resources[n], tx );
startedWaiting = null;
lockStack.push( WRITE );
}
resourceStack.push( resources[n] );
}
while ( --depth > 0 );
}
catch ( DeadlockDetectedException e )
{
// This is good
}
finally
{
releaseAllLocks( lockStack, resourceStack );
}
}
}
catch ( Exception e )
{
error = e;
}
}
private void releaseAllLocks( Stack<Object> lockStack, Stack<ResourceObject> resourceStack )
{
while ( !lockStack.isEmpty() )
{
if ( lockStack.pop() == READ )
{
lm.releaseReadLock( resourceStack.pop(), tx );
}
else
{
lm.releaseWriteLock( resourceStack.pop(), tx );
}
}
}
@Override
public String toString()
{
return this.name;
}
}
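A hedged driver sketch: park several StressThreads behind one latch so lock contention starts simultaneously; createLockManager() is a hypothetical test-fixture factory, and the surrounding method is assumed to declare throws InterruptedException:
CountDownLatch startSignal = new CountDownLatch(1);
LockManager lm = createLockManager(); // hypothetical fixture
StressThread[] threads = new StressThread[5];
for (int i = 0; i < threads.length; i++) {
    threads[i] = new StressThread("T" + i, 100, 2, 0.8f, lm, startSignal);
    threads[i].start();
}
startSignal.countDown(); // release all threads at once
for (StressThread t : threads) {
    t.join();
}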
| 0true
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestDeadlockDetection.java
|
3,612 |
public class TransactionManagerServiceImpl implements TransactionManagerService, ManagedService,
MembershipAwareService, ClientAwareService {
public static final String SERVICE_NAME = "hz:core:txManagerService";
public static final int RECOVER_TIMEOUT = 5000;
private final NodeEngineImpl nodeEngine;
private final ILogger logger;
private final ConcurrentMap<String, TxBackupLog> txBackupLogs = new ConcurrentHashMap<String, TxBackupLog>();
private final ConcurrentMap<SerializableXID, Transaction>
managedTransactions = new ConcurrentHashMap<SerializableXID, Transaction>();
private final ConcurrentMap<SerializableXID, RecoveredTransaction>
clientRecoveredTransactions = new ConcurrentHashMap<SerializableXID, RecoveredTransaction>();
public TransactionManagerServiceImpl(NodeEngineImpl nodeEngine) {
this.nodeEngine = nodeEngine;
logger = nodeEngine.getLogger(TransactionManagerService.class);
}
@Override
public <T> T executeTransaction(TransactionOptions options, TransactionalTask<T> task) throws TransactionException {
if (task == null) {
throw new NullPointerException("TransactionalTask is required!");
}
final TransactionContextImpl context = new TransactionContextImpl(this, nodeEngine, options, null);
context.beginTransaction();
try {
final T value = task.execute(context);
context.commitTransaction();
return value;
} catch (Throwable e) {
context.rollbackTransaction();
if (e instanceof TransactionException) {
throw (TransactionException) e;
}
if (e.getCause() instanceof TransactionException) {
throw (TransactionException) e.getCause();
}
if (e instanceof RuntimeException) {
throw (RuntimeException) e;
}
throw new TransactionException(e);
}
}
@Override
public TransactionContext newTransactionContext(TransactionOptions options) {
return new TransactionContextImpl(this, nodeEngine, options, null);
}
@Override
public TransactionContext newClientTransactionContext(TransactionOptions options, String clientUuid) {
return new TransactionContextImpl(this, nodeEngine, options, clientUuid);
}
@Override
public void init(NodeEngine nodeEngine, Properties properties) {
}
@Override
public void reset() {
txBackupLogs.clear();
}
@Override
public void shutdown(boolean terminate) {
reset();
}
@Override
public void memberAdded(MembershipServiceEvent event) {
}
public void addClientRecoveredTransaction(RecoveredTransaction rt) {
clientRecoveredTransactions.put(rt.getXid(), rt);
}
public void recoverClientTransaction(SerializableXID sXid, boolean commit) {
final RecoveredTransaction rt = clientRecoveredTransactions.remove(sXid);
if (rt == null) {
return;
}
TransactionImpl tx = new TransactionImpl(this, nodeEngine, rt.getTxnId(), rt.getTxLogs(),
rt.getTimeoutMillis(), rt.getStartTime(), rt.getCallerUuid());
if (commit) {
try {
tx.commit();
} catch (Throwable e) {
logger.warning("Error during committing recovered client transaction!", e);
}
} else {
try {
tx.rollback();
} catch (Throwable e) {
logger.warning("Error during rolling-back recovered client transaction!", e);
}
}
}
@Override
public void memberRemoved(MembershipServiceEvent event) {
final MemberImpl member = event.getMember();
String uuid = member.getUuid();
finalizeTransactionsOf(uuid);
}
@Override
public void memberAttributeChanged(MemberAttributeServiceEvent event) {
}
public void addManagedTransaction(Xid xid, Transaction transaction) {
final SerializableXID sXid = new SerializableXID(xid.getFormatId(),
xid.getGlobalTransactionId(), xid.getBranchQualifier());
((TransactionImpl) transaction).setXid(sXid);
managedTransactions.put(sXid, transaction);
}
public Transaction getManagedTransaction(Xid xid) {
final SerializableXID sXid = new SerializableXID(xid.getFormatId(),
xid.getGlobalTransactionId(), xid.getBranchQualifier());
return managedTransactions.get(sXid);
}
public void removeManagedTransaction(Xid xid) {
final SerializableXID sXid = new SerializableXID(xid.getFormatId(),
xid.getGlobalTransactionId(), xid.getBranchQualifier());
managedTransactions.remove(sXid);
}
private void finalizeTransactionsOf(String uuid) {
for (Map.Entry<String, TxBackupLog> entry : txBackupLogs.entrySet()) {
finalize(uuid, entry.getKey(), entry.getValue());
}
}
private void finalize(String uuid, String txnId, TxBackupLog log) {
OperationService operationService = nodeEngine.getOperationService();
if (!uuid.equals(log.callerUuid)) {
return;
}
//TODO shouldn't we remove TxBackupLog from map ?
if (log.state == State.ACTIVE) {
Collection<MemberImpl> memberList = nodeEngine.getClusterService().getMemberList();
Collection<Future> futures = new ArrayList<Future>(memberList.size());
for (MemberImpl member : memberList) {
Operation op = new BroadcastTxRollbackOperation(txnId);
Future f = operationService.invokeOnTarget(SERVICE_NAME, op, member.getAddress());
futures.add(f);
}
for (Future future : futures) {
try {
future.get(TransactionOptions.getDefault().getTimeoutMillis(), TimeUnit.MILLISECONDS);
} catch (Exception e) {
logger.warning("Error while rolling-back tx!");
}
}
} else {
if (log.state == State.COMMITTING && log.xid != null) {
logger.warning("This log is XA Managed " + log);
//Marking for recovery
log.state = State.NO_TXN;
return;
}
TransactionImpl tx = new TransactionImpl(this, nodeEngine, txnId, log.txLogs,
log.timeoutMillis, log.startTime, log.callerUuid);
if (log.state == State.COMMITTING) {
try {
tx.commit();
} catch (Throwable e) {
logger.warning("Error during committing from tx backup!", e);
}
} else {
try {
tx.rollback();
} catch (Throwable e) {
logger.warning("Error during rolling-back from tx backup!", e);
}
}
}
}
@Override
public void clientDisconnected(String clientUuid) {
finalizeTransactionsOf(clientUuid);
}
Address[] pickBackupAddresses(int durability) {
final ClusterService clusterService = nodeEngine.getClusterService();
final List<MemberImpl> members = new ArrayList<MemberImpl>(clusterService.getMemberList());
members.remove(nodeEngine.getLocalMember());
final int c = Math.min(members.size(), durability);
Collections.shuffle(members);
Address[] addresses = new Address[c];
for (int i = 0; i < c; i++) {
addresses[i] = members.get(i).getAddress();
}
return addresses;
}
public void addTxBackupLogForClientRecovery(Transaction transaction) {
TransactionImpl txnImpl = (TransactionImpl) transaction;
final String callerUuid = txnImpl.getOwnerUuid();
final SerializableXID xid = txnImpl.getXid();
final List<TransactionLog> txLogs = txnImpl.getTxLogs();
final long timeoutMillis = txnImpl.getTimeoutMillis();
final long startTime = txnImpl.getStartTime();
TxBackupLog log = new TxBackupLog(txLogs, callerUuid, State.COMMITTING, timeoutMillis, startTime, xid);
txBackupLogs.put(txnImpl.getTxnId(), log);
}
void beginTxBackupLog(String callerUuid, String txnId, SerializableXID xid) {
TxBackupLog log
= new TxBackupLog(Collections.<TransactionLog>emptyList(), callerUuid, State.ACTIVE, -1, -1, xid);
if (txBackupLogs.putIfAbsent(txnId, log) != null) {
throw new TransactionException("TxLog already exists!");
}
}
void prepareTxBackupLog(List<TransactionLog> txLogs, String callerUuid, String txnId,
long timeoutMillis, long startTime) {
TxBackupLog beginLog = txBackupLogs.get(txnId);
if (beginLog == null) {
throw new TransactionException("Could not find begin tx log!");
}
if (beginLog.state != State.ACTIVE) {
throw new TransactionException("TxLog already exists!");
}
TxBackupLog newTxBackupLog
= new TxBackupLog(txLogs, callerUuid, State.COMMITTING, timeoutMillis, startTime, beginLog.xid);
if (!txBackupLogs.replace(txnId, beginLog, newTxBackupLog)) {
throw new TransactionException("TxLog already exists!");
}
}
void rollbackTxBackupLog(String txnId) {
final TxBackupLog log = txBackupLogs.get(txnId);
if (log != null) {
log.state = State.ROLLING_BACK;
} else {
logger.warning("No tx backup log is found, tx -> " + txnId);
}
}
void purgeTxBackupLog(String txnId) {
txBackupLogs.remove(txnId);
}
public Xid[] recover() {
List<Future<SerializableCollection>> futures = invokeRecoverOperations();
Set<SerializableXID> xidSet = new HashSet<SerializableXID>();
for (Future<SerializableCollection> future : futures) {
try {
final SerializableCollection collectionWrapper = future.get(RECOVER_TIMEOUT, TimeUnit.MILLISECONDS);
for (Data data : collectionWrapper) {
final RecoveredTransaction rt = (RecoveredTransaction) nodeEngine.toObject(data);
final SerializableXID xid = rt.getXid();
TransactionImpl tx = new TransactionImpl(this, nodeEngine, rt.getTxnId(), rt.getTxLogs(),
rt.getTimeoutMillis(), rt.getStartTime(), rt.getCallerUuid());
tx.setXid(xid);
xidSet.add(xid);
managedTransactions.put(xid, tx);
}
} catch (MemberLeftException e) {
logger.warning("Member left while recovering: " + e);
} catch (Throwable e) {
if (e instanceof ExecutionException) {
e = e.getCause() != null ? e.getCause() : e;
}
if (e instanceof TargetNotMemberException) {
nodeEngine.getLogger(Transaction.class).warning("Member left while recovering: " + e);
} else {
throw ExceptionUtil.rethrow(e);
}
}
}
final Set<RecoveredTransaction> localSet = recoverLocal();
for (RecoveredTransaction rt : localSet) {
xidSet.add(rt.getXid());
}
return xidSet.toArray(new Xid[xidSet.size()]);
}
private List<Future<SerializableCollection>> invokeRecoverOperations() {
final OperationService operationService = nodeEngine.getOperationService();
final ClusterService clusterService = nodeEngine.getClusterService();
final Collection<MemberImpl> memberList = clusterService.getMemberList();
List<Future<SerializableCollection>> futures
= new ArrayList<Future<SerializableCollection>>(memberList.size() - 1);
for (MemberImpl member : memberList) {
if (member.localMember()) {
continue;
}
final Future f = operationService.createInvocationBuilder(TransactionManagerServiceImpl.SERVICE_NAME,
new RecoverTxnOperation(), member.getAddress()).invoke();
futures.add(f);
}
return futures;
}
public Set<RecoveredTransaction> recoverLocal() {
Set<RecoveredTransaction> recovered = new HashSet<RecoveredTransaction>();
if (!txBackupLogs.isEmpty()) {
final Set<Map.Entry<String, TxBackupLog>> entries = txBackupLogs.entrySet();
final Iterator<Map.Entry<String, TxBackupLog>> iter = entries.iterator();
while (iter.hasNext()) {
final Map.Entry<String, TxBackupLog> entry = iter.next();
final TxBackupLog log = entry.getValue();
final String txnId = entry.getKey();
if (log.state == State.NO_TXN && log.xid != null) {
final RecoveredTransaction rt = new RecoveredTransaction();
rt.setTxLogs(log.txLogs);
rt.setXid(log.xid);
rt.setCallerUuid(log.callerUuid);
rt.setStartTime(log.startTime);
rt.setTimeoutMillis(log.timeoutMillis);
rt.setTxnId(txnId);
recovered.add(rt);
iter.remove();
}
}
}
return recovered;
}
private static final class TxBackupLog {
private final List<TransactionLog> txLogs;
private final String callerUuid;
private final long timeoutMillis;
private final long startTime;
private final SerializableXID xid;
private volatile State state;
private TxBackupLog(List<TransactionLog> txLogs, String callerUuid, State state, long timeoutMillis,
long startTime, SerializableXID xid) {
this.txLogs = txLogs;
this.callerUuid = callerUuid;
this.state = state;
this.timeoutMillis = timeoutMillis;
this.startTime = startTime;
this.xid = xid;
}
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_transaction_impl_TransactionManagerServiceImpl.java
|
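A minimal usage sketch for the executeTransaction flow above, written against the public Hazelcast API; the map name "demo" and the TWO_PHASE option are illustrative. On success the task's result is returned after commit; any thrown exception triggers rollback, as the catch block in the record shows.
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.TransactionalMap;
import com.hazelcast.transaction.TransactionOptions;
import com.hazelcast.transaction.TransactionalTask;
import com.hazelcast.transaction.TransactionalTaskContext;
public final class TxExample {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        TransactionOptions options = new TransactionOptions()
                .setTransactionType(TransactionOptions.TransactionType.TWO_PHASE);
        // Commits on success, rolls back on any exception, mirroring the flow above.
        String value = hz.executeTransaction(options, new TransactionalTask<String>() {
            @Override
            public String execute(TransactionalTaskContext context) {
                TransactionalMap<String, String> map = context.getMap("demo");
                map.put("k", "v");
                return map.get("k");
            }
        });
        System.out.println(value);
        hz.shutdown();
    }
}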
15 |
{
OutgoingMessageHolder temporaryOutgoing = new OutgoingMessageHolder();
@Override
public void run()
{
lock.writeLock().lock();
try
{
// Lock timeouts while we are processing the message
synchronized ( timeouts )
{
StateMachine stateMachine = stateMachines.get( message.getMessageType().getClass() );
if ( stateMachine == null )
{
                        return; // No StateMachine registered for this MessageType - ignore it
}
stateMachine.handle( message, temporaryOutgoing );
Message<? extends MessageType> tempMessage;
while ((tempMessage = temporaryOutgoing.nextOutgoingMessage()) != null)
{
outgoing.offer( tempMessage );
}
// Process and send messages
// Allow state machines to send messages to each other as well in this loop
Message<? extends MessageType> outgoingMessage;
List<Message<? extends MessageType>> toSend = new LinkedList<Message<? extends MessageType>>();
try
{
while ( ( outgoingMessage = outgoing.nextOutgoingMessage() ) != null )
{
message.copyHeadersTo( outgoingMessage, CONVERSATION_ID, CREATED_BY );
for ( MessageProcessor outgoingProcessor : outgoingProcessors )
{
try
{
if ( !outgoingProcessor.process( outgoingMessage ) )
{
break;
}
}
catch ( Throwable e )
{
logger.warn( "Outgoing message processor threw exception", e );
}
}
if ( outgoingMessage.hasHeader( Message.TO ) )
{
outgoingMessage.setHeader( Message.INSTANCE_ID, instanceIdHeaderValue );
toSend.add( outgoingMessage );
}
else
{
// Deliver internally if possible
StateMachine internalStatemachine = stateMachines.get( outgoingMessage.getMessageType()
.getClass() );
if ( internalStatemachine != null )
{
internalStatemachine.handle( (Message) outgoingMessage, temporaryOutgoing );
while ((tempMessage = temporaryOutgoing.nextOutgoingMessage()) != null)
{
outgoing.offer( tempMessage );
}
}
}
}
if ( !toSend.isEmpty() ) // the check is necessary, sender may not have started yet
{
sender.process( toSend );
}
}
catch ( Exception e )
{
logger.warn( "Error processing message " + message, e );
}
}
}
finally
{
lock.writeLock().unlock();
}
// Before returning, process delayed executions so that they are done before returning
// This will effectively trigger all notifications created by contexts
executor.drain();
}
} );
| 1no label
|
enterprise_cluster_src_main_java_org_neo4j_cluster_StateMachines.java
|
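A stripped-down sketch of the drain-until-empty pump in the record above: handling one message may enqueue follow-up messages, so processing loops until the outgoing queue is exhausted. Routing by message type and header copying are omitted, and Handler is a hypothetical callback, not part of the original class.
import java.util.ArrayDeque;
import java.util.Queue;
public final class MessagePump<M> {
    public interface Handler<T> {
        void handle(T message, Queue<T> outgoing);
    }
    private final Queue<M> outgoing = new ArrayDeque<M>();
    public void process(M incoming, Handler<M> handler) {
        handler.handle(incoming, outgoing);
        M next;
        while ((next = outgoing.poll()) != null) {
            handler.handle(next, outgoing); // may enqueue further messages
        }
    }
}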
888 |
threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
Tuple<String, Long>[] context1 = scrollId.getContext();
for (int i = 0; i < context1.length; i++) {
Tuple<String, Long> target = context1[i];
DiscoveryNode node = nodes.get(target.v1());
if (node != null && nodes.localNodeId().equals(node.id())) {
executeQueryPhase(i, counter, node, target.v2());
}
}
}
});
| 1no label
|
src_main_java_org_elasticsearch_action_search_type_TransportSearchScrollQueryThenFetchAction.java
|
108 |
public static class Name {
public static final String Rules = "PageImpl_Rules_Tab";
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_domain_PageImpl.java
|
491 |
client.getClientExecutionService().executeInternal(new Runnable() {
@Override
public void run() {
for (MembershipListener listener : listeners.values()) {
listener.memberAttributeChanged(event);
}
}
});
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_spi_impl_ClientClusterServiceImpl.java
|
3,259 |
public class MapPermission extends InstancePermission {
    // Each action must map to a distinct bit so masks can be OR-ed without overlap;
    // the previous literals (0x16, 0x32, 0x64, 0x128, 0x256) reused lower bits.
    private static final int PUT = 0x4;
    private static final int REMOVE = 0x8;
    private static final int READ = 0x10;
    private static final int LISTEN = 0x20;
    private static final int LOCK = 0x40;
    private static final int INDEX = 0x80;
    private static final int INTERCEPT = 0x100;
private static final int ALL = CREATE | DESTROY | PUT | REMOVE | READ | LISTEN | LOCK | INDEX | INTERCEPT;
public MapPermission(String name, String... actions) {
super(name, actions);
}
@Override
protected int initMask(String[] actions) {
int mask = NONE;
for (String action : actions) {
if (ActionConstants.ACTION_ALL.equals(action)) {
return ALL;
}
if (ActionConstants.ACTION_CREATE.equals(action)) {
mask |= CREATE;
} else if (ActionConstants.ACTION_DESTROY.equals(action)) {
mask |= DESTROY;
} else if (ActionConstants.ACTION_PUT.equals(action)) {
mask |= PUT;
} else if (ActionConstants.ACTION_REMOVE.equals(action)) {
mask |= REMOVE;
} else if (ActionConstants.ACTION_READ.equals(action)) {
mask |= READ;
} else if (ActionConstants.ACTION_LISTEN.equals(action)) {
mask |= LISTEN;
} else if (ActionConstants.ACTION_LOCK.equals(action)) {
mask |= LOCK;
} else if (ActionConstants.ACTION_INDEX.equals(action)) {
mask |= INDEX;
} else if (ActionConstants.ACTION_INTERCEPT.equals(action)) {
mask |= INTERCEPT;
}
}
return mask;
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_security_permission_MapPermission.java
|
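A small sketch of how such a permission would be exercised, assuming InstancePermission (not shown in the record) implements implies() as a name match plus mask containment:
public final class MapPermissionDemo {
    public static void main(String[] args) {
        // "put" and "read" select the PUT and READ bits via initMask(...) above.
        MapPermission granted = new MapPermission("orders", "put", "read");
        MapPermission requested = new MapPermission("orders", "read");
        // Assuming mask-containment semantics, the narrower permission is implied.
        System.out.println(granted.implies(requested)); // expected: true
    }
}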
145 |
public interface StructuredContentItemCriteria extends QuantityBasedRule {
/**
     * Returns the parent <code>StructuredContent</code> item to which this
     * criteria belongs.
*
* @return
*/
@Nonnull
public StructuredContent getStructuredContent();
/**
* Sets the parent <code>StructuredContent</code> item.
* @param structuredContent
*/
public void setStructuredContent(@Nonnull StructuredContent structuredContent);
/**
* Builds a copy of this item. Used by the content management system when an
* item is edited.
*
* @return a copy of this item
*/
@Nonnull
public StructuredContentItemCriteria cloneEntity();
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_domain_StructuredContentItemCriteria.java
|
113 |
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
| 0true
|
src_main_java_jsr166e_ForkJoinPool.java
|
93 |
class ConvertToClassProposal extends AbstractLinkedMode implements ICompletionProposal {
private final Tree.ObjectDefinition node;
public ConvertToClassProposal(Tree.ObjectDefinition node,
CeylonEditor editor) {
super(editor);
this.node = node;
}
@Override
public Point getSelection(IDocument doc) {
return null;
}
@Override
public Image getImage() {
return node.getDeclarationModel().isShared() ?
CeylonLabelProvider.CLASS :
CeylonLabelProvider.LOCAL_CLASS;
}
@Override
public String getDisplayString() {
return "Convert " + node.getDeclarationModel().getName() + " to class";
}
@Override
public IContextInformation getContextInformation() {
return null;
}
@Override
public String getAdditionalProposalInfo() {
return null;
}
@Override
public void apply(IDocument doc) {
Value declaration = node.getDeclarationModel();
String name = declaration.getName();
String initialName = Character.toUpperCase(name.charAt(0))+name.substring(1);
TextChange change = new DocumentChange("Convert to Class", doc);
change.setEdit(new MultiTextEdit());
Tree.ObjectDefinition od = (Tree.ObjectDefinition) node;
int dstart = ((CommonToken) od.getMainToken()).getStartIndex();
change.addEdit(new ReplaceEdit(dstart, 6, "class"));
int start = od.getIdentifier().getStartIndex();
int length = od.getIdentifier().getStopIndex()-start+1;
change.addEdit(new ReplaceEdit(start, length, initialName + "()"));
int offset = od.getStopIndex()+1;
//TODO: handle actual object declarations
String mods = declaration.isShared() ? "shared " : "";
String ws = getDefaultLineDelimiter(doc) + getIndent(od, doc);
String impl = " = " + initialName + "();";
String dec = ws + mods + initialName + " " + name;
change.addEdit(new InsertEdit(offset, dec + impl));
try {
change.perform(new NullProgressMonitor());
LinkedPositionGroup group = new LinkedPositionGroup();
group.addPosition(new LinkedPosition(doc, start-1, length, 0));
group.addPosition(new LinkedPosition(doc, offset+ws.length()+mods.length()+1, length, 1));
group.addPosition(new LinkedPosition(doc, offset+dec.length()+4, length, 2));
linkedModeModel.addGroup(group);
enterLinkedMode(doc, -1, start-1);
openPopup();
}
catch (Exception e) {
throw new RuntimeException(e);
}
}
public static void addConvertToClassProposal(Collection<ICompletionProposal> proposals,
Tree.Declaration declaration, CeylonEditor editor) {
if (declaration instanceof Tree.ObjectDefinition) {
ConvertToClassProposal prop =
new ConvertToClassProposal((ObjectDefinition) declaration, editor);
proposals.add(prop);
}
}
@Override
protected String getHintTemplate() {
return "Enter name for new class {0}";
}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_ConvertToClassProposal.java
|
2,953 |
public final class ReflectionHelper {
private static final ClassLoader THIS_CL = ReflectionHelper.class.getClassLoader();
private static final ConcurrentMap<String, Getter> GETTER_CACHE = new ConcurrentHashMap<String, Getter>(1000);
private static final int INITIAL_CAPACITY = 3;
private ReflectionHelper() {
}
public static AttributeType getAttributeType(Class klass) {
if (klass == String.class) {
return AttributeType.STRING;
} else if (klass == int.class || klass == Integer.class) {
return AttributeType.INTEGER;
} else if (klass == long.class || klass == Long.class) {
return AttributeType.LONG;
} else if (klass == boolean.class || klass == Boolean.class) {
return AttributeType.BOOLEAN;
} else if (klass == double.class || klass == Double.class) {
return AttributeType.DOUBLE;
} else if (klass == BigDecimal.class) {
return AttributeType.BIG_DECIMAL;
} else if (klass == BigInteger.class) {
return AttributeType.BIG_INTEGER;
} else if (klass == float.class || klass == Float.class) {
return AttributeType.FLOAT;
} else if (klass == byte.class || klass == Byte.class) {
return AttributeType.BYTE;
} else if (klass == char.class || klass == Character.class) {
return AttributeType.CHAR;
} else if (klass == Timestamp.class) {
return AttributeType.SQL_TIMESTAMP;
} else if (klass == java.sql.Date.class) {
return AttributeType.SQL_DATE;
} else if (klass == Date.class) {
return AttributeType.DATE;
} else if (klass.isEnum()) {
return AttributeType.ENUM;
}
return null;
}
public static void reset() {
GETTER_CACHE.clear();
}
public static AttributeType getAttributeType(QueryableEntry entry, String attribute) {
return getAttributeType(createGetter(entry, attribute).getReturnType());
}
private static Getter createGetter(QueryableEntry entry, String attribute) {
Object obj;
if (attribute.startsWith(KEY_ATTRIBUTE_NAME)) {
obj = entry.getKey();
if (attribute.length() > KEY_ATTRIBUTE_NAME.length()) {
attribute = attribute.substring(KEY_ATTRIBUTE_NAME.length() + 1);
}
} else {
obj = entry.getValue();
}
Class clazz = obj.getClass();
final String cacheKey = clazz.getName() + ":" + attribute;
Getter getter = GETTER_CACHE.get(cacheKey);
if (getter != null) {
return getter;
}
try {
Getter parent = null;
List<String> possibleMethodNames = new ArrayList<String>(INITIAL_CAPACITY);
for (final String name : attribute.split("\\.")) {
Getter localGetter = null;
possibleMethodNames.clear();
possibleMethodNames.add(name);
final String camelName = Character.toUpperCase(name.charAt(0)) + name.substring(1);
possibleMethodNames.add("get" + camelName);
possibleMethodNames.add("is" + camelName);
if (name.equals(THIS_ATTRIBUTE_NAME)) {
localGetter = new ThisGetter(parent, obj);
} else {
for (String methodName : possibleMethodNames) {
try {
final Method method = clazz.getMethod(methodName);
method.setAccessible(true);
localGetter = new MethodGetter(parent, method);
clazz = method.getReturnType();
break;
} catch (NoSuchMethodException ignored) {
}
}
if (localGetter == null) {
try {
final Field field = clazz.getField(name);
localGetter = new FieldGetter(parent, field);
clazz = field.getType();
} catch (NoSuchFieldException ignored) {
}
}
if (localGetter == null) {
Class c = clazz;
while (!Object.class.equals(c)) {
try {
final Field field = c.getDeclaredField(name);
field.setAccessible(true);
localGetter = new FieldGetter(parent, field);
clazz = field.getType();
break;
} catch (NoSuchFieldException ignored) {
c = c.getSuperclass();
}
}
}
}
if (localGetter == null) {
throw new IllegalArgumentException("There is no suitable accessor for '"
+ name + "' on class '" + clazz + "'");
}
parent = localGetter;
}
getter = parent;
if (getter.isCacheable()) {
Getter foundGetter = GETTER_CACHE.putIfAbsent(cacheKey, getter);
if (foundGetter != null) {
getter = foundGetter;
}
}
return getter;
} catch (Throwable e) {
throw new QueryException(e);
}
}
public static Comparable extractValue(QueryEntry queryEntry, String attributeName, Object object) throws Exception {
return (Comparable) createGetter(queryEntry, attributeName).getValue(object);
}
private abstract static class Getter {
protected final Getter parent;
public Getter(final Getter parent) {
this.parent = parent;
}
abstract Object getValue(Object obj) throws Exception;
abstract Class getReturnType();
abstract boolean isCacheable();
}
static class MethodGetter extends Getter {
final Method method;
MethodGetter(Getter parent, Method method) {
super(parent);
this.method = method;
}
Object getValue(Object obj) throws Exception {
obj = parent != null ? parent.getValue(obj) : obj;
return obj != null ? method.invoke(obj) : null;
}
Class getReturnType() {
return this.method.getReturnType();
}
@Override
boolean isCacheable() {
return THIS_CL.equals(method.getDeclaringClass().getClassLoader());
}
@Override
public String toString() {
return "MethodGetter [parent=" + parent + ", method=" + method.getName() + "]";
}
}
static class FieldGetter extends Getter {
final Field field;
FieldGetter(Getter parent, Field field) {
super(parent);
this.field = field;
}
@Override
Object getValue(Object obj) throws Exception {
obj = parent != null ? parent.getValue(obj) : obj;
return obj != null ? field.get(obj) : null;
}
@Override
Class getReturnType() {
return this.field.getType();
}
@Override
boolean isCacheable() {
return THIS_CL.equals(field.getDeclaringClass().getClassLoader());
}
@Override
public String toString() {
return "FieldGetter [parent=" + parent + ", field=" + field + "]";
}
}
static class ThisGetter extends Getter {
final Object object;
public ThisGetter(final Getter parent, Object object) {
super(parent);
this.object = object;
}
@Override
Object getValue(Object obj) throws Exception {
return obj;
}
@Override
Class getReturnType() {
return this.object.getClass();
}
@Override
boolean isCacheable() {
return false;
}
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_query_impl_ReflectionHelper.java
|
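A self-contained sketch of the accessor-resolution strategy above for dotted attribute paths: for each hop, try the raw name, then the get/is JavaBean variants. The Person/Address beans and the "address.city" path are hypothetical; caching and the field fallback are omitted.
import java.lang.reflect.Method;
public final class GetterChainDemo {
    static class Address { public String getCity() { return "Oslo"; } }
    static class Person  { public Address getAddress() { return new Address(); } }
    public static void main(String[] args) throws Exception {
        System.out.println(read(new Person(), "address.city")); // prints Oslo
    }
    public static Object read(Object root, String attributePath) throws Exception {
        Object current = root;
        for (String name : attributePath.split("\\.")) {
            String camel = Character.toUpperCase(name.charAt(0)) + name.substring(1);
            Method m = findMethod(current.getClass(), name, "get" + camel, "is" + camel);
            if (m == null) {
                throw new IllegalArgumentException("No accessor for '" + name + "'");
            }
            current = m.invoke(current);
            if (current == null) {
                return null; // null-safe, like the Getter chain above
            }
        }
        return current;
    }
    private static Method findMethod(Class<?> c, String... names) {
        for (String n : names) {
            try {
                return c.getMethod(n);
            } catch (NoSuchMethodException ignored) {
            }
        }
        return null;
    }
}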
38 |
public class TransactionalTitanGraphTestSuite extends TransactionalGraphTestSuite {
public TransactionalTitanGraphTestSuite(final GraphTest graphTest) {
super(graphTest);
}
@Override
public void testCompetingThreads() {
TitanBlueprintsGraph graph = (TitanBlueprintsGraph) graphTest.generateGraph();
//Need to define types before hand to avoid deadlock in transactions
TitanManagement mgmt = graph.getManagementSystem();
mgmt.makeEdgeLabel("friend").make();
mgmt.makePropertyKey("test").dataType(Long.class).make();
mgmt.makePropertyKey("blah").dataType(Float.class).make();
mgmt.makePropertyKey("bloop").dataType(Integer.class).make();
mgmt.commit();
graph.shutdown();
super.testCompetingThreads();
}
}
| 0true
|
titan-test_src_main_java_com_thinkaurelius_titan_blueprints_TransactionalTitanGraphTestSuite.java
|
14 |
final class DescendingKeyIterator extends AbstractEntryIterator<K, V, K> {
DescendingKeyIterator(final OMVRBTreeEntry<K, V> first) {
super(first);
}
public K next() {
return prevEntry().getKey();
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_collection_OMVRBTree.java
|
275 |
public class JMSEmailServiceProducerImpl implements JMSEmailServiceProducer {
private JmsTemplate emailServiceTemplate;
private Destination emailServiceDestination;
public void send(@SuppressWarnings("rawtypes") final HashMap props) {
emailServiceTemplate.send(emailServiceDestination, new MessageCreator() {
public Message createMessage(Session session) throws JMSException {
ObjectMessage message = session.createObjectMessage(props);
EmailInfo info = (EmailInfo) props.get(EmailPropertyType.INFO.getType());
message.setJMSPriority(Integer.parseInt(info.getSendAsyncPriority()));
return message;
}
});
}
/**
* @return the emailServiceTemplate
*/
public JmsTemplate getEmailServiceTemplate() {
return emailServiceTemplate;
}
/**
* @param emailServiceTemplate the emailServiceTemplate to set
*/
public void setEmailServiceTemplate(JmsTemplate emailServiceTemplate) {
this.emailServiceTemplate = emailServiceTemplate;
}
/**
* @return the emailServiceDestination
*/
public Destination getEmailServiceDestination() {
return emailServiceDestination;
}
/**
* @param emailServiceDestination the emailServiceDestination to set
*/
public void setEmailServiceDestination(Destination emailServiceDestination) {
this.emailServiceDestination = emailServiceDestination;
}
}
| 1no label
|
common_src_main_java_org_broadleafcommerce_common_email_service_jms_JMSEmailServiceProducerImpl.java
|
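A hedged wiring sketch for the producer above; in practice the JmsTemplate and Destination come from Spring configuration, and the map key would be EmailPropertyType.INFO.getType() rather than the literal stand-in used here.
import java.util.HashMap;
import javax.jms.Destination;
import org.springframework.jms.core.JmsTemplate;
public final class EmailProducerWiring {
    // Sketch only: template and destination would normally be injected by Spring.
    public static void send(JmsTemplate template, Destination destination, Object emailInfo) {
        JMSEmailServiceProducerImpl producer = new JMSEmailServiceProducerImpl();
        producer.setEmailServiceTemplate(template);
        producer.setEmailServiceDestination(destination);
        HashMap props = new HashMap();
        // "info" stands in for EmailPropertyType.INFO.getType(), the key the
        // producer reads to set the JMS priority.
        props.put("info", emailInfo);
        producer.send(props);
    }
}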
522 |
public class BLCMapUtils {
/**
* Given a collection of values and a TypedClosure that maps an appropriate key for a given value,
* returns a HashMap of the key to the value.
*
* <b>Note: If two values share the same key, the later one will override the previous one in the returned map</b>
* @see #keyedListMap(Iterable, TypedClosure)
*
* List<V> --> Map<K, V>
*
* @param values
* @param closure
* @return the map
*/
public static <K, CV extends Iterable<V>, V> Map<K, V> keyedMap(CV values, TypedClosure<K, V> closure) {
Map<K, V> map = new HashMap<K, V>();
for (V value : values) {
K key = closure.getKey(value);
map.put(key, value);
}
return map;
}
/**
* Given an array of values and a TypedClosure that maps an appropriate key for a given value,
* returns a HashMap of the key to the value.
*
* <b>Note: If two values share the same key, the later one will override the previous one in the returned map</b>
* @see #keyedListMap(Iterable, TypedClosure)
*
* V[] --> Map<K, V>
*
* @param values
* @param closure
* @return the map
*/
public static <K, V> Map<K, V> keyedMap(V[] values, TypedClosure<K, V> closure) {
Map<K, V> map = new HashMap<K, V>();
if (values != null) {
for (V value : values) {
K key = closure.getKey(value);
map.put(key, value);
}
}
return map;
}
/**
* Given a collection of values and a TypedClosure that maps an appropriate key for a given value,
* returns a HashMap of the key to a list of values that map to that key.
*
* @see #keyedMap(Iterable, TypedClosure)
*
* List<V> --> Map<K, List<V>>
*
* @param values
* @param closure
* @return the map
*/
public static <K, CV extends Iterable<V>, V> Map<K, List<V>> keyedListMap(CV values, TypedClosure<K, V> closure) {
Map<K, List<V>> map = new HashMap<K, List<V>>();
for (V value : values) {
K key = closure.getKey(value);
List<V> list = map.get(key);
if (list == null) {
list = new ArrayList<V>();
map.put(key, list);
}
list.add(value);
}
return map;
}
public static <K, V> Map<K, V> valueSortedMap(Map<K, V> map, Comparator<Entry<K, V>> comparator) {
Set<Entry<K, V>> valueSortedEntries = new TreeSet<Entry<K, V>>(comparator);
for (Entry<K, V> entry : map.entrySet()) {
valueSortedEntries.add(entry);
}
Map<K, V> sortedMap = new LinkedHashMap<K, V>(map.size());
for (Entry<K, V> entry : valueSortedEntries) {
sortedMap.put(entry.getKey(), entry.getValue());
}
return sortedMap;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_util_BLCMapUtils.java
|
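A minimal usage sketch for keyedMap, assuming TypedClosure declares a single K getKey(V value) method, as the record's code implies: index strings by their length.
import java.util.Arrays;
import java.util.List;
import java.util.Map;
public final class KeyedMapDemo {
    public static void main(String[] args) {
        List<String> words = Arrays.asList("a", "bb", "cc", "ddd");
        Map<Integer, String> byLength = BLCMapUtils.keyedMap(words, new TypedClosure<Integer, String>() {
            @Override
            public Integer getKey(String value) {
                return value.length();
            }
        });
        // "cc" overrides "bb" because both map to key 2, per the note above.
        System.out.println(byLength); // {1=a, 2=cc, 3=ddd}
    }
}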
216 |
public class OConsoleDatabaseListener implements ODatabaseListener {
OConsoleDatabaseApp console;
public OConsoleDatabaseListener(OConsoleDatabaseApp console) {
this.console = console;
}
public void onCreate(ODatabase iDatabase) {
}
public void onDelete(ODatabase iDatabase) {
}
public void onOpen(ODatabase iDatabase) {
}
public void onBeforeTxBegin(ODatabase iDatabase) {
}
public void onBeforeTxRollback(ODatabase iDatabase) {
}
public void onAfterTxRollback(ODatabase iDatabase) {
}
public void onBeforeTxCommit(ODatabase iDatabase) {
}
public void onAfterTxCommit(ODatabase iDatabase) {
}
public void onClose(ODatabase iDatabase) {
}
public boolean onCorruptionRepairDatabase(ODatabase iDatabase, final String iProblem, String iWhatWillbeFixed) {
final String answer = console.ask("\nDatabase seems corrupted:\n> " + iProblem + "\nAuto-repair will execute this action:\n> "
+ iWhatWillbeFixed + "\n\nDo you want to repair it (Y/n)? ");
return answer.length() == 0 || answer.equalsIgnoreCase("Y") || answer.equalsIgnoreCase("Yes");
}
}
| 0true
|
tools_src_main_java_com_orientechnologies_orient_console_OConsoleDatabaseListener.java
|
5,386 |
public class InternalSum extends MetricsAggregation.SingleValue implements Sum {
public final static Type TYPE = new Type("sum");
public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
@Override
public InternalSum readResult(StreamInput in) throws IOException {
InternalSum result = new InternalSum();
result.readFrom(in);
return result;
}
};
public static void registerStreams() {
AggregationStreams.registerStream(STREAM, TYPE.stream());
}
private double sum;
InternalSum() {} // for serialization
InternalSum(String name, double sum) {
super(name);
this.sum = sum;
}
@Override
public double value() {
return sum;
}
public double getValue() {
return sum;
}
@Override
public Type type() {
return TYPE;
}
@Override
public InternalSum reduce(ReduceContext reduceContext) {
List<InternalAggregation> aggregations = reduceContext.aggregations();
if (aggregations.size() == 1) {
return (InternalSum) aggregations.get(0);
}
InternalSum reduced = null;
for (InternalAggregation aggregation : aggregations) {
if (reduced == null) {
reduced = (InternalSum) aggregation;
} else {
reduced.sum += ((InternalSum) aggregation).sum;
}
}
if (reduced != null) {
return reduced;
}
return (InternalSum) aggregations.get(0);
}
@Override
public void readFrom(StreamInput in) throws IOException {
name = in.readString();
valueFormatter = ValueFormatterStreams.readOptional(in);
sum = in.readDouble();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
ValueFormatterStreams.writeOptional(valueFormatter, out);
out.writeDouble(sum);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(name);
builder.field(CommonFields.VALUE, sum);
if (valueFormatter != null) {
builder.field(CommonFields.VALUE_AS_STRING, valueFormatter.format(sum));
}
builder.endObject();
return builder;
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_metrics_sum_InternalSum.java
|
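The reduce method above folds shard-level partial sums into the first aggregation in the list. The same accumulation pattern in isolation, with plain arrays standing in for InternalSum instances:
import java.util.Arrays;
import java.util.List;
public final class SumReduceDemo {
    public static void main(String[] args) {
        List<double[]> partials = Arrays.asList(new double[]{1.5}, new double[]{2.5}, new double[]{4.0});
        double[] reduced = null;
        for (double[] partial : partials) {
            if (reduced == null) {
                reduced = partial;      // reuse the first partial as the accumulator
            } else {
                reduced[0] += partial[0];
            }
        }
        System.out.println(reduced[0]); // 8.0
    }
}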
564 |
public class Hbm2DDLExporterTask extends ExporterTask {
boolean exportToDatabase = true;
boolean scriptToConsole = true;
boolean schemaUpdate = false;
String delimiter = ";";
boolean drop = false;
boolean create = true;
boolean format = false;
private boolean haltOnError = false;
public Hbm2DDLExporterTask(HibernateToolTask parent) {
super(parent);
}
public String getName() {
return "hbm2ddl (Generates database schema)";
}
protected Exporter configureExporter(Exporter exp) {
Hbm2DDLExporter exporter = (Hbm2DDLExporter) exp;
exporter.setExport(exportToDatabase);
exporter.setConsole(scriptToConsole);
exporter.setUpdate(schemaUpdate);
exporter.setDelimiter(delimiter);
exporter.setDrop(drop);
exporter.setCreate(create);
exporter.setFormat(format);
exporter.setOutputFileName(outputFileName);
exporter.setHaltonerror(haltOnError);
return exporter;
}
protected Exporter createExporter() {
Hbm2DDLExporter exporter = new Hbm2DDLExporter(getConfiguration(), getDestdir());
return exporter;
}
public void setExport(boolean export) {
exportToDatabase = export;
}
/**
* Run SchemaUpdate instead of SchemaExport
*/
public void setUpdate(boolean update) {
this.schemaUpdate = update;
}
/**
	 * Output SQL to the console? (default: true)
*/
public void setConsole(boolean console) {
this.scriptToConsole = console;
}
/**
	 * Format the generated SQL
*/
public void setFormat(boolean format) {
this.format = format;
}
public void setDrop(boolean drop) {
this.drop = drop;
}
public void setCreate(boolean create) {
this.create = create;
}
public void setDelimiter(String delimiter) {
this.delimiter = delimiter;
}
public String getDelimiter() {
return delimiter;
}
public void setHaltonerror(boolean haltOnError) {
this.haltOnError = haltOnError;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_util_sql_Hbm2DDLExporterTask.java
|
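A hedged sketch of configuring the task programmatically: the Ant attributes (update, console, format, delimiter) map one-to-one onto these setters, and parent stands for an already-configured, hypothetical HibernateToolTask instance.
public final class DdlTaskWiring {
    public static Hbm2DDLExporterTask configure(HibernateToolTask parent) {
        Hbm2DDLExporterTask task = new Hbm2DDLExporterTask(parent);
        task.setUpdate(true);    // run SchemaUpdate instead of SchemaExport
        task.setConsole(false);  // suppress SQL echo to the console
        task.setFormat(true);    // pretty-print the generated SQL
        task.setDelimiter(";");
        return task;
    }
}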
116 |
public class NullPageDTO extends PageDTO {
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_dto_NullPageDTO.java
|
1,511 |
public class FilterRoutingTests extends ElasticsearchAllocationTestCase {
private final ESLogger logger = Loggers.getLogger(FilterRoutingTests.class);
@Test
public void testClusterFilters() {
AllocationService strategy = createAllocationService(settingsBuilder()
.put("cluster.routing.allocation.include.tag1", "value1,value2")
.put("cluster.routing.allocation.exclude.tag1", "value3,value4")
.build());
logger.info("Building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("--> adding four nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1", ImmutableMap.of("tag1", "value1")))
.put(newNode("node2", ImmutableMap.of("tag1", "value2")))
.put(newNode("node3", ImmutableMap.of("tag1", "value3")))
.put(newNode("node4", ImmutableMap.of("tag1", "value4")))
).build();
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
logger.info("--> start the shards (primaries)");
routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logger.info("--> start the shards (replicas)");
routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logger.info("--> make sure shards are only allocated on tag1 with value1 and value2");
List<MutableShardRouting> startedShards = clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED);
assertThat(startedShards.size(), equalTo(4));
for (MutableShardRouting startedShard : startedShards) {
assertThat(startedShard.currentNodeId(), Matchers.anyOf(equalTo("node1"), equalTo("node2")));
}
}
@Test
public void testIndexFilters() {
AllocationService strategy = createAllocationService(settingsBuilder()
.build());
logger.info("Building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").settings(settingsBuilder()
.put("index.number_of_shards", 2)
.put("index.number_of_replicas", 1)
.put("index.routing.allocation.include.tag1", "value1,value2")
.put("index.routing.allocation.exclude.tag1", "value3,value4")
.build()))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("--> adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1", ImmutableMap.of("tag1", "value1")))
.put(newNode("node2", ImmutableMap.of("tag1", "value2")))
.put(newNode("node3", ImmutableMap.of("tag1", "value3")))
.put(newNode("node4", ImmutableMap.of("tag1", "value4")))
).build();
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
logger.info("--> start the shards (primaries)");
routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logger.info("--> start the shards (replicas)");
routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logger.info("--> make sure shards are only allocated on tag1 with value1 and value2");
List<MutableShardRouting> startedShards = clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED);
assertThat(startedShards.size(), equalTo(4));
for (MutableShardRouting startedShard : startedShards) {
assertThat(startedShard.currentNodeId(), Matchers.anyOf(equalTo("node1"), equalTo("node2")));
}
logger.info("--> switch between value2 and value4, shards should be relocating");
metaData = MetaData.builder()
.put(IndexMetaData.builder("test").settings(settingsBuilder()
.put("index.number_of_shards", 2)
.put("index.number_of_replicas", 1)
.put("index.routing.allocation.include.tag1", "value1,value4")
.put("index.routing.allocation.exclude.tag1", "value2,value3")
.build()))
.build();
clusterState = ClusterState.builder(clusterState).metaData(metaData).build();
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(2));
logger.info("--> finish relocation");
routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
startedShards = clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED);
assertThat(startedShards.size(), equalTo(4));
for (MutableShardRouting startedShard : startedShards) {
assertThat(startedShard.currentNodeId(), Matchers.anyOf(equalTo("node1"), equalTo("node4")));
}
}
}
| 0true
|
src_test_java_org_elasticsearch_cluster_routing_allocation_FilterRoutingTests.java
|
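The tests above assert on allocation filters built from settings. A small helper showing the same include/exclude keys via the public settings builder of that Elasticsearch era; the tag names and values are the test's own:
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;
public final class FilterSettingsDemo {
    // Illustrative: builds the same allocation filters the test above asserts on.
    public static Settings allocationFilters() {
        return ImmutableSettings.settingsBuilder()
                .put("cluster.routing.allocation.include.tag1", "value1,value2")
                .put("cluster.routing.allocation.exclude.tag1", "value3,value4")
                .build();
    }
}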
1,270 |
public class OStorageLocal extends OStorageLocalAbstract {
private final int DELETE_MAX_RETRIES;
private final int DELETE_WAIT_TIME;
private final Map<String, OCluster> clusterMap = new LinkedHashMap<String, OCluster>();
private OCluster[] clusters = new OCluster[0];
private ODataLocal[] dataSegments = new ODataLocal[0];
private final OStorageLocalTxExecuter txManager;
private String storagePath;
private final OStorageVariableParser variableParser;
private int defaultClusterId = -1;
private static String[] ALL_FILE_EXTENSIONS = { "ocf", ".och", ".ocl", ".oda", ".odh", ".otx", ".ocs",
".oef", ".oem", OWriteAheadLog.MASTER_RECORD_EXTENSION, OWriteAheadLog.WAL_SEGMENT_EXTENSION,
OLocalHashTableIndexEngine.BUCKET_FILE_EXTENSION, OLocalHashTableIndexEngine.METADATA_FILE_EXTENSION,
OLocalHashTableIndexEngine.TREE_FILE_EXTENSION, OSBTreeIndexEngine.DATA_FILE_EXTENSION };
private long positionGenerator = 1;
private OModificationLock modificationLock = new OModificationLock();
private final Set<String> clustersToSyncImmediately = new HashSet<String>();
public OStorageLocal(final String iName, final String iFilePath, final String iMode) throws IOException {
super(iName, iFilePath, iMode);
File f = new File(url);
if (f.exists() || !exists(f.getParent())) {
// ALREADY EXISTS OR NOT LEGACY
storagePath = OSystemVariableResolver.resolveSystemVariables(OFileUtils.getPath(new File(url).getPath()));
} else {
// LEGACY DB
storagePath = OSystemVariableResolver.resolveSystemVariables(OFileUtils.getPath(new File(url).getParent()));
}
storagePath = OIOUtils.getPathFromDatabaseName(storagePath);
variableParser = new OStorageVariableParser(storagePath);
configuration = new OStorageConfigurationSegment(this);
txManager = new OStorageLocalTxExecuter(this, configuration.txSegment);
DELETE_MAX_RETRIES = OGlobalConfiguration.FILE_MMAP_FORCE_RETRY.getValueAsInteger();
DELETE_WAIT_TIME = OGlobalConfiguration.FILE_MMAP_FORCE_DELAY.getValueAsInteger();
final String[] clustersToSync = OGlobalConfiguration.NON_TX_CLUSTERS_SYNC_IMMEDIATELY.getValueAsString().trim()
.split("\\s*,\\s*");
clustersToSyncImmediately.addAll(Arrays.asList(clustersToSync));
installProfilerHooks();
long diskCacheSize = OGlobalConfiguration.DISK_CACHE_SIZE.getValueAsLong() * 1024 * 1024;
long writeCacheSize = (long) Math.floor((((double) OGlobalConfiguration.DISK_WRITE_CACHE_PART.getValueAsInteger()) / 100.0)
* diskCacheSize);
long readCacheSize = diskCacheSize - writeCacheSize;
diskCache = new OReadWriteDiskCache(name, readCacheSize, writeCacheSize,
OGlobalConfiguration.DISK_CACHE_PAGE_SIZE.getValueAsInteger() * 1024,
OGlobalConfiguration.DISK_WRITE_CACHE_PAGE_TTL.getValueAsLong() * 1000,
OGlobalConfiguration.DISK_WRITE_CACHE_PAGE_FLUSH_INTERVAL.getValueAsInteger(), this, null, false, true);
}
public synchronized void open(final String iUserName, final String iUserPassword, final Map<String, Object> iProperties) {
final long timer = Orient.instance().getProfiler().startChrono();
lock.acquireExclusiveLock();
try {
addUser();
if (status != STATUS.CLOSED)
// ALREADY OPENED: THIS IS THE CASE WHEN A STORAGE INSTANCE IS
// REUSED
return;
if (!exists())
throw new OStorageException("Cannot open the storage '" + name + "' because it does not exist in path: " + url);
status = STATUS.OPEN;
// OPEN BASIC SEGMENTS
int pos;
pos = registerDataSegment(new OStorageDataConfiguration(configuration, OStorage.DATA_DEFAULT_NAME, 0, getStoragePath()));
dataSegments[pos].open();
addDefaultClusters();
// REGISTER DATA SEGMENT
for (int i = 0; i < configuration.dataSegments.size(); ++i) {
final OStorageDataConfiguration dataConfig = configuration.dataSegments.get(i);
if (dataConfig == null)
continue;
pos = registerDataSegment(dataConfig);
if (pos == -1) {
// CLOSE AND REOPEN TO BE SURE ALL THE FILE SEGMENTS ARE
// OPENED
dataSegments[i].close();
dataSegments[i] = new ODataLocal(this, dataConfig, i);
dataSegments[i].open();
} else
dataSegments[pos].open();
}
// REGISTER CLUSTER
for (int i = 0; i < configuration.clusters.size(); ++i) {
final OStorageClusterConfiguration clusterConfig = configuration.clusters.get(i);
if (clusterConfig != null) {
pos = createClusterFromConfig(clusterConfig);
try {
if (pos == -1) {
// CLOSE AND REOPEN TO BE SURE ALL THE FILE SEGMENTS ARE
// OPENED
if (clusters[i] != null && clusters[i] instanceof OClusterLocal)
clusters[i].close();
clusters[i] = Orient.instance().getClusterFactory().createCluster(OClusterLocal.TYPE);
clusters[i].configure(this, clusterConfig);
clusterMap.put(clusters[i].getName(), clusters[i]);
clusters[i].open();
} else {
if (clusterConfig.getName().equals(CLUSTER_DEFAULT_NAME))
defaultClusterId = pos;
clusters[pos].open();
}
} catch (FileNotFoundException e) {
OLogManager.instance().warn(
this,
"Error on loading cluster '" + clusters[i].getName() + "' (" + i
+ "): file not found. It will be excluded from current database '" + getName() + "'.");
clusterMap.remove(clusters[i].getName());
clusters[i] = null;
}
} else {
clusters = Arrays.copyOf(clusters, clusters.length + 1);
clusters[i] = null;
}
}
if (OGlobalConfiguration.USE_WAL.getValueAsBoolean())
writeAheadLog = new OWriteAheadLog(this);
txManager.open();
} catch (Exception e) {
close(true);
throw new OStorageException("Cannot open local storage '" + url + "' with mode=" + mode, e);
} finally {
lock.releaseExclusiveLock();
Orient.instance().getProfiler().stopChrono("db." + name + ".open", "Open a database", timer, "db.*.open");
}
}
public ODiskCache getDiskCache() {
return diskCache;
}
private void addDefaultClusters() throws IOException {
createClusterFromConfig(new OStoragePhysicalClusterConfigurationLocal(configuration, clusters.length, 0,
OMetadataDefault.CLUSTER_INTERNAL_NAME));
configuration.load();
createClusterFromConfig(new OStoragePhysicalClusterConfigurationLocal(configuration, clusters.length, 0,
OMetadataDefault.CLUSTER_INDEX_NAME));
createClusterFromConfig(new OStoragePhysicalClusterConfigurationLocal(configuration, clusters.length, 0,
OMetadataDefault.CLUSTER_MANUAL_INDEX_NAME));
defaultClusterId = createClusterFromConfig(new OStoragePhysicalClusterConfigurationLocal(configuration, clusters.length, 0,
CLUSTER_DEFAULT_NAME));
}
public void create(final Map<String, Object> iProperties) {
final long timer = Orient.instance().getProfiler().startChrono();
lock.acquireExclusiveLock();
try {
if (status != STATUS.CLOSED)
throw new OStorageException("Cannot create new storage '" + name + "' because it is not closed (status:" + status + ")");
addUser();
final File storageFolder = new File(storagePath);
if (!storageFolder.exists())
storageFolder.mkdir();
if (exists())
throw new OStorageException("Cannot create new storage '" + name + "' because it already exists");
status = STATUS.OPEN;
addDataSegment(OStorage.DATA_DEFAULT_NAME);
addDataSegment(OMetadataDefault.DATASEGMENT_INDEX_NAME);
// ADD THE METADATA CLUSTER TO STORE INTERNAL STUFF
addCluster(OStorage.CLUSTER_TYPE.PHYSICAL.toString(), OMetadataDefault.CLUSTER_INTERNAL_NAME, null, null, true);
// ADD THE INDEX CLUSTER TO STORE, BY DEFAULT, ALL THE RECORDS OF INDEXING IN THE INDEX DATA SEGMENT
addCluster(OStorage.CLUSTER_TYPE.PHYSICAL.toString(), OMetadataDefault.CLUSTER_INDEX_NAME, null,
OMetadataDefault.DATASEGMENT_INDEX_NAME, true);
// ADD THE INDEX CLUSTER TO STORE, BY DEFAULT, ALL THE RECORDS OF INDEXING
addCluster(OStorage.CLUSTER_TYPE.PHYSICAL.toString(), OMetadataDefault.CLUSTER_MANUAL_INDEX_NAME, null, null, true);
// ADD THE DEFAULT CLUSTER
defaultClusterId = addCluster(OStorage.CLUSTER_TYPE.PHYSICAL.toString(), CLUSTER_DEFAULT_NAME, null, null, false);
configuration.create();
writeAheadLog = new OWriteAheadLog(this);
txManager.create();
} catch (OStorageException e) {
close();
throw e;
} catch (IOException e) {
close();
throw new OStorageException("Error on creation of storage '" + name + "'", e);
} finally {
lock.releaseExclusiveLock();
Orient.instance().getProfiler().stopChrono("db." + name + ".create", "Create a database", timer, "db.*.create");
}
}
public void reload() {
}
public boolean exists() {
return exists(storagePath);
}
private boolean exists(String path) {
return new File(path + "/" + OStorage.DATA_DEFAULT_NAME + ".0" + ODataLocal.DEF_EXTENSION).exists();
}
@Override
public void close(final boolean iForce) {
final long timer = Orient.instance().getProfiler().startChrono();
lock.acquireExclusiveLock();
try {
if (!checkForClose(iForce))
return;
status = STATUS.CLOSING;
for (OCluster cluster : clusters)
if (cluster != null)
cluster.close();
clusters = new OCluster[0];
clusterMap.clear();
for (ODataLocal data : dataSegments)
if (data != null)
data.close();
dataSegments = new ODataLocal[0];
txManager.close();
if (configuration != null)
configuration.close();
level2Cache.shutdown();
OMMapManagerLocator.getInstance().flush();
super.close(iForce);
uninstallProfilerHooks();
if (diskCache != null)
diskCache.close();
if (writeAheadLog != null)
writeAheadLog.delete();
Orient.instance().unregisterStorage(this);
status = STATUS.CLOSED;
} catch (IOException e) {
OLogManager.instance().error(this, "Error on closing of storage '" + name, e, OStorageException.class);
} finally {
lock.releaseExclusiveLock();
Orient.instance().getProfiler().stopChrono("db." + name + ".close", "Close a database", timer, "db.*.close");
}
}
/**
   * Physically deletes all the database files (those ending with ".och", ".ocl", ".oda", ".odh", ".otx"). Also tries to delete
   * the container folder if the directory is empty. If files are locked, it retries up to DELETE_MAX_RETRIES times before
   * raising an exception.
*/
public void delete() {
// CLOSE THE DATABASE BY REMOVING THE CURRENT USER
if (status != STATUS.CLOSED) {
if (getUsers() > 0) {
while (removeUser() > 0)
;
}
}
close(true);
try {
Orient.instance().unregisterStorage(this);
} catch (Exception e) {
OLogManager.instance().error(this, "Cannot unregister storage", e);
}
final long timer = Orient.instance().getProfiler().startChrono();
// GET REAL DIRECTORY
File dbDir = new File(OIOUtils.getPathFromDatabaseName(OSystemVariableResolver.resolveSystemVariables(url)));
if (!dbDir.exists() || !dbDir.isDirectory())
dbDir = dbDir.getParentFile();
lock.acquireExclusiveLock();
try {
if (diskCache != null)
diskCache.delete();
// RETRIES
for (int i = 0; i < DELETE_MAX_RETRIES; ++i) {
if (dbDir.exists() && dbDir.isDirectory()) {
int notDeletedFiles = 0;
// TRY TO DELETE ALL THE FILES
File[] files = dbDir.listFiles();
if (files != null) {
for (File f : files) {
// DELETE ONLY THE SUPPORTED FILES
for (String ext : ALL_FILE_EXTENSIONS)
if (f.getPath().endsWith(ext)) {
if (!f.delete()) {
notDeletedFiles++;
}
break;
}
}
}
if (notDeletedFiles == 0) {
// TRY TO DELETE ALSO THE DIRECTORY IF IT'S EMPTY
dbDir.delete();
return;
}
} else
return;
OLogManager
.instance()
.debug(
this,
"Cannot delete database files because they are still locked by the OrientDB process: waiting %d ms and retrying %d/%d...",
DELETE_WAIT_TIME, i, DELETE_MAX_RETRIES);
// FORCE FINALIZATION TO COLLECT ALL THE PENDING BUFFERS
OMemoryWatchDog.freeMemoryForResourceCleanup(DELETE_WAIT_TIME);
}
throw new OStorageException("Cannot delete database '" + name + "' located in: " + dbDir + ". Database files seem locked");
} catch (IOException ioe) {
throw new OStorageException("Cannot delete database '" + name + "' located in: " + dbDir + ".", ioe);
} finally {
lock.releaseExclusiveLock();
Orient.instance().getProfiler().stopChrono("db." + name + ".drop", "Drop a database", timer, "db.*.drop");
}
}
public boolean check(final boolean iVerbose, final OCommandOutputListener iListener) {
int errors = 0;
int warnings = 0;
lock.acquireSharedLock();
try {
      long totalRecords = 0;
final long start = System.currentTimeMillis();
formatMessage(iVerbose, iListener, "\nChecking database '" + getName() + "'...\n");
formatMessage(iVerbose, iListener, "\n(1) Checking data-clusters. This activity checks if pointers to data are coherent.");
final OPhysicalPosition ppos = new OPhysicalPosition();
// BROWSE ALL THE CLUSTERS
for (OCluster c : clusters) {
if (!(c instanceof OClusterLocal))
continue;
formatMessage(iVerbose, iListener, "\n- data-cluster #%-5d %s -> ", c.getId(), c.getName());
// BROWSE ALL THE RECORDS
for (final OClusterEntryIterator it = c.absoluteIterator(); it.hasNext();) {
final OPhysicalPosition physicalPosition = it.next();
          totalRecords++;
try {
if (physicalPosition.dataSegmentId >= dataSegments.length) {
formatMessage(iVerbose, iListener, "WARN: Found wrong data segment %d ", physicalPosition.dataSegmentId);
warnings++;
}
if (physicalPosition.recordSize < 0) {
formatMessage(iVerbose, iListener, "WARN: Found wrong record size %d ", physicalPosition.recordSize);
warnings++;
}
if (physicalPosition.recordSize >= 1000000) {
formatMessage(iVerbose, iListener, "WARN: Found suspected big record size %d. Is it corrupted? ",
physicalPosition.recordSize);
warnings++;
}
if (physicalPosition.dataSegmentPos > dataSegments[physicalPosition.dataSegmentId].getFilledUpTo()) {
formatMessage(iVerbose, iListener, "WARN: Found wrong pointer to data chunk %d out of data segment size (%d) ",
physicalPosition.dataSegmentPos, dataSegments[physicalPosition.dataSegmentId].getFilledUpTo());
warnings++;
}
if (physicalPosition.recordVersion.isTombstone() && (c instanceof OClusterLocal)) {
// CHECK IF THE HOLE EXISTS
boolean found = false;
int tot = ((OClusterLocal) c).holeSegment.getHoles();
for (int i = 0; i < tot; ++i) {
final long recycledPosition = ((OClusterLocal) c).holeSegment.getEntryPosition(i) / OClusterLocal.RECORD_SIZE;
if (recycledPosition == physicalPosition.clusterPosition.longValue()) {
// FOUND
found = true;
break;
}
}
if (!found) {
formatMessage(iVerbose, iListener, "WARN: Cannot find hole for deleted record %d:%d ", c.getId(),
physicalPosition.clusterPosition);
warnings++;
}
}
} catch (IOException e) {
formatMessage(iVerbose, iListener, "WARN: Error while reading record #%d:%d ", e, c.getId(), ppos.clusterPosition);
warnings++;
}
}
if (c instanceof OClusterLocal) {
final int totalHoles = ((OClusterLocal) c).holeSegment.getHoles();
if (totalHoles > 0) {
formatMessage(iVerbose, iListener, " [found " + totalHoles + " hole(s)]");
// CHECK HOLES
for (int i = 0; i < totalHoles; ++i) {
long recycledPosition = -1;
try {
ppos.clusterPosition = OClusterPositionFactory.INSTANCE.valueOf(((OClusterLocal) c).holeSegment.getEntryPosition(i)
/ OClusterLocal.RECORD_SIZE);
OPhysicalPosition physicalPosition = c.getPhysicalPosition(ppos);
if (physicalPosition != null && !physicalPosition.recordVersion.isTombstone()) {
formatMessage(iVerbose, iListener,
"WARN: Found wrong hole %d/%d for deleted record %d:%d. The record seems good ", i, totalHoles - 1,
c.getId(), recycledPosition);
warnings++;
}
} catch (Exception e) {
formatMessage(iVerbose, iListener, "WARN: Found wrong hole %d/%d for deleted record %d:%d. The record not exists ",
i, totalHoles - 1, c.getId(), recycledPosition);
warnings++;
}
}
}
}
formatMessage(iVerbose, iListener, "OK");
}
int totalChunks = 0;
formatMessage(iVerbose, iListener,
"\n\n(2) Checking data chunks integrity. In this phase data segments are scanned to check the back reference into the clusters.");
for (ODataLocal d : dataSegments) {
if (d == null)
continue;
formatMessage(iVerbose, iListener, "\n- data-segment %s (id=%d) size=%d/%d...", d.getName(), d.getId(), d.getFilledUpTo(),
d.getSize(), d.getHoles());
int nextPos = 0;
// GET DATA-SEGMENT HOLES
final List<ODataHoleInfo> holes = d.getHolesList();
if (iVerbose) {
formatMessage(iVerbose, iListener, "\n-- found %d holes:", holes.size());
for (ODataHoleInfo hole : holes)
formatMessage(iVerbose, iListener, "\n--- hole #%-7d offset=%-10d size=%-7d", hole.holeOffset, hole.dataOffset,
hole.size);
}
// CHECK CHUNKS
formatMessage(iVerbose, iListener, "\n-- checking chunks:");
int pos;
do {
try {
pos = nextPos;
            // SEARCH WHETHER THE RECORD FALLS INSIDE A HOLE
ODataHoleInfo foundHole = null;
for (ODataHoleInfo hole : holes) {
if (hole.dataOffset == pos) {
// HOLE FOUND!
foundHole = hole;
break;
}
}
int recordSize = d.getRecordSize(pos);
formatMessage(iVerbose, iListener, "\n--- chunk #%-7d offset=%-10d size=%-7d -> ", totalChunks, pos, recordSize);
if (recordSize < 0) {
recordSize *= -1;
// HOLE: CHECK HOLE PRESENCE
if (foundHole != null) {
if (foundHole.size != recordSize) {
formatMessage(iVerbose, iListener,
"WARN: Chunk %s:%d (offset=%d size=%d) differs in size with the hole size %d ", d.getName(), totalChunks,
pos, recordSize, foundHole.size);
warnings++;
}
nextPos = pos + foundHole.size;
} else {
formatMessage(iVerbose, iListener, "WARN: Chunk %s:%d (offset=%d size=%d) has no hole for deleted chunk ",
d.getName(), totalChunks, pos, recordSize);
warnings++;
nextPos = pos + recordSize;
}
} else {
if (foundHole != null) {
formatMessage(
iVerbose,
iListener,
"WARN: Chunk %s:%d (offset=%d size=%d) it's between the holes (hole #%d) even if has no negative recordSize. Jump the content ",
d.getName(), totalChunks, pos, recordSize, foundHole.holeOffset);
warnings++;
nextPos = pos + foundHole.size;
} else {
// REGULAR DATA CHUNK
nextPos = pos + ODataLocal.RECORD_FIX_SIZE + recordSize;
final byte[] buffer = d.getRecord(pos);
if (buffer.length != recordSize) {
formatMessage(iVerbose, iListener,
"WARN: Chunk %s:%d (offset=%d size=%d) has wrong record size because the record length is %d ", d.getName(),
totalChunks, pos, recordSize, buffer.length);
warnings++;
}
final ORecordId rid = d.getRecordRid(pos);
if (!rid.isValid()) {
formatMessage(iVerbose, iListener, "WARN: Chunk %s:%d (offset=%d size=%d) points to invalid RID %s ",
d.getName(), totalChunks, pos, recordSize, rid);
warnings++;
} else {
if (rid.clusterId >= clusters.length) {
formatMessage(
iVerbose,
iListener,
"WARN: Chunk %s:%d (offset=%d size=%d) has invalid RID because points to %s but configured clusters are %d in total ",
d.getName(), totalChunks, pos, recordSize, rid, clusters.length);
warnings++;
} else if (clusters[rid.clusterId] == null) {
formatMessage(
iVerbose,
iListener,
"WARN: Chunk %s:%d (offset=%d size=%d) has invalid RID because points to %s but the cluster %d not exists ",
d.getName(), totalChunks, pos, recordSize, rid, rid.clusterId);
warnings++;
} else {
ppos.clusterPosition = rid.clusterPosition;
clusters[rid.clusterId].getPhysicalPosition(ppos);
if (ppos.dataSegmentId != d.getId()) {
formatMessage(
iVerbose,
iListener,
"WARN: Chunk %s:%d (offset=%d size=%d) point to the RID %d but it doesn't point to current data segment %d but to %d ",
d.getName(), totalChunks, pos, recordSize, rid, d.getId(), ppos.dataSegmentId);
warnings++;
}
if (ppos.dataSegmentPos != pos) {
formatMessage(
iVerbose,
iListener,
"WARN: Chunk %s:%d (offset=%d size=%d) point to the RID %d but it doesn't point to current chunk %d but to %d ",
d.getName(), totalChunks, pos, recordSize, rid, ppos.dataSegmentPos, pos);
warnings++;
}
}
}
}
}
totalChunks++;
formatMessage(iVerbose, iListener, "OK");
} catch (Exception e) {
iListener.onMessage("ERROR: " + e.toString());
// OLogManager.instance().warn(this, "ERROR: Chunk %s:%d (offset=%d) error: %s", e, d.getName(),
// totalChunks, pos, e.toString());
errors++;
}
} while (nextPos < d.getFilledUpTo());
formatMessage(iVerbose, iListener, "\n");
}
iListener.onMessage("\nCheck of database completed in " + (System.currentTimeMillis() - start)
+ "ms:\n- Total records checked: " + totalRecors + "\n- Total chunks checked.: " + totalChunks
+ "\n- Warnings.............: " + warnings + "\n- Errors...............: " + errors + "\n");
} finally {
lock.releaseSharedLock();
}
return errors == 0;
}
public ODataLocal getDataSegmentById(final int iDataSegmentId) {
checkOpeness();
lock.acquireSharedLock();
try {
if (iDataSegmentId >= dataSegments.length)
throw new IllegalArgumentException("Data segment #" + iDataSegmentId + " does not exist in database '" + name + "'");
return dataSegments[iDataSegmentId];
} finally {
lock.releaseSharedLock();
}
}
public int getDataSegmentIdByName(final String iDataSegmentName) {
if (iDataSegmentName == null)
return 0;
checkOpeness();
lock.acquireSharedLock();
try {
for (ODataLocal d : dataSegments) {
if (d != null && d.getName().equalsIgnoreCase(iDataSegmentName))
return d.getId();
}
throw new IllegalArgumentException("Data segment '" + iDataSegmentName + "' does not exist in database '" + name + "'");
} finally {
lock.releaseSharedLock();
}
}
/**
* Adds a new data segment in the default segment directory, with a filename equal to the segment name.
*/
public int addDataSegment(final String iDataSegmentName) {
return addDataSegment(iDataSegmentName, null);
}
public int addDataSegment(String iSegmentName, final String iDirectory) {
checkOpeness();
iSegmentName = iSegmentName.toLowerCase();
lock.acquireExclusiveLock();
try {
final OStorageDataConfiguration conf = new OStorageDataConfiguration(configuration, iSegmentName, -1, iDirectory);
final int pos = registerDataSegment(conf);
if (pos == -1)
throw new OConfigurationException("Cannot add segment " + conf.name + " because it is already part of storage '" + name
+ "'");
dataSegments[pos].create(-1);
// UPDATE CONFIGURATION
conf.id = pos;
if (pos == configuration.dataSegments.size())
configuration.dataSegments.add(conf);
else
configuration.dataSegments.set(pos, conf);
configuration.update();
return pos;
} catch (Throwable e) {
OLogManager.instance().error(this, "Error on creation of new data segment '" + iSegmentName + "' in: " + iDirectory, e,
OStorageException.class);
return -1;
} finally {
lock.releaseExclusiveLock();
}
}
/**
* Add a new cluster into the storage. Type can be: "physical" or "logical".
*/
public int addCluster(final String iClusterType, String iClusterName, final String iLocation, final String iDataSegmentName,
boolean forceListBased, final Object... iParameters) {
checkOpeness();
lock.acquireExclusiveLock();
try {
final OCluster cluster;
if (iClusterName != null) {
iClusterName = iClusterName.toLowerCase();
// FIND THE FIRST AVAILABLE CLUSTER ID
int clusterPos = clusters.length;
for (int i = 0; i < clusters.length; ++i)
if (clusters[i] == null) {
clusterPos = i;
break;
}
cluster = Orient.instance().getClusterFactory().createCluster(iClusterType);
cluster.configure(this, clusterPos, iClusterName, iLocation, getDataSegmentIdByName(iDataSegmentName), iParameters);
} else
cluster = null;
final int clusterId = registerCluster(cluster);
if (cluster != null) {
cluster.create(-1);
configuration.update();
}
return clusterId;
} catch (Exception e) {
OLogManager.instance().exception("Error in creation of new cluster '" + iClusterName + "' of type: " + iClusterType, e,
OStorageException.class);
} finally {
lock.releaseExclusiveLock();
}
return -1;
}
public int addCluster(String iClusterType, String iClusterName, int iRequestedId, String iLocation, String iDataSegmentName,
boolean forceListBased, Object... iParameters) {
throw new UnsupportedOperationException("This operation is unsupported for " + getType()
+ " storage. If you are doing import please use parameter -preserveClusterIDs=false .");
}
public ODataLocal[] getDataSegments() {
return dataSegments;
}
public OStorageLocalTxExecuter getTxManager() {
return txManager;
}
public boolean dropCluster(final int iClusterId, final boolean iTruncate) {
lock.acquireExclusiveLock();
try {
if (iClusterId < 0 || iClusterId >= clusters.length)
throw new IllegalArgumentException("Cluster id '" + iClusterId + "' is outside the of range of configured clusters (0-"
+ (clusters.length - 1) + ") in database '" + name + "'");
final OCluster cluster = clusters[iClusterId];
if (cluster == null)
return false;
getLevel2Cache().freeCluster(iClusterId);
if (iTruncate)
cluster.truncate();
cluster.delete();
clusterMap.remove(cluster.getName());
clusters[iClusterId] = null;
// UPDATE CONFIGURATION
configuration.dropCluster(iClusterId);
return true;
} catch (Exception e) {
OLogManager.instance().exception("Error while removing cluster '" + iClusterId + "'", e, OStorageException.class);
} finally {
lock.releaseExclusiveLock();
}
return false;
}
public boolean dropDataSegment(final String iName) {
lock.acquireExclusiveLock();
try {
final int id = getDataSegmentIdByName(iName);
final ODataLocal data = dataSegments[id];
if (data == null)
return false;
data.drop();
dataSegments[id] = null;
// UPDATE CONFIGURATION
configuration.dropDataSegment(id);
return true;
} catch (Exception e) {
OLogManager.instance().exception("Error while removing data segment '" + iName + "'", e, OStorageException.class);
} finally {
lock.releaseExclusiveLock();
}
return false;
}
public long count(final int[] iClusterIds) {
return count(iClusterIds, false);
}
@Override
public long count(int[] iClusterIds, boolean countTombstones) {
checkOpeness();
lock.acquireSharedLock();
try {
long tot = 0;
for (int iClusterId : iClusterIds) {
if (iClusterId >= clusters.length)
throw new OConfigurationException("Cluster id " + iClusterId + " was not found in database '" + name + "'");
if (iClusterId > -1) {
final OCluster c = clusters[iClusterId];
if (c != null)
tot += c.getEntries() - (countTombstones ? 0L : c.getTombstonesCount());
}
}
return tot;
} finally {
lock.releaseSharedLock();
}
}
public OClusterPosition[] getClusterDataRange(final int iClusterId) {
if (iClusterId == -1)
return new OClusterPosition[] { OClusterPosition.INVALID_POSITION, OClusterPosition.INVALID_POSITION };
checkOpeness();
lock.acquireSharedLock();
try {
return clusters[iClusterId] != null ? new OClusterPosition[] { clusters[iClusterId].getFirstPosition(),
clusters[iClusterId].getLastPosition() } : new OClusterPosition[0];
} catch (IOException ioe) {
throw new OStorageException("Can not retrieve information about data range", ioe);
} finally {
lock.releaseSharedLock();
}
}
public long count(final int iClusterId) {
return count(iClusterId, false);
}
@Override
public long count(int iClusterId, boolean countTombstones) {
if (iClusterId == -1)
throw new OStorageException("Cluster Id " + iClusterId + " is invalid in database '" + name + "'");
// COUNT PHYSICAL CLUSTER IF ANY
checkOpeness();
lock.acquireSharedLock();
try {
final OCluster cluster = clusters[iClusterId];
if (cluster == null)
return 0;
if (countTombstones)
return cluster.getEntries();
return cluster.getEntries() - cluster.getTombstonesCount();
} finally {
lock.releaseSharedLock();
}
}
public OStorageOperationResult<OPhysicalPosition> createRecord(int iDataSegmentId, final ORecordId iRid, final byte[] iContent,
final ORecordVersion iRecordVersion, final byte iRecordType, final int iMode, ORecordCallback<OClusterPosition> iCallback) {
checkOpeness();
final OCluster cluster = getClusterById(iRid.clusterId);
final ODataLocal dataSegment = getDataSegmentById(iDataSegmentId);
final OPhysicalPosition ppos;
modificationLock.requestModificationLock();
try {
lock.acquireExclusiveLock();
try {
if (txManager.isCommitting()) {
ppos = txManager.createRecord(txManager.getCurrentTransaction().getId(), dataSegment, cluster, iRid, iContent,
iRecordVersion, iRecordType);
} else {
ppos = createRecord(dataSegment, cluster, iContent, iRecordType, iRid, iRecordVersion);
if (OGlobalConfiguration.NON_TX_RECORD_UPDATE_SYNCH.getValueAsBoolean()
|| clustersToSyncImmediately.contains(cluster.getName()))
synchRecordUpdate(cluster, ppos);
if (iCallback != null)
iCallback.call(iRid, ppos.clusterPosition);
}
} finally {
lock.releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
return new OStorageOperationResult<OPhysicalPosition>(ppos);
}
public boolean updateReplica(final int dataSegmentId, final ORecordId rid, final byte[] content,
final ORecordVersion recordVersion, final byte recordType) throws IOException {
if (rid.isNew())
throw new OStorageException("Passed record with id " + rid + " is new and can not be treated as replica.");
checkOpeness();
final OCluster cluster = getClusterById(rid.clusterId);
final ODataLocal dataSegment = getDataSegmentById(dataSegmentId);
modificationLock.requestModificationLock();
try {
lock.acquireExclusiveLock();
try {
lockManager.acquireLock(Thread.currentThread(), rid, LOCK.EXCLUSIVE);
try {
OPhysicalPosition ppos = cluster.getPhysicalPosition(new OPhysicalPosition(rid.clusterPosition));
if (ppos == null) {
if (!cluster.isHashBased())
throw new OStorageException("Cluster with LH support is required.");
ppos = new OPhysicalPosition(rid.clusterPosition, recordVersion);
ppos.recordType = recordType;
ppos.dataSegmentId = dataSegment.getId();
if (!recordVersion.isTombstone()) {
ppos.dataSegmentPos = dataSegment.addRecord(rid, content);
}
cluster.addPhysicalPosition(ppos);
return true;
} else {
if (ppos.recordType != recordType)
throw new OStorageException("Record types of provided and stored replicas are different " + recordType + ":"
+ ppos.recordType + ".");
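// IF THE INCOMING REPLICA IS NEWER, APPLY IT. THE BRANCHES BELOW COVER THE LIVE/TOMBSTONE
// TRANSITIONS: LIVE->LIVE OVERWRITES THE CHUNK IN PLACE, TOMBSTONE->LIVE RE-ADDS THE CONTENT,
// LIVE->TOMBSTONE FREES THE DATA CHUNK, TOMBSTONE->TOMBSTONE ONLY BUMPS THE VERSION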
if (ppos.recordVersion.compareTo(recordVersion) < 0) {
cluster.updateVersion(ppos.clusterPosition, recordVersion);
if (!recordVersion.isTombstone() && !ppos.recordVersion.isTombstone()) {
ppos.dataSegmentPos = dataSegment.setRecord(ppos.dataSegmentPos, rid, content);
cluster.updateDataSegmentPosition(ppos.clusterPosition, dataSegmentId, ppos.dataSegmentPos);
} else if (!recordVersion.isTombstone() && ppos.recordVersion.isTombstone()) {
ppos.dataSegmentPos = dataSegment.addRecord(rid, content);
cluster.updateDataSegmentPosition(ppos.clusterPosition, dataSegmentId, ppos.dataSegmentPos);
} else if (recordVersion.isTombstone() && !ppos.recordVersion.isTombstone()) {
dataSegment.deleteRecord(ppos.dataSegmentPos);
}
return true;
}
}
} finally {
lockManager.releaseLock(Thread.currentThread(), rid, LOCK.EXCLUSIVE);
}
} finally {
lock.releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
return false;
}
@Override
public <V> V callInLock(Callable<V> iCallable, boolean iExclusiveLock) {
if (iExclusiveLock) {
modificationLock.requestModificationLock();
try {
return super.callInLock(iCallable, iExclusiveLock);
} finally {
modificationLock.releaseModificationLock();
}
} else {
return super.callInLock(iCallable, iExclusiveLock);
}
}
@Override
public <V> V callInRecordLock(Callable<V> callable, ORID rid, boolean exclusiveLock) {
if (exclusiveLock)
modificationLock.requestModificationLock();
try {
if (exclusiveLock)
lock.acquireExclusiveLock();
else
lock.acquireSharedLock();
try {
lockManager.acquireLock(Thread.currentThread(), rid, exclusiveLock ? LOCK.EXCLUSIVE : LOCK.SHARED);
try {
return callable.call();
} finally {
lockManager.releaseLock(Thread.currentThread(), rid, exclusiveLock ? LOCK.EXCLUSIVE : LOCK.SHARED);
}
} catch (RuntimeException e) {
throw e;
} catch (Exception e) {
throw new OException("Error on nested call in lock", e);
} finally {
if (exclusiveLock)
lock.releaseExclusiveLock();
else
lock.releaseSharedLock();
}
} finally {
if (exclusiveLock)
modificationLock.releaseModificationLock();
}
}
public OStorageOperationResult<ORawBuffer> readRecord(final ORecordId iRid, final String iFetchPlan, boolean iIgnoreCache,
ORecordCallback<ORawBuffer> iCallback, boolean loadTombstones) {
checkOpeness();
return new OStorageOperationResult<ORawBuffer>(readRecord(getClusterById(iRid.clusterId), iRid, true, loadTombstones));
}
public OStorageOperationResult<ORecordVersion> updateRecord(final ORecordId iRid, final byte[] iContent,
final ORecordVersion iVersion, final byte iRecordType, final int iMode, ORecordCallback<ORecordVersion> iCallback) {
checkOpeness();
modificationLock.requestModificationLock();
try {
lock.acquireExclusiveLock();
try {
final OCluster cluster = getClusterById(iRid.clusterId);
if (txManager.isCommitting()) {
return new OStorageOperationResult<ORecordVersion>(txManager.updateRecord(txManager.getCurrentTransaction().getId(),
cluster, iRid, iContent, iVersion, iRecordType));
} else {
final OPhysicalPosition ppos = updateRecord(cluster, iRid, iContent, iVersion, iRecordType);
if (ppos != null
&& (OGlobalConfiguration.NON_TX_RECORD_UPDATE_SYNCH.getValueAsBoolean() || clustersToSyncImmediately.contains(cluster
.getName())))
synchRecordUpdate(cluster, ppos);
final ORecordVersion returnValue = (ppos != null ? ppos.recordVersion : OVersionFactory.instance()
.createUntrackedVersion());
if (iCallback != null)
iCallback.call(iRid, returnValue);
return new OStorageOperationResult<ORecordVersion>(returnValue);
}
} finally {
lock.releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
public OStorageOperationResult<Boolean> deleteRecord(final ORecordId iRid, final ORecordVersion iVersion, final int iMode,
ORecordCallback<Boolean> iCallback) {
checkOpeness();
final OCluster cluster = getClusterById(iRid.clusterId);
modificationLock.requestModificationLock();
try {
lock.acquireExclusiveLock();
try {
if (txManager.isCommitting()) {
return new OStorageOperationResult<Boolean>(txManager.deleteRecord(txManager.getCurrentTransaction().getId(), cluster,
iRid.clusterPosition, iVersion));
} else {
final OPhysicalPosition ppos = deleteRecord(cluster, iRid, iVersion,
OGlobalConfiguration.STORAGE_USE_TOMBSTONES.getValueAsBoolean());
if (ppos != null
&& (OGlobalConfiguration.NON_TX_RECORD_UPDATE_SYNCH.getValueAsBoolean() || clustersToSyncImmediately.contains(cluster
.getName())))
synchRecordUpdate(cluster, ppos);
final boolean returnValue = ppos != null;
if (iCallback != null)
iCallback.call(iRid, returnValue);
return new OStorageOperationResult<Boolean>(returnValue);
}
} finally {
lock.releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
public Set<String> getClusterNames() {
checkOpeness();
lock.acquireSharedLock();
try {
return clusterMap.keySet();
} finally {
lock.releaseSharedLock();
}
}
public int getClusterIdByName(final String iClusterName) {
checkOpeness();
if (iClusterName == null)
throw new IllegalArgumentException("Cluster name is null");
if (iClusterName.length() == 0)
throw new IllegalArgumentException("Cluster name is empty");
if (Character.isDigit(iClusterName.charAt(0)))
return Integer.parseInt(iClusterName);
// SEARCH IT BETWEEN PHYSICAL CLUSTERS
lock.acquireSharedLock();
try {
final OCluster segment = clusterMap.get(iClusterName.toLowerCase());
if (segment != null)
return segment.getId();
} finally {
lock.releaseSharedLock();
}
return -1;
}
public String getClusterTypeByName(final String iClusterName) {
checkOpeness();
if (iClusterName == null)
throw new IllegalArgumentException("Cluster name is null");
// SEARCH IT BETWEEN PHYSICAL CLUSTERS
lock.acquireSharedLock();
try {
final OCluster segment = clusterMap.get(iClusterName.toLowerCase());
if (segment != null)
return segment.getType();
} finally {
lock.releaseSharedLock();
}
return null;
}
public void commit(final OTransaction iTx, Runnable callback) {
modificationLock.requestModificationLock();
try {
lock.acquireExclusiveLock();
try {
try {
startStorageTx(iTx);
txManager.clearLogEntries(iTx);
txManager.commitAllPendingRecords(iTx);
if (callback != null)
callback.run();
if (OGlobalConfiguration.TX_COMMIT_SYNCH.getValueAsBoolean())
synch();
endStorageTx();
} catch (Exception e) {
// WE NEED TO CALL ROLLBACK HERE, IN THE LOCK
OLogManager.instance().debug(this, "Error during transaction commit, transaction will be rolled back (tx-id=%d)", e,
iTx.getId());
rollback(iTx);
if (e instanceof OException)
throw ((OException) e);
else
throw new OStorageException("Error during transaction commit.", e);
} finally {
try {
txManager.clearLogEntries(iTx);
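// ONCE THE TX LOG ENTRIES ARE CLEARED THE WAL CONTENT IS NO LONGER NEEDED FOR THIS
// TRANSACTION, SO TRUNCATE IT UP TO ITS CURRENT END TO RECLAIM SPACE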
if (writeAheadLog != null)
writeAheadLog.shrinkTill(writeAheadLog.end());
} catch (Exception e) {
// XXX WHAT CAN WE DO HERE ? ROLLBACK IS NOT POSSIBLE
// IF WE THROW EXCEPTION, A ROLLBACK WILL BE DONE AT DB LEVEL BUT NOT AT STORAGE LEVEL
OLogManager.instance().error(this, "Clear tx log entries failed", e);
}
}
} finally {
transaction = null;
lock.releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
public void rollback(final OTransaction iTx) {
modificationLock.requestModificationLock();
try {
lock.acquireExclusiveLock();
try {
txManager.getTxSegment().rollback(iTx);
rollbackStorageTx();
if (OGlobalConfiguration.TX_COMMIT_SYNCH.getValueAsBoolean())
synch();
} catch (IOException ioe) {
OLogManager.instance().error(this,
"Error executing rollback for transaction with id '" + iTx.getId() + "' cause: " + ioe.getMessage(), ioe);
} finally {
transaction = null;
lock.releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
public void synch() {
checkOpeness();
final long timer = Orient.instance().getProfiler().startChrono();
lock.acquireExclusiveLock();
try {
for (OCluster cluster : clusters)
if (cluster != null)
cluster.synch();
for (ODataLocal data : dataSegments)
if (data != null)
data.synch();
if (configuration != null)
configuration.synch();
} catch (IOException e) {
throw new OStorageException("Error on synch storage '" + name + "'", e);
} finally {
lock.releaseExclusiveLock();
Orient.instance().getProfiler().stopChrono("db." + name + ".synch", "Synch a database", timer, "db.*.synch");
}
}
protected void synchRecordUpdate(final OCluster cluster, final OPhysicalPosition ppos) {
checkOpeness();
final long timer = Orient.instance().getProfiler().startChrono();
lock.acquireExclusiveLock();
try {
cluster.synch();
final ODataLocal data = getDataSegmentById(ppos.dataSegmentId);
data.synch();
if (configuration != null)
configuration.synch();
} catch (IOException e) {
throw new OStorageException("Error on synch storage '" + name + "'", e);
} finally {
lock.releaseExclusiveLock();
Orient.instance().getProfiler()
.stopChrono("db." + name + "record.synch", "Synch a record to database", timer, "db.*.record.synch");
}
}
/**
* Returns the list of holes of all data segments as {@link ODataHoleInfo} entries.
*
*/
public List<ODataHoleInfo> getHolesList() {
final List<ODataHoleInfo> holes = new ArrayList<ODataHoleInfo>();
lock.acquireSharedLock();
try {
for (ODataLocal d : dataSegments)
if (d != null)
holes.addAll(d.getHolesList());
return holes;
} finally {
lock.releaseSharedLock();
}
}
/**
* Returns the total number of holes.
*
*/
public long getHoles() {
lock.acquireSharedLock();
try {
long holes = 0;
for (ODataLocal d : dataSegments)
if (d != null)
holes += d.getHoles();
return holes;
} finally {
lock.releaseSharedLock();
}
}
/**
* Returns the total size used by holes
*
*/
public long getHoleSize() {
lock.acquireSharedLock();
try {
final List<ODataHoleInfo> holes = getHolesList();
long size = 0;
for (ODataHoleInfo h : holes)
if (h.dataOffset > -1 && h.size > 0)
size += h.size;
return size;
} finally {
lock.releaseSharedLock();
}
}
public void setDefaultClusterId(final int defaultClusterId) {
this.defaultClusterId = defaultClusterId;
}
public String getPhysicalClusterNameById(final int iClusterId) {
checkOpeness();
if (iClusterId < 0)
return null;
lock.acquireSharedLock();
try {
if (iClusterId >= clusters.length)
return null;
return clusters[iClusterId] != null ? clusters[iClusterId].getName() : null;
} finally {
lock.releaseSharedLock();
}
}
@Override
public OStorageConfiguration getConfiguration() {
return configuration;
}
public int getDefaultClusterId() {
return defaultClusterId;
}
public OCluster getClusterById(int iClusterId) {
lock.acquireSharedLock();
try {
if (iClusterId == ORID.CLUSTER_ID_INVALID)
// GET THE DEFAULT CLUSTER
iClusterId = defaultClusterId;
checkClusterSegmentIndexRange(iClusterId);
final OCluster cluster = clusters[iClusterId];
if (cluster == null)
throw new IllegalArgumentException("Cluster " + iClusterId + " is null");
return cluster;
} finally {
lock.releaseSharedLock();
}
}
@Override
public OCluster getClusterByName(final String iClusterName) {
lock.acquireSharedLock();
try {
final OCluster cluster = clusterMap.get(iClusterName.toLowerCase());
if (cluster == null)
throw new IllegalArgumentException("Cluster " + iClusterName + " does not exist in database '" + name + "'");
return cluster;
} finally {
lock.releaseSharedLock();
}
}
@Override
public String getURL() {
return OEngineLocal.NAME + ":" + url;
}
public long getSize() {
lock.acquireSharedLock();
try {
long size = 0;
for (OCluster c : clusters)
if (c != null)
size += c.getRecordsSize();
return size;
} catch (IOException ioe) {
throw new OStorageException("Can not calculate records size");
} finally {
lock.releaseSharedLock();
}
}
public String getStoragePath() {
return storagePath;
}
public String getMode() {
return mode;
}
public OStorageVariableParser getVariableParser() {
return variableParser;
}
public int getClusters() {
lock.acquireSharedLock();
try {
return clusterMap.size();
} finally {
lock.releaseSharedLock();
}
}
public Set<OCluster> getClusterInstances() {
final Set<OCluster> result = new HashSet<OCluster>();
lock.acquireSharedLock();
try {
// ADD ALL THE CLUSTERS
for (OCluster c : clusters)
if (c != null)
result.add(c);
} finally {
lock.releaseSharedLock();
}
return result;
}
/**
* Method that completes the cluster rename operation. <strong>IT WILL NOT RENAME A CLUSTER, IT JUST CHANGES THE NAME IN THE
* INTERNAL MAPPING</strong>
*/
public void renameCluster(final String iOldName, final String iNewName) {
clusterMap.put(iNewName, clusterMap.remove(iOldName));
}
protected int registerDataSegment(final OStorageDataConfiguration iConfig) throws IOException {
checkOpeness();
// CHECK FOR DUPLICATION OF NAMES
for (ODataLocal data : dataSegments)
if (data != null && data.getName().equals(iConfig.name)) {
// OVERWRITE CONFIG
data.config = iConfig;
return -1;
}
int pos = -1;
for (int i = 0; i < dataSegments.length; ++i)
if (dataSegments[i] == null) {
// RECYCLE POSITION
pos = i;
break;
}
if (pos == -1)
// ASSIGN LATEST
pos = dataSegments.length;
// CREATE AND ADD THE NEW REF SEGMENT
final ODataLocal segment = new ODataLocal(this, iConfig, pos);
if (pos == dataSegments.length)
dataSegments = OArrays.copyOf(dataSegments, dataSegments.length + 1);
dataSegments[pos] = segment;
return pos;
}
/**
* Creates the cluster by reading the configuration received as argument and registers it, assigning it the next serial id.
*
* @param iConfig
* A OStorageClusterConfiguration implementation, namely physical or logical
* @return The id (physical position into the array) of the new cluster just created. First is 0.
* @throws IOException
*/
private int createClusterFromConfig(final OStorageClusterConfiguration iConfig) throws IOException {
OCluster cluster = clusterMap.get(iConfig.getName());
if (cluster instanceof OClusterLocal && iConfig instanceof OStorageEHClusterConfiguration)
clusterMap.remove(iConfig.getName());
else if (cluster != null) {
if (cluster instanceof OClusterLocal) {
// ALREADY CONFIGURED, JUST OVERWRITE CONFIG
cluster.configure(this, iConfig);
}
return -1;
}
cluster = Orient.instance().getClusterFactory().createCluster(iConfig);
cluster.configure(this, iConfig);
return registerCluster(cluster);
}
/**
* Register the cluster internally.
*
* @param iCluster
* OCluster implementation
* @return The id (physical position into the array) of the new cluster just created. First is 0.
* @throws IOException
*/
private int registerCluster(final OCluster iCluster) throws IOException {
final int id;
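// A NULL CLUSTER JUST RESERVES THE NEXT ID SLOT IN THE ARRAY WITHOUT BINDING A NAME TO IT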
if (iCluster != null) {
// CHECK FOR DUPLICATION OF NAMES
if (clusterMap.containsKey(iCluster.getName()))
throw new OConfigurationException("Cannot add segment '" + iCluster.getName()
+ "' because it is already registered in database '" + name + "'");
// CREATE AND ADD THE NEW REF SEGMENT
clusterMap.put(iCluster.getName(), iCluster);
id = iCluster.getId();
} else
id = clusters.length;
clusters = OArrays.copyOf(clusters, clusters.length + 1);
clusters[id] = iCluster;
return id;
}
private void checkClusterSegmentIndexRange(final int iClusterId) {
if (iClusterId > clusters.length - 1)
throw new IllegalArgumentException("Cluster segment #" + iClusterId + " does not exist in database '" + name + "'");
}
protected OPhysicalPosition createRecord(final ODataLocal dataSegment, final OCluster cluster, final byte[] content,
final byte recordType, final ORecordId rid, final ORecordVersion recordVersion) {
assert (lock.assertExclusiveLockHold());
checkOpeness();
if (content == null)
throw new IllegalArgumentException("Record is null");
final long timer = Orient.instance().getProfiler().startChrono();
final OPhysicalPosition ppos = new OPhysicalPosition(-1, -1, recordType);
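// HASH-BASED CLUSTERS NEED THE CLUSTER POSITION ASSIGNED UP FRONT (GENERATED, OR TAKEN FROM
// THE RID IF IT IS NOT NEW); FOR LIST-BASED CLUSTERS addPhysicalPosition() ASSIGNS IT ITSELF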
if (cluster.isHashBased()) {
if (rid.isNew()) {
if (OGlobalConfiguration.USE_NODE_ID_CLUSTER_POSITION.getValueAsBoolean()) {
ppos.clusterPosition = OClusterPositionFactory.INSTANCE.generateUniqueClusterPosition();
} else {
ppos.clusterPosition = OClusterPositionFactory.INSTANCE.valueOf(positionGenerator++);
}
} else {
ppos.clusterPosition = rid.clusterPosition;
}
}
try {
if (!cluster.addPhysicalPosition(ppos))
throw new OStorageException("Record with given id " + new ORecordId(rid.clusterId, ppos.clusterPosition)
+ " already exists.");
rid.clusterPosition = ppos.clusterPosition;
lockManager.acquireLock(Thread.currentThread(), rid, LOCK.EXCLUSIVE);
try {
ppos.dataSegmentId = dataSegment.getId();
ppos.dataSegmentPos = dataSegment.addRecord(rid, content);
cluster.updateDataSegmentPosition(ppos.clusterPosition, ppos.dataSegmentId, ppos.dataSegmentPos);
if (recordVersion.getCounter() > 0 && recordVersion.compareTo(ppos.recordVersion) != 0) {
// OVERWRITE THE VERSION
cluster.updateVersion(rid.clusterPosition, recordVersion);
ppos.recordVersion = recordVersion;
}
return ppos;
} finally {
lockManager.releaseLock(Thread.currentThread(), rid, LOCK.EXCLUSIVE);
}
} catch (IOException ioe) {
try {
if (ppos.clusterPosition != null && ppos.clusterPosition.compareTo(OClusterPosition.INVALID_POSITION) != 0)
cluster.removePhysicalPosition(ppos.clusterPosition);
} catch (IOException e) {
OLogManager.instance().error(this, "Error on removing physical position in cluster: " + cluster, e);
}
OLogManager.instance().error(this, "Error on creating record in cluster: " + cluster, ioe);
return null;
} finally {
Orient.instance().getProfiler().stopChrono(PROFILER_CREATE_RECORD, "Create a record in database", timer, "db.*.createRecord");
}
}
@Override
protected ORawBuffer readRecord(final OCluster iClusterSegment, final ORecordId iRid, boolean iAtomicLock, boolean loadTombstones) {
if (!iRid.isPersistent())
throw new IllegalArgumentException("Cannot read record " + iRid + " since the position is invalid in database '" + name
+ '\'');
// NOT FOUND: SEARCH IT IN THE STORAGE
final long timer = Orient.instance().getProfiler().startChrono();
// GET LOCK ONLY IF IT'S IN ATOMIC-MODE (SEE THE PARAMETER iAtomicLock)
// USUALLY BROWSING OPERATIONS (QUERY) AVOID ATOMIC LOCKING
// TO IMPROVE PERFORMANCES BY LOCKING THE ENTIRE CLUSTER FROM THE
// OUTSIDE.
if (iAtomicLock)
lock.acquireSharedLock();
try {
lockManager.acquireLock(Thread.currentThread(), iRid, LOCK.SHARED);
try {
final OPhysicalPosition ppos = iClusterSegment.getPhysicalPosition(new OPhysicalPosition(iRid.clusterPosition));
if (ppos != null && loadTombstones && ppos.recordVersion.isTombstone())
return new ORawBuffer(null, ppos.recordVersion, ppos.recordType);
if (ppos == null || !checkForRecordValidity(ppos))
// DELETED
return null;
final ODataLocal data = getDataSegmentById(ppos.dataSegmentId);
return new ORawBuffer(data.getRecord(ppos.dataSegmentPos), ppos.recordVersion, ppos.recordType);
} finally {
lockManager.releaseLock(Thread.currentThread(), iRid, LOCK.SHARED);
}
} catch (IOException e) {
OLogManager.instance().error(this, "Error on reading record " + iRid + " (cluster: " + iClusterSegment + ')', e);
return null;
} finally {
if (iAtomicLock)
lock.releaseSharedLock();
Orient.instance().getProfiler().stopChrono(PROFILER_READ_RECORD, "Read a record from database", timer, "db.*.readRecord");
}
}
protected OPhysicalPosition updateRecord(final OCluster iClusterSegment, final ORecordId rid, final byte[] recordContent,
final ORecordVersion recordVersion, final byte iRecordType) {
assert (lock.assertExclusiveLockHold());
if (iClusterSegment == null)
throw new OStorageException("Cluster not defined for record: " + rid);
final long timer = Orient.instance().getProfiler().startChrono();
try {
// ACQUIRE AN EXCLUSIVE LOCK AGAINST THE RECORD (THE STORAGE-LEVEL EXCLUSIVE LOCK IS ALREADY HELD, SEE THE ASSERT ABOVE)
lockManager.acquireLock(Thread.currentThread(), rid, LOCK.EXCLUSIVE);
try {
// UPDATE IT
final OPhysicalPosition ppos = iClusterSegment.getPhysicalPosition(new OPhysicalPosition(rid.clusterPosition));
if (!checkForRecordValidity(ppos))
return null;
// VERSION CONTROL CHECK
switch (recordVersion.getCounter()) {
// DOCUMENT UPDATE, NO VERSION CONTROL
case -1:
ppos.recordVersion.increment();
iClusterSegment.updateVersion(rid.clusterPosition, ppos.recordVersion);
break;
// DOCUMENT UPDATE, NO VERSION CONTROL, NO VERSION UPDATE
case -2:
break;
default:
// MVCC CONTROL AND RECORD UPDATE OR WRONG VERSION VALUE
if (recordVersion.getCounter() > -1) {
// MVCC TRANSACTION: CHECK IF VERSION IS THE SAME
if (!recordVersion.equals(ppos.recordVersion))
if (OFastConcurrentModificationException.enabled())
throw OFastConcurrentModificationException.instance();
else
throw new OConcurrentModificationException(rid, ppos.recordVersion, recordVersion, ORecordOperation.UPDATED);
ppos.recordVersion.increment();
iClusterSegment.updateVersion(rid.clusterPosition, ppos.recordVersion);
} else {
// DOCUMENT ROLLED BACK
recordVersion.clearRollbackMode();
ppos.recordVersion.copyFrom(recordVersion);
iClusterSegment.updateVersion(rid.clusterPosition, ppos.recordVersion);
}
}
if (ppos.recordType != iRecordType)
iClusterSegment.updateRecordType(rid.clusterPosition, iRecordType);
final long newDataSegmentOffset;
if (ppos.dataSegmentPos == -1)
// WAS EMPTY FIRST TIME, CREATE IT NOW
newDataSegmentOffset = getDataSegmentById(ppos.dataSegmentId).addRecord(rid, recordContent);
else
newDataSegmentOffset = getDataSegmentById(ppos.dataSegmentId).setRecord(ppos.dataSegmentPos, rid, recordContent);
if (newDataSegmentOffset != ppos.dataSegmentPos) {
// UPDATE DATA SEGMENT OFFSET WITH THE NEW PHYSICAL POSITION
iClusterSegment.updateDataSegmentPosition(ppos.clusterPosition, ppos.dataSegmentId, newDataSegmentOffset);
ppos.dataSegmentPos = newDataSegmentOffset;
}
return ppos;
} finally {
lockManager.releaseLock(Thread.currentThread(), rid, LOCK.EXCLUSIVE);
}
} catch (IOException e) {
OLogManager.instance().error(this, "Error on updating record " + rid + " (cluster: " + iClusterSegment + ")", e);
} finally {
Orient.instance().getProfiler().stopChrono(PROFILER_UPDATE_RECORD, "Update a record to database", timer, "db.*.updateRecord");
}
return null;
}
protected OPhysicalPosition deleteRecord(final OCluster iClusterSegment, final ORecordId iRid, final ORecordVersion iVersion,
boolean useTombstones) {
assert (lock.assertExclusiveLockHold());
final long timer = Orient.instance().getProfiler().startChrono();
try {
lockManager.acquireLock(Thread.currentThread(), iRid, LOCK.EXCLUSIVE);
try {
final OPhysicalPosition ppos = iClusterSegment.getPhysicalPosition(new OPhysicalPosition(iRid.clusterPosition));
if (ppos == null || ppos.dataSegmentId < 0 || (useTombstones && ppos.recordVersion.isTombstone()))
// ALREADY DELETED
return null;
// MVCC TRANSACTION: CHECK IF VERSION IS THE SAME
if (iVersion.getCounter() > -1 && !ppos.recordVersion.equals(iVersion))
if (OFastConcurrentModificationException.enabled())
throw OFastConcurrentModificationException.instance();
else
throw new OConcurrentModificationException(iRid, ppos.recordVersion, iVersion, ORecordOperation.DELETED);
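// PHYSICALLY FREE THE DATA CHUNK ONLY IF THE RECORD IS NOT ALREADY A TOMBSTONE AND STILL OWNS ONE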
if (!ppos.recordVersion.isTombstone() && ppos.dataSegmentPos > -1) {
try {
getDataSegmentById(ppos.dataSegmentId).deleteRecord(ppos.dataSegmentPos);
} catch (OIOException e) {
OLogManager.instance().error(this, "Cannot remove the record in data segment, however remove it from cluster", e);
}
}
if (useTombstones && iClusterSegment.hasTombstonesSupport())
iClusterSegment.convertToTombstone(iRid.clusterPosition);
else
iClusterSegment.removePhysicalPosition(iRid.clusterPosition);
return ppos;
} finally {
lockManager.releaseLock(Thread.currentThread(), iRid, LOCK.EXCLUSIVE);
}
} catch (IOException e) {
OLogManager.instance().error(this, "Error on deleting record " + iRid + "( cluster: " + iClusterSegment + ")", e);
} finally {
Orient.instance().getProfiler()
.stopChrono(PROFILER_DELETE_RECORD, "Delete a record from database", timer, "db.*.deleteRecord");
}
return null;
}
@Override
public boolean cleanOutRecord(ORecordId recordId, ORecordVersion recordVersion, int iMode, ORecordCallback<Boolean> callback) {
final OCluster cluster = getClusterById(recordId.clusterId);
modificationLock.requestModificationLock();
try {
// deleteRecord() ASSERTS THAT THE STORAGE-LEVEL EXCLUSIVE LOCK IS HELD, SO ACQUIRE IT
// HERE AS THE OTHER PUBLIC ENTRY POINTS DO
lock.acquireExclusiveLock();
try {
final OPhysicalPosition ppos = deleteRecord(cluster, recordId, recordVersion, false);
if (ppos != null
&& (OGlobalConfiguration.NON_TX_RECORD_UPDATE_SYNCH.getValueAsBoolean() || clustersToSyncImmediately.contains(cluster
.getName())))
synchRecordUpdate(cluster, ppos);
final boolean returnValue = ppos != null;
if (callback != null)
callback.call(recordId, returnValue);
return returnValue;
} finally {
lock.releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
private void installProfilerHooks() {
Orient
.instance()
.getProfiler()
.registerHookValue("db." + name + ".data.holes", "Number of the holes in database", METRIC_TYPE.COUNTER,
new OProfilerHookValue() {
public Object getValue() {
return getHoles();
}
}, "db.*.data.holes");
Orient
.instance()
.getProfiler()
.registerHookValue("db." + name + ".data.holeSize", "Size of the holes in database", METRIC_TYPE.SIZE,
new OProfilerHookValue() {
public Object getValue() {
return getHoleSize();
}
}, "db.*.data.holeSize");
}
private void uninstallProfilerHooks() {
Orient.instance().getProfiler().unregisterHookValue("db." + name + ".data.holes");
Orient.instance().getProfiler().unregisterHookValue("db." + name + ".data.holeSize");
}
private void formatMessage(final boolean iVerbose, final OCommandOutputListener iListener, final String iMessage,
final Object... iArgs) {
if (iVerbose)
iListener.onMessage(String.format(iMessage, iArgs));
}
public void freeze(boolean throwException) {
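// BLOCK ALL WRITES, FLUSH EVERYTHING TO DISK AND MARK ALL FILES AS SOFTLY CLOSED SO THAT A
// BACKUP TAKEN WHILE FROZEN SEES A CONSISTENT, CLEANLY-CLOSED STORAGE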
modificationLock.prohibitModifications(throwException);
synch();
try {
for (OCluster cluster : clusters)
if (cluster != null)
cluster.setSoftlyClosed(true);
for (ODataLocal data : dataSegments)
if (data != null)
data.setSoftlyClosed(true);
if (configuration != null)
configuration.setSoftlyClosed(true);
} catch (IOException e) {
throw new OStorageException("Error on freeze storage '" + name + "'", e);
}
}
public void release() {
try {
for (OCluster cluster : clusters)
if (cluster != null)
cluster.setSoftlyClosed(false);
for (ODataLocal data : dataSegments)
if (data != null)
data.setSoftlyClosed(false);
if (configuration != null)
configuration.setSoftlyClosed(false);
} catch (IOException e) {
throw new OStorageException("Error on release storage '" + name + "'", e);
}
modificationLock.allowModifications();
}
public boolean wasClusterSoftlyClosed(String clusterName) {
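// NON-LOCAL CLUSTERS HAVE NO SOFTLY-CLOSED FLAG, SO THEY ARE CONSIDERED CLEANLY CLOSED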
final OCluster indexCluster = clusterMap.get(clusterName);
return !(indexCluster instanceof OClusterLocal) || ((OClusterLocal) indexCluster).isSoftlyClosed();
}
@Override
public String getType() {
return OEngineLocal.NAME;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_OStorageLocal.java
|
180 |
public class OByteBufferUtilsTest {
private ByteBuffer buffer1;
private ByteBuffer buffer2;
@BeforeMethod
public void setUp() throws Exception {
buffer1 = ByteBuffer.allocate(10);
buffer2 = ByteBuffer.allocate(10);
}
@Test
public void testSplitShort() throws Exception {
// With a 10-byte buffer, position 9 leaves a single byte in buffer1, forcing the
// 2-byte short to be split across both buffers.
for (short var : new short[] { 42, 251 }) {
buffer1.position(9);
buffer2.position(0);
OByteBufferUtils.splitShortToBuffers(buffer1, buffer2, var);
buffer1.position(9);
buffer2.position(0);
short storedVar = OByteBufferUtils.mergeShortFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
}
}
@Test
public void testSplitLong() throws Exception {
// Positions 3..9 leave 7..1 bytes free in buffer1, covering every possible split point
// of the 8-byte long across the two buffers.
for (long var : new long[] { 42L, 2512513332512512344L }) {
for (int position = 3; position <= 9; ++position) {
buffer1.position(position);
buffer2.position(0);
OByteBufferUtils.splitLongToBuffers(buffer1, buffer2, var);
buffer1.position(position);
buffer2.position(0);
long storedVar = OByteBufferUtils.mergeLongFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
}
}
}
@Test
public void testSplitInt() throws Exception {
// Positions 7..9 leave 3..1 bytes free in buffer1, covering every possible split point
// of the 4-byte int across the two buffers.
for (int var : new int[] { 42, 251251333 }) {
for (int position = 7; position <= 9; ++position) {
buffer1.position(position);
buffer2.position(0);
OByteBufferUtils.splitIntToBuffers(buffer1, buffer2, var);
buffer1.position(position);
buffer2.position(0);
int storedVar = OByteBufferUtils.mergeIntFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
}
}
}
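// The "special" tests below split a value across two ByteBuffers that wrap adjacent slices of
// the same backing array, then read the array back through a single buffer: this verifies the
// split preserves the plain big-endian byte layout.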
@Test
public void testSpecialSplitShort() throws Exception {
byte[] array = new byte[10];
ByteBuffer part1 = ByteBuffer.wrap(array, 0, 1);
ByteBuffer part2 = ByteBuffer.wrap(array, 1, 1);
ByteBuffer all = ByteBuffer.wrap(array, 0, 2);
short value = Short.MAX_VALUE;
OByteBufferUtils.splitShortToBuffers(part1, part2, value);
all.position(0);
short storedValue = all.getShort();
assertEquals(value, storedValue);
}
@Test
public void testSpecialSplitInteger() throws Exception {
byte[] array = new byte[10];
ByteBuffer part1 = ByteBuffer.wrap(array, 0, 2);
ByteBuffer part2 = ByteBuffer.wrap(array, 2, 2);
ByteBuffer all = ByteBuffer.wrap(array, 0, 4);
int value = Integer.MAX_VALUE;
OByteBufferUtils.splitIntToBuffers(part1, part2, value);
all.position(0);
int storedValue = all.getInt();
assertEquals(value, storedValue);
}
@Test
public void testSpecialSplitLong() throws Exception {
byte[] array = new byte[10];
ByteBuffer part1 = ByteBuffer.wrap(array, 0, 4);
ByteBuffer part2 = ByteBuffer.wrap(array, 4, 4);
ByteBuffer all = ByteBuffer.wrap(array, 0, 8);
long value = Long.MAX_VALUE;
OByteBufferUtils.splitLongToBuffers(part1, part2, value);
all.position(0);
long storedValue = all.getLong();
assertEquals(value, storedValue);
}
}
| 0true
|
core_src_test_java_com_orientechnologies_common_util_OByteBufferUtilsTest.java
|
387 |
public class OMultiValueChangeTimeLine<K, V> {
private final List<OMultiValueChangeEvent<K, V>> multiValueChangeEvents = new ArrayList<OMultiValueChangeEvent<K, V>>();
/**
* @return <code>List</code> of all operations that were performed on collection starting from
* the time when it was loaded from DB.
*/
public List<OMultiValueChangeEvent<K, V>> getMultiValueChangeEvents() {
return Collections.unmodifiableList(multiValueChangeEvents);
}
/**
* Add new operation that was performed on collection to collection history.
*
* @param changeEvent Description of operation that was performed on collection.
*/
public void addCollectionChangeEvent(OMultiValueChangeEvent<K, V> changeEvent) {
multiValueChangeEvents.add(changeEvent);
}
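// Minimal usage sketch (hypothetical names): a tracked collection creates one time line per
// load, records every mutation through addCollectionChangeEvent(), and the persistence layer
// later replays getMultiValueChangeEvents() to compute the delta to store.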
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_db_record_OMultiValueChangeTimeLine.java
|
237 |
@RunWith(HazelcastParallelClassRunner.class)
@Category(QuickTest.class)
public class ClientExecutorServiceSubmitTest {
static final int CLUSTER_SIZE = 3;
static HazelcastInstance instance1;
static HazelcastInstance instance2;
static HazelcastInstance instance3;
static HazelcastInstance client;
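// Shared fixture: a three-member cluster plus one client; each test isolates itself by using
// a random name for its IExecutorService.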
@BeforeClass
public static void init() {
instance1 = Hazelcast.newHazelcastInstance();
instance2 = Hazelcast.newHazelcastInstance();
instance3 = Hazelcast.newHazelcastInstance();
client = HazelcastClient.newHazelcastClient();
}
@AfterClass
public static void destroy() {
client.shutdown();
Hazelcast.shutdownAll();
}
@Test(expected = NullPointerException.class)
public void testSubmitCallableNullTask() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
Callable callable = null;
final Future<String> f = service.submit(callable);
}
@Test
public void testSubmitCallableToMember() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
final Callable getUuidCallable = new GetMemberUuidTask();
final Member member = instance2.getCluster().getLocalMember();
final Future<String> result = service.submitToMember(getUuidCallable, member);
assertEquals(member.getUuid(), result.get());
}
@Test
public void testSubmitCallableToMembers() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
final Callable getUuidCallable = new GetMemberUuidTask();
final Collection collection = instance2.getCluster().getMembers();
final Map<Member, Future<String>> map = service.submitToMembers(getUuidCallable, collection);
for (Member member : map.keySet()) {
final Future<String> result = map.get(member);
String uuid = result.get();
assertEquals(member.getUuid(), uuid);
}
}
@Test
public void testSubmitCallable_withMemberSelector() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
final String msg = randomString();
final Callable callable = new AppendCallable(msg);
final MemberSelector selectAll = new SelectAllMembers();
final Future<String> f = service.submit(callable, selectAll);
assertEquals(msg + AppendCallable.APPENDAGE, f.get());
}
@Test
public void testSubmitCallableToMembers_withMemberSelector() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
final Callable getUuidCallable = new GetMemberUuidTask();
final MemberSelector selectAll = new SelectAllMembers();
final Map<Member, Future<String>> map = service.submitToMembers(getUuidCallable, selectAll);
for (Member member : map.keySet()) {
final Future<String> result = map.get(member);
String uuid = result.get();
assertEquals(member.getUuid(), uuid);
}
}
@Test
public void submitCallableToAllMembers() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
final String msg = randomString();
final Callable callable = new AppendCallable(msg);
final Map<Member, Future<String>> map = service.submitToAllMembers(callable);
for (Member member : map.keySet()) {
final Future<String> result = map.get(member);
assertEquals(msg + AppendCallable.APPENDAGE, result.get());
}
}
@Test
public void submitRunnableToMember_withExecutionCallback() {
final IExecutorService service = client.getExecutorService(randomString());
final String mapName = randomString();
final Runnable runnable = new MapPutRunnable(mapName);
final Member member = instance2.getCluster().getLocalMember();
final CountDownLatch responseLatch = new CountDownLatch(1);
service.submitToMember(runnable, member, new ExecutionCallback() {
public void onResponse(Object response) {
responseLatch.countDown();
}
public void onFailure(Throwable t) {
}
});
Map map = client.getMap(mapName);
assertOpenEventually(responseLatch);
assertEquals(1, map.size());
}
@Test
public void submitRunnableToMembers_withMultiExecutionCallback() {
final IExecutorService service = client.getExecutorService(randomString());
final String mapName = randomString();
final Runnable runnable = new MapPutRunnable(mapName);
final Collection collection = instance2.getCluster().getMembers();
final CountDownLatch responseLatch = new CountDownLatch(CLUSTER_SIZE);
final CountDownLatch completeLatch = new CountDownLatch(1);
service.submitToMembers(runnable, collection, new MultiExecutionCallback() {
public void onResponse(Member member, Object value) {
responseLatch.countDown();
}
public void onComplete(Map<Member, Object> values) {
completeLatch.countDown();
}
});
Map map = client.getMap(mapName);
assertOpenEventually(responseLatch);
assertOpenEventually(completeLatch);
assertEquals(CLUSTER_SIZE, map.size());
}
@Test
public void testSubmitCallableToMember_withExecutionCallback() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
final Callable getUuidCallable = new GetMemberUuidTask();
final Member member = instance2.getCluster().getLocalMember();
final CountDownLatch responseLatch = new CountDownLatch(1);
final AtomicReference<Object> result = new AtomicReference<Object>();
service.submitToMember(getUuidCallable, member, new ExecutionCallback() {
@Override
public void onResponse(Object response) {
result.set(response);
responseLatch.countDown();
}
@Override
public void onFailure(Throwable t) {
}
});
assertOpenEventually(responseLatch);
assertEquals(member.getUuid(), result.get());
}
/**
* fails randomly.
* Example stack trace is here:
* https://hazelcast-l337.ci.cloudbees.com/job/Hazelcast-3.x-OpenJDK7/com.hazelcast$hazelcast-client/133/testReport/com.hazelcast.client.executor/ClientExecutorServiceSubmitTest/submitCallableToMember_withMultiExecutionCallback/
*/
@Test
@Category(ProblematicTest.class)
public void submitCallableToMember_withMultiExecutionCallback() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
final CountDownLatch responseLatch = new CountDownLatch(CLUSTER_SIZE);
final CountDownLatch completeLatch = new CountDownLatch(CLUSTER_SIZE);
final String msg = randomString();
final Callable callable = new AppendCallable(msg);
final Collection collection = instance2.getCluster().getMembers();
service.submitToMembers(callable, collection, new MultiExecutionCallback() {
public void onResponse(Member member, Object value) {
if (value.equals(msg + AppendCallable.APPENDAGE)) {
responseLatch.countDown();
}
}
public void onComplete(Map<Member, Object> values) {
for (Member member : values.keySet()) {
Object value = values.get(member);
if (value.equals(msg + AppendCallable.APPENDAGE)) {
completeLatch.countDown();
}
}
}
});
assertOpenEventually(responseLatch);
assertOpenEventually(completeLatch);
}
@Test
public void submitRunnable_withExecutionCallback() {
final IExecutorService service = client.getExecutorService(randomString());
final CountDownLatch responseLatch = new CountDownLatch(1);
final String mapName = randomString();
final Runnable runnable = new MapPutRunnable(mapName);
final MemberSelector selector = new SelectAllMembers();
service.submit(runnable, selector, new ExecutionCallback() {
public void onResponse(Object response) {
responseLatch.countDown();
}
public void onFailure(Throwable t) {
}
});
IMap map = client.getMap(mapName);
assertOpenEventually(responseLatch);
assertEquals(1, map.size());
}
@Test
public void submitRunnableToMembers_withExecutionCallback() {
final IExecutorService service = client.getExecutorService(randomString());
final CountDownLatch responseLatch = new CountDownLatch(CLUSTER_SIZE);
final CountDownLatch completeLatch = new CountDownLatch(1);
final String mapName = randomString();
final Runnable runnable = new MapPutRunnable(mapName);
final MemberSelector selector = new SelectAllMembers();
service.submitToMembers(runnable, selector, new MultiExecutionCallback() {
public void onResponse(Member member, Object value) {
responseLatch.countDown();
}
public void onComplete(Map<Member, Object> values) {
completeLatch.countDown();
}
});
IMap map = client.getMap(mapName);
assertOpenEventually(responseLatch);
assertOpenEventually(completeLatch);
assertEquals(CLUSTER_SIZE, map.size());
}
@Test
public void submitCallable_withExecutionCallback() {
final IExecutorService service = client.getExecutorService(randomString());
final CountDownLatch responseLatch = new CountDownLatch(1);
final String msg = randomString();
final Callable runnable = new AppendCallable(msg);
final MemberSelector selector = new SelectAllMembers();
final AtomicReference<Object> result = new AtomicReference<Object>();
service.submit(runnable, selector, new ExecutionCallback() {
public void onResponse(Object response) {
result.set(response);
responseLatch.countDown();
}
public void onFailure(Throwable t) {
}
});
assertOpenEventually(responseLatch);
assertEquals(msg + AppendCallable.APPENDAGE, result.get());
}
@Test
public void submitCallableToMembers_withExecutionCallback() {
final IExecutorService service = client.getExecutorService(randomString());
final CountDownLatch responseLatch = new CountDownLatch(CLUSTER_SIZE);
final CountDownLatch completeLatch = new CountDownLatch(1);
final String msg = randomString();
final Callable callable = new AppendCallable(msg);
final MemberSelector selector = new SelectAllMembers();
service.submitToMembers(callable, selector, new MultiExecutionCallback() {
public void onResponse(Member member, Object value) {
if (value.equals(msg + AppendCallable.APPENDAGE)) {
responseLatch.countDown();
}
}
public void onComplete(Map<Member, Object> values) {
completeLatch.countDown();
}
});
assertOpenEventually(responseLatch);
assertOpenEventually(completeLatch);
}
@Test
public void submitRunnableToAllMembers_withMultiExecutionCallback() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
final CountDownLatch responseLatch = new CountDownLatch(CLUSTER_SIZE);
final CountDownLatch completeLatch = new CountDownLatch(1);
final String mapName = randomString();
final Runnable runnable = new MapPutRunnable(mapName);
service.submitToAllMembers(runnable, new MultiExecutionCallback() {
public void onResponse(Member member, Object value) {
responseLatch.countDown();
}
public void onComplete(Map<Member, Object> values) {
completeLatch.countDown();
}
});
IMap map = client.getMap(mapName);
assertOpenEventually(responseLatch);
assertOpenEventually(completeLatch);
assertEquals(CLUSTER_SIZE, map.size());
}
@Test
public void submitCallableToAllMembers_withMultiExecutionCallback() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
final CountDownLatch responseLatch = new CountDownLatch(CLUSTER_SIZE);
final CountDownLatch completeLatch = new CountDownLatch(CLUSTER_SIZE);
final String msg = randomString();
final Callable callable = new AppendCallable(msg);
service.submitToAllMembers(callable, new MultiExecutionCallback() {
public void onResponse(Member member, Object value) {
if (value.equals(msg + AppendCallable.APPENDAGE)) {
responseLatch.countDown();
}
}
public void onComplete(Map<Member, Object> values) {
for (Member member : values.keySet()) {
Object value = values.get(member);
if (value.equals(msg + AppendCallable.APPENDAGE)) {
completeLatch.countDown();
}
}
}
});
assertOpenEventually(responseLatch);
assertOpenEventually(completeLatch);
}
@Test
public void submitRunnable() {
final IExecutorService service = client.getExecutorService(randomString());
final String mapName = randomString();
final Runnable runnable = new MapPutRunnable(mapName);
service.submit(runnable);
final IMap map = client.getMap(mapName);
assertTrueEventually(new AssertTask() {
public void run() throws Exception {
assertEquals(1, map.size());
}
});
}
@Test
public void testSubmitRunnable_WithResult() throws ExecutionException, InterruptedException {
IExecutorService service = client.getExecutorService(randomString());
final String mapName = randomString();
final Object givenResult = "givenResult";
final Future future = service.submit(new MapPutRunnable(mapName), givenResult);
final Object result = future.get();
final IMap map = client.getMap(mapName);
assertEquals(givenResult, result);
assertEquals(1, map.size());
}
@Test
public void testSubmitCallable() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
final String msg = randomString();
final Callable callable = new AppendCallable(msg);
final Future result = service.submit(callable);
assertEquals(msg + AppendCallable.APPENDAGE, result.get());
}
@Test
public void testSubmitRunnable_withExecutionCallback() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
final String mapName = randomString();
final Runnable runnable = new MapPutRunnable(mapName);
final CountDownLatch responseLatch = new CountDownLatch(1);
service.submit(runnable, new ExecutionCallback() {
public void onResponse(Object response) {
responseLatch.countDown();
}
public void onFailure(Throwable t) {
}
});
IMap map = client.getMap(mapName);
assertOpenEventually(responseLatch);
assertEquals(1, map.size());
}
@Test
public void testSubmitCallable_withExecutionCallback() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
final String msg = randomString();
final Callable callable = new AppendCallable(msg);
final AtomicReference result = new AtomicReference();
final CountDownLatch responseLatch = new CountDownLatch(1);
service.submit(callable, new ExecutionCallback() {
public void onResponse(Object response) {
result.set(response);
responseLatch.countDown();
}
public void onFailure(Throwable t) {
}
});
assertOpenEventually(responseLatch);
assertEquals(msg + AppendCallable.APPENDAGE, result.get());
}
@Test
public void submitCallableToKeyOwner() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
final String msg = randomString();
final Callable callable = new AppendCallable(msg);
final Future<String> result = service.submitToKeyOwner(callable, "key");
assertEquals(msg + AppendCallable.APPENDAGE, result.get());
}
@Test
public void submitRunnableToKeyOwner() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
final String mapName = randomString();
final Runnable runnable = new MapPutRunnable(mapName);
final CountDownLatch responseLatch = new CountDownLatch(1);
service.submitToKeyOwner(runnable, "key", new ExecutionCallback() {
public void onResponse(Object response) {
responseLatch.countDown();
}
public void onFailure(Throwable t) {
}
});
IMap map = client.getMap(mapName);
assertOpenEventually(responseLatch);
assertEquals(1, map.size());
}
@Test
public void submitCallableToKeyOwner_withExecutionCallback() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
final String msg = randomString();
final Callable callable = new AppendCallable(msg);
final CountDownLatch responseLatch = new CountDownLatch(1);
final AtomicReference result = new AtomicReference();
service.submitToKeyOwner(callable, "key", new ExecutionCallback() {
public void onResponse(Object response) {
result.set(response);
responseLatch.countDown();
}
public void onFailure(Throwable t) {
}
});
assertOpenEventually(responseLatch, 5);
assertEquals(msg + AppendCallable.APPENDAGE, result.get());
}
@Test
public void submitRunnablePartitionAware() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
final String mapName = randomString();
final String key = HazelcastTestSupport.generateKeyOwnedBy(instance2);
final Member member = instance2.getCluster().getLocalMember();
// This task should execute on the node owning the given key argument;
// it puts the UUID of the executing node into a map with the given name.
final Runnable runnable = new MapPutPartitionAwareRunnable(mapName, key);
service.submit(runnable);
final IMap map = client.getMap(mapName);
assertTrueEventually(new AssertTask() {
public void run() throws Exception {
assertTrue(map.containsKey(member.getUuid()));
}
});
}
@Test
public void submitRunnablePartitionAware_withResult() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
final String expectedResult = "result";
final String mapName = randomString();
final String key = HazelcastTestSupport.generateKeyOwnedBy(instance2);
final Member member = instance2.getCluster().getLocalMember();
final Runnable runnable = new MapPutPartitionAwareRunnable(mapName, key);
Future result = service.submit(runnable, expectedResult);
final IMap map = client.getMap(mapName);
assertEquals(expectedResult, result.get());
assertTrueEventually(new AssertTask() {
public void run() throws Exception {
assertTrue(map.containsKey(member.getUuid()));
}
});
}
@Test
public void submitRunnablePartitionAware_withExecutionCallback() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
final String expectedResult = "result";
final String mapName = randomString();
final String key = HazelcastTestSupport.generateKeyOwnedBy(instance2);
final Member member = instance2.getCluster().getLocalMember();
final Runnable runnable = new MapPutPartitionAwareRunnable(mapName, key);
final CountDownLatch responseLatch = new CountDownLatch(1);
service.submit(runnable, new ExecutionCallback() {
@Override
public void onResponse(Object response) {
responseLatch.countDown();
}
@Override
public void onFailure(Throwable t) {
}
});
final IMap map = client.getMap(mapName);
assertOpenEventually(responseLatch);
assertTrue(map.containsKey(member.getUuid()));
}
@Test
public void submitCallablePartitionAware() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
final String mapName = randomString();
final IMap map = client.getMap(mapName);
final String key = HazelcastTestSupport.generateKeyOwnedBy(instance2);
final Member member = instance2.getCluster().getLocalMember();
final Callable callable = new MapPutPartitionAwareCallable(mapName, key);
final Future result = service.submit(callable);
assertEquals(member.getUuid(), result.get());
assertTrue(map.containsKey(member.getUuid()));
}
@Test
public void submitCallablePartitionAware_WithExecutionCallback() throws Exception {
final IExecutorService service = client.getExecutorService(randomString());
final String mapName = randomString();
final IMap map = client.getMap(mapName);
final String key = HazelcastTestSupport.generateKeyOwnedBy(instance2);
final Member member = instance2.getCluster().getLocalMember();
final Callable callable = new MapPutPartitionAwareCallable(mapName, key);
final AtomicReference result = new AtomicReference();
final CountDownLatch responseLatch = new CountDownLatch(1);
service.submit(callable, new ExecutionCallback() {
public void onResponse(Object response) {
result.set(response);
responseLatch.countDown();
}
public void onFailure(Throwable t) {
}
});
assertOpenEventually(responseLatch);
assertEquals(member.getUuid(), result.get());
assertTrue(map.containsKey(member.getUuid()));
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_executor_ClientExecutorServiceSubmitTest.java
|
121 |
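// Privileged lookup that scans sun.misc.Unsafe's declared static fields to obtain the
// Unsafe singleton reflectively, for JVMs where Unsafe.getUnsafe() is denied to user code.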
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
| 0true
|
src_main_java_jsr166e_ForkJoinTask.java
|
5,210 |
public class DateHistogramParser implements Aggregator.Parser {
private final ImmutableMap<String, DateTimeUnit> dateFieldUnits;
public DateHistogramParser() {
dateFieldUnits = MapBuilder.<String, DateTimeUnit>newMapBuilder()
.put("year", DateTimeUnit.YEAR_OF_CENTURY)
.put("1y", DateTimeUnit.YEAR_OF_CENTURY)
.put("quarter", DateTimeUnit.QUARTER)
.put("1q", DateTimeUnit.QUARTER)
.put("month", DateTimeUnit.MONTH_OF_YEAR)
.put("1M", DateTimeUnit.MONTH_OF_YEAR)
.put("week", DateTimeUnit.WEEK_OF_WEEKYEAR)
.put("1w", DateTimeUnit.WEEK_OF_WEEKYEAR)
.put("day", DateTimeUnit.DAY_OF_MONTH)
.put("1d", DateTimeUnit.DAY_OF_MONTH)
.put("hour", DateTimeUnit.HOUR_OF_DAY)
.put("1h", DateTimeUnit.HOUR_OF_DAY)
.put("minute", DateTimeUnit.MINUTES_OF_HOUR)
.put("1m", DateTimeUnit.MINUTES_OF_HOUR)
.put("second", DateTimeUnit.SECOND_OF_MINUTE)
.put("1s", DateTimeUnit.SECOND_OF_MINUTE)
.immutableMap();
}
@Override
public String type() {
return InternalDateHistogram.TYPE.name();
}
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
ValuesSourceConfig<NumericValuesSource> config = new ValuesSourceConfig<NumericValuesSource>(NumericValuesSource.class);
String field = null;
String script = null;
String scriptLang = null;
Map<String, Object> scriptParams = null;
boolean keyed = false;
long minDocCount = 1;
InternalOrder order = (InternalOrder) Histogram.Order.KEY_ASC;
String interval = null;
boolean preZoneAdjustLargeInterval = false;
DateTimeZone preZone = DateTimeZone.UTC;
DateTimeZone postZone = DateTimeZone.UTC;
String format = null;
long preOffset = 0;
long postOffset = 0;
boolean assumeSorted = false;
XContentParser.Token token;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.VALUE_STRING) {
if ("field".equals(currentFieldName)) {
field = parser.text();
} else if ("script".equals(currentFieldName)) {
script = parser.text();
} else if ("lang".equals(currentFieldName)) {
scriptLang = parser.text();
} else if ("time_zone".equals(currentFieldName) || "timeZone".equals(currentFieldName)) {
preZone = parseZone(parser, token);
} else if ("pre_zone".equals(currentFieldName) || "preZone".equals(currentFieldName)) {
preZone = parseZone(parser, token);
} else if ("pre_zone_adjust_large_interval".equals(currentFieldName) || "preZoneAdjustLargeInterval".equals(currentFieldName)) {
preZoneAdjustLargeInterval = parser.booleanValue();
} else if ("post_zone".equals(currentFieldName) || "postZone".equals(currentFieldName)) {
postZone = parseZone(parser, token);
} else if ("pre_offset".equals(currentFieldName) || "preOffset".equals(currentFieldName)) {
preOffset = parseOffset(parser.text());
} else if ("post_offset".equals(currentFieldName) || "postOffset".equals(currentFieldName)) {
postOffset = parseOffset(parser.text());
} else if ("interval".equals(currentFieldName)) {
interval = parser.text();
} else if ("format".equals(currentFieldName)) {
format = parser.text();
} else {
throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
}
} else if (token == XContentParser.Token.VALUE_BOOLEAN) {
if ("keyed".equals(currentFieldName)) {
keyed = parser.booleanValue();
} else if ("script_values_sorted".equals(currentFieldName)) {
assumeSorted = parser.booleanValue();
} else {
throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
}
} else if (token == XContentParser.Token.VALUE_NUMBER) {
if ("min_doc_count".equals(currentFieldName) || "minDocCount".equals(currentFieldName)) {
minDocCount = parser.longValue();
} else {
throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
}
} else if (token == XContentParser.Token.START_OBJECT) {
if ("params".equals(currentFieldName)) {
scriptParams = parser.map();
} else if ("order".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.VALUE_STRING) {
String dir = parser.text();
boolean asc = "asc".equals(dir);
order = resolveOrder(currentFieldName, asc);
//TODO should we throw an error if the value is not "asc" or "desc"???
}
}
} else {
throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
}
} else {
throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
}
}
if (interval == null) {
throw new SearchParseException(context, "Missing required field [interval] for histogram aggregation [" + aggregationName + "]");
}
SearchScript searchScript = null;
if (script != null) {
searchScript = context.scriptService().search(context.lookup(), scriptLang, script, scriptParams);
config.script(searchScript);
}
if (!assumeSorted) {
// we need values to be sorted and unique for efficiency
config.ensureSorted(true);
}
TimeZoneRounding.Builder tzRoundingBuilder;
DateTimeUnit dateTimeUnit = dateFieldUnits.get(interval);
if (dateTimeUnit != null) {
tzRoundingBuilder = TimeZoneRounding.builder(dateTimeUnit);
} else {
// otherwise, try to parse the interval as a time value
tzRoundingBuilder = TimeZoneRounding.builder(TimeValue.parseTimeValue(interval, null));
}
TimeZoneRounding rounding = tzRoundingBuilder
.preZone(preZone).postZone(postZone)
.preZoneAdjustLargeInterval(preZoneAdjustLargeInterval)
.preOffset(preOffset).postOffset(postOffset)
.build();
if (format != null) {
config.formatter(new ValueFormatter.DateTime(format));
}
if (field == null) {
if (searchScript != null) {
ValueParser valueParser = new ValueParser.DateMath(new DateMathParser(DateFieldMapper.Defaults.DATE_TIME_FORMATTER, DateFieldMapper.Defaults.TIME_UNIT));
config.parser(valueParser);
return new HistogramAggregator.Factory(aggregationName, config, rounding, order, keyed, minDocCount, InternalDateHistogram.FACTORY);
}
// falling back on the get field data context
return new HistogramAggregator.Factory(aggregationName, config, rounding, order, keyed, minDocCount, InternalDateHistogram.FACTORY);
}
FieldMapper<?> mapper = context.smartNameFieldMapper(field);
if (mapper == null) {
config.unmapped(true);
return new HistogramAggregator.Factory(aggregationName, config, rounding, order, keyed, minDocCount, InternalDateHistogram.FACTORY);
}
if (!(mapper instanceof DateFieldMapper)) {
throw new SearchParseException(context, "date histogram can only be aggregated on date fields but [" + field + "] is not a date field");
}
IndexFieldData<?> indexFieldData = context.fieldData().getForField(mapper);
config.fieldContext(new FieldContext(field, indexFieldData));
return new HistogramAggregator.Factory(aggregationName, config, rounding, order, keyed, minDocCount, InternalDateHistogram.FACTORY);
}
private static InternalOrder resolveOrder(String key, boolean asc) {
if ("_key".equals(key) || "_time".equals(key)) {
return (InternalOrder) (asc ? InternalOrder.KEY_ASC : InternalOrder.KEY_DESC);
}
if ("_count".equals(key)) {
return (InternalOrder) (asc ? InternalOrder.COUNT_ASC : InternalOrder.COUNT_DESC);
}
int i = key.indexOf('.');
if (i < 0) {
return new InternalOrder.Aggregation(key, null, asc);
}
return new InternalOrder.Aggregation(key.substring(0, i), key.substring(i + 1), asc);
}
private long parseOffset(String offset) throws IOException {
if (offset.charAt(0) == '-') {
return -TimeValue.parseTimeValue(offset.substring(1), null).millis();
}
int beginIndex = offset.charAt(0) == '+' ? 1 : 0;
return TimeValue.parseTimeValue(offset.substring(beginIndex), null).millis();
}
private DateTimeZone parseZone(XContentParser parser, XContentParser.Token token) throws IOException {
if (token == XContentParser.Token.VALUE_NUMBER) {
return DateTimeZone.forOffsetHours(parser.intValue());
} else {
String text = parser.text();
int index = text.indexOf(':');
if (index != -1) {
int beginIndex = text.charAt(0) == '+' ? 1 : 0;
// format like -02:30
return DateTimeZone.forOffsetHoursMinutes(
Integer.parseInt(text.substring(beginIndex, index)),
Integer.parseInt(text.substring(index + 1))
);
} else {
// id, listed here: http://joda-time.sourceforge.net/timezones.html
return DateTimeZone.forID(text);
}
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_bucket_histogram_DateHistogramParser.java
|
1,540 |
public class PathMap {
public static final String CLASS = Tokens.makeNamespace(PathMap.class) + ".class";
public enum Counters {
VERTICES_PROCESSED,
OUT_EDGES_PROCESSED
}
public static Configuration createConfiguration(final Class<? extends Element> klass) {
final Configuration configuration = new EmptyConfiguration();
configuration.setClass(CLASS, klass, Element.class);
configuration.setBoolean(Tokens.TITAN_HADOOP_PIPELINE_TRACK_PATHS, true);
return configuration;
}
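// Mapper that writes each tracked traversal path (for vertices or out-edges, per CLASS) to the side-effect output.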
public static class Map extends Mapper<NullWritable, FaunusVertex, NullWritable, Text> {
private boolean isVertex;
private final Text textWritable = new Text();
private SafeMapperOutputs outputs;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
this.isVertex = context.getConfiguration().getClass(CLASS, Element.class, Element.class).equals(Vertex.class);
this.outputs = new SafeMapperOutputs(context);
if (!context.getConfiguration().getBoolean(Tokens.TITAN_HADOOP_PIPELINE_TRACK_PATHS, false))
throw new IllegalStateException(PathMap.class.getSimpleName() + " requires that paths be enabled");
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, NullWritable, Text>.Context context) throws IOException, InterruptedException {
if (this.isVertex && value.hasPaths()) {
for (final List<FaunusPathElement.MicroElement> path : value.getPaths()) {
this.textWritable.set(path.toString());
this.outputs.write(Tokens.SIDEEFFECT, NullWritable.get(), this.textWritable);
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_PROCESSED, 1L);
} else {
long edgesProcessed = 0;
for (final Edge e : value.getEdges(Direction.OUT)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
for (final List<FaunusPathElement.MicroElement> path : edge.getPaths()) {
this.textWritable.set(path.toString());
this.outputs.write(Tokens.SIDEEFFECT, NullWritable.get(), this.textWritable);
}
edgesProcessed++;
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.OUT_EDGES_PROCESSED, edgesProcessed);
}
this.outputs.write(Tokens.GRAPH, NullWritable.get(), value);
}
@Override
public void cleanup(final Mapper<NullWritable, FaunusVertex, NullWritable, Text>.Context context) throws IOException, InterruptedException {
this.outputs.close();
}
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_transform_PathMap.java
|
142 |
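// Client-facing error payload, serialized via Hazelcast's Portable format using short field names.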
public final class GenericError implements Portable {
private String name;
private String message;
private String details;
private int type;
public GenericError() {
}
public GenericError(String name, String message, int type) {
this(name, message, null, type);
}
public GenericError(String name, String message, String details, int type) {
this.name = name;
this.message = message;
this.details = details;
this.type = type;
}
@Override
public int getFactoryId() {
return ClientPortableHook.ID;
}
@Override
public int getClassId() {
return ClientPortableHook.GENERIC_ERROR;
}
public String getName() {
return name;
}
public String getMessage() {
return message;
}
public String getDetails() {
return details;
}
public int getType() {
return type;
}
@Override
public void writePortable(PortableWriter writer) throws IOException {
writer.writeUTF("n", name);
writer.writeUTF("m", message);
writer.writeUTF("d", details);
writer.writeInt("t", type);
}
@Override
public void readPortable(PortableReader reader) throws IOException {
name = reader.readUTF("n");
message = reader.readUTF("m");
details = reader.readUTF("d");
type = reader.readInt("t");
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("GenericError{");
sb.append("message='").append(message).append('\'');
sb.append(", type=").append(type);
sb.append('}');
return sb.toString();
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_client_GenericError.java
|
2,609 |
static class PingRequest extends TransportRequest {
// the (assumed) node id we are pinging
private String nodeId;
PingRequest() {
}
PingRequest(String nodeId) {
this.nodeId = nodeId;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
nodeId = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(nodeId);
}
}
| 1no label
|
src_main_java_org_elasticsearch_discovery_zen_fd_NodesFaultDetection.java
|
1,124 |
public class NativeConstantForLoopScoreScript extends AbstractSearchScript {
public static final String NATIVE_CONSTANT_FOR_LOOP_SCRIPT_SCORE = "native_constant_for_loop_script_score";
public static class Factory implements NativeScriptFactory {
@Override
public ExecutableScript newScript(@Nullable Map<String, Object> params) {
return new NativeConstantForLoopScoreScript(params);
}
}
private NativeConstantForLoopScoreScript(Map<String, Object> params) {
}
@Override
public Object run() {
float score = 0;
for (int i = 0; i < 10; i++) {
score += Math.log(2);
}
return score;
}
}
| 0true
|
src_test_java_org_elasticsearch_benchmark_scripts_score_script_NativeConstantForLoopScoreScript.java
|
1,174 |
NETTY {
@Override
public Transport newTransport(Settings settings, ThreadPool threadPool) {
return new NettyTransport(settings, threadPool, new NetworkService(ImmutableSettings.EMPTY), Version.CURRENT);
}
};
| 0true
|
src_test_java_org_elasticsearch_benchmark_transport_TransportBenchmark.java
|
540 |
public class DeleteMappingRequest extends AcknowledgedRequest<DeleteMappingRequest> {
private String[] indices;
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false);
private String[] types;
DeleteMappingRequest() {
}
/**
* Constructs a new delete mapping request against one or more indices. If nothing is set then
* it will be executed against all indices.
*/
public DeleteMappingRequest(String... indices) {
this.indices = indices;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (CollectionUtils.isEmpty(types)) {
validationException = addValidationError("mapping type is missing", validationException);
} else {
validationException = checkForEmptyString(validationException, types);
}
if (CollectionUtils.isEmpty(indices)) {
validationException = addValidationError("index is missing", validationException);
} else {
validationException = checkForEmptyString(validationException, indices);
}
return validationException;
}
private ActionRequestValidationException checkForEmptyString(ActionRequestValidationException validationException, String[] strings) {
boolean containsEmptyString = false;
for (String string : strings) {
if (!Strings.hasText(string)) {
containsEmptyString = true;
}
}
if (containsEmptyString) {
validationException = addValidationError("types must not contain empty strings", validationException);
}
return validationException;
}
/**
* Sets the indices this delete mapping operation will execute on.
*/
public DeleteMappingRequest indices(String[] indices) {
this.indices = indices;
return this;
}
/**
* The indices the mappings will be removed from.
*/
public String[] indices() {
return indices;
}
public IndicesOptions indicesOptions() {
return indicesOptions;
}
public DeleteMappingRequest indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
/**
* The mapping types.
*/
public String[] types() {
return types;
}
/**
* The type of the mappings to remove.
*/
public DeleteMappingRequest types(String... types) {
this.types = types;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
types = in.readStringArray();
readTimeout(in);
}
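// Note: writeTo uses writeStringArrayNullable, which serializes a null array as an empty one, so readStringArray never returns null here.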
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArrayNullable(indices);
indicesOptions.writeIndicesOptions(out);
out.writeStringArrayNullable(types);
writeTimeout(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_mapping_delete_DeleteMappingRequest.java
|
212 |
public final class CustomPostingsHighlighter extends XPostingsHighlighter {
private static final Snippet[] EMPTY_SNIPPET = new Snippet[0];
private static final Passage[] EMPTY_PASSAGE = new Passage[0];
private final CustomPassageFormatter passageFormatter;
private final int noMatchSize;
private final int totalContentLength;
private final String[] fieldValues;
private final int[] fieldValuesOffsets;
private int currentValueIndex = 0;
private BreakIterator breakIterator;
public CustomPostingsHighlighter(CustomPassageFormatter passageFormatter, List<Object> fieldValues, boolean mergeValues, int maxLength, int noMatchSize) {
super(maxLength);
this.passageFormatter = passageFormatter;
this.noMatchSize = noMatchSize;
if (mergeValues) {
String rawValue = Strings.collectionToDelimitedString(fieldValues, String.valueOf(getMultiValuedSeparator("")));
String fieldValue = rawValue.substring(0, Math.min(rawValue.length(), maxLength));
this.fieldValues = new String[]{fieldValue};
this.fieldValuesOffsets = new int[]{0};
this.totalContentLength = fieldValue.length();
} else {
this.fieldValues = new String[fieldValues.size()];
this.fieldValuesOffsets = new int[fieldValues.size()];
int contentLength = 0;
int offset = 0;
int previousLength = -1;
for (int i = 0; i < fieldValues.size(); i++) {
String rawValue = fieldValues.get(i).toString();
String fieldValue = rawValue.substring(0, Math.min(rawValue.length(), maxLength));
this.fieldValues[i] = fieldValue;
contentLength += fieldValue.length();
offset += previousLength + 1;
this.fieldValuesOffsets[i] = offset;
previousLength = fieldValue.length();
}
this.totalContentLength = contentLength;
}
}
/*
Our own API to highlight a single document field: pass in the query terms, and get back our own Snippet objects
*/
public Snippet[] highlightDoc(String field, BytesRef[] terms, IndexSearcher searcher, int docId, int maxPassages) throws IOException {
IndexReader reader = searcher.getIndexReader();
IndexReaderContext readerContext = reader.getContext();
List<AtomicReaderContext> leaves = readerContext.leaves();
String[] contents = new String[]{loadCurrentFieldValue()};
Map<Integer, Object> snippetsMap = highlightField(field, contents, getBreakIterator(field), terms, new int[]{docId}, leaves, maxPassages);
//increment the current value index so that next time we'll highlight the next value if available
currentValueIndex++;
Object snippetObject = snippetsMap.get(docId);
if (snippetObject != null && snippetObject instanceof Snippet[]) {
return (Snippet[]) snippetObject;
}
return EMPTY_SNIPPET;
}
/*
Method provided through our own fork: allows to do proper scoring when doing per value discrete highlighting.
Used to provide the total length of the field (all values) for proper scoring.
*/
@Override
protected int getContentLength(String field, int docId) {
return totalContentLength;
}
/*
Method provided through our own fork: allows to perform proper per value discrete highlighting.
Used to provide the offset for the current value.
*/
@Override
protected int getOffsetForCurrentValue(String field, int docId) {
if (currentValueIndex < fieldValuesOffsets.length) {
return fieldValuesOffsets[currentValueIndex];
}
throw new IllegalArgumentException("No more values offsets to return");
}
public void setBreakIterator(BreakIterator breakIterator) {
this.breakIterator = breakIterator;
}
@Override
protected PassageFormatter getFormatter(String field) {
return passageFormatter;
}
@Override
protected BreakIterator getBreakIterator(String field) {
if (breakIterator == null) {
return super.getBreakIterator(field);
}
return breakIterator;
}
@Override
protected char getMultiValuedSeparator(String field) {
//U+2029 PARAGRAPH SEPARATOR (PS): each value holds a discrete passage for highlighting
return HighlightUtils.PARAGRAPH_SEPARATOR;
}
/*
By default the postings highlighter returns a non-highlighted snippet when there are no matches.
We want to return no snippets by default, unless no_match_size is greater than 0
*/
@Override
protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
if (noMatchSize > 0) {
//we want to return the first sentence of the first snippet only
return super.getEmptyHighlight(fieldName, bi, 1);
}
return EMPTY_PASSAGE;
}
/*
Not needed since we call our own loadCurrentFieldValue explicitly, but we override it anyway for consistency.
*/
@Override
protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
return new String[][]{new String[]{loadCurrentFieldValue()}};
}
/*
Our own method that returns the field values, which relies on the content that was provided when creating the highlighter.
Supports per value discrete highlighting calling the highlightDoc method multiple times, one per value.
*/
protected String loadCurrentFieldValue() {
if (currentValueIndex < fieldValues.length) {
return fieldValues[currentValueIndex];
}
throw new IllegalArgumentException("No more values to return");
}
}
| 0true
|
src_main_java_org_apache_lucene_search_postingshighlight_CustomPostingsHighlighter.java
|
481 |
private static class ClientProxyFuture {
volatile Object proxy;
ClientProxy get() {
if (proxy == null) {
boolean interrupted = false;
synchronized (this) {
while (proxy == null) {
try {
wait();
} catch (InterruptedException e) {
interrupted = true;
}
}
}
if (interrupted) {
Thread.currentThread().interrupt();
}
}
if (proxy instanceof Throwable) {
throw ExceptionUtil.rethrow((Throwable)proxy);
}
return (ClientProxy)proxy;
}
void set(Object o) {
if (o == null) {
throw new IllegalArgumentException();
}
synchronized (this) {
proxy = o;
notifyAll();
}
}
}
| 0true
|
hazelcast-client_src_main_java_com_hazelcast_client_spi_ProxyManager.java
|
532 |
new Thread() {
public void run() {
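// Wait until the consumer is about to block on the queue, then offer the item after a short delay to wake it.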
try {
justBeforeBlocked.await();
sleepSeconds(1);
queue1.offer(item);
} catch (InterruptedException e) {
fail("failed"+e);
}
}
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_txn_ClientTxnQueueTest.java
|
1 |
@Service("blAdminCatalogService")
public class AdminCatalogServiceImpl implements AdminCatalogService {
private static final Log LOG = LogFactory.getLog(AdminCatalogServiceImpl.class);
@Resource(name = "blCatalogService")
protected CatalogService catalogService;
@Resource(name = "blSkuDao")
protected SkuDao skuDao;
@PersistenceContext(unitName="blPU")
protected EntityManager em;
@Override
public Integer generateSkusFromProduct(Long productId) {
Product product = catalogService.findProductById(productId);
if (CollectionUtils.isEmpty(product.getProductOptions())) {
return -1;
}
List<List<ProductOptionValue>> allPermutations = generatePermutations(0, new ArrayList<ProductOptionValue>(), product.getProductOptions());
LOG.info("Total number of permutations: " + allPermutations.size());
LOG.info(allPermutations);
//determine the permutations that I already have Skus for
List<List<ProductOptionValue>> previouslyGeneratedPermutations = new ArrayList<List<ProductOptionValue>>();
if (CollectionUtils.isNotEmpty(product.getAdditionalSkus())) {
for (Sku additionalSku : product.getAdditionalSkus()) {
if (CollectionUtils.isNotEmpty(additionalSku.getProductOptionValues())) {
previouslyGeneratedPermutations.add(additionalSku.getProductOptionValues());
}
}
}
List<List<ProductOptionValue>> permutationsToGenerate = new ArrayList<List<ProductOptionValue>>();
for (List<ProductOptionValue> permutation : allPermutations) {
boolean previouslyGenerated = false;
for (List<ProductOptionValue> generatedPermutation : previouslyGeneratedPermutations) {
if (isSamePermutation(permutation, generatedPermutation)) {
previouslyGenerated = true;
break;
}
}
if (!previouslyGenerated) {
permutationsToGenerate.add(permutation);
}
}
int numPermutationsCreated = 0;
// For each remaining permutation, create a Sku that maps to it
for (List<ProductOptionValue> permutation : permutationsToGenerate) {
if (permutation.isEmpty()) continue;
Sku permutatedSku = catalogService.createSku();
permutatedSku.setProduct(product);
permutatedSku.setProductOptionValues(permutation);
permutatedSku = catalogService.saveSku(permutatedSku);
product.getAdditionalSkus().add(permutatedSku);
numPermutationsCreated++;
}
if (numPermutationsCreated != 0) {
catalogService.saveProduct(product);
}
return numPermutationsCreated;
}
protected boolean isSamePermutation(List<ProductOptionValue> perm1, List<ProductOptionValue> perm2) {
if (perm1.size() == perm2.size()) {
Collection<Long> perm1Ids = BLCCollectionUtils.collect(perm1, new TypedTransformer<Long>() {
@Override
public Long transform(Object input) {
return ((ProductOptionValue) input).getId();
}
});
Collection<Long> perm2Ids = BLCCollectionUtils.collect(perm2, new TypedTransformer<Long>() {
@Override
public Long transform(Object input) {
return ((ProductOptionValue) input).getId();
}
});
return perm1Ids.containsAll(perm2Ids);
}
return false;
}
/**
* Generates all the possible permutations for the combinations of given ProductOptions
* @param currentTypeIndex
* @param currentPermutation
* @param options
* @return a list containing all possible combinations of ProductOptionValues for the given product options
*/
public List<List<ProductOptionValue>> generatePermutations(int currentTypeIndex, List<ProductOptionValue> currentPermutation, List<ProductOption> options) {
List<List<ProductOptionValue>> result = new ArrayList<List<ProductOptionValue>>();
if (currentTypeIndex == options.size()) {
result.add(currentPermutation);
return result;
}
ProductOption currentOption = options.get(currentTypeIndex);
if (!currentOption.getUseInSkuGeneration()) {
// This flag means SKUs should not be generated for this product option, so skip it
// and compute permutations from the remaining options only.
result.addAll(generatePermutations(currentTypeIndex + 1, currentPermutation, options));
return result;
}
for (ProductOptionValue option : currentOption.getAllowedValues()) {
List<ProductOptionValue> permutation = new ArrayList<ProductOptionValue>();
permutation.addAll(currentPermutation);
permutation.add(option);
result.addAll(generatePermutations(currentTypeIndex + 1, permutation, options));
}
if (currentOption.getAllowedValues().size() == 0) {
//There are still product options left in our array to compute permutations, even though this productOption does not have any values associated.
result.addAll(generatePermutations(currentTypeIndex + 1, currentPermutation, options));
}
return result;
}
@Override
public Boolean cloneProduct(Long productId) {
Product cloneProduct = catalogService.findProductById(productId);
//initialize the many-to-many to save off
cloneProduct.getProductOptions().size();
cloneProduct.getAllParentCategories().size();
//Detach and save a cloned Sku
Sku cloneSku = cloneProduct.getDefaultSku();
cloneSku.getSkuMedia().size();
em.detach(cloneSku);
cloneSku.setId(null);
cloneProduct.setDefaultSku(cloneSku);
em.detach(cloneProduct);
cloneProduct.setId(null);
Product derivedProduct = catalogService.saveProduct(cloneProduct);
cloneProduct = catalogService.findProductById(productId);
//Re-associate the new Skus to the new Product
for (Sku additionalSku : cloneProduct.getAdditionalSkus()) {
additionalSku.getProductOptionValues().size();
em.detach(additionalSku);
additionalSku.setId(null);
additionalSku.setProduct(derivedProduct);
catalogService.saveSku(additionalSku);
}
return true;
}
}
| 0true
|
admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_server_service_AdminCatalogServiceImpl.java
|
690 |
public class CollectionItem implements Comparable<CollectionItem>, IdentifiedDataSerializable {
protected long itemId;
protected Data value;
protected final long creationTime;
public CollectionItem() {
creationTime = Clock.currentTimeMillis();
}
public CollectionItem(long itemId, Data value) {
this();
this.itemId = itemId;
this.value = value;
}
public long getItemId() {
return itemId;
}
public Data getValue() {
return value;
}
public void setValue(Data value) {
this.value = value;
}
public long getCreationTime() {
return creationTime;
}
@Override
public int compareTo(CollectionItem o) {
long otherItemId = o.getItemId();
if (itemId > otherItemId) {
return 1;
} else if (itemId < otherItemId) {
return -1;
}
return 0;
}
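// Note: ordering (compareTo) is by itemId, while equality/hashCode are based on value only.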
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof CollectionItem)) {
return false;
}
CollectionItem item = (CollectionItem) o;
if (value != null ? !value.equals(item.value) : item.value != null) {
return false;
}
return true;
}
@Override
public int hashCode() {
return value != null ? value.hashCode() : 0;
}
@Override
public int getFactoryId() {
return CollectionDataSerializerHook.F_ID;
}
@Override
public int getId() {
return CollectionDataSerializerHook.COLLECTION_ITEM;
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeLong(itemId);
out.writeObject(value);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
itemId = in.readLong();
value = in.readObject();
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_collection_CollectionItem.java
|
1,183 |
public class Bootstrap {
private Node node;
private static volatile Thread keepAliveThread;
private static volatile CountDownLatch keepAliveLatch;
private static Bootstrap bootstrap;
private void setup(boolean addShutdownHook, Tuple<Settings, Environment> tuple) throws Exception {
// Loggers.getLogger(Bootstrap.class, tuple.v1().get("name")).info("heap_size {}/{}", JvmStats.jvmStats().mem().heapCommitted(), JvmInfo.jvmInfo().mem().heapMax());
if (tuple.v1().getAsBoolean("bootstrap.mlockall", false)) {
Natives.tryMlockall();
}
tuple = setupJmx(tuple);
NodeBuilder nodeBuilder = NodeBuilder.nodeBuilder().settings(tuple.v1()).loadConfigSettings(false);
node = nodeBuilder.build();
if (addShutdownHook) {
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
node.close();
}
});
}
}
private static Tuple<Settings, Environment> setupJmx(Tuple<Settings, Environment> tuple) {
// We disable JMX by default, since we don't really want the overhead of RMI (and RMI GC...)
// if (tuple.v1().get(JmxService.SettingsConstants.CREATE_CONNECTOR) == null) {
// // automatically create the connector if we are bootstrapping
// Settings updated = settingsBuilder().put(tuple.v1()).put(JmxService.SettingsConstants.CREATE_CONNECTOR, true).build();
// tuple = new Tuple<Settings, Environment>(updated, tuple.v2());
// }
return tuple;
}
private static void setupLogging(Tuple<Settings, Environment> tuple) {
try {
tuple.v1().getClassLoader().loadClass("org.apache.log4j.Logger");
LogConfigurator.configure(tuple.v1());
} catch (ClassNotFoundException e) {
// no log4j
} catch (NoClassDefFoundError e) {
// no log4j
} catch (Exception e) {
System.err.println("Failed to configure logging...");
e.printStackTrace();
}
}
private static Tuple<Settings, Environment> initialSettings() {
return InternalSettingsPreparer.prepareSettings(EMPTY_SETTINGS, true);
}
/**
* hook for JSVC
*/
public void init(String[] args) throws Exception {
Tuple<Settings, Environment> tuple = initialSettings();
setupLogging(tuple);
setup(true, tuple);
}
/**
* hook for JSVC
*/
public void start() {
node.start();
}
/**
* hook for JSVC
*/
public void stop() {
node.stop();
}
/**
* hook for JSVC
*/
public void destroy() {
node.close();
}
public static void close(String[] args) {
bootstrap.destroy();
keepAliveLatch.countDown();
}
public static void main(String[] args) {
System.setProperty("es.logger.prefix", "");
bootstrap = new Bootstrap();
final String pidFile = System.getProperty("es.pidfile", System.getProperty("es-pidfile"));
if (pidFile != null) {
try {
File fPidFile = new File(pidFile);
if (fPidFile.getParentFile() != null) {
FileSystemUtils.mkdirs(fPidFile.getParentFile());
}
FileOutputStream outputStream = new FileOutputStream(fPidFile);
outputStream.write(Long.toString(JvmInfo.jvmInfo().pid()).getBytes());
outputStream.close();
fPidFile.deleteOnExit();
} catch (Exception e) {
String errorMessage = buildErrorMessage("pid", e);
System.err.println(errorMessage);
System.err.flush();
System.exit(3);
}
}
boolean foreground = System.getProperty("es.foreground", System.getProperty("es-foreground")) != null;
// handle the wrapper system property; if it's running as a service, don't run in the foreground
if (System.getProperty("wrapper.service", "XXX").equalsIgnoreCase("true")) {
foreground = false;
}
Tuple<Settings, Environment> tuple = null;
try {
tuple = initialSettings();
setupLogging(tuple);
} catch (Exception e) {
String errorMessage = buildErrorMessage("Setup", e);
System.err.println(errorMessage);
System.err.flush();
System.exit(3);
}
if (System.getProperty("es.max-open-files", "false").equals("true")) {
ESLogger logger = Loggers.getLogger(Bootstrap.class);
logger.info("max_open_files [{}]", FileSystemUtils.maxOpenFiles(new File(tuple.v2().workFile(), "open_files")));
}
// warn if running using the client VM
if (JvmInfo.jvmInfo().vmName().toLowerCase(Locale.ROOT).contains("client")) {
ESLogger logger = Loggers.getLogger(Bootstrap.class);
logger.warn("jvm uses the client vm, make sure to run `java` with the server vm for best performance by adding `-server` to the command line");
}
String stage = "Initialization";
try {
if (!foreground) {
Loggers.disableConsoleLogging();
System.out.close();
}
bootstrap.setup(true, tuple);
stage = "Startup";
bootstrap.start();
if (!foreground) {
System.err.close();
}
keepAliveLatch = new CountDownLatch(1);
// keep this thread alive (non daemon thread) until we shutdown
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
keepAliveLatch.countDown();
}
});
keepAliveThread = new Thread(new Runnable() {
@Override
public void run() {
try {
keepAliveLatch.await();
} catch (InterruptedException e) {
// bail out
}
}
}, "elasticsearch[keepAlive/" + Version.CURRENT + "]");
keepAliveThread.setDaemon(false);
keepAliveThread.start();
} catch (Throwable e) {
ESLogger logger = Loggers.getLogger(Bootstrap.class);
if (bootstrap.node != null) {
logger = Loggers.getLogger(Bootstrap.class, bootstrap.node.settings().get("name"));
}
String errorMessage = buildErrorMessage(stage, e);
if (foreground) {
System.err.println(errorMessage);
System.err.flush();
} else {
logger.error(errorMessage);
}
Loggers.disableConsoleLogging();
if (logger.isDebugEnabled()) {
logger.debug("Exception", e);
}
System.exit(3);
}
}
private static String buildErrorMessage(String stage, Throwable e) {
StringBuilder errorMessage = new StringBuilder("{").append(Version.CURRENT).append("}: ");
errorMessage.append(stage).append(" Failed ...\n");
if (e instanceof CreationException) {
CreationException createException = (CreationException) e;
Set<String> seenMessages = newHashSet();
int counter = 1;
for (Message message : createException.getErrorMessages()) {
String detailedMessage;
if (message.getCause() == null) {
detailedMessage = message.getMessage();
} else {
detailedMessage = ExceptionsHelper.detailedMessage(message.getCause(), true, 0);
}
if (detailedMessage == null) {
detailedMessage = message.getMessage();
}
if (seenMessages.contains(detailedMessage)) {
continue;
}
seenMessages.add(detailedMessage);
errorMessage.append("").append(counter++).append(") ").append(detailedMessage);
}
} else {
errorMessage.append("- ").append(ExceptionsHelper.detailedMessage(e, true, 0));
}
return errorMessage.toString();
}
}
| 0true
|
src_main_java_org_elasticsearch_bootstrap_Bootstrap.java
|
246 |
assertTrueEventually(new AssertTask() {
public void run() throws Exception {
assertTrue(map.containsKey(member.getUuid()));
}
});
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_executor_ClientExecutorServiceSubmitTest.java
|
94 |
public interface NamedOperationManager {
Map<String, String> manageNamedParameters(Map<String, String> parameterMap);
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_file_service_operation_NamedOperationManager.java
|
263 |
assertTrueEventually(new AssertTask() {
@Override
public void run() {
assertEquals(executions, counter.get());
}
});
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_executor_ExecutionDelayTest.java
|
702 |
constructors[TXN_LIST_REMOVE] = new ConstructorFunction<Integer, Portable>() {
public Portable createNew(Integer arg) {
return new TxnListRemoveRequest();
}
};
| 0true
|
hazelcast_src_main_java_com_hazelcast_collection_CollectionPortableHook.java
|
356 |
public interface ODatabaseDocument extends ODatabaseRecord, ODatabaseSchemaAware<ORecordInternal<?>> {
final static String TYPE = "document";
/**
* Browses all the records of the specified class and also all of its subclasses. If you have a class Vehicle and a class Car
* that extends Vehicle, then db.browseClass("Vehicle") will return all the instances of Vehicle and Car. The returned
* instances are ordered by record id starting from position 0; base classes are processed first.
*
* @param iClassName
* Class name to iterate
* @return Iterator of ODocument instances
*/
public ORecordIteratorClass<ODocument> browseClass(String iClassName);
/**
* Browses all the records of the specified class and, if iPolymorphic is true, also all of its subclasses. If you have a class
* Vehicle and a class Car that extends Vehicle, then db.browseClass("Vehicle", true) will return all the instances of Vehicle
* and Car. The returned instances are ordered by record id starting from position 0; base classes are processed first.
*
* @param iClassName
* Class name to iterate
* @param iPolymorphic
* Consider also the instances of the subclasses or not
* @return Iterator of ODocument instances
*/
public ORecordIteratorClass<ODocument> browseClass(String iClassName, boolean iPolymorphic);
/**
* Flush all indexes and cached storage content to the disk.
*
* After this call, users can perform only select queries. All write-related commands are queued until {@link #release()}
* is called.
*
* This command waits until all ongoing modifications to indexes or the database have finished.
*
* IMPORTANT: This command is not reentrant.
*/
public void freeze();
/**
* Re-enables write-related commands on the database. Called after {@link #freeze()}.
*/
public void release();
/**
* Flush all indexes and cached storage content to the disk.
*
* After this call, users can perform only select queries. Depending on the <code>throwException</code> parameter,
* write-related commands are either queued until {@link #release()} is called or cause an exception to be thrown on
* any attempt to modify DB data.
*
* IMPORTANT: This command is not reentrant.
*
* @param throwException
* If <code>true</code>, a {@link com.orientechnologies.common.concur.lock.OModificationOperationProhibitedException}
* is thrown when a write command is attempted.
*/
public void freeze(boolean throwException);
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_db_document_ODatabaseDocument.java
|
555 |
public class GetMappingsRequestBuilder extends ClusterInfoRequestBuilder<GetMappingsRequest, GetMappingsResponse, GetMappingsRequestBuilder> {
public GetMappingsRequestBuilder(InternalGenericClient client, String... indices) {
super(client, new GetMappingsRequest().indices(indices));
}
@Override
protected void doExecute(ActionListener<GetMappingsResponse> listener) {
((IndicesAdminClient) client).getMappings(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_mapping_get_GetMappingsRequestBuilder.java
|
1,447 |
public static class Entry {
private final State state;
private final SnapshotId snapshotId;
private final boolean includeGlobalState;
private final ImmutableMap<ShardId, ShardSnapshotStatus> shards;
private final ImmutableList<String> indices;
public Entry(SnapshotId snapshotId, boolean includeGlobalState, State state, ImmutableList<String> indices, ImmutableMap<ShardId, ShardSnapshotStatus> shards) {
this.state = state;
this.snapshotId = snapshotId;
this.includeGlobalState = includeGlobalState;
this.indices = indices;
if (shards == null) {
this.shards = ImmutableMap.of();
} else {
this.shards = shards;
}
}
public SnapshotId snapshotId() {
return this.snapshotId;
}
public ImmutableMap<ShardId, ShardSnapshotStatus> shards() {
return this.shards;
}
public State state() {
return state;
}
public ImmutableList<String> indices() {
return indices;
}
public boolean includeGlobalState() {
return includeGlobalState;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Entry entry = (Entry) o;
if (includeGlobalState != entry.includeGlobalState) return false;
if (!indices.equals(entry.indices)) return false;
if (!shards.equals(entry.shards)) return false;
if (!snapshotId.equals(entry.snapshotId)) return false;
if (state != entry.state) return false;
return true;
}
@Override
public int hashCode() {
int result = state.hashCode();
result = 31 * result + snapshotId.hashCode();
result = 31 * result + (includeGlobalState ? 1 : 0);
result = 31 * result + shards.hashCode();
result = 31 * result + indices.hashCode();
return result;
}
}
| 1no label
|
src_main_java_org_elasticsearch_cluster_metadata_SnapshotMetaData.java
|
804 |
private class ASyncAction {
final ActionListener<MultiPercolateResponse> finalListener;
final Map<ShardId, TransportShardMultiPercolateAction.Request> requestsByShard;
final List<Object> percolateRequests;
final Map<ShardId, IntArrayList> shardToSlots;
final AtomicInteger expectedOperations;
final AtomicArray<Object> reducedResponses;
final AtomicReferenceArray<AtomicInteger> expectedOperationsPerItem;
final AtomicReferenceArray<AtomicReferenceArray> responsesByItemAndShard;
ASyncAction(List<Object> percolateRequests, ActionListener<MultiPercolateResponse> finalListener, ClusterState clusterState) {
this.finalListener = finalListener;
this.percolateRequests = percolateRequests;
responsesByItemAndShard = new AtomicReferenceArray<AtomicReferenceArray>(percolateRequests.size());
expectedOperationsPerItem = new AtomicReferenceArray<AtomicInteger>(percolateRequests.size());
reducedResponses = new AtomicArray<Object>(percolateRequests.size());
// Resolving concrete indices and routing and grouping the requests by shard
requestsByShard = new HashMap<ShardId, TransportShardMultiPercolateAction.Request>();
// Keep track what slots belong to what shard, in case a request to a shard fails on all copies
shardToSlots = new HashMap<ShardId, IntArrayList>();
int expectedResults = 0;
for (int slot = 0; slot < percolateRequests.size(); slot++) {
Object element = percolateRequests.get(slot);
assert element != null;
if (element instanceof PercolateRequest) {
PercolateRequest percolateRequest = (PercolateRequest) element;
String[] concreteIndices;
try {
concreteIndices = clusterState.metaData().concreteIndices(percolateRequest.indices(), percolateRequest.indicesOptions());
} catch (IndexMissingException e) {
reducedResponses.set(slot, e);
responsesByItemAndShard.set(slot, new AtomicReferenceArray(0));
expectedOperationsPerItem.set(slot, new AtomicInteger(0));
continue;
}
Map<String, Set<String>> routing = clusterState.metaData().resolveSearchRouting(percolateRequest.routing(), percolateRequest.indices());
// TODO: I only need shardIds, ShardIterator(ShardRouting) is only needed in TransportShardMultiPercolateAction
GroupShardsIterator shards = clusterService.operationRouting().searchShards(
clusterState, percolateRequest.indices(), concreteIndices, routing, percolateRequest.preference()
);
if (shards.size() == 0) {
reducedResponses.set(slot, new UnavailableShardsException(null, "No shards available"));
responsesByItemAndShard.set(slot, new AtomicReferenceArray(0));
expectedOperationsPerItem.set(slot, new AtomicInteger(0));
continue;
}
responsesByItemAndShard.set(slot, new AtomicReferenceArray(shards.size()));
expectedOperationsPerItem.set(slot, new AtomicInteger(shards.size()));
for (ShardIterator shard : shards) {
ShardId shardId = shard.shardId();
TransportShardMultiPercolateAction.Request requests = requestsByShard.get(shardId);
if (requests == null) {
requestsByShard.put(shardId, requests = new TransportShardMultiPercolateAction.Request(shard.shardId().getIndex(), shardId.id(), percolateRequest.preference()));
}
logger.trace("Adding shard[{}] percolate request for item[{}]", shardId, slot);
requests.add(new TransportShardMultiPercolateAction.Request.Item(slot, new PercolateShardRequest(shardId, percolateRequest)));
IntArrayList items = shardToSlots.get(shardId);
if (items == null) {
shardToSlots.put(shardId, items = new IntArrayList());
}
items.add(slot);
}
expectedResults++;
} else if (element instanceof Throwable || element instanceof MultiGetResponse.Failure) {
logger.trace("item[{}] won't be executed, reason: {}", slot, element);
reducedResponses.set(slot, element);
responsesByItemAndShard.set(slot, new AtomicReferenceArray(0));
expectedOperationsPerItem.set(slot, new AtomicInteger(0));
}
}
expectedOperations = new AtomicInteger(expectedResults);
}
void run() {
if (expectedOperations.get() == 0) {
finish();
return;
}
logger.trace("mpercolate executing for shards {}", requestsByShard.keySet());
for (Map.Entry<ShardId, TransportShardMultiPercolateAction.Request> entry : requestsByShard.entrySet()) {
final ShardId shardId = entry.getKey();
TransportShardMultiPercolateAction.Request shardRequest = entry.getValue();
shardMultiPercolateAction.execute(shardRequest, new ActionListener<TransportShardMultiPercolateAction.Response>() {
@Override
public void onResponse(TransportShardMultiPercolateAction.Response response) {
onShardResponse(shardId, response);
}
@Override
public void onFailure(Throwable e) {
onShardFailure(shardId, e);
}
});
}
}
@SuppressWarnings("unchecked")
void onShardResponse(ShardId shardId, TransportShardMultiPercolateAction.Response response) {
logger.debug("{} Percolate shard response", shardId);
try {
for (TransportShardMultiPercolateAction.Response.Item item : response.items()) {
AtomicReferenceArray shardResults = responsesByItemAndShard.get(item.slot());
if (shardResults == null) {
assert false : "shardResults can't be null";
continue;
}
if (item.failed()) {
shardResults.set(shardId.id(), new BroadcastShardOperationFailedException(shardId, item.error().string()));
} else {
shardResults.set(shardId.id(), item.response());
}
assert expectedOperationsPerItem.get(item.slot()).get() >= 1 : "slot[" + item.slot() + "] can't be lower than one";
if (expectedOperationsPerItem.get(item.slot()).decrementAndGet() == 0) {
// Failure won't bubble up, since we fail the whole request now via the catch clause below,
// so expectedOperationsPerItem will not be decremented twice.
reduce(item.slot());
}
}
} catch (Throwable e) {
logger.error("{} Percolate original reduce error", e, shardId);
finalListener.onFailure(e);
}
}
@SuppressWarnings("unchecked")
void onShardFailure(ShardId shardId, Throwable e) {
logger.debug("{} Shard multi percolate failure", e, shardId);
try {
IntArrayList slots = shardToSlots.get(shardId);
for (int i = 0; i < slots.size(); i++) {
int slot = slots.get(i);
AtomicReferenceArray shardResults = responsesByItemAndShard.get(slot);
if (shardResults == null) {
continue;
}
shardResults.set(shardId.id(), new BroadcastShardOperationFailedException(shardId, e));
assert expectedOperationsPerItem.get(slot).get() >= 1 : "slot[" + slot + "] can't be lower than one. Caused by: " + e.getMessage();
if (expectedOperationsPerItem.get(slot).decrementAndGet() == 0) {
reduce(slot);
}
}
} catch (Throwable t) {
logger.error("{} Percolate original reduce error, original error {}", t, shardId, e);
finalListener.onFailure(t);
}
}
void reduce(int slot) {
AtomicReferenceArray shardResponses = responsesByItemAndShard.get(slot);
PercolateResponse reducedResponse = TransportPercolateAction.reduce((PercolateRequest) percolateRequests.get(slot), shardResponses, percolatorService);
reducedResponses.set(slot, reducedResponse);
assert expectedOperations.get() >= 1 : "slot[" + slot + "] expected operations should be >= 1 but is " + expectedOperations.get();
if (expectedOperations.decrementAndGet() == 0) {
finish();
}
}
void finish() {
MultiPercolateResponse.Item[] finalResponse = new MultiPercolateResponse.Item[reducedResponses.length()];
for (int slot = 0; slot < reducedResponses.length(); slot++) {
Object element = reducedResponses.get(slot);
assert element != null : "Element[" + slot + "] shouldn't be null";
if (element instanceof PercolateResponse) {
finalResponse[slot] = new MultiPercolateResponse.Item((PercolateResponse) element);
} else if (element instanceof Throwable) {
finalResponse[slot] = new MultiPercolateResponse.Item(ExceptionsHelper.detailedMessage((Throwable) element));
} else if (element instanceof MultiGetResponse.Failure) {
finalResponse[slot] = new MultiPercolateResponse.Item(((MultiGetResponse.Failure)element).getMessage());
}
}
finalListener.onResponse(new MultiPercolateResponse(finalResponse));
}
}
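// A self-contained sketch (illustrative names, not part of the original class) of the
// countdown pattern used above: one AtomicInteger per slot tracks the expected shard
// responses, and whichever thread performs the decrement that reaches zero runs the
// per-slot reduce; a second, global counter gates the final finish().
class SlotCountdownSketch {
    private final java.util.concurrent.atomic.AtomicInteger[] expectedPerSlot;
    private final java.util.concurrent.atomic.AtomicInteger expectedOperations;

    SlotCountdownSketch(int slots, int shardsPerSlot) {
        expectedPerSlot = new java.util.concurrent.atomic.AtomicInteger[slots];
        for (int i = 0; i < slots; i++) {
            expectedPerSlot[i] = new java.util.concurrent.atomic.AtomicInteger(shardsPerSlot);
        }
        expectedOperations = new java.util.concurrent.atomic.AtomicInteger(slots);
    }

    // Invoked once per (slot, shard) response or failure, possibly from many threads.
    void onShardResult(int slot) {
        if (expectedPerSlot[slot].decrementAndGet() == 0) {
            reduce(slot);
        }
    }

    void reduce(int slot) {
        // merge the shard-level responses for this slot, then count down the global gate
        if (expectedOperations.decrementAndGet() == 0) {
            finish();
        }
    }

    void finish() {
        // assemble the final per-slot responses for the listener
    }
}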
| 0true
|
src_main_java_org_elasticsearch_action_percolate_TransportMultiPercolateAction.java
|
4,902 |
public class RestThreadPoolAction extends AbstractCatAction {
private final static String[] SUPPORTED_NAMES = new String[] {
ThreadPool.Names.BULK,
ThreadPool.Names.FLUSH,
ThreadPool.Names.GENERIC,
ThreadPool.Names.GET,
ThreadPool.Names.INDEX,
ThreadPool.Names.MANAGEMENT,
ThreadPool.Names.MERGE,
ThreadPool.Names.OPTIMIZE,
ThreadPool.Names.PERCOLATE,
ThreadPool.Names.REFRESH,
ThreadPool.Names.SEARCH,
ThreadPool.Names.SNAPSHOT,
ThreadPool.Names.SUGGEST,
ThreadPool.Names.WARMER
};
private final static String[] SUPPORTED_ALIASES = new String[] {
"b",
"f",
"ge",
"g",
"i",
"ma",
"m",
"o",
"p",
"r",
"s",
"sn",
"su",
"w"
};
private final static String[] DEFAULT_THREAD_POOLS = new String[] {
ThreadPool.Names.BULK,
ThreadPool.Names.INDEX,
ThreadPool.Names.SEARCH,
};
private final static Map<String, String> ALIAS_TO_THREAD_POOL;
private final static Map<String, String> THREAD_POOL_TO_ALIAS;
static {
ALIAS_TO_THREAD_POOL = Maps.newHashMapWithExpectedSize(SUPPORTED_NAMES.length);
for (String supportedThreadPool : SUPPORTED_NAMES) {
ALIAS_TO_THREAD_POOL.put(supportedThreadPool.substring(0, 3), supportedThreadPool);
}
THREAD_POOL_TO_ALIAS = Maps.newHashMapWithExpectedSize(SUPPORTED_NAMES.length);
for (int i = 0; i < SUPPORTED_NAMES.length; i++) {
THREAD_POOL_TO_ALIAS.put(SUPPORTED_NAMES[i], SUPPORTED_ALIASES[i]);
}
}
@Inject
public RestThreadPoolAction(Settings settings, Client client, RestController controller) {
super(settings, client);
controller.registerHandler(GET, "/_cat/thread_pool", this);
}
@Override
void documentation(StringBuilder sb) {
sb.append("/_cat/thread_pool\n");
}
@Override
public void doRequest(final RestRequest request, final RestChannel channel) {
final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
clusterStateRequest.clear().nodes(true);
clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
final String[] pools = fetchSortedPools(request, DEFAULT_THREAD_POOLS);
client.admin().cluster().state(clusterStateRequest, new ActionListener<ClusterStateResponse>() {
@Override
public void onResponse(final ClusterStateResponse clusterStateResponse) {
NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
nodesInfoRequest.clear().process(true);
client.admin().cluster().nodesInfo(nodesInfoRequest, new ActionListener<NodesInfoResponse>() {
@Override
public void onResponse(final NodesInfoResponse nodesInfoResponse) {
NodesStatsRequest nodesStatsRequest = new NodesStatsRequest();
nodesStatsRequest.clear().threadPool(true);
client.admin().cluster().nodesStats(nodesStatsRequest, new ActionListener<NodesStatsResponse>() {
@Override
public void onResponse(NodesStatsResponse nodesStatsResponse) {
try {
channel.sendResponse(RestTable.buildResponse(buildTable(request, clusterStateResponse, nodesInfoResponse, nodesStatsResponse, pools), request, channel));
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(new XContentThrowableRestResponse(request, e));
} catch (IOException e1) {
logger.error("Failed to send failure response", e1);
}
}
});
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(new XContentThrowableRestResponse(request, e));
} catch (IOException e1) {
logger.error("Failed to send failure response", e1);
}
}
});
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(new XContentThrowableRestResponse(request, e));
} catch (IOException e1) {
logger.error("Failed to send failure response", e1);
}
}
});
}
@Override
Table getTableWithHeader(final RestRequest request) {
Table table = new Table();
table.startHeaders();
table.addCell("id", "default:false;alias:id,nodeId;desc:unique node id");
table.addCell("pid", "default:false;alias:p;desc:process id");
table.addCell("host", "alias:h;desc:host name");
table.addCell("ip", "alias:i;desc:ip address");
table.addCell("port", "default:false;alias:po;desc:bound transport port");
final String[] requestedPools = fetchSortedPools(request, DEFAULT_THREAD_POOLS);
for (String pool : SUPPORTED_NAMES) {
String poolAlias = THREAD_POOL_TO_ALIAS.get(pool);
boolean display = false;
for (String requestedPool : requestedPools) {
if (pool.equals(requestedPool)) {
display = true;
break;
}
}
String defaultDisplayVal = Boolean.toString(display);
table.addCell(
pool + ".active",
"alias:" + poolAlias + "a;default:" + defaultDisplayVal + ";text-align:right;desc:number of active " + pool + " threads"
);
table.addCell(
pool + ".size",
"alias:" + poolAlias + "s;default:false;text-align:right;desc:number of active " + pool + " threads"
);
table.addCell(
pool + ".queue",
"alias:" + poolAlias + "q;default:" + defaultDisplayVal + ";text-align:right;desc:number of " + pool + " threads in queue"
);
table.addCell(
pool + ".rejected",
"alias:" + poolAlias + "r;default:" + defaultDisplayVal + ";text-align:right;desc:number of rejected " + pool + " threads"
);
table.addCell(
pool + ".largest",
"alias:" + poolAlias + "l;default:false;text-align:right;desc:highest number of seen active " + pool + " threads"
);
table.addCell(
pool + ".completed",
"alias:" + poolAlias + "c;default:false;text-align:right;desc:number of completed " + pool + " threads"
);
}
table.endHeaders();
return table;
}
private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoResponse nodesInfo, NodesStatsResponse nodesStats, String[] pools) {
boolean fullId = req.paramAsBoolean("full_id", false);
DiscoveryNodes nodes = state.getState().nodes();
Table table = getTableWithHeader(req);
for (DiscoveryNode node : nodes) {
NodeInfo info = nodesInfo.getNodesMap().get(node.id());
NodeStats stats = nodesStats.getNodesMap().get(node.id());
table.startRow();
table.addCell(fullId ? node.id() : Strings.substring(node.getId(), 0, 4));
table.addCell(info == null ? null : info.getProcess().id());
table.addCell(node.getHostName());
table.addCell(node.getHostAddress());
if (node.address() instanceof InetSocketTransportAddress) {
table.addCell(((InetSocketTransportAddress) node.address()).address().getPort());
} else {
table.addCell("-");
}
final Map<String, ThreadPoolStats.Stats> poolThreadStats;
if (stats == null) {
poolThreadStats = Collections.emptyMap();
} else {
poolThreadStats = new HashMap<String, ThreadPoolStats.Stats>(14);
ThreadPoolStats threadPoolStats = stats.getThreadPool();
for (ThreadPoolStats.Stats threadPoolStat : threadPoolStats) {
poolThreadStats.put(threadPoolStat.getName(), threadPoolStat);
}
}
for (String pool : SUPPORTED_NAMES) {
ThreadPoolStats.Stats poolStats = poolThreadStats.get(pool);
table.addCell(poolStats == null ? null : poolStats.getActive());
table.addCell(poolStats == null ? null : poolStats.getThreads());
table.addCell(poolStats == null ? null : poolStats.getQueue());
table.addCell(poolStats == null ? null : poolStats.getRejected());
table.addCell(poolStats == null ? null : poolStats.getLargest());
table.addCell(poolStats == null ? null : poolStats.getCompleted());
}
table.endRow();
}
return table;
}
// The thread pool columns should always be in the same order.
private String[] fetchSortedPools(RestRequest request, String[] defaults) {
String[] headers = request.paramAsStringArray("h", null);
if (headers == null) {
return defaults;
} else {
Set<String> requestedPools = new LinkedHashSet<String>(headers.length);
for (String header : headers) {
int dotIndex = header.indexOf('.');
if (dotIndex != -1) {
String headerPrefix = header.substring(0, dotIndex);
if (THREAD_POOL_TO_ALIAS.containsKey(headerPrefix)) {
requestedPools.add(headerPrefix);
}
} else if (ALIAS_TO_THREAD_POOL.containsKey(header)) {
requestedPools.add(ALIAS_TO_THREAD_POOL.get(header));
}
}
return requestedPools.toArray(new String[0]);
}
}
}
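// A standalone sketch (hypothetical names, not part of the original handler) of the
// header resolution performed by fetchSortedPools above: a dotted column name such as
// "bulk.active" selects the pool by its prefix, while a bare token is looked up in the
// alias map (which, given the substring(0, 3) construction above, holds keys like "bul").
class PoolHeaderResolutionSketch {
    public static void main(String[] args) {
        java.util.Map<String, String> aliasToPool = new java.util.HashMap<String, String>();
        aliasToPool.put("bul", "bulk");
        aliasToPool.put("sea", "search");
        java.util.Set<String> knownPools = new java.util.HashSet<String>(aliasToPool.values());

        String[] headers = { "bulk.active", "sea", "host" }; // "host" is not a pool column
        java.util.Set<String> requestedPools = new java.util.LinkedHashSet<String>();
        for (String header : headers) {
            int dotIndex = header.indexOf('.');
            if (dotIndex != -1) {
                String prefix = header.substring(0, dotIndex);
                if (knownPools.contains(prefix)) {
                    requestedPools.add(prefix);
                }
            } else if (aliasToPool.containsKey(header)) {
                requestedPools.add(aliasToPool.get(header));
            }
        }
        System.out.println(requestedPools); // [bulk, search]
    }
}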
| 1no label
|
src_main_java_org_elasticsearch_rest_action_cat_RestThreadPoolAction.java
|
265 |
public class ElasticsearchException extends RuntimeException {
/**
* Construct a <code>ElasticsearchException</code> with the specified detail message.
*
* @param msg the detail message
*/
public ElasticsearchException(String msg) {
super(msg);
}
/**
* Construct a <code>ElasticsearchException</code> with the specified detail message
* and nested exception.
*
* @param msg the detail message
* @param cause the nested exception
*/
public ElasticsearchException(String msg, Throwable cause) {
super(msg, cause);
}
/**
* Returns the rest status code associated with this exception.
*/
public RestStatus status() {
Throwable cause = unwrapCause();
if (cause == this) {
return RestStatus.INTERNAL_SERVER_ERROR;
} else if (cause instanceof ElasticsearchException) {
return ((ElasticsearchException) cause).status();
} else if (cause instanceof IllegalArgumentException) {
return RestStatus.BAD_REQUEST;
} else {
return RestStatus.INTERNAL_SERVER_ERROR;
}
}
/**
* Unwraps the actual cause from the exception for cases when the exception is a
* {@link ElasticsearchWrapperException}.
*
* @see org.elasticsearch.ExceptionsHelper#unwrapCause(Throwable)
*/
public Throwable unwrapCause() {
return ExceptionsHelper.unwrapCause(this);
}
/**
* Return the detail message, including the message from the nested exception
* if there is one.
*/
public String getDetailedMessage() {
if (getCause() != null) {
StringBuilder sb = new StringBuilder();
sb.append(toString()).append("; ");
if (getCause() instanceof ElasticsearchException) {
sb.append(((ElasticsearchException) getCause()).getDetailedMessage());
} else {
sb.append(getCause());
}
return sb.toString();
} else {
return super.toString();
}
}
/**
* Retrieve the innermost cause of this exception, if none, returns the current exception.
*/
public Throwable getRootCause() {
Throwable rootCause = this;
Throwable cause = getCause();
while (cause != null && cause != rootCause) {
rootCause = cause;
cause = cause.getCause();
}
return rootCause;
}
/**
* Retrieve the most specific cause of this exception, that is,
* either the innermost cause (root cause) or this exception itself.
* <p>Differs from {@link #getRootCause()} in that it falls back
* to the present exception if there is no root cause.
*
* @return the most specific cause (never <code>null</code>)
*/
public Throwable getMostSpecificCause() {
Throwable rootCause = getRootCause();
return (rootCause != null ? rootCause : this);
}
/**
* Check whether this exception contains an exception of the given type:
* either it is of the given class itself or it contains a nested cause
* of the given type.
*
* @param exType the exception type to look for
* @return whether there is a nested exception of the specified type
*/
public boolean contains(Class exType) {
if (exType == null) {
return false;
}
if (exType.isInstance(this)) {
return true;
}
Throwable cause = getCause();
if (cause == this) {
return false;
}
if (cause instanceof ElasticsearchException) {
return ((ElasticsearchException) cause).contains(exType);
} else {
while (cause != null) {
if (exType.isInstance(cause)) {
return true;
}
if (cause.getCause() == cause) {
break;
}
cause = cause.getCause();
}
return false;
}
}
}
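// A hedged usage sketch (not part of the original file): exercising the cause-chain
// helpers defined above. Note that unwrapCause() only unwraps ElasticsearchWrapperException
// instances, so a plain IllegalArgumentException cause does not downgrade status() to
// BAD_REQUEST here.
class ElasticsearchExceptionDemo {
    public static void main(String[] args) {
        ElasticsearchException e =
                new ElasticsearchException("outer", new IllegalArgumentException("inner"));
        System.out.println(e.status());       // INTERNAL_SERVER_ERROR (nothing to unwrap)
        System.out.println(e.getRootCause()); // java.lang.IllegalArgumentException: inner
        System.out.println(e.contains(IllegalArgumentException.class)); // true
        System.out.println(e.getDetailedMessage()); // toString() + "; " + cause
    }
}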
| 0true
|
src_main_java_org_elasticsearch_ElasticsearchException.java
|
2,368 |
public class TrackableJobFuture<V>
extends AbstractCompletableFuture<V>
implements TrackableJob<V>, JobCompletableFuture<V> {
private final String name;
private final String jobId;
private final JobTracker jobTracker;
private final Collator collator;
private final CountDownLatch latch;
private final MapReduceService mapReduceService;
private volatile boolean cancelled;
public TrackableJobFuture(String name, String jobId, JobTracker jobTracker, NodeEngine nodeEngine, Collator collator) {
super(nodeEngine, nodeEngine.getLogger(TrackableJobFuture.class));
this.name = name;
this.jobId = jobId;
this.jobTracker = jobTracker;
this.collator = collator;
this.latch = new CountDownLatch(1);
this.mapReduceService = ((NodeEngineImpl) nodeEngine).getService(MapReduceService.SERVICE_NAME);
}
@Override
public void setResult(Object result) {
Object finalResult = result;
// If collator is available we need to execute it now
if (collator != null) {
finalResult = collator.collate(((Map) finalResult).entrySet());
}
if (finalResult instanceof Throwable && !(finalResult instanceof CancellationException)) {
finalResult = new ExecutionException((Throwable) finalResult);
}
super.setResult(finalResult);
latch.countDown();
}
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
Address jobOwner = mapReduceService.getLocalAddress();
if (!mapReduceService.registerJobSupervisorCancellation(name, jobId, jobOwner)) {
return false;
}
JobSupervisor supervisor = mapReduceService.getJobSupervisor(name, jobId);
if (supervisor == null || !supervisor.isOwnerNode()) {
return false;
}
Exception exception = new CancellationException("Operation was cancelled by the user");
cancelled = supervisor.cancelAndNotify(exception);
return cancelled;
}
@Override
public boolean isCancelled() {
return cancelled;
}
@Override
public V get(long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
ValidationUtil.isNotNull(unit, "unit");
if (!latch.await(timeout, unit) || !isDone()) {
throw new TimeoutException("timeout reached");
}
return getResult();
}
@Override
public JobTracker getJobTracker() {
return jobTracker;
}
@Override
public String getName() {
return name;
}
@Override
public String getJobId() {
return jobId;
}
@Override
public ICompletableFuture<V> getCompletableFuture() {
JobSupervisor supervisor = mapReduceService.getJobSupervisor(name, jobId);
if (supervisor == null || !supervisor.isOwnerNode()) {
return null;
}
return this;
}
@Override
public JobProcessInformation getJobProcessInformation() {
JobSupervisor supervisor = mapReduceService.getJobSupervisor(name, jobId);
if (supervisor == null || !supervisor.isOwnerNode()) {
return null;
}
return new JobProcessInformationAdapter(supervisor.getJobProcessInformation());
}
/**
* This class is just an adapter for retrieving the JobProcess information
* from user codebase to prevent exposing the internal array.
*/
private static final class JobProcessInformationAdapter
implements JobProcessInformation {
private final JobProcessInformation processInformation;
private JobProcessInformationAdapter(JobProcessInformation processInformation) {
this.processInformation = processInformation;
}
@Override
public JobPartitionState[] getPartitionStates() {
JobPartitionState[] partitionStates = processInformation.getPartitionStates();
return Arrays.copyOf(partitionStates, partitionStates.length);
}
@Override
public int getProcessedRecords() {
return processInformation.getProcessedRecords();
}
}
}
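// A self-contained sketch (illustrative, not from the original file) of the latch-gated
// result pattern used by setResult()/get(timeout, unit) above: the value is published
// before the latch is released, so a caller that passes await() always observes the result.
class LatchGatedFutureSketch<V> {
    private final java.util.concurrent.CountDownLatch latch =
            new java.util.concurrent.CountDownLatch(1);
    private volatile V result;

    void setResult(V value) {
        result = value;     // publish the result first...
        latch.countDown();  // ...then release any waiting get() calls
    }

    V get(long timeout, java.util.concurrent.TimeUnit unit)
            throws InterruptedException, java.util.concurrent.TimeoutException {
        if (!latch.await(timeout, unit)) {
            throw new java.util.concurrent.TimeoutException("timeout reached");
        }
        return result;
    }
}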
| 1no label
|
hazelcast_src_main_java_com_hazelcast_mapreduce_impl_task_TrackableJobFuture.java
|
319 |
new Thread() {
public void run() {
map.lock(key);
map.lock(key);
lockedLatch.countDown();
}
}.start();
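// Hedged continuation sketch (assumed test scaffolding, not part of the original
// snippet): the asserting thread typically awaits lockedLatch and then releases the
// re-entrant lock in a single call with forceUnlock, which ignores owner and hold count:
//
//   assertTrue(lockedLatch.await(10, TimeUnit.SECONDS));
//   assertFalse(map.tryLock(key));  // still held (twice) by the background thread
//   map.forceUnlock(key);
//   assertFalse(map.isLocked(key));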
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapLockTest.java
|
1,813 |
@Component("blBasicFieldPersistenceProvider")
@Scope("prototype")
public class BasicFieldPersistenceProvider extends FieldPersistenceProviderAdapter {
protected static final Log LOG = LogFactory.getLog(BasicFieldPersistenceProvider.class);
protected boolean canHandlePersistence(PopulateValueRequest populateValueRequest, Serializable instance) {
BasicFieldMetadata metadata = populateValueRequest.getMetadata();
Property property = populateValueRequest.getProperty();
//don't handle map fields here - we'll get them in a separate provider
boolean response = detectBasicType(metadata, property);
if (!response) {
//we'll allow this provider to handle money filter mapping for persistence
response = metadata.getFieldType() == SupportedFieldType.MONEY;
}
return response;
}
protected boolean detectBasicType(BasicFieldMetadata metadata, Property property) {
return (metadata.getFieldType() == SupportedFieldType.BOOLEAN ||
metadata.getFieldType() == SupportedFieldType.DATE ||
metadata.getFieldType() == SupportedFieldType.INTEGER ||
metadata.getFieldType() == SupportedFieldType.DECIMAL ||
metadata.getFieldType() == SupportedFieldType.EMAIL ||
metadata.getFieldType() == SupportedFieldType.FOREIGN_KEY ||
metadata.getFieldType() == SupportedFieldType.ADDITIONAL_FOREIGN_KEY ||
metadata.getFieldType() == SupportedFieldType.STRING ||
metadata.getFieldType() == SupportedFieldType.ID) &&
(property == null ||
!property.getName().contains(FieldManager.MAPFIELDSEPARATOR));
}
protected boolean detectAdditionalSearchTypes(BasicFieldMetadata metadata, Property property) {
return (metadata.getFieldType() == SupportedFieldType.BROADLEAF_ENUMERATION ||
metadata.getFieldType() == SupportedFieldType.EXPLICIT_ENUMERATION ||
metadata.getFieldType() == SupportedFieldType.DATA_DRIVEN_ENUMERATION) &&
(property == null || !property.getName().contains(FieldManager.MAPFIELDSEPARATOR));
}
protected boolean canHandleExtraction(ExtractValueRequest extractValueRequest, Property property) {
BasicFieldMetadata metadata = extractValueRequest.getMetadata();
//don't handle map fields here - we'll get them in a separate provider
return detectBasicType(metadata, property);
}
protected boolean canHandleSearchMapping(AddSearchMappingRequest addSearchMappingRequest, List<FilterMapping> filterMappings) {
BasicFieldMetadata metadata = (BasicFieldMetadata) addSearchMappingRequest.getMergedProperties().get(addSearchMappingRequest.getPropertyName());
Property property = null;
//don't handle map fields here - we'll get them in a separate provider
boolean response = detectBasicType(metadata, property) || detectAdditionalSearchTypes(metadata, property);
if (!response) {
//we'll allow this provider to handle money filter mapping for searches
response = metadata.getFieldType() == SupportedFieldType.MONEY;
}
return response;
}
@Override
public FieldProviderResponse populateValue(PopulateValueRequest populateValueRequest, Serializable instance) {
if (!canHandlePersistence(populateValueRequest, instance)) {
return FieldProviderResponse.NOT_HANDLED;
}
try {
switch (populateValueRequest.getMetadata().getFieldType()) {
case BOOLEAN:
boolean v = Boolean.valueOf(populateValueRequest.getRequestedValue());
try {
populateValueRequest.getFieldManager().setFieldValue(instance,
populateValueRequest.getProperty().getName(), v);
} catch (IllegalArgumentException e) {
char c = v ? 'Y' : 'N';
populateValueRequest.getFieldManager().setFieldValue(instance,
populateValueRequest.getProperty().getName(), c);
}
break;
case DATE:
populateValueRequest.getFieldManager().setFieldValue(instance,
populateValueRequest.getProperty().getName(), populateValueRequest.getDataFormatProvider().getSimpleDateFormatter().parse(populateValueRequest.getRequestedValue()));
break;
case DECIMAL:
if (BigDecimal.class.isAssignableFrom(populateValueRequest.getReturnType())) {
populateValueRequest.getFieldManager().setFieldValue(instance,
populateValueRequest.getProperty().getName(), new BigDecimal(populateValueRequest.getRequestedValue()));
} else {
populateValueRequest.getFieldManager().setFieldValue(instance, populateValueRequest.getProperty().getName(), new Double(populateValueRequest.getRequestedValue()));
}
break;
case MONEY:
if (BigDecimal.class.isAssignableFrom(populateValueRequest.getReturnType())) {
populateValueRequest.getFieldManager().setFieldValue(instance, populateValueRequest.getProperty().getName(), new BigDecimal(populateValueRequest.getRequestedValue()));
} else if (Double.class.isAssignableFrom(populateValueRequest.getReturnType())) {
LOG.warn("The requested Money field is of type double and could result in a loss of precision." +
" Broadleaf recommends that the type of all Money fields are 'BigDecimal' in order to avoid" +
" this loss of precision that could occur.");
populateValueRequest.getFieldManager().setFieldValue(instance, populateValueRequest.getProperty().getName(), new Double(populateValueRequest.getRequestedValue()));
} else {
populateValueRequest.getFieldManager().setFieldValue(instance, populateValueRequest.getProperty().getName(), new Money(new BigDecimal(populateValueRequest.getRequestedValue())));
}
break;
case INTEGER:
if (int.class.isAssignableFrom(populateValueRequest.getReturnType()) || Integer.class.isAssignableFrom(populateValueRequest.getReturnType())) {
populateValueRequest.getFieldManager().setFieldValue(instance, populateValueRequest.getProperty().getName(), Integer.valueOf(populateValueRequest.getRequestedValue()));
} else if (byte.class.isAssignableFrom(populateValueRequest.getReturnType()) || Byte.class.isAssignableFrom(populateValueRequest.getReturnType())) {
populateValueRequest.getFieldManager().setFieldValue(instance, populateValueRequest.getProperty().getName(), Byte.valueOf(populateValueRequest.getRequestedValue()));
} else if (short.class.isAssignableFrom(populateValueRequest.getReturnType()) || Short.class.isAssignableFrom(populateValueRequest.getReturnType())) {
populateValueRequest.getFieldManager().setFieldValue(instance, populateValueRequest.getProperty().getName(), Short.valueOf(populateValueRequest.getRequestedValue()));
} else if (long.class.isAssignableFrom(populateValueRequest.getReturnType()) || Long.class.isAssignableFrom(populateValueRequest.getReturnType())) {
populateValueRequest.getFieldManager().setFieldValue(instance, populateValueRequest.getProperty().getName(), Long.valueOf(populateValueRequest.getRequestedValue()));
}
break;
case STRING:
case EMAIL:
populateValueRequest.getFieldManager().setFieldValue(instance, populateValueRequest.getProperty().getName(), populateValueRequest.getRequestedValue());
break;
case FOREIGN_KEY: {
Serializable foreignInstance;
if (StringUtils.isEmpty(populateValueRequest.getRequestedValue())) {
foreignInstance = null;
} else {
if (SupportedFieldType.INTEGER.toString().equals(populateValueRequest.getMetadata().getSecondaryType().toString())) {
foreignInstance = populateValueRequest.getPersistenceManager().getDynamicEntityDao().retrieve(Class.forName(populateValueRequest.getMetadata().getForeignKeyClass()), Long.valueOf(populateValueRequest.getRequestedValue()));
} else {
foreignInstance = populateValueRequest.getPersistenceManager().getDynamicEntityDao().retrieve(Class.forName(populateValueRequest.getMetadata().getForeignKeyClass()), populateValueRequest.getRequestedValue());
}
}
if (Collection.class.isAssignableFrom(populateValueRequest.getReturnType())) {
Collection collection;
try {
collection = (Collection) populateValueRequest.getFieldManager().getFieldValue(instance, populateValueRequest.getProperty().getName());
} catch (FieldNotAvailableException e) {
throw new IllegalArgumentException(e);
}
if (!collection.contains(foreignInstance)) {
collection.add(foreignInstance);
}
} else if (Map.class.isAssignableFrom(populateValueRequest.getReturnType())) {
throw new IllegalArgumentException("Map structures are not supported for foreign key fields.");
} else {
populateValueRequest.getFieldManager().setFieldValue(instance, populateValueRequest.getProperty().getName(), foreignInstance);
}
break;
}
case ADDITIONAL_FOREIGN_KEY: {
Serializable foreignInstance;
if (StringUtils.isEmpty(populateValueRequest.getRequestedValue())) {
foreignInstance = null;
} else {
if (SupportedFieldType.INTEGER.toString().equals(populateValueRequest.getMetadata().getSecondaryType().toString())) {
foreignInstance = populateValueRequest.getPersistenceManager().getDynamicEntityDao().retrieve(Class.forName(populateValueRequest.getMetadata().getForeignKeyClass()), Long.valueOf(populateValueRequest.getRequestedValue()));
} else {
foreignInstance = populateValueRequest.getPersistenceManager().getDynamicEntityDao().retrieve(Class.forName(populateValueRequest.getMetadata().getForeignKeyClass()), populateValueRequest.getRequestedValue());
}
}
if (Collection.class.isAssignableFrom(populateValueRequest.getReturnType())) {
Collection collection;
try {
collection = (Collection) populateValueRequest.getFieldManager().getFieldValue(instance, populateValueRequest.getProperty().getName());
} catch (FieldNotAvailableException e) {
throw new IllegalArgumentException(e);
}
if (!collection.contains(foreignInstance)) {
collection.add(foreignInstance);
}
} else if (Map.class.isAssignableFrom(populateValueRequest.getReturnType())) {
throw new IllegalArgumentException("Map structures are not supported for foreign key fields.");
} else {
populateValueRequest.getFieldManager().setFieldValue(instance, populateValueRequest.getProperty().getName(), foreignInstance);
}
break;
}
case ID:
if (populateValueRequest.getSetId()) {
switch (populateValueRequest.getMetadata().getSecondaryType()) {
case INTEGER:
populateValueRequest.getFieldManager().setFieldValue(instance, populateValueRequest.getProperty().getName(), Long.valueOf(populateValueRequest.getRequestedValue()));
break;
case STRING:
populateValueRequest.getFieldManager().setFieldValue(instance, populateValueRequest.getProperty().getName(), populateValueRequest.getRequestedValue());
break;
}
}
break;
}
} catch (Exception e) {
throw new PersistenceException(e);
}
return FieldProviderResponse.HANDLED;
}
@Override
public FieldProviderResponse extractValue(ExtractValueRequest extractValueRequest, Property property) throws PersistenceException {
if (!canHandleExtraction(extractValueRequest, property)) {
return FieldProviderResponse.NOT_HANDLED;
}
try {
if (extractValueRequest.getRequestedValue() != null) {
String val = null;
if (extractValueRequest.getMetadata().getForeignKeyCollection()) {
((BasicFieldMetadata) property.getMetadata()).setFieldType(extractValueRequest.getMetadata().getFieldType());
} else if (extractValueRequest.getMetadata().getFieldType().equals(SupportedFieldType.BOOLEAN) && extractValueRequest.getRequestedValue() instanceof Character) {
val = (extractValueRequest.getRequestedValue().equals('Y')) ? "true" : "false";
} else if (Date.class.isAssignableFrom(extractValueRequest.getRequestedValue().getClass())) {
val = extractValueRequest.getDataFormatProvider().getSimpleDateFormatter
().format((Date) extractValueRequest.getRequestedValue());
} else if (Timestamp.class.isAssignableFrom(extractValueRequest.getRequestedValue().getClass())) {
val = extractValueRequest.getDataFormatProvider().getSimpleDateFormatter
().format(new Date(((Timestamp) extractValueRequest.getRequestedValue()).getTime()));
} else if (Calendar.class.isAssignableFrom(extractValueRequest.getRequestedValue().getClass())) {
val = extractValueRequest.getDataFormatProvider().getSimpleDateFormatter
().format(((Calendar) extractValueRequest.getRequestedValue()).getTime());
} else if (Double.class.isAssignableFrom(extractValueRequest.getRequestedValue().getClass())) {
val = extractValueRequest.getDataFormatProvider().getDecimalFormatter().format(extractValueRequest.getRequestedValue());
} else if (BigDecimal.class.isAssignableFrom(extractValueRequest.getRequestedValue().getClass())) {
val = extractValueRequest.getDataFormatProvider().getDecimalFormatter().format(extractValueRequest.getRequestedValue());
} else if (extractValueRequest.getMetadata().getForeignKeyClass() != null) {
try {
val = extractValueRequest.getFieldManager().getFieldValue
(extractValueRequest.getRequestedValue(), extractValueRequest.getMetadata().getForeignKeyProperty()).toString();
//see if there's a name property and use it for the display value
String entityName = null;
if (extractValueRequest.getRequestedValue() instanceof AdminMainEntity) {
entityName = ((AdminMainEntity) extractValueRequest.getRequestedValue()).getMainEntityName();
}
Object temp = null;
if (!StringUtils.isEmpty(extractValueRequest.getMetadata().getForeignKeyDisplayValueProperty())) {
String nameProperty = extractValueRequest.getMetadata().getForeignKeyDisplayValueProperty();
try {
temp = extractValueRequest.getFieldManager().getFieldValue(extractValueRequest.getRequestedValue(), nameProperty);
} catch (FieldNotAvailableException e) {
//do nothing
}
}
if (temp == null && StringUtils.isEmpty(entityName)) {
try {
temp = extractValueRequest.getFieldManager().getFieldValue(extractValueRequest.getRequestedValue(), "name");
} catch (FieldNotAvailableException e) {
//do nothing
}
}
if (temp != null) {
extractValueRequest.setDisplayVal(temp.toString());
} else if (!StringUtils.isEmpty(entityName)) {
extractValueRequest.setDisplayVal(entityName);
}
} catch (FieldNotAvailableException e) {
throw new IllegalArgumentException(e);
}
} else {
val = extractValueRequest.getRequestedValue().toString();
}
property.setValue(val);
property.setDisplayValue(extractValueRequest.getDisplayVal());
}
} catch (IllegalAccessException e) {
throw new PersistenceException(e);
}
return FieldProviderResponse.HANDLED;
}
@Override
public FieldProviderResponse addSearchMapping(AddSearchMappingRequest addSearchMappingRequest, List<FilterMapping> filterMappings) {
if (!canHandleSearchMapping(addSearchMappingRequest, filterMappings)) {
return FieldProviderResponse.NOT_HANDLED;
}
Class clazz;
try {
clazz = Class.forName(addSearchMappingRequest.getMergedProperties().get(addSearchMappingRequest
.getPropertyName()).getInheritedFromType());
} catch (ClassNotFoundException e) {
throw new PersistenceException(e);
}
Field field = addSearchMappingRequest.getFieldManager().getField(clazz, addSearchMappingRequest.getPropertyName());
Class<?> targetType = null;
if (field != null) {
targetType = field.getType();
}
BasicFieldMetadata metadata = (BasicFieldMetadata) addSearchMappingRequest.getMergedProperties().get
(addSearchMappingRequest.getPropertyName());
FilterAndSortCriteria fasc = addSearchMappingRequest.getRequestedCto().get(addSearchMappingRequest.getPropertyName());
FilterMapping filterMapping = new FilterMapping()
.withInheritedFromClass(clazz)
.withFullPropertyName(addSearchMappingRequest.getPropertyName())
.withFilterValues(fasc.getFilterValues())
.withSortDirection(fasc.getSortDirection());
filterMappings.add(filterMapping);
if (fasc.hasSpecialFilterValue()) {
filterMapping.setDirectFilterValues(new EmptyFilterValues());
// Handle special values on a case by case basis
List<String> specialValues = fasc.getSpecialFilterValues();
if (specialValues.contains(FilterAndSortCriteria.IS_NULL_FILTER_VALUE)) {
filterMapping.setRestriction(new Restriction().withPredicateProvider(new IsNullPredicateProvider()));
}
} else {
switch (metadata.getFieldType()) {
case BOOLEAN:
if (targetType == null || targetType.equals(Boolean.class) || targetType.equals(boolean.class)) {
filterMapping.setRestriction(addSearchMappingRequest.getRestrictionFactory().getRestriction(RestrictionType.BOOLEAN.getType(), addSearchMappingRequest.getPropertyName()));
} else {
filterMapping.setRestriction(addSearchMappingRequest.getRestrictionFactory().getRestriction(RestrictionType.CHARACTER.getType(), addSearchMappingRequest.getPropertyName()));
}
break;
case DATE:
filterMapping.setRestriction(addSearchMappingRequest.getRestrictionFactory().getRestriction(RestrictionType.DATE.getType(), addSearchMappingRequest.getPropertyName()));
break;
case DECIMAL:
case MONEY:
filterMapping.setRestriction(addSearchMappingRequest.getRestrictionFactory().getRestriction(RestrictionType.DECIMAL.getType(), addSearchMappingRequest.getPropertyName()));
break;
case INTEGER:
filterMapping.setRestriction(addSearchMappingRequest.getRestrictionFactory().getRestriction(RestrictionType.LONG.getType(), addSearchMappingRequest.getPropertyName()));
break;
case BROADLEAF_ENUMERATION:
filterMapping.setRestriction(addSearchMappingRequest.getRestrictionFactory().getRestriction(RestrictionType.STRING_EQUAL.getType(), addSearchMappingRequest.getPropertyName()));
break;
default:
filterMapping.setRestriction(addSearchMappingRequest.getRestrictionFactory().getRestriction(RestrictionType.STRING_LIKE.getType(), addSearchMappingRequest.getPropertyName()));
break;
case FOREIGN_KEY:
if (!addSearchMappingRequest.getRequestedCto().get(addSearchMappingRequest.getPropertyName()).getFilterValues().isEmpty()) {
ForeignKey foreignKey = (ForeignKey) addSearchMappingRequest.getPersistencePerspective().getPersistencePerspectiveItems().get
(PersistencePerspectiveItemType.FOREIGNKEY);
if (metadata.getForeignKeyCollection()) {
if (ForeignKeyRestrictionType.COLLECTION_SIZE_EQ.toString().equals(foreignKey
.getRestrictionType().toString())) {
filterMapping.setRestriction(addSearchMappingRequest.getRestrictionFactory()
.getRestriction(RestrictionType.COLLECTION_SIZE_EQUAL.getType(), addSearchMappingRequest.getPropertyName()));
filterMapping.setFieldPath(new FieldPath());
} else {
filterMapping.setRestriction(addSearchMappingRequest.getRestrictionFactory().getRestriction(RestrictionType.LONG.getType(), addSearchMappingRequest.getPropertyName()));
filterMapping.setFieldPath(new FieldPath().withTargetProperty(addSearchMappingRequest.getPropertyName() + "." + metadata.getForeignKeyProperty()));
}
} else if (addSearchMappingRequest.getRequestedCto().get(addSearchMappingRequest.getPropertyName())
.getFilterValues().get(0) == null || "null".equals(addSearchMappingRequest.getRequestedCto().get
(addSearchMappingRequest.getPropertyName()).getFilterValues().get(0))) {
filterMapping.setRestriction(addSearchMappingRequest.getRestrictionFactory().getRestriction(RestrictionType.IS_NULL_LONG.getType(), addSearchMappingRequest.getPropertyName()));
} else if (metadata.getSecondaryType() == SupportedFieldType.STRING) {
filterMapping.setRestriction(addSearchMappingRequest.getRestrictionFactory().getRestriction(RestrictionType.STRING_EQUAL.getType(), addSearchMappingRequest.getPropertyName()));
filterMapping.setFieldPath(new FieldPath().withTargetProperty(addSearchMappingRequest.getPropertyName() + "." + metadata.getForeignKeyProperty()));
} else {
filterMapping.setRestriction(addSearchMappingRequest.getRestrictionFactory().getRestriction(RestrictionType.LONG_EQUAL.getType(), addSearchMappingRequest.getPropertyName()));
filterMapping.setFieldPath(new FieldPath().withTargetProperty(addSearchMappingRequest.getPropertyName() + "." + metadata.getForeignKeyProperty()));
}
}
break;
case ADDITIONAL_FOREIGN_KEY:
int additionalForeignKeyIndexPosition = Arrays.binarySearch(addSearchMappingRequest
.getPersistencePerspective()
.getAdditionalForeignKeys(), new ForeignKey(addSearchMappingRequest.getPropertyName(),
null, null),
new Comparator<ForeignKey>() {
@Override
public int compare(ForeignKey o1, ForeignKey o2) {
return o1.getManyToField().compareTo(o2.getManyToField());
}
});
ForeignKey foreignKey = null;
if (additionalForeignKeyIndexPosition >= 0) {
foreignKey = addSearchMappingRequest.getPersistencePerspective().getAdditionalForeignKeys()[additionalForeignKeyIndexPosition];
}
// in the case of a to-one lookup, an explicit ForeignKey is not passed in. The system should then
// default to just using a ForeignKeyRestrictionType.ID_EQ
if (metadata.getForeignKeyCollection()) {
if (ForeignKeyRestrictionType.COLLECTION_SIZE_EQ.toString().equals(foreignKey
.getRestrictionType().toString())) {
filterMapping.setRestriction(addSearchMappingRequest.getRestrictionFactory()
.getRestriction(RestrictionType.COLLECTION_SIZE_EQUAL.getType(),
addSearchMappingRequest.getPropertyName()));
filterMapping.setFieldPath(new FieldPath());
} else {
filterMapping.setRestriction(addSearchMappingRequest.getRestrictionFactory().getRestriction(RestrictionType.LONG.getType(), addSearchMappingRequest.getPropertyName()));
filterMapping.setFieldPath(new FieldPath().withTargetProperty(addSearchMappingRequest.getPropertyName() + "." + metadata.getForeignKeyProperty()));
}
} else if (CollectionUtils.isEmpty(addSearchMappingRequest.getRequestedCto().get(addSearchMappingRequest.getPropertyName()).getFilterValues()) ||
addSearchMappingRequest.getRequestedCto().get(addSearchMappingRequest.getPropertyName()).getFilterValues().get(0) == null || "null".equals(addSearchMappingRequest.getRequestedCto().get
(addSearchMappingRequest.getPropertyName()).getFilterValues().get(0))) {
filterMapping.setRestriction(addSearchMappingRequest.getRestrictionFactory().getRestriction(RestrictionType.IS_NULL_LONG.getType(), addSearchMappingRequest.getPropertyName()));
} else if (metadata.getSecondaryType() == SupportedFieldType.STRING) {
filterMapping.setRestriction(addSearchMappingRequest.getRestrictionFactory().getRestriction(RestrictionType.STRING_EQUAL.getType(), addSearchMappingRequest.getPropertyName()));
filterMapping.setFieldPath(new FieldPath().withTargetProperty(addSearchMappingRequest.getPropertyName() + "." + metadata.getForeignKeyProperty()));
} else {
filterMapping.setRestriction(addSearchMappingRequest.getRestrictionFactory().getRestriction(RestrictionType.LONG_EQUAL.getType(), addSearchMappingRequest.getPropertyName()));
filterMapping.setFieldPath(new FieldPath().withTargetProperty(addSearchMappingRequest.getPropertyName() + "." + metadata.getForeignKeyProperty()));
}
break;
case ID:
switch (metadata.getSecondaryType()) {
case INTEGER:
filterMapping.setRestriction(addSearchMappingRequest.getRestrictionFactory().getRestriction(RestrictionType.LONG_EQUAL.getType(), addSearchMappingRequest.getPropertyName()));
break;
case STRING:
filterMapping.setRestriction(addSearchMappingRequest.getRestrictionFactory().getRestriction(RestrictionType.STRING_EQUAL.getType(), addSearchMappingRequest.getPropertyName()));
break;
}
break;
}
}
return FieldProviderResponse.HANDLED;
}
@Override
public int getOrder() {
return FieldPersistenceProvider.BASIC;
}
}
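// A minimal standalone sketch (hypothetical names, not part of the provider) of the
// BOOLEAN population fallback above: try the field as a boolean first and, when the
// entity models the flag as a character column, fall back to 'Y'/'N'.
class BooleanPopulateSketch {
    static void populate(Object instance, java.lang.reflect.Field field, String requestedValue)
            throws IllegalAccessException {
        boolean v = Boolean.valueOf(requestedValue);
        field.setAccessible(true);
        try {
            field.set(instance, v);             // works for boolean/Boolean fields
        } catch (IllegalArgumentException e) {
            field.set(instance, v ? 'Y' : 'N'); // char-mapped boolean column
        }
    }
}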
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_service_persistence_module_provider_BasicFieldPersistenceProvider.java
|
1,487 |
@SuppressWarnings("unchecked")
public class OObjectDatabaseTx extends ODatabasePojoAbstract<Object> implements ODatabaseObject, OUserObject2RecordHandler {
public static final String TYPE = "object";
protected ODictionary<Object> dictionary;
protected OEntityManager entityManager;
protected boolean saveOnlyDirty;
protected boolean lazyLoading;
protected boolean automaticSchemaGeneration;
protected OMetadataObject metadata;
public OObjectDatabaseTx(final String iURL) {
super(new ODatabaseDocumentTx(iURL));
underlying.setDatabaseOwner(this);
init();
}
public <T> T newInstance(final Class<T> iType) {
return (T) newInstance(iType.getSimpleName(), null, new Object[0]);
}
public <T> T newInstance(final Class<T> iType, Object... iArgs) {
return (T) newInstance(iType.getSimpleName(), null, iArgs);
}
public <RET> RET newInstance(String iClassName) {
return (RET) newInstance(iClassName, null, new Object[0]);
}
@Override
public <THISDB extends ODatabase> THISDB open(String iUserName, String iUserPassword) {
super.open(iUserName, iUserPassword);
entityManager.registerEntityClass(OUser.class);
entityManager.registerEntityClass(ORole.class);
metadata = new OMetadataObject(underlying.getMetadata());
return (THISDB) this;
}
@Override
public OMetadataObject getMetadata() {
checkOpeness();
if (metadata == null)
metadata = new OMetadataObject(underlying.getMetadata());
return metadata;
}
/**
* Creates a new POJO by its class name. Be sure to have called registerEntityClasses(), declaring the packages that contain the
* entity classes.
*
* @see OEntityManager#registerEntityClasses(String)
*/
public <RET extends Object> RET newInstance(final String iClassName, final Object iEnclosingClass, Object... iArgs) {
checkSecurity(ODatabaseSecurityResources.CLASS, ORole.PERMISSION_CREATE, iClassName);
try {
Class<?> entityClass = entityManager.getEntityClass(iClassName);
if (entityClass != null) {
RET enhanced = (RET) OObjectEntityEnhancer.getInstance().getProxiedInstance(entityManager.getEntityClass(iClassName),
iEnclosingClass, underlying.newInstance(iClassName), null, iArgs);
return (RET) enhanced;
} else {
throw new OSerializationException("Type " + iClassName
+ " cannot be serialized because is not part of registered entities. To fix this error register this class");
}
} catch (Exception e) {
OLogManager.instance().error(this, "Error on creating object of class " + iClassName, e, ODatabaseException.class);
}
return null;
}
/**
* Creates a new POJO by its class name. Be sure to have called registerEntityClasses(), declaring the packages that contain the
* entity classes.
*
* @see OEntityManager#registerEntityClasses(String)
*/
public <RET extends Object> RET newInstance(final String iClassName, final Object iEnclosingClass, ODocument iDocument,
Object... iArgs) {
checkSecurity(ODatabaseSecurityResources.CLASS, ORole.PERMISSION_CREATE, iClassName);
try {
Class<?> entityClass = entityManager.getEntityClass(iClassName);
if (entityClass != null) {
RET enhanced = (RET) OObjectEntityEnhancer.getInstance().getProxiedInstance(entityManager.getEntityClass(iClassName),
iEnclosingClass, iDocument, null, iArgs);
return (RET) enhanced;
} else {
throw new OSerializationException("Type " + iClassName
+ " cannot be serialized because is not part of registered entities. To fix this error register this class");
}
} catch (Exception e) {
OLogManager.instance().error(this, "Error on creating object of class " + iClassName, e, ODatabaseException.class);
}
return null;
}
public <RET> OObjectIteratorClass<RET> browseClass(final Class<RET> iClusterClass) {
return browseClass(iClusterClass, true);
}
public <RET> OObjectIteratorClass<RET> browseClass(final Class<RET> iClusterClass, final boolean iPolymorphic) {
if (iClusterClass == null)
return null;
return browseClass(iClusterClass.getSimpleName(), iPolymorphic);
}
public <RET> OObjectIteratorClass<RET> browseClass(final String iClassName) {
return browseClass(iClassName, true);
}
public <RET> OObjectIteratorClass<RET> browseClass(final String iClassName, final boolean iPolymorphic) {
checkOpeness();
checkSecurity(ODatabaseSecurityResources.CLASS, ORole.PERMISSION_READ, iClassName);
return new OObjectIteratorClass<RET>(this, (ODatabaseRecordAbstract) getUnderlying().getUnderlying(), iClassName, iPolymorphic);
}
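// Hedged usage sketch (illustrative names, not part of the original file):
//
//   for (Account account : db.browseClass(Account.class)) {
//       System.out.println(account.getName());
//   }
//
// With iPolymorphic = true (the default used above), instances of Account subclasses
// are returned as well.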
public <RET> OObjectIteratorCluster<RET> browseCluster(final String iClusterName) {
checkOpeness();
checkSecurity(ODatabaseSecurityResources.CLUSTER, ORole.PERMISSION_READ, iClusterName);
return (OObjectIteratorCluster<RET>) new OObjectIteratorCluster<Object>(this, (ODatabaseRecordAbstract) getUnderlying()
.getUnderlying(), getClusterIdByName(iClusterName));
}
public <RET> RET load(final Object iPojo) {
return (RET) load(iPojo, null);
}
public <RET> RET reload(final Object iPojo) {
return (RET) reload(iPojo, null, true);
}
public <RET> RET reload(final Object iPojo, final boolean iIgnoreCache) {
return (RET) reload(iPojo, null, iIgnoreCache);
}
public <RET> RET reload(Object iPojo, final String iFetchPlan, final boolean iIgnoreCache) {
checkOpeness();
if (iPojo == null)
return null;
// GET THE ASSOCIATED DOCUMENT
final ODocument record = getRecordByUserObject(iPojo, true);
underlying.reload(record, iFetchPlan, iIgnoreCache);
iPojo = stream2pojo(record, iPojo, iFetchPlan, true);
return (RET) iPojo;
}
public <RET> RET load(final Object iPojo, final String iFetchPlan) {
return (RET) load(iPojo, iFetchPlan, false);
}
@Override
public void attach(final Object iPojo) {
OObjectEntitySerializer.attach(iPojo, this);
}
public <RET> RET attachAndSave(final Object iPojo) {
attach(iPojo);
return (RET) save(iPojo);
}
@Override
/**
* Method that detaches all fields contained in the document to the given object. By default it returns a proxied instance. To get
* a detached non-proxied instance @see {@link OObjectEntitySerializer.detach(T o, ODatabaseObject db, boolean
* returnNonProxiedInstance)}
*
* @param <RET>
* @param iPojo
* :- the object to detach
* @return the detached object
*/
public <RET> RET detach(final Object iPojo) {
return (RET) OObjectEntitySerializer.detach(iPojo, this);
}
/**
* Method that detaches all fields contained in the document to the given object.
*
* @param <RET>
* @param iPojo
* :- the object to detach
* @param returnNonProxiedInstance
* :- defines whether the returned object will be a proxied instance or not. If set to TRUE and the object does not contain @Id
* and @Version fields it could produce data replication
* @return the object serialized or with detached data
*/
public <RET> RET detach(final Object iPojo, boolean returnNonProxiedInstance) {
return (RET) OObjectEntitySerializer.detach(iPojo, this, returnNonProxiedInstance);
}
/**
* Method that detaches all fields contained in the document to the given object, recursively over the whole object tree. This may
* throw a {@link StackOverflowError} with a big object tree. To avoid it, set the thread stack size with the -Xss Java option.
*
* @param <RET>
* @param iPojo
* :- the object to detach
* @param returnNonProxiedInstance
* :- defines whether the returned object will be a proxied instance or not. If set to TRUE and the object does not contain @Id
* and @Version fields it could produce data replication
* @return the object serialized or with detached data
*/
public <RET> RET detachAll(final Object iPojo, boolean returnNonProxiedInstance) {
return (RET) OObjectEntitySerializer.detachAll(iPojo, this, returnNonProxiedInstance);
}
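// Hedged usage sketch (illustrative names, not part of the original file):
//
//   Customer proxied = db.load(rid);
//   Customer plain = db.detachAll(proxied, true); // deep, non-proxied copy, usable after close()
//
// detach(...) only detaches the first level; detachAll(...) walks the whole tree and
// can therefore hit the StackOverflowError caveat documented above.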
public <RET> RET load(final Object iPojo, final String iFetchPlan, final boolean iIgnoreCache) {
return (RET) load(iPojo, iFetchPlan, iIgnoreCache, false);
}
@Override
public <RET> RET load(Object iPojo, String iFetchPlan, boolean iIgnoreCache, boolean loadTombstone) {
checkOpeness();
if (iPojo == null)
return null;
// GET THE ASSOCIATED DOCUMENT
ODocument record = getRecordByUserObject(iPojo, true);
try {
record.setInternalStatus(ORecordElement.STATUS.UNMARSHALLING);
record = underlying.load(record, iFetchPlan, iIgnoreCache, loadTombstone);
return (RET) stream2pojo(record, iPojo, iFetchPlan);
} finally {
record.setInternalStatus(ORecordElement.STATUS.LOADED);
}
}
public <RET> RET load(final ORID iRecordId) {
return (RET) load(iRecordId, null);
}
public <RET> RET load(final ORID iRecordId, final String iFetchPlan) {
return (RET) load(iRecordId, iFetchPlan, false);
}
public <RET> RET load(final ORID iRecordId, final String iFetchPlan, final boolean iIgnoreCache) {
return (RET) load(iRecordId, iFetchPlan, iIgnoreCache, false);
}
@Override
public <RET> RET load(ORID iRecordId, String iFetchPlan, boolean iIgnoreCache, boolean loadTombstone) {
checkOpeness();
if (iRecordId == null)
return null;
// GET THE ASSOCIATED DOCUMENT
final ODocument record = (ODocument) underlying.load(iRecordId, iFetchPlan, iIgnoreCache, loadTombstone);
if (record == null)
return null;
return (RET) OObjectEntityEnhancer.getInstance().getProxiedInstance(record.getClassName(), entityManager, record, null);
}
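// Hedged usage sketch (illustrative, not part of the original file):
//
//   Account account = db.load(new ORecordId("#12:0"), "*:-1", false);
//
// "*:-1" is a fetch plan that eagerly loads the whole connected graph; passing null
// falls back to the configured lazy loading.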
/**
* Saves an object to the database in synchronous mode. First it checks whether the object is new or not. In case it's new a new
* ODocument is created and bound to the object, otherwise the ODocument is retrieved and updated. The object is introspected
* using Java reflection to extract the field values. <br/>
* If a multi value (array, collection or map of objects) is passed, then each single object is stored separately.
*/
public <RET> RET save(final Object iContent) {
return (RET) save(iContent, (String) null, OPERATION_MODE.SYNCHRONOUS, false, null, null);
}
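// Hedged usage sketch (illustrative names, not part of the original file):
//
//   Account account = db.newInstance(Account.class);
//   account.setName("Luke");
//   account = db.save(account); // keep the returned proxy: new records acquire their identity here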
/**
* Saves an object to the database, specifying the operation mode. First it checks whether the object is new or not. In case it's
* new a new ODocument is created and bound to the object, otherwise the ODocument is retrieved and updated. The object is
* introspected using Java reflection to extract the field values. <br/>
* If a multi value (array, collection or map of objects) is passed, then each single object is stored separately.
*/
public <RET> RET save(final Object iContent, OPERATION_MODE iMode, boolean iForceCreate,
final ORecordCallback<? extends Number> iRecordCreatedCallback, ORecordCallback<ORecordVersion> iRecordUpdatedCallback) {
return (RET) save(iContent, null, iMode, false, iRecordCreatedCallback, iRecordUpdatedCallback);
}
/**
* Saves an object to the database in synchronous mode, forcing the record cluster where to store it. First it checks whether the
* object is new or not. In case it's new a new ODocument is created and bound to the object, otherwise the ODocument is retrieved
* and updated. The object is introspected using Java reflection to extract the field values. <br/>
* If a multi value (array, collection or map of objects) is passed, then each single object is stored separately.
*
* Before the specified cluster is used, a check is made to verify that it is allowed and present among the configured clusters,
* and that the record is valid according to the constraints declared in the schema.
*
* @see ORecordSchemaAware#validate()
*/
public <RET> RET save(final Object iPojo, final String iClusterName) {
return (RET) save(iPojo, iClusterName, OPERATION_MODE.SYNCHRONOUS, false, null, null);
}
@Override
public boolean updatedReplica(Object iPojo) {
OSerializationThreadLocal.INSTANCE.get().clear();
// GET THE ASSOCIATED DOCUMENT
final Object proxiedObject = OObjectEntitySerializer.serializeObject(iPojo, this);
final ODocument record = getRecordByUserObject(proxiedObject, true);
boolean result;
try {
record.setInternalStatus(com.orientechnologies.orient.core.db.record.ORecordElement.STATUS.MARSHALLING);
result = underlying.updatedReplica(record);
((OObjectProxyMethodHandler) ((ProxyObject) proxiedObject).getHandler()).updateLoadedFieldMap(proxiedObject);
// RE-REGISTER FOR NEW RECORDS SINCE THE ID HAS CHANGED
registerUserObject(proxiedObject, record);
} finally {
record.setInternalStatus(com.orientechnologies.orient.core.db.record.ORecordElement.STATUS.LOADED);
}
return result;
}
/**
* Saves an object to the database, forcing the record cluster where to store it. First it checks whether the object is new or
* not. In case it's new a new ODocument is created and bound to the object, otherwise the ODocument is retrieved and updated. The
* object is introspected using Java reflection to extract the field values. <br/>
* If a multi value (array, collection or map of objects) is passed, then each single object is stored separately.
*
* Before the specified cluster is used, a check is made to verify that it is allowed and present among the configured clusters,
* and that the record is valid according to the constraints declared in the schema.
*
* @see ORecordSchemaAware#validate()
*/
public <RET> RET save(final Object iPojo, final String iClusterName, OPERATION_MODE iMode, boolean iForceCreate,
final ORecordCallback<? extends Number> iRecordCreatedCallback, ORecordCallback<ORecordVersion> iRecordUpdatedCallback) {
checkOpeness();
if (iPojo == null)
return (RET) iPojo;
else if (OMultiValue.isMultiValue(iPojo)) {
// MULTI VALUE OBJECT: STORE SINGLE POJOS
for (Object pojo : OMultiValue.getMultiValueIterable(iPojo)) {
save(pojo, iClusterName);
}
return (RET) iPojo;
} else {
OSerializationThreadLocal.INSTANCE.get().clear();
// GET THE ASSOCIATED DOCUMENT
final Object proxiedObject = OObjectEntitySerializer.serializeObject(iPojo, this);
final ODocument record = getRecordByUserObject(proxiedObject, true);
try {
record.setInternalStatus(ORecordElement.STATUS.MARSHALLING);
if (!saveOnlyDirty || record.isDirty()) {
// REGISTER BEFORE TO SERIALIZE TO AVOID PROBLEMS WITH CIRCULAR DEPENDENCY
// registerUserObject(iPojo, record);
deleteOrphans((((OObjectProxyMethodHandler) ((ProxyObject) proxiedObject).getHandler())));
ODocument savedRecord = underlying.save(record, iClusterName, iMode, iForceCreate, iRecordCreatedCallback,
iRecordUpdatedCallback);
((OObjectProxyMethodHandler) ((ProxyObject) proxiedObject).getHandler()).setDoc(savedRecord);
((OObjectProxyMethodHandler) ((ProxyObject) proxiedObject).getHandler()).updateLoadedFieldMap(proxiedObject);
// RE-REGISTER FOR NEW RECORDS SINCE THE ID HAS CHANGED
registerUserObject(proxiedObject, record);
}
} finally {
record.setInternalStatus(ORecordElement.STATUS.LOADED);
}
return (RET) proxiedObject;
}
}
public ODatabaseObject delete(final Object iPojo) {
checkOpeness();
if (iPojo == null)
return this;
ODocument record = getRecordByUserObject(iPojo, false);
if (record == null) {
final ORecordId rid = OObjectSerializerHelper.getObjectID(this, iPojo);
if (rid == null)
throw new OObjectNotDetachedException("Cannot retrieve the object's ID for '" + iPojo + "' because has not been detached");
record = (ODocument) underlying.load(rid);
}
deleteCascade(record);
underlying.delete(record);
if (getTransaction() instanceof OTransactionNoTx)
unregisterPojo(iPojo, record);
return this;
}
@Override
public ODatabaseObject delete(final ORID iRID) {
checkOpeness();
if (iRID == null)
return this;
final ORecordInternal<?> record = iRID.getRecord();
if (record instanceof ODocument) {
Object iPojo = getUserObjectByRecord(record, null);
deleteCascade((ODocument) record);
underlying.delete(record);
if (getTransaction() instanceof OTransactionNoTx)
unregisterPojo(iPojo, (ODocument) record);
}
return this;
}
@Override
public ODatabaseObject delete(final ORID iRID, final ORecordVersion iVersion) {
deleteRecord(iRID, iVersion, false);
return this;
}
@Override
public ODatabaseComplex<Object> cleanOutRecord(ORID iRID, ORecordVersion iVersion) {
deleteRecord(iRID, iVersion, true);
return this;
}
private boolean deleteRecord(ORID iRID, ORecordVersion iVersion, boolean prohibitTombstones) {
checkOpeness();
if (iRID == null)
return true;
ODocument record = iRID.getRecord();
if (record != null) {
Object iPojo = getUserObjectByRecord(record, null);
deleteCascade(record);
if (prohibitTombstones)
underlying.cleanOutRecord(iRID, iVersion);
else
underlying.delete(iRID, iVersion);
if (getTransaction() instanceof OTransactionNoTx)
unregisterPojo(iPojo, record);
}
return false;
}
protected void deleteCascade(final ODocument record) {
if (record == null)
return;
List<String> toDeleteCascade = OObjectEntitySerializer.getCascadeDeleteFields(record.getClassName());
if (toDeleteCascade != null) {
for (String field : toDeleteCascade) {
Object toDelete = record.field(field);
if (toDelete instanceof OIdentifiable) {
// instanceof already excludes null, so no extra null check is needed here
delete(((OIdentifiable) toDelete).getIdentity());
} else if (toDelete instanceof Collection) {
for (OIdentifiable cascadeRecord : ((Collection<OIdentifiable>) toDelete)) {
if (cascadeRecord != null)
delete(((OIdentifiable) cascadeRecord).getIdentity());
}
} else if (toDelete instanceof Map) {
for (OIdentifiable cascadeRecord : ((Map<Object, OIdentifiable>) toDelete).values()) {
if (cascadeRecord != null)
delete(((OIdentifiable) cascadeRecord).getIdentity());
}
}
}
}
}
public long countClass(final String iClassName) {
checkOpeness();
return underlying.countClass(iClassName);
}
public long countClass(final Class<?> iClass) {
checkOpeness();
return underlying.countClass(iClass.getSimpleName());
}
public ODictionary<Object> getDictionary() {
checkOpeness();
if (dictionary == null)
dictionary = new ODictionaryWrapper(this, underlying.getDictionary().getIndex());
return dictionary;
}
@Override
public ODatabasePojoAbstract<Object> commit() {
try {
// BY PASS DOCUMENT DB
((ODatabaseRecordTx) underlying.getUnderlying()).commit();
if (getTransaction().getAllRecordEntries() != null) {
// UPDATE ID & VERSION FOR ALL THE RECORDS
Object pojo = null;
for (ORecordOperation entry : getTransaction().getAllRecordEntries()) {
switch (entry.type) {
case ORecordOperation.CREATED:
case ORecordOperation.UPDATED:
break;
case ORecordOperation.DELETED:
final ORecordInternal<?> rec = entry.getRecord();
if (rec instanceof ODocument)
unregisterPojo(pojo, (ODocument) rec);
break;
}
}
}
} finally {
getTransaction().close();
}
return this;
}
@Override
public ODatabasePojoAbstract<Object> rollback() {
try {
// COPY ALL TX ENTRIES
final List<ORecordOperation> newEntries;
if (getTransaction().getCurrentRecordEntries() != null) {
newEntries = new ArrayList<ORecordOperation>();
for (ORecordOperation entry : getTransaction().getCurrentRecordEntries())
if (entry.type == ORecordOperation.CREATED)
newEntries.add(entry);
} else
newEntries = null;
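// NOTE: the CREATED entries copied above are gathered before the rollback
// invalidates the transaction, but they are not referenced afterwards here.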
// BY PASS DOCUMENT DB
((ODatabaseRecordTx) underlying.getUnderlying()).rollback();
} finally {
getTransaction().close();
}
return this;
}
public OEntityManager getEntityManager() {
return entityManager;
}
@Override
public ODatabaseDocument getUnderlying() {
return underlying;
}
/**
* Returns the version number of the object. The version starts at 0 and is assigned on creation.
*
* @param iPojo
* User object
*/
@Override
public ORecordVersion getVersion(final Object iPojo) {
checkOpeness();
final ODocument record = getRecordByUserObject(iPojo, false);
if (record != null)
return record.getRecordVersion();
return OObjectSerializerHelper.getObjectVersion(iPojo);
}
/**
* Returns the object unique identity.
*
* @param iPojo
* User object
*/
@Override
public ORID getIdentity(final Object iPojo) {
checkOpeness();
if (iPojo instanceof OIdentifiable)
return ((OIdentifiable) iPojo).getIdentity();
final ODocument record = getRecordByUserObject(iPojo, false);
if (record != null)
return record.getIdentity();
return OObjectSerializerHelper.getObjectID(this, iPojo);
}
public boolean isSaveOnlyDirty() {
return saveOnlyDirty;
}
public void setSaveOnlyDirty(boolean saveOnlyDirty) {
this.saveOnlyDirty = saveOnlyDirty;
}
public boolean isAutomaticSchemaGeneration() {
return automaticSchemaGeneration;
}
public void setAutomaticSchemaGeneration(boolean automaticSchemaGeneration) {
this.automaticSchemaGeneration = automaticSchemaGeneration;
}
public Object newInstance() {
checkOpeness();
return new ODocument();
}
public <DBTYPE extends ODatabase> DBTYPE checkSecurity(final String iResource, final byte iOperation) {
return (DBTYPE) underlying.checkSecurity(iResource, iOperation);
}
public <DBTYPE extends ODatabase> DBTYPE checkSecurity(final String iResource, final int iOperation, Object iResourceSpecific) {
return (DBTYPE) underlying.checkSecurity(iResource, iOperation, iResourceSpecific);
}
public <DBTYPE extends ODatabase> DBTYPE checkSecurity(final String iResource, final int iOperation, Object... iResourcesSpecific) {
return (DBTYPE) underlying.checkSecurity(iResource, iOperation, iResourcesSpecific);
}
@Override
public ODocument pojo2Stream(final Object iPojo, final ODocument iRecord) {
if (iPojo instanceof ProxyObject) {
return ((OObjectProxyMethodHandler) ((ProxyObject) iPojo).getHandler()).getDoc();
}
return OObjectSerializerHelper.toStream(iPojo, iRecord, getEntityManager(),
getMetadata().getSchema().getClass(iPojo.getClass().getSimpleName()), this, this, saveOnlyDirty);
}
@Override
public Object stream2pojo(ODocument iRecord, final Object iPojo, final String iFetchPlan) {
return stream2pojo(iRecord, iPojo, iFetchPlan, false);
}
public Object stream2pojo(ODocument iRecord, final Object iPojo, final String iFetchPlan, boolean iReload) {
if (iRecord.getInternalStatus() == ORecordElement.STATUS.NOT_LOADED)
iRecord = (ODocument) iRecord.load();
if (iReload) {
if (iPojo != null) {
if (iPojo instanceof Proxy) {
((OObjectProxyMethodHandler) ((ProxyObject) iPojo).getHandler()).setDoc(iRecord);
((OObjectProxyMethodHandler) ((ProxyObject) iPojo).getHandler()).updateLoadedFieldMap(iPojo);
return iPojo;
} else
return OObjectEntityEnhancer.getInstance().getProxiedInstance(iPojo.getClass(), iRecord);
} else
return OObjectEntityEnhancer.getInstance().getProxiedInstance(iRecord.getClassName(), entityManager, iRecord, null);
} else if (!(iPojo instanceof Proxy))
return OObjectEntityEnhancer.getInstance().getProxiedInstance(iPojo.getClass(), iRecord);
else
return iPojo;
}
public boolean isLazyLoading() {
return lazyLoading;
}
public void setLazyLoading(final boolean lazyLoading) {
this.lazyLoading = lazyLoading;
}
public String getType() {
return TYPE;
}
@Override
public ODocument getRecordByUserObject(Object iPojo, boolean iCreateIfNotAvailable) {
if (iPojo instanceof Proxy)
return OObjectEntitySerializer.getDocument((Proxy) iPojo);
return OObjectEntitySerializer.getDocument((Proxy) OObjectEntitySerializer.serializeObject(iPojo, this));
}
@Override
public Object getUserObjectByRecord(final OIdentifiable iRecord, final String iFetchPlan, final boolean iCreate) {
final ODocument document = iRecord.getRecord();
return OObjectEntityEnhancer.getInstance().getProxiedInstance(document.getClassName(), getEntityManager(), document, null);
}
@Override
public void registerUserObject(final Object iObject, final ORecordInternal<?> iRecord) {
}
public void registerUserObjectAfterLinkSave(ORecordInternal<?> iRecord) {
}
@Override
public void unregisterPojo(final Object iObject, final ODocument iRecord) {
}
public void registerClassMethodFilter(Class<?> iClass, OObjectMethodFilter iMethodFilter) {
OObjectEntityEnhancer.getInstance().registerClassMethodFilter(iClass, iMethodFilter);
}
public void deregisterClassMethodFilter(final Class<?> iClass) {
OObjectEntityEnhancer.getInstance().deregisterClassMethodFilter(iClass);
}
protected void init() {
entityManager = OEntityManager.getEntityManagerByDatabaseURL(getURL());
entityManager.setClassHandler(OObjectEntityClassHandler.getInstance());
saveOnlyDirty = OGlobalConfiguration.OBJECT_SAVE_ONLY_DIRTY.getValueAsBoolean();
OObjectSerializerHelper.register();
lazyLoading = true;
if (!isClosed() && entityManager.getEntityClass(OUser.class.getSimpleName()) == null) {
entityManager.registerEntityClass(OUser.class);
entityManager.registerEntityClass(ORole.class);
}
}
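// Delete every record whose RID the proxy handler marked as orphaned,
// cascading each deletion, then clear the handler's orphan set.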
protected void deleteOrphans(final OObjectProxyMethodHandler handler) {
for (ORID orphan : handler.getOrphans()) {
final ODocument doc = orphan.getRecord();
deleteCascade(doc);
underlying.delete(doc);
}
handler.getOrphans().clear();
}
}
| 1no label | object_src_main_java_com_orientechnologies_orient_object_db_OObjectDatabaseTx.java |
689 |
public class CollectionGetAllOperation extends CollectionOperation {
public CollectionGetAllOperation() {
}
public CollectionGetAllOperation(String name) {
super(name);
}
@Override
public int getId() {
return CollectionDataSerializerHook.COLLECTION_GET_ALL;
}
@Override
public void beforeRun() throws Exception {
}
@Override
public void run() throws Exception {
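// Snapshot the container's items as serialized Data and wrap them in a
// SerializableCollection so the whole result can be returned to the caller.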
final Collection<Data> all = getOrCreateContainer().getAll();
response = new SerializableCollection(all);
}
@Override
public void afterRun() throws Exception {
}
}
| 0true | hazelcast_src_main_java_com_hazelcast_collection_CollectionGetAllOperation.java |
1,294 |
clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask() {
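// mustAck() returns false for every node, so the update is treated as fully
// acknowledged as soon as the cluster state change itself is processed.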
@Override
public boolean mustAck(DiscoveryNode discoveryNode) {
return false;
}
@Override
public void onAllNodesAcked(@Nullable Throwable t) {
allNodesAcked.set(true);
latch.countDown();
}
@Override
public void onAckTimeout() {
ackTimeout.set(true);
latch.countDown();
}
@Override
public TimeValue ackTimeout() {
return TimeValue.timeValueSeconds(10);
}
@Override
public TimeValue timeout() {
return TimeValue.timeValueSeconds(10);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
executed.set(true);
return ClusterState.builder(currentState).build();
}
@Override
public void onFailure(String source, Throwable t) {
logger.error("failed to execute callback in test {}", t, source);
onFailure.set(true);
latch.countDown();
}
});
| 0true | src_test_java_org_elasticsearch_cluster_ClusterServiceTests.java |
1,564 |
public class VerticesMap {
public static final String PROCESS_EDGES = Tokens.makeNamespace(VerticesMap.class) + ".processEdges";
public enum Counters {
VERTICES_PROCESSED,
EDGES_PROCESSED
}
public static Configuration createConfiguration(final boolean processEdges) {
final Configuration configuration = new EmptyConfiguration();
configuration.setBoolean(PROCESS_EDGES, processEdges);
return configuration;
}
public static class Map extends Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex> {
private boolean processEdges;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
this.processEdges = context.getConfiguration().getBoolean(PROCESS_EDGES, true);
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex>.Context context) throws IOException, InterruptedException {
value.startPath();
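// Start the traversal path at this vertex; incident-edge paths are cleared
// below (when processEdges is set) so the step begins from a clean state.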
long edgesProcessed = 0;
if (this.processEdges) {
for (final Edge edge : value.getEdges(Direction.BOTH)) {
((StandardFaunusEdge) edge).clearPaths();
edgesProcessed++;
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.EDGES_PROCESSED, edgesProcessed);
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_PROCESSED, 1L);
context.write(NullWritable.get(), value);
}
}
}
| 1no label | titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_transform_VerticesMap.java |
1,311 |
public final class InternalClusterInfoService extends AbstractComponent implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener {
public static final String INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL = "cluster.info.update.interval";
private volatile TimeValue updateFrequency;
private volatile ImmutableMap<String, DiskUsage> usages;
private volatile ImmutableMap<String, Long> shardSizes;
private volatile boolean isMaster = false;
private volatile boolean enabled;
private final TransportNodesStatsAction transportNodesStatsAction;
private final TransportIndicesStatsAction transportIndicesStatsAction;
private final ClusterService clusterService;
private final ThreadPool threadPool;
@Inject
public InternalClusterInfoService(Settings settings, NodeSettingsService nodeSettingsService,
TransportNodesStatsAction transportNodesStatsAction,
TransportIndicesStatsAction transportIndicesStatsAction, ClusterService clusterService,
ThreadPool threadPool) {
super(settings);
this.usages = ImmutableMap.of();
this.shardSizes = ImmutableMap.of();
this.transportNodesStatsAction = transportNodesStatsAction;
this.transportIndicesStatsAction = transportIndicesStatsAction;
this.clusterService = clusterService;
this.threadPool = threadPool;
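// Defaults: poll every 30 seconds, and stay disabled unless the
// DiskThresholdDecider has been explicitly enabled in the settings.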
this.updateFrequency = settings.getAsTime(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, TimeValue.timeValueSeconds(30));
this.enabled = settings.getAsBoolean(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, false);
nodeSettingsService.addListener(new ApplySettings());
// Add InternalClusterInfoService to listen for Master changes
this.clusterService.add((LocalNodeMasterListener)this);
// Add to listen for state changes (when nodes are added)
this.clusterService.add((ClusterStateListener)this);
}
class ApplySettings implements NodeSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
TimeValue newUpdateFrequency = settings.getAsTime(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, null);
// ClusterInfoService is only enabled if the DiskThresholdDecider is enabled
Boolean newEnabled = settings.getAsBoolean(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, null);
if (newUpdateFrequency != null) {
if (newUpdateFrequency.getMillis() < TimeValue.timeValueSeconds(10).getMillis()) {
logger.warn("[{}] set too low [{}] (< 10s)", INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, newUpdateFrequency);
throw new IllegalStateException("Unable to set " + INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL + " less than 10 seconds");
} else {
logger.info("updating [{}] from [{}] to [{}]", INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, updateFrequency, newUpdateFrequency);
InternalClusterInfoService.this.updateFrequency = newUpdateFrequency;
}
}
// We don't log about enabling it here, because the DiskThresholdDecider will already be logging about enable/disable
if (newEnabled != null) {
InternalClusterInfoService.this.enabled = newEnabled;
}
}
}
@Override
public void onMaster() {
this.isMaster = true;
if (logger.isTraceEnabled()) {
logger.trace("I have been elected master, scheduling a ClusterInfoUpdateJob");
}
try {
// Submit a job that will start after DEFAULT_STARTING_INTERVAL, and reschedule itself after running
threadPool.schedule(updateFrequency, executorName(), new SubmitReschedulingClusterInfoUpdatedJob());
if (clusterService.state().getNodes().getDataNodes().size() > 1) {
// Submit an info update job to be run immediately
threadPool.executor(executorName()).execute(new ClusterInfoUpdateJob(false));
}
} catch (EsRejectedExecutionException ex) {
if (logger.isDebugEnabled()) {
logger.debug("Couldn't schedule cluster info update task - node might be shutting down", ex);
}
}
}
@Override
public void offMaster() {
this.isMaster = false;
}
@Override
public String executorName() {
return ThreadPool.Names.MANAGEMENT;
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
if (!this.enabled) {
return;
}
// Check whether it was a data node that was added
boolean dataNodeAdded = false;
for (DiscoveryNode addedNode : event.nodesDelta().addedNodes()) {
if (addedNode.dataNode()) {
dataNodeAdded = true;
break;
}
}
if (this.isMaster && dataNodeAdded && clusterService.state().getNodes().getDataNodes().size() > 1) {
if (logger.isDebugEnabled()) {
logger.debug("data node was added, retrieving new cluster info");
}
threadPool.executor(executorName()).execute(new ClusterInfoUpdateJob(false));
}
if (this.isMaster && event.nodesRemoved()) {
for (DiscoveryNode removedNode : event.nodesDelta().removedNodes()) {
if (removedNode.dataNode()) {
if (logger.isTraceEnabled()) {
logger.trace("Removing node from cluster info: {}", removedNode.getId());
}
Map<String, DiskUsage> newUsages = new HashMap<String, DiskUsage>(usages);
newUsages.remove(removedNode.getId());
usages = ImmutableMap.copyOf(newUsages);
}
}
}
}
@Override
public ClusterInfo getClusterInfo() {
return new ClusterInfo(usages, shardSizes);
}
/**
* Class used to submit {@link ClusterInfoUpdateJob}s on the
* {@link InternalClusterInfoService} threadpool; these jobs will
* reschedule themselves by placing a new instance of this class onto the
* scheduled threadpool.
*/
public class SubmitReschedulingClusterInfoUpdatedJob implements Runnable {
@Override
public void run() {
if (logger.isTraceEnabled()) {
logger.trace("Submitting new rescheduling cluster info update job");
}
try {
threadPool.executor(executorName()).execute(new ClusterInfoUpdateJob(true));
} catch (EsRejectedExecutionException ex) {
if (logger.isDebugEnabled()) {
logger.debug("Couldn't re-schedule cluster info update task - node might be shutting down", ex);
}
}
}
}
/**
* Runnable class that performs a {@link NodesStatsRequest} to retrieve
* disk usages for nodes in the cluster and an {@link IndicesStatsRequest}
* to retrieve the sizes of all shards to ensure they can fit on nodes
* during shard balancing.
*/
public class ClusterInfoUpdateJob implements Runnable {
// This boolean is used to signal to the ClusterInfoUpdateJob that it
// needs to reschedule itself to run again at a later time. It can be
// set to false to only run once
private final boolean reschedule;
public ClusterInfoUpdateJob(boolean reschedule) {
this.reschedule = reschedule;
}
@Override
public void run() {
if (logger.isTraceEnabled()) {
logger.trace("Performing ClusterInfoUpdateJob");
}
if (isMaster && this.reschedule) {
if (logger.isTraceEnabled()) {
logger.trace("Scheduling next run for updating cluster info in: {}", updateFrequency.toString());
}
try {
threadPool.schedule(updateFrequency, executorName(), new SubmitReschedulingClusterInfoUpdatedJob());
} catch (EsRejectedExecutionException ex) {
logger.debug("Reschedule cluster info service was rejected", ex);
}
}
if (!enabled) {
// Short-circuit if not enabled
if (logger.isTraceEnabled()) {
logger.trace("Skipping ClusterInfoUpdatedJob since it is disabled");
}
return;
}
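// Ask only the data nodes for filesystem stats, with a 15s timeout so a
// slow node cannot stall the disk-usage refresh.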
NodesStatsRequest nodesStatsRequest = new NodesStatsRequest("data:true");
nodesStatsRequest.clear();
nodesStatsRequest.fs(true);
nodesStatsRequest.timeout(TimeValue.timeValueSeconds(15));
transportNodesStatsAction.execute(nodesStatsRequest, new ActionListener<NodesStatsResponse>() {
@Override
public void onResponse(NodesStatsResponse nodeStatses) {
Map<String, DiskUsage> newUsages = new HashMap<String, DiskUsage>();
for (NodeStats nodeStats : nodeStatses.getNodes()) {
if (nodeStats.getFs() == null) {
logger.warn("Unable to retrieve node FS stats for {}", nodeStats.getNode().name());
} else {
long available = 0;
long total = 0;
for (FsStats.Info info : nodeStats.getFs()) {
available += info.getAvailable().bytes();
total += info.getTotal().bytes();
}
String nodeId = nodeStats.getNode().id();
if (logger.isTraceEnabled()) {
logger.trace("node: [{}], total disk: {}, available disk: {}", nodeId, total, available);
}
newUsages.put(nodeId, new DiskUsage(nodeId, total, available));
}
}
usages = ImmutableMap.copyOf(newUsages);
}
@Override
public void onFailure(Throwable e) {
logger.error("Failed to execute NodeStatsAction for ClusterInfoUpdateJob", e);
}
});
IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest();
indicesStatsRequest.clear();
indicesStatsRequest.store(true);
transportIndicesStatsAction.execute(indicesStatsRequest, new ActionListener<IndicesStatsResponse>() {
@Override
public void onResponse(IndicesStatsResponse indicesStatsResponse) {
ShardStats[] stats = indicesStatsResponse.getShards();
HashMap<String, Long> newShardSizes = new HashMap<String, Long>();
for (ShardStats s : stats) {
long size = s.getStats().getStore().sizeInBytes();
String sid = shardIdentifierFromRouting(s.getShardRouting());
if (logger.isTraceEnabled()) {
logger.trace("shard: {} size: {}", sid, size);
}
newShardSizes.put(sid, size);
}
shardSizes = ImmutableMap.copyOf(newShardSizes);
}
@Override
public void onFailure(Throwable e) {
logger.error("Failed to execute IndicesStatsAction for ClusterInfoUpdateJob", e);
}
});
if (logger.isTraceEnabled()) {
logger.trace("Finished ClusterInfoUpdateJob");
}
}
}
/**
* Method that incorporates the ShardId for the shard into a string that
* includes a 'p' or 'r' depending on whether the shard is a primary.
*/
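// Example: shard 3 of index "logs" held as a replica renders as "[logs][3][r]".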
public static String shardIdentifierFromRouting(ShardRouting shardRouting) {
return shardRouting.shardId().toString() + "[" + (shardRouting.primary() ? "p" : "r") + "]";
}
}
| 0true | src_main_java_org_elasticsearch_cluster_InternalClusterInfoService.java |
1,203 |
longObjectMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<LongObjectOpenHashMap>() {
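// Recycler.C supplies the lifecycle hooks: newInstance() builds a map sized
// for the requested capacity and clear() resets an instance before reuse.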
@Override
public LongObjectOpenHashMap newInstance(int sizing) {
return new LongObjectOpenHashMap(size(sizing));
}
@Override
public void clear(LongObjectOpenHashMap value) {
value.clear();
}
});
| 0true | src_main_java_org_elasticsearch_cache_recycler_CacheRecycler.java |
15 |
exe.submit(new Runnable() {
private final int number = atomicInt.incrementAndGet();
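// The counter is incremented when the Runnable is constructed, i.e. at
// submit time, so the printed numbers reflect submission order rather than
// completion order.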
@Override
public void run() {
try {
Thread.sleep(150);
} catch (InterruptedException e) {
e.printStackTrace();
}
System.out.println(number);
}
});
| 0true | titan-test_src_main_java_com_thinkaurelius_titan_TestBed.java |