Unnamed: 0 (int64, 0-6.45k) | func (string, lengths 29-253k) | target (class label, 2 classes) | project (string, lengths 36-167)
---|---|---|---|
933 | final Object myKey = makeDbCall(iMyDb, new ODbRelatedCall<Object>() {
public Object call() {
return myEntry.getKey();
}
}); | 0true
| core_src_main_java_com_orientechnologies_orient_core_record_impl_ODocumentHelper.java |
1,524 | @Ignore("not a JUnit test")
public final class TestUtil {
static final private SerializationService serializationService = new SerializationServiceBuilder().build();
private TestUtil() {
}
public static Data toData(Object obj) {
return serializationService.toData(obj);
}
public static Object toObject(Data data) {
return serializationService.toObject(data);
}
public static Node getNode(HazelcastInstance hz) {
HazelcastInstanceImpl impl = getHazelcastInstanceImpl(hz);
return impl != null ? impl.node : null;
}
public static HazelcastInstanceImpl getHazelcastInstanceImpl(HazelcastInstance hz) {
HazelcastInstanceImpl impl = null;
if (hz instanceof HazelcastInstanceProxy) {
impl = ((HazelcastInstanceProxy) hz).original;
} else if (hz instanceof HazelcastInstanceImpl) {
impl = (HazelcastInstanceImpl) hz;
}
return impl;
}
public static void terminateInstance(HazelcastInstance hz) {
final Node node = getNode(hz);
node.getConnectionManager().shutdown();
node.shutdown(true);
}
public static void warmUpPartitions(HazelcastInstance... instances) throws InterruptedException {
for (HazelcastInstance instance : instances) {
final PartitionService ps = instance.getPartitionService();
for (Partition partition : ps.getPartitions()) {
while (partition.getOwner() == null) {
Thread.sleep(10);
}
}
}
}
} | 0true
| hazelcast_src_test_java_com_hazelcast_instance_TestUtil.java |
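The `warmUpPartitions` helper in the row above polls until every partition has an owner, which avoids flaky assertions against a cluster that is still assigning partitions. A minimal usage sketch, assuming a Hazelcast 3.x test classpath where `TestUtil` is importable; `WarmUpExample` and the two-member cluster are illustrative only:

```java
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.instance.TestUtil;

public class WarmUpExample {
    public static void main(String[] args) throws InterruptedException {
        HazelcastInstance hz1 = Hazelcast.newHazelcastInstance();
        HazelcastInstance hz2 = Hazelcast.newHazelcastInstance();
        // blocks until every partition has an owner
        TestUtil.warmUpPartitions(hz1, hz2);
        // ... run assertions that depend on a stable partition table ...
        Hazelcast.shutdownAll();
    }
}
```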
2,124 | private static class SystemOutStream extends OutputStream {
public SystemOutStream() {
}
public void close() {
}
public void flush() {
System.out.flush();
}
public void write(final byte[] b) throws IOException {
if (!Loggers.consoleLoggingEnabled()) {
return;
}
System.out.write(b);
}
public void write(final byte[] b, final int off, final int len)
throws IOException {
if (!Loggers.consoleLoggingEnabled()) {
return;
}
System.out.write(b, off, len);
}
public void write(final int b) throws IOException {
if (!Loggers.consoleLoggingEnabled()) {
return;
}
System.out.write(b);
}
} | 0true
| src_main_java_org_elasticsearch_common_logging_log4j_ConsoleAppender.java |
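`SystemOutStream` above gates every `write` variant on a logging flag before delegating to `System.out`. The same idea as a self-contained, hypothetical `ToggleableOutputStream` using only JDK types (no Log4j or Elasticsearch classes assumed):

```java
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.concurrent.atomic.AtomicBoolean;

/** Drops writes while the flag is off; same gating idea as SystemOutStream above. */
class ToggleableOutputStream extends FilterOutputStream {
    private final AtomicBoolean enabled;

    ToggleableOutputStream(OutputStream out, AtomicBoolean enabled) {
        super(out);
        this.enabled = enabled;
    }

    @Override
    public void write(int b) throws IOException {
        if (enabled.get()) {
            out.write(b);
        }
    }

    @Override
    public void write(byte[] b, int off, int len) throws IOException {
        if (enabled.get()) {
            // delegate directly, bypassing FilterOutputStream's byte-at-a-time loop
            out.write(b, off, len);
        }
    }
}
```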
549 | public abstract class AbstractClientTxnCollectionProxy<E> extends ClientTxnProxy {
protected AbstractClientTxnCollectionProxy(String name, TransactionContextProxy proxy) {
super(name, proxy);
}
void onDestroy() {
}
public String getName() {
return (String) getId();
}
protected void throwExceptionIfNull(Object o) {
if (o == null) {
throw new NullPointerException("Object is null");
}
}
} | 0true
| hazelcast-client_src_main_java_com_hazelcast_client_txn_proxy_AbstractClientTxnCollectionProxy.java |
252 | public interface EmailReportingDao {
public Long createTracking(String emailAddress, String type, String extraValue) ;
public void recordOpen(Long emailId, String userAgent);
public void recordClick(Long emailId, String customerId, String destinationUri, String queryString);
public EmailTracking retrieveTracking(Long emailId);
public EmailTarget createTarget();
} | 0true
| common_src_main_java_org_broadleafcommerce_common_email_dao_EmailReportingDao.java |
890 | abstract class BaseCountDownLatchOperation extends AbstractNamedOperation
implements PartitionAwareOperation {
protected BaseCountDownLatchOperation() {
}
protected BaseCountDownLatchOperation(String name) {
super(name);
}
@Override
public boolean returnsResponse() {
return true;
}
@Override
public final String getServiceName() {
return CountDownLatchService.SERVICE_NAME;
}
protected WaitNotifyKey waitNotifyKey() {
return new LatchKey(name);
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_concurrent_countdownlatch_operations_BaseCountDownLatchOperation.java |
1,373 | public class OTransactionNoTx extends OTransactionAbstract {
public OTransactionNoTx(final ODatabaseRecordTx iDatabase) {
super(iDatabase);
}
public void begin() {
}
public void commit() {
}
public void rollback() {
}
public void close() {
}
public ORecordInternal<?> loadRecord(final ORID iRid, final ORecordInternal<?> iRecord, final String iFetchPlan,
boolean ignoreCache, boolean loadTombstone) {
if (iRid.isNew())
return null;
return database.executeReadRecord((ORecordId) iRid, iRecord, iFetchPlan, ignoreCache, loadTombstone);
}
/**
* Updates the record.
*
* @param iForceCreate
* @param iRecordCreatedCallback
* @param iRecordUpdatedCallback
*/
public void saveRecord(final ORecordInternal<?> iRecord, final String iClusterName, final OPERATION_MODE iMode,
boolean iForceCreate, final ORecordCallback<? extends Number> iRecordCreatedCallback,
ORecordCallback<ORecordVersion> iRecordUpdatedCallback) {
try {
database.executeSaveRecord(iRecord, iClusterName, iRecord.getRecordVersion(), iRecord.getRecordType(), true, iMode,
iForceCreate, iRecordCreatedCallback, null);
} catch (Exception e) {
// REMOVE IT FROM THE CACHE TO AVOID DIRTY RECORDS
final ORecordId rid = (ORecordId) iRecord.getIdentity();
if (rid.isValid())
database.getLevel1Cache().freeRecord(rid);
if (e instanceof RuntimeException)
throw (RuntimeException) e;
throw new OException(e);
}
}
@Override
public boolean updateReplica(ORecordInternal<?> iRecord) {
try {
return database.executeUpdateReplica(iRecord);
} catch (Exception e) {
// REMOVE IT FROM THE CACHE TO AVOID DIRTY RECORDS
final ORecordId rid = (ORecordId) iRecord.getIdentity();
database.getLevel1Cache().freeRecord(rid);
if (e instanceof RuntimeException)
throw (RuntimeException) e;
throw new OException(e);
}
}
/**
* Deletes the record.
*/
public void deleteRecord(final ORecordInternal<?> iRecord, final OPERATION_MODE iMode) {
if (!iRecord.getIdentity().isPersistent())
return;
try {
database.executeDeleteRecord(iRecord, iRecord.getRecordVersion(), true, true, iMode, false);
} catch (Exception e) {
// REMOVE IT FROM THE CACHE TO AVOID DIRTY RECORDS
final ORecordId rid = (ORecordId) iRecord.getIdentity();
if (rid.isValid())
database.getLevel1Cache().freeRecord(rid);
if (e instanceof RuntimeException)
throw (RuntimeException) e;
throw new OException(e);
}
}
public Collection<ORecordOperation> getCurrentRecordEntries() {
return null;
}
public Collection<ORecordOperation> getAllRecordEntries() {
return null;
}
public List<ORecordOperation> getRecordEntriesByClass(String iClassName) {
return null;
}
public List<ORecordOperation> getNewRecordEntriesByClusterIds(int[] iIds) {
return null;
}
public void clearRecordEntries() {
}
public int getRecordEntriesSize() {
return 0;
}
public ORecordInternal<?> getRecord(final ORID rid) {
return null;
}
public ORecordOperation getRecordEntry(final ORID rid) {
return null;
}
public boolean isUsingLog() {
return false;
}
public void setUsingLog(final boolean useLog) {
}
public ODocument getIndexChanges() {
return null;
}
public OTransactionIndexChangesPerKey getIndexEntry(final String iIndexName, final Object iKey) {
return null;
}
public void addIndexEntry(final OIndex<?> delegate, final String indexName, final OPERATION status, final Object key,
final OIdentifiable value) {
switch (status) {
case CLEAR:
delegate.clear();
break;
case PUT:
delegate.put(key, value);
break;
case REMOVE:
assert key != null;
delegate.remove(key, value);
break;
}
}
public void clearIndexEntries() {
}
public OTransactionIndexChanges getIndexChanges(final String iName) {
return null;
}
public int getId() {
return 0;
}
public List<String> getInvolvedIndexes() {
return null;
}
public void updateIdentityAfterCommit(ORID oldRid, ORID newRid) {
}
} | 1no label
| core_src_main_java_com_orientechnologies_orient_core_tx_OTransactionNoTx.java |
3,903 | public class NestedFilterParser implements FilterParser {
public static final String NAME = "nested";
@Inject
public NestedFilterParser() {
}
@Override
public String[] names() {
return new String[]{NAME, Strings.toCamelCase(NAME)};
}
@Override
public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
Query query = null;
boolean queryFound = false;
Filter filter = null;
boolean filterFound = false;
float boost = 1.0f;
boolean join = true;
String path = null;
boolean cache = false;
CacheKeyFilter.Key cacheKey = null;
String filterName = null;
// we need a late binding filter so we can inject a parent nested filter into inner nested queries
NestedQueryParser.LateBindingParentFilter currentParentFilterContext = NestedQueryParser.parentFilterContext.get();
NestedQueryParser.LateBindingParentFilter usAsParentFilter = new NestedQueryParser.LateBindingParentFilter();
NestedQueryParser.parentFilterContext.set(usAsParentFilter);
try {
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("query".equals(currentFieldName)) {
queryFound = true;
query = parseContext.parseInnerQuery();
} else if ("filter".equals(currentFieldName)) {
filterFound = true;
filter = parseContext.parseInnerFilter();
} else {
throw new QueryParsingException(parseContext.index(), "[nested] filter does not support [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if ("join".equals(currentFieldName)) {
join = parser.booleanValue();
} else if ("path".equals(currentFieldName)) {
path = parser.text();
} else if ("boost".equals(currentFieldName)) {
boost = parser.floatValue();
} else if ("_scope".equals(currentFieldName)) {
throw new QueryParsingException(parseContext.index(), "the [_scope] support in [nested] filter has been removed, use nested filter as a facet_filter in the relevant facet");
} else if ("_name".equals(currentFieldName)) {
filterName = parser.text();
} else if ("_cache".equals(currentFieldName)) {
cache = parser.booleanValue();
} else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
cacheKey = new CacheKeyFilter.Key(parser.text());
} else {
throw new QueryParsingException(parseContext.index(), "[nested] filter does not support [" + currentFieldName + "]");
}
}
}
if (!queryFound && !filterFound) {
throw new QueryParsingException(parseContext.index(), "[nested] requires either 'query' or 'filter' field");
}
if (path == null) {
throw new QueryParsingException(parseContext.index(), "[nested] requires 'path' field");
}
if (query == null && filter == null) {
return null;
}
if (filter != null) {
query = new XConstantScoreQuery(filter);
}
query.setBoost(boost);
MapperService.SmartNameObjectMapper mapper = parseContext.smartObjectMapper(path);
if (mapper == null) {
throw new QueryParsingException(parseContext.index(), "[nested] failed to find nested object under path [" + path + "]");
}
ObjectMapper objectMapper = mapper.mapper();
if (objectMapper == null) {
throw new QueryParsingException(parseContext.index(), "[nested] failed to find nested object under path [" + path + "]");
}
if (!objectMapper.nested().isNested()) {
throw new QueryParsingException(parseContext.index(), "[nested] nested object under path [" + path + "] is not of nested type");
}
Filter childFilter = parseContext.cacheFilter(objectMapper.nestedTypeFilter(), null);
usAsParentFilter.filter = childFilter;
// wrap the child query to only work on the nested path type
query = new XFilteredQuery(query, childFilter);
Filter parentFilter = currentParentFilterContext;
if (parentFilter == null) {
parentFilter = NonNestedDocsFilter.INSTANCE;
// don't do special parent filtering, since we might have same nested mapping on two different types
//if (mapper.hasDocMapper()) {
// // filter based on the type...
// parentFilter = mapper.docMapper().typeFilter();
//}
parentFilter = parseContext.cacheFilter(parentFilter, null);
}
Filter nestedFilter;
if (join) {
ToParentBlockJoinQuery joinQuery = new ToParentBlockJoinQuery(query, parentFilter, ScoreMode.None);
nestedFilter = new QueryWrapperFilter(joinQuery);
} else {
nestedFilter = new QueryWrapperFilter(query);
}
if (cache) {
nestedFilter = parseContext.cacheFilter(nestedFilter, cacheKey);
}
if (filterName != null) {
parseContext.addNamedFilter(filterName, nestedFilter);
}
return nestedFilter;
} finally {
// restore the thread local one...
NestedQueryParser.parentFilterContext.set(currentParentFilterContext);
}
}
} | 1no label
| src_main_java_org_elasticsearch_index_query_NestedFilterParser.java |
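The parser above walks JSON tokens and dispatches on the current field name. A minimal sketch of that streaming-dispatch pattern using Jackson's `JsonParser` instead of Elasticsearch's `XContentParser`; the field set is reduced to `path` and `join`, and the class name and input are illustrative:

```java
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;

public class TokenDispatchExample {
    public static void main(String[] args) throws Exception {
        String json = "{\"path\": \"comments\", \"join\": true}";
        JsonParser p = new JsonFactory().createParser(json);
        String path = null;
        boolean join = true;
        p.nextToken(); // consume START_OBJECT
        String field = null;
        JsonToken token;
        while ((token = p.nextToken()) != JsonToken.END_OBJECT) {
            if (token == JsonToken.FIELD_NAME) {
                field = p.getCurrentName();     // remember the field, value comes next
            } else if ("path".equals(field)) {
                path = p.getText();
            } else if ("join".equals(field)) {
                join = p.getBooleanValue();
            }
            // the parser in the row above rejects unknown fields at this point
        }
        System.out.println(path + " join=" + join);
    }
}
```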
27 | public class GetCommandParser implements CommandParser {
public TextCommand parser(SocketTextReader socketTextReader, String cmd, int space) {
String key = cmd.substring(space + 1);
if (key.indexOf(' ') == -1) {
GetCommand r = new GetCommand(key);
socketTextReader.publishRequest(r);
} else {
StringTokenizer st = new StringTokenizer(key);
while (st.hasMoreTokens()) {
PartialGetCommand r = new PartialGetCommand(st.nextToken());
socketTextReader.publishRequest(r);
}
socketTextReader.publishRequest(new EndCommand());
}
return null;
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_ascii_memcache_GetCommandParser.java |
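A memcache `get` line may carry one key or several; the parser above publishes one command per key plus an `END` terminator for the multi-key case. A self-contained sketch of just that tokenizing step, with illustrative names and plain JDK types:

```java
import java.util.StringTokenizer;

public class GetLineSplitExample {
    public static void main(String[] args) {
        String cmd = "get key1 key2 key3";
        String key = cmd.substring(cmd.indexOf(' ') + 1);
        if (key.indexOf(' ') == -1) {
            System.out.println("single get: " + key);   // one GetCommand
        } else {
            StringTokenizer st = new StringTokenizer(key);
            while (st.hasMoreTokens()) {
                System.out.println("partial get: " + st.nextToken()); // one PartialGetCommand per key
            }
            System.out.println("END");                  // terminator, like EndCommand above
        }
    }
}
```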
1,996 | public class Matchers {
private Matchers() {
}
/**
* Returns a matcher which matches any input.
*/
public static Matcher<Object> any() {
return ANY;
}
private static final Matcher<Object> ANY = new Any();
private static class Any extends AbstractMatcher<Object> implements Serializable {
public boolean matches(Object o) {
return true;
}
@Override
public String toString() {
return "any()";
}
public Object readResolve() {
return any();
}
private static final long serialVersionUID = 0;
}
/**
* Inverts the given matcher.
*/
public static <T> Matcher<T> not(final Matcher<? super T> p) {
return new Not<T>(p);
}
private static class Not<T> extends AbstractMatcher<T> implements Serializable {
final Matcher<? super T> delegate;
private Not(Matcher<? super T> delegate) {
this.delegate = checkNotNull(delegate, "delegate");
}
public boolean matches(T t) {
return !delegate.matches(t);
}
@Override
public boolean equals(Object other) {
return other instanceof Not
&& ((Not) other).delegate.equals(delegate);
}
@Override
public int hashCode() {
return -delegate.hashCode();
}
@Override
public String toString() {
return "not(" + delegate + ")";
}
private static final long serialVersionUID = 0;
}
private static void checkForRuntimeRetention(
Class<? extends Annotation> annotationType) {
Retention retention = annotationType.getAnnotation(Retention.class);
checkArgument(retention != null && retention.value() == RetentionPolicy.RUNTIME,
"Annotation " + annotationType.getSimpleName() + " is missing RUNTIME retention");
}
/**
* Returns a matcher which matches elements (methods, classes, etc.)
* with a given annotation.
*/
public static Matcher<AnnotatedElement> annotatedWith(
final Class<? extends Annotation> annotationType) {
return new AnnotatedWithType(annotationType);
}
private static class AnnotatedWithType extends AbstractMatcher<AnnotatedElement>
implements Serializable {
private final Class<? extends Annotation> annotationType;
public AnnotatedWithType(Class<? extends Annotation> annotationType) {
this.annotationType = checkNotNull(annotationType, "annotation type");
checkForRuntimeRetention(annotationType);
}
public boolean matches(AnnotatedElement element) {
return element.getAnnotation(annotationType) != null;
}
@Override
public boolean equals(Object other) {
return other instanceof AnnotatedWithType
&& ((AnnotatedWithType) other).annotationType.equals(annotationType);
}
@Override
public int hashCode() {
return 37 * annotationType.hashCode();
}
@Override
public String toString() {
return "annotatedWith(" + annotationType.getSimpleName() + ".class)";
}
private static final long serialVersionUID = 0;
}
/**
* Returns a matcher which matches elements (methods, classes, etc.)
* with a given annotation.
*/
public static Matcher<AnnotatedElement> annotatedWith(
final Annotation annotation) {
return new AnnotatedWith(annotation);
}
private static class AnnotatedWith extends AbstractMatcher<AnnotatedElement>
implements Serializable {
private final Annotation annotation;
public AnnotatedWith(Annotation annotation) {
this.annotation = checkNotNull(annotation, "annotation");
checkForRuntimeRetention(annotation.annotationType());
}
public boolean matches(AnnotatedElement element) {
Annotation fromElement = element.getAnnotation(annotation.annotationType());
return fromElement != null && annotation.equals(fromElement);
}
@Override
public boolean equals(Object other) {
return other instanceof AnnotatedWith
&& ((AnnotatedWith) other).annotation.equals(annotation);
}
@Override
public int hashCode() {
return 37 * annotation.hashCode();
}
@Override
public String toString() {
return "annotatedWith(" + annotation + ")";
}
private static final long serialVersionUID = 0;
}
/**
* Returns a matcher which matches subclasses of the given type (as well as
* the given type).
*/
public static Matcher<Class> subclassesOf(final Class<?> superclass) {
return new SubclassesOf(superclass);
}
private static class SubclassesOf extends AbstractMatcher<Class>
implements Serializable {
private final Class<?> superclass;
public SubclassesOf(Class<?> superclass) {
this.superclass = checkNotNull(superclass, "superclass");
}
public boolean matches(Class subclass) {
return superclass.isAssignableFrom(subclass);
}
@Override
public boolean equals(Object other) {
return other instanceof SubclassesOf
&& ((SubclassesOf) other).superclass.equals(superclass);
}
@Override
public int hashCode() {
return 37 * superclass.hashCode();
}
@Override
public String toString() {
return "subclassesOf(" + superclass.getSimpleName() + ".class)";
}
private static final long serialVersionUID = 0;
}
/**
* Returns a matcher which matches objects equal to the given object.
*/
public static Matcher<Object> only(Object value) {
return new Only(value);
}
private static class Only extends AbstractMatcher<Object>
implements Serializable {
private final Object value;
public Only(Object value) {
this.value = checkNotNull(value, "value");
}
public boolean matches(Object other) {
return value.equals(other);
}
@Override
public boolean equals(Object other) {
return other instanceof Only
&& ((Only) other).value.equals(value);
}
@Override
public int hashCode() {
return 37 * value.hashCode();
}
@Override
public String toString() {
return "only(" + value + ")";
}
private static final long serialVersionUID = 0;
}
/**
* Returns a matcher which matches only the given object.
*/
public static Matcher<Object> identicalTo(final Object value) {
return new IdenticalTo(value);
}
private static class IdenticalTo extends AbstractMatcher<Object>
implements Serializable {
private final Object value;
public IdenticalTo(Object value) {
this.value = checkNotNull(value, "value");
}
public boolean matches(Object other) {
return value == other;
}
@Override
public boolean equals(Object other) {
return other instanceof IdenticalTo
&& ((IdenticalTo) other).value == value;
}
@Override
public int hashCode() {
return 37 * System.identityHashCode(value);
}
@Override
public String toString() {
return "identicalTo(" + value + ")";
}
private static final long serialVersionUID = 0;
}
/**
* Returns a matcher which matches classes in the given package. Packages are specific to their
* classloader, so classes with the same package name may not have the same package at runtime.
*/
public static Matcher<Class> inPackage(final Package targetPackage) {
return new InPackage(targetPackage);
}
private static class InPackage extends AbstractMatcher<Class> implements Serializable {
private final transient Package targetPackage;
private final String packageName;
public InPackage(Package targetPackage) {
this.targetPackage = checkNotNull(targetPackage, "package");
this.packageName = targetPackage.getName();
}
public boolean matches(Class c) {
return c.getPackage().equals(targetPackage);
}
@Override
public boolean equals(Object other) {
return other instanceof InPackage
&& ((InPackage) other).targetPackage.equals(targetPackage);
}
@Override
public int hashCode() {
return 37 * targetPackage.hashCode();
}
@Override
public String toString() {
return "inPackage(" + targetPackage.getName() + ")";
}
public Object readResolve() {
return inPackage(Package.getPackage(packageName));
}
private static final long serialVersionUID = 0;
}
/**
* Returns a matcher which matches classes in the given package and its subpackages. Unlike
* {@link #inPackage(Package) inPackage()}, this matches classes from any classloader.
*
* @since 2.0
*/
public static Matcher<Class> inSubpackage(final String targetPackageName) {
return new InSubpackage(targetPackageName);
}
private static class InSubpackage extends AbstractMatcher<Class> implements Serializable {
private final String targetPackageName;
public InSubpackage(String targetPackageName) {
this.targetPackageName = targetPackageName;
}
public boolean matches(Class c) {
String classPackageName = c.getPackage().getName();
return classPackageName.equals(targetPackageName)
|| classPackageName.startsWith(targetPackageName + ".");
}
@Override
public boolean equals(Object other) {
return other instanceof InSubpackage
&& ((InSubpackage) other).targetPackageName.equals(targetPackageName);
}
@Override
public int hashCode() {
return 37 * targetPackageName.hashCode();
}
@Override
public String toString() {
return "inSubpackage(" + targetPackageName + ")";
}
private static final long serialVersionUID = 0;
}
/**
* Returns a matcher which matches methods with matching return types.
*/
public static Matcher<Method> returns(
final Matcher<? super Class<?>> returnType) {
return new Returns(returnType);
}
private static class Returns extends AbstractMatcher<Method> implements Serializable {
private final Matcher<? super Class<?>> returnType;
public Returns(Matcher<? super Class<?>> returnType) {
this.returnType = checkNotNull(returnType, "return type matcher");
}
public boolean matches(Method m) {
return returnType.matches(m.getReturnType());
}
@Override
public boolean equals(Object other) {
return other instanceof Returns
&& ((Returns) other).returnType.equals(returnType);
}
@Override
public int hashCode() {
return 37 * returnType.hashCode();
}
@Override
public String toString() {
return "returns(" + returnType + ")";
}
private static final long serialVersionUID = 0;
}
} | 0true
| src_main_java_org_elasticsearch_common_inject_matcher_Matchers.java |
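A short usage sketch for the matchers above, assuming the `Matcher`/`Matchers` types from this row are importable; the printed values follow directly from the `matches` implementations shown:

```java
public class MatchersExample {
    public static void main(String[] args) {
        Matcher<Class> numbers = Matchers.subclassesOf(Number.class);
        System.out.println(numbers.matches(Integer.class));              // true
        System.out.println(Matchers.not(numbers).matches(String.class)); // true
        System.out.println(Matchers.inSubpackage("java.util")
                .matches(java.util.ArrayList.class));                    // true
        System.out.println(Matchers.only("x").matches("x"));             // true (equals-based)
    }
}
```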
159 | public abstract class MultiTargetClientRequest extends ClientRequest {
public static final int TRY_COUNT = 100;
@Override
final void process() throws Exception {
ClientEndpoint endpoint = getEndpoint();
OperationFactory operationFactory = createOperationFactory();
Collection<Address> targets = getTargets();
if (targets.isEmpty()) {
endpoint.sendResponse(reduce(new HashMap<Address, Object>()), getCallId());
return;
}
MultiTargetCallback callback = new MultiTargetCallback(targets);
for (Address target : targets) {
Operation op = operationFactory.createOperation();
op.setCallerUuid(endpoint.getUuid());
InvocationBuilder builder = clientEngine.createInvocationBuilder(getServiceName(), op, target)
.setTryCount(TRY_COUNT)
.setResultDeserialized(false)
.setCallback(new SingleTargetCallback(target, callback));
builder.invoke();
}
}
protected abstract OperationFactory createOperationFactory();
protected abstract Object reduce(Map<Address, Object> map);
public abstract Collection<Address> getTargets();
private final class MultiTargetCallback {
final Collection<Address> targets;
final ConcurrentMap<Address, Object> results;
private MultiTargetCallback(Collection<Address> targets) {
this.targets = synchronizedSet(new HashSet<Address>(targets));
this.results = new ConcurrentHashMap<Address, Object>(targets.size());
}
public void notify(Address target, Object result) {
if (targets.remove(target)) {
results.put(target, result);
} else {
if (results.containsKey(target)) {
throw new IllegalArgumentException("Duplicate response from -> " + target);
}
throw new IllegalArgumentException("Unknown target! -> " + target);
}
if (targets.isEmpty()) {
Object response = reduce(results);
endpoint.sendResponse(response, getCallId());
}
}
}
private static final class SingleTargetCallback implements Callback<Object> {
final Address target;
final MultiTargetCallback parent;
private SingleTargetCallback(Address target, MultiTargetCallback parent) {
this.target = target;
this.parent = parent;
}
@Override
public void notify(Object object) {
parent.notify(target, object);
}
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_client_MultiTargetClientRequest.java |
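`MultiTargetCallback` above gathers one result per target and reduces once the pending-target set drains. A minimal, hypothetical scatter-gather sketch of the same idea in plain JDK primitives; note that it records each result before removing the target so the final responder is guaranteed to observe every entry (the row's code removes first):

```java
import java.util.*;
import java.util.concurrent.*;

public class ScatterGatherExample {
    public static void main(String[] args) throws Exception {
        List<String> targets = Arrays.asList("node-a", "node-b", "node-c");
        Set<String> pending = Collections.synchronizedSet(new HashSet<>(targets));
        ConcurrentMap<String, Object> results = new ConcurrentHashMap<>();
        CompletableFuture<Map<String, Object>> reduced = new CompletableFuture<>();
        ExecutorService pool = Executors.newFixedThreadPool(targets.size());
        for (String target : targets) {
            pool.submit(() -> {
                Object result = target.length(); // stand-in for a remote call
                results.put(target, result);     // record before removing, so the
                if (pending.remove(target) && pending.isEmpty()) {
                    reduced.complete(results);   // last responder sees every result
                }
            });
        }
        System.out.println(reduced.get(5, TimeUnit.SECONDS));
        pool.shutdown();
    }
}
```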
1,830 | class Initializer {
/**
* the only thread that we'll use to inject members.
*/
private final Thread creatingThread = Thread.currentThread();
/**
* zero means everything is injected.
*/
private final CountDownLatch ready = new CountDownLatch(1);
/**
* Maps instances that need injection to a source that registered them
*/
private final Map<Object, InjectableReference<?>> pendingInjection = Maps.newIdentityHashMap();
/**
* Registers an instance for member injection when that step is performed.
*
* @param instance an instance that optionally has members to be injected (each annotated with
* @Inject).
* @param source the source location that this injection was requested
*/
public <T> Initializable<T> requestInjection(InjectorImpl injector, T instance, Object source,
Set<InjectionPoint> injectionPoints) {
checkNotNull(source);
// short circuit if the object has no injections
if (instance == null
|| (injectionPoints.isEmpty() && !injector.membersInjectorStore.hasTypeListeners())) {
return Initializables.of(instance);
}
InjectableReference<T> initializable = new InjectableReference<T>(injector, instance, source);
pendingInjection.put(instance, initializable);
return initializable;
}
/**
* Prepares member injectors for all injected instances. This prompts Guice to do static analysis
* on the injected instances.
*/
void validateOustandingInjections(Errors errors) {
for (InjectableReference<?> reference : pendingInjection.values()) {
try {
reference.validate(errors);
} catch (ErrorsException e) {
errors.merge(e.getErrors());
}
}
}
/**
* Performs creation-time injections on all objects that require it. Whenever fulfilling an
* injection depends on another object that requires injection, we inject it first. If the two
* instances are codependent (directly or transitively), ordering of injection is arbitrary.
*/
void injectAll(final Errors errors) {
// loop over a defensive copy since ensureInjected() mutates the set. Unfortunately, that copy
// is made complicated by a bug in IBM's JDK, wherein entrySet().toArray(Object[]) doesn't work
for (InjectableReference<?> reference : Lists.newArrayList(pendingInjection.values())) {
try {
reference.get(errors);
} catch (ErrorsException e) {
errors.merge(e.getErrors());
}
}
if (!pendingInjection.isEmpty()) {
throw new AssertionError("Failed to satisfy " + pendingInjection);
}
ready.countDown();
}
private class InjectableReference<T> implements Initializable<T> {
private final InjectorImpl injector;
private final T instance;
private final Object source;
private MembersInjectorImpl<T> membersInjector;
public InjectableReference(InjectorImpl injector, T instance, Object source) {
this.injector = injector;
this.instance = checkNotNull(instance, "instance");
this.source = checkNotNull(source, "source");
}
public void validate(Errors errors) throws ErrorsException {
@SuppressWarnings("unchecked") // the type of 'T' is a TypeLiteral<T>
TypeLiteral<T> type = TypeLiteral.get((Class<T>) instance.getClass());
membersInjector = injector.membersInjectorStore.get(type, errors.withSource(source));
}
/**
* Reentrant. If {@code instance} was registered for injection at injector-creation time, this
* method will ensure that all its members have been injected before returning.
*/
public T get(Errors errors) throws ErrorsException {
if (ready.getCount() == 0) {
return instance;
}
// just wait for everything to be injected by another thread
if (Thread.currentThread() != creatingThread) {
try {
ready.await();
return instance;
} catch (InterruptedException e) {
// Give up, since we don't know if our injection is ready
throw new RuntimeException(e);
}
}
// toInject needs injection, do it right away. we only do this once, even if it fails
if (pendingInjection.remove(instance) != null) {
membersInjector.injectAndNotify(instance, errors.withSource(source));
}
return instance;
}
@Override
public String toString() {
return instance.toString();
}
}
} | 0true
| src_main_java_org_elasticsearch_common_inject_Initializer.java |
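`InjectableReference.get` above distinguishes three cases: the latch already at zero, a foreign thread that must wait on the latch, and the creating thread that does the pending work inline. A stripped-down, hypothetical sketch of that gate; where the real class performs the pending injection, this sketch simply fails fast:

```java
import java.util.concurrent.CountDownLatch;

/** Minimal sketch of Initializer's gate: the creating thread publishes the value;
 *  any other thread that asks too early blocks on the latch until it is ready. */
class OneShotValue<T> {
    private final Thread creatingThread = Thread.currentThread();
    private final CountDownLatch ready = new CountDownLatch(1);
    private volatile T value;

    void publish(T v) {
        value = v;
        ready.countDown();           // zero means the value is published
    }

    T get() throws InterruptedException {
        if (ready.getCount() == 0) {
            return value;            // fast path: already published
        }
        if (Thread.currentThread() != creatingThread) {
            ready.await();           // wait for the creating thread to finish
            return value;
        }
        // the real Initializer does the pending injection inline at this point
        throw new IllegalStateException("not yet published");
    }
}
```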
1,402 | final NodeIndexDeletedAction.Listener nodeIndexDeleteListener = new NodeIndexDeletedAction.Listener() {
@Override
public void onNodeIndexDeleted(String index, String nodeId) {
if (index.equals(request.index)) {
if (counter.decrementAndGet() == 0) {
listener.onResponse(new Response(true));
nodeIndexDeletedAction.remove(this);
}
}
}
@Override
public void onNodeIndexStoreDeleted(String index, String nodeId) {
if (index.equals(request.index)) {
if (counter.decrementAndGet() == 0) {
listener.onResponse(new Response(true));
nodeIndexDeletedAction.remove(this);
}
}
}
}; | 0true
| src_main_java_org_elasticsearch_cluster_metadata_MetaDataDeleteIndexService.java |
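Both listener callbacks above share one idiom: decrement a shared counter and respond exactly once when it reaches zero. A tiny illustrative sketch of that idiom, with hypothetical names:

```java
import java.util.concurrent.atomic.AtomicInteger;

public class CountdownResponseExample {
    public static void main(String[] args) {
        // expect one ack per node; respond exactly once when the count hits zero
        AtomicInteger counter = new AtomicInteger(3);
        Runnable onAck = () -> {
            if (counter.decrementAndGet() == 0) {
                System.out.println("all acks received -> send response, remove listener");
            }
        };
        onAck.run();
        onAck.run();
        onAck.run(); // the third ack triggers the single response
    }
}
```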
3,559 | public static class Builder extends NumberFieldMapper.Builder<Builder, ByteFieldMapper> {
protected Byte nullValue = Defaults.NULL_VALUE;
public Builder(String name) {
super(name, new FieldType(Defaults.FIELD_TYPE));
builder = this;
}
public Builder nullValue(byte nullValue) {
this.nullValue = nullValue;
return this;
}
@Override
public ByteFieldMapper build(BuilderContext context) {
fieldType.setOmitNorms(fieldType.omitNorms() && boost == 1.0f);
ByteFieldMapper fieldMapper = new ByteFieldMapper(buildNames(context),
precisionStep, boost, fieldType, docValues, nullValue, ignoreMalformed(context),
coerce(context), postingsProvider, docValuesProvider, similarity, normsLoading,
fieldDataSettings, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
fieldMapper.includeInAll(includeInAll);
return fieldMapper;
}
} | 0true
| src_main_java_org_elasticsearch_index_mapper_core_ByteFieldMapper.java |
1,221 | addOperation(operations, new Runnable() {
public void run() {
IQueue q = hazelcast.getQueue("myQ");
q.remove(new byte[100]);
}
}, 1); | 0true
| hazelcast_src_main_java_com_hazelcast_examples_AllTest.java |
316 | public static final Configuration EMPTY = new Configuration() {
@Override
public boolean has(ConfigOption option, String... umbrellaElements) {
return false;
}
@Override
public <O> O get(ConfigOption<O> option, String... umbrellaElements) {
return option.getDefaultValue();
}
@Override
public Set<String> getContainedNamespaces(ConfigNamespace umbrella, String... umbrellaElements) {
return Sets.newHashSet();
}
@Override
public Map<String, Object> getSubset(ConfigNamespace umbrella, String... umbrellaElements) {
return Maps.newHashMap();
}
@Override
public Configuration restrictTo(String... umbrellaElements) {
return EMPTY;
}
}; | 0true
| titan-core_src_main_java_com_thinkaurelius_titan_diskstorage_configuration_Configuration.java |
378 | public enum ConflictAvoidanceMode {
/**
* Disables ID allocation conflict avoidance. This setting does not
* compromise correctness. However, in a clustered Titan deployment, this
* may result in ID allocations frequently failing due to contention and
* then retrying, slowing overall write throughput.
*/
NONE,
/**
* <b>Expert feature: use with care.</b> The user manually assigns each
* Titan instance a unique conflict avoidance tag in its local graph
* configuration. The tag becomes part of globally-visible graph element IDs
* allocated by that Titan instance. Each Titan instance assumes it has
* exclusive control over its tag, and it uses datacenter-local-quorum-level
* consistency (on storage backends that support this concept) when
* allocating IDs with its tag.
* <p>
* This is useful for reducing write latency in Titan deployments atop a
* multi-datacenter distributed store.
* <p>
* <b>When this mode is set, the user is responsible for setting a unique
* tag in each Titan graph configuration. Setting the same tag on two
* different Titan configurations can lead to silent graph corruption in
* this mode! Each tag must be unique. If you're unsure about this or its
* implications, then use one of the other modes instead.</b>
*/
LOCAL_MANUAL,
/**
* The user assigns a tag to each Titan instance. The tags should be
* globally unique for optimal performance, but duplicates will not
* compromise correctness. The tag becomes part of globally-visible graph
* element IDs allocated by the instance. If each instance has a unique tag,
* then ID allocations will never conflict.
* <p>
* Unlike {@link #LOCAL_MANUAL}, setting the same tag on multiple instances
* is safe in this mode. Titan uses global-quorum-level or greater on
* storage backends that have a notion of eventual consistency, so Titan
* will detect contention and avoid double allocation even when multiple
* instances share a tag.
*/
GLOBAL_MANUAL,
/**
* Titan randomly selects a tag from the space of all possible tags when
* performing allocations. Like {@link #GLOBAL_MANUAL}, this uses at least
* global-quorum-level consistency, so even if two instances happen to
* select the same ID simultaneously, the conflict will still be detected.
*/
GLOBAL_AUTO;
} | 0true
| titan-core_src_main_java_com_thinkaurelius_titan_diskstorage_idmanagement_ConflictAvoidanceMode.java |
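The javadoc above describes tags that become part of allocated IDs so that differently-tagged instances cannot collide. A purely illustrative sketch of how a tag can partition an ID space; `TAG_BITS` and the bit layout are assumptions for the example, not Titan's actual encoding:

```java
public class TaggedIdExample {
    static final int TAG_BITS = 8; // assumed width for illustration

    static long makeId(long counter, int tag) {
        if (tag < 0 || tag >= (1 << TAG_BITS)) {
            throw new IllegalArgumentException("tag out of range");
        }
        return (counter << TAG_BITS) | tag; // low bits carry the instance tag
    }

    public static void main(String[] args) {
        // same counter value, different tags -> distinct IDs
        System.out.println(makeId(42, 1)); // 10753
        System.out.println(makeId(42, 2)); // 10754
    }
}
```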
1,995 | public interface Matcher<T> {
/**
* Returns {@code true} if this matches {@code t}, {@code false} otherwise.
*/
boolean matches(T t);
/**
* Returns a new matcher which returns {@code true} if both this and the
* given matcher return {@code true}.
*/
Matcher<T> and(Matcher<? super T> other);
/**
* Returns a new matcher which returns {@code true} if either this or the
* given matcher return {@code true}.
*/
Matcher<T> or(Matcher<? super T> other);
} | 0true
| src_main_java_org_elasticsearch_common_inject_matcher_Matcher.java |
2,006 | private class SimpleMapLoader implements MapLoader {
final int size;
final boolean slow;
SimpleMapLoader(int size, boolean slow) {
this.size = size;
this.slow = slow;
}
@Override
public Object load(Object key) {
return null;
}
@Override
public Map loadAll(Collection keys) {
if (slow) {
try {
Thread.sleep(150);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
Map result = new HashMap();
for (Object key : keys) {
result.put(key, key);
}
return result;
}
@Override
public Set loadAllKeys() {
Set keys = new HashSet();
for (int i = 0; i < size; i++) {
keys.add(i);
}
return keys;
}
} | 0true
| hazelcast_src_test_java_com_hazelcast_map_mapstore_MapStoreTest.java |
1,610 | public class LockInfo implements DataSerializable {
private String name;
private String key;
private int ownerMemberIndex;
private long acquireTime;
private int waitingThreadCount;
public LockInfo() {
}
public LockInfo(String name, String key, long acquireTime,
int ownerMemberIndex, int waitingThreadCount) {
this.acquireTime = acquireTime;
this.key = key;
this.name = name;
this.ownerMemberIndex = ownerMemberIndex;
this.waitingThreadCount = waitingThreadCount;
}
public long getAcquireTime() {
return acquireTime;
}
public String getKey() {
return key;
}
public String getName() {
return name;
}
public int getOwnerMemberIndex() {
return ownerMemberIndex;
}
public int getWaitingThreadCount() {
return waitingThreadCount;
}
@Override
public void readData(ObjectDataInput in) throws IOException {
name = in.readUTF();
key = in.readUTF();
ownerMemberIndex = in.readInt();
acquireTime = in.readLong();
waitingThreadCount = in.readInt();
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeUTF(name);
out.writeUTF(key);
out.writeInt(ownerMemberIndex);
out.writeLong(acquireTime);
out.writeInt(waitingThreadCount);
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_management_LockInfo.java |
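`readData` must consume fields in exactly the order `writeData` produced them. A minimal JDK-only round-trip sketch of that contract (the values are arbitrary):

```java
import java.io.*;

public class RoundTripExample {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        // write order: name, key, ownerMemberIndex, acquireTime, waitingThreadCount
        out.writeUTF("myLock");
        out.writeUTF("key-1");
        out.writeInt(3);
        out.writeLong(1234L);
        out.writeInt(2);

        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        // read order must match the write order exactly, as in LockInfo above
        System.out.println(in.readUTF());  // myLock
        System.out.println(in.readUTF());  // key-1
        System.out.println(in.readInt());  // 3
        System.out.println(in.readLong()); // 1234
        System.out.println(in.readInt());  // 2
    }
}
```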
1,103 | public class SemaphoreConfig {
public final static int DEFAULT_SYNC_BACKUP_COUNT = 1;
public final static int DEFAULT_ASYNC_BACKUP_COUNT = 0;
private String name;
private int initialPermits;
private int backupCount = DEFAULT_SYNC_BACKUP_COUNT;
private int asyncBackupCount = DEFAULT_ASYNC_BACKUP_COUNT;
private SemaphoreConfigReadOnly readOnly;
/**
* Creates a default configured {@link SemaphoreConfig}.
*/
public SemaphoreConfig() {
}
/**
* Creates a SemaphoreConfig by cloning another one.
*
* @param config the SemaphoreConfig to copy
* @throws IllegalArgumentException if config is null.
*/
public SemaphoreConfig(SemaphoreConfig config) {
isNotNull(config,"config");
this.name = config.getName();
this.initialPermits = config.getInitialPermits();
this.backupCount = config.getBackupCount();
this.asyncBackupCount = config.getAsyncBackupCount();
}
public SemaphoreConfigReadOnly getAsReadOnly() {
if (readOnly == null) {
readOnly = new SemaphoreConfigReadOnly(this);
}
return readOnly;
}
/**
* Gets the name of the semaphore. If no name has been configured, null is returned.
*
* @return the name of the semaphore.
*/
public String getName() {
return name;
}
/**
* Sets the name of the semaphore.
*
* @param name the name
* @return the updated SemaphoreConfig
* @throws IllegalArgumentException if name is null or empty.
*/
public SemaphoreConfig setName(String name) {
this.name = hasText(name,"name");
return this;
}
/**
* Gets the initial number of permits
*
* @return the initial number of permits.
*/
public int getInitialPermits() {
return initialPermits;
}
/**
* Sets the initial number of permits. The initial number of permits can be 0, meaning that no permits are
* available; it can also be negative, meaning that there is a shortage of permits.
*
* @param initialPermits the initial number of permits.
* @return the updated SemaphoreConfig
*/
public SemaphoreConfig setInitialPermits(int initialPermits) {
this.initialPermits = initialPermits;
return this;
}
/**
* Returns the number of synchronous backups.
*
* @return the number of synchronous backups.
* @see #setBackupCount(int)
*/
public int getBackupCount() {
return backupCount;
}
/**
* Sets the number of synchronous backups.
*
* @param backupCount the number of synchronous backups
* @return the updated SemaphoreConfig
* @throws IllegalArgumentException if backupCount is smaller than 0.
* @see #setAsyncBackupCount(int)
* @see #getBackupCount()
*/
public SemaphoreConfig setBackupCount(int backupCount) {
if(backupCount<0){
throw new IllegalArgumentException("backupCount can't be smaller than 0");
}
this.backupCount = backupCount;
return this;
}
/**
* Returns the number of asynchronous backups.
*
* @return the number of asynchronous backups.
* @see #setAsyncBackupCount(int)
*/
public int getAsyncBackupCount() {
return asyncBackupCount;
}
/**
* Sets the number of asynchronous backups.
*
* @param asyncBackupCount the number of asynchronous backups
* @return the updated SemaphoreConfig
* @throws IllegalArgumentException if asyncBackupCount is smaller than 0.
* @see #setBackupCount(int)
* @see #getAsyncBackupCount()
*/
public SemaphoreConfig setAsyncBackupCount(int asyncBackupCount) {
if(asyncBackupCount<0){
throw new IllegalArgumentException("asyncBackupCount can't be smaller than 0");
}
this.asyncBackupCount = asyncBackupCount;
return this;
}
/**
* Returns the total number of backups; the returned value will always be equal to or greater than 0.
*
* @return total number of backups.
*/
public int getTotalBackupCount(){
return asyncBackupCount + backupCount;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("SemaphoreConfig{");
sb.append("name='").append(name).append('\'');
sb.append(", initialPermits=").append(initialPermits);
sb.append(", backupCount=").append(backupCount);
sb.append(", asyncBackupCount=").append(asyncBackupCount);
sb.append('}');
return sb.toString();
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_config_SemaphoreConfig.java |
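A short usage sketch, assuming `SemaphoreConfig` from the row above is importable; the fluent chaining works because each setter returns `this`:

```java
public class SemaphoreConfigExample {
    public static void main(String[] args) {
        SemaphoreConfig cfg = new SemaphoreConfig()
                .setName("orders")        // arbitrary example name
                .setInitialPermits(3)
                .setBackupCount(1)
                .setAsyncBackupCount(1);
        System.out.println(cfg);                       // uses the toString() above
        System.out.println(cfg.getTotalBackupCount()); // 2
    }
}
```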
179 | public class ExpectedValueCheckingTest {
private IMocksControl ctrl;
private ExpectedValueCheckingStoreManager expectManager;
private KeyColumnValueStoreManager backingManager;
private LockerProvider lockerProvider;
private StoreFeatures backingFeatures;
private ModifiableConfiguration globalConfig;
private ModifiableConfiguration localConfig;
private ModifiableConfiguration defaultConfig;
private StoreTransaction consistentTx;
private StoreTransaction inconsistentTx;
private StoreTransaction expectTx;
private Locker backingLocker;
private KeyColumnValueStore backingStore;
private KeyColumnValueStore expectStore;
private Capture<BaseTransactionConfig> txConfigCapture;
private BaseTransactionConfig defaultTxConfig;
private static final String STORE_NAME = "ExpectTestStore";
private static final String LOCK_SUFFIX = "_expecttest";
private static final String LOCKER_NAME = STORE_NAME + LOCK_SUFFIX;
private static final StaticBuffer DATA_KEY = BufferUtil.getIntBuffer(1);
private static final StaticBuffer DATA_COL = BufferUtil.getIntBuffer(2);
private static final StaticBuffer DATA_VAL = BufferUtil.getIntBuffer(4);
private static final StaticBuffer LOCK_KEY = BufferUtil.getIntBuffer(32);
private static final StaticBuffer LOCK_COL = BufferUtil.getIntBuffer(64);
private static final StaticBuffer LOCK_VAL = BufferUtil.getIntBuffer(128);
@Before
public void setupMocks() throws BackendException {
// Initialize mock controller
ctrl = EasyMock.createStrictControl();
ctrl.checkOrder(true);
// Setup some config mocks and objects
backingManager = ctrl.createMock(KeyColumnValueStoreManager.class);
lockerProvider = ctrl.createMock(LockerProvider.class);
globalConfig = GraphDatabaseConfiguration.buildConfiguration();
localConfig = GraphDatabaseConfiguration.buildConfiguration();
defaultConfig = GraphDatabaseConfiguration.buildConfiguration();
// Set some properties on the configs, just so that global/local/default can be easily distinguished
globalConfig.set(GraphDatabaseConfiguration.UNIQUE_INSTANCE_ID, "global");
localConfig.set(GraphDatabaseConfiguration.UNIQUE_INSTANCE_ID, "local");
defaultConfig.set(GraphDatabaseConfiguration.UNIQUE_INSTANCE_ID, "default");
defaultTxConfig = new StandardBaseTransactionConfig.Builder().customOptions(defaultConfig).timestampProvider(Timestamps.MICRO).build();
backingFeatures = new StandardStoreFeatures.Builder().keyConsistent(globalConfig, localConfig).build();
// Setup behavior specification starts below this line
// 1. Construct manager
// The EVCSManager ctor retrieves the backing store's features and stores it in an instance field
expect(backingManager.getFeatures()).andReturn(backingFeatures).once();
// 2. Begin transaction
// EVCTx begins two transactions on the backingManager: one with globalConfig and one with localConfig
// The capture is used in the @After method to check the config
txConfigCapture = new Capture<BaseTransactionConfig>(CaptureType.ALL);
inconsistentTx = ctrl.createMock(StoreTransaction.class);
consistentTx = ctrl.createMock(StoreTransaction.class);
expect(backingManager.beginTransaction(capture(txConfigCapture))).andReturn(inconsistentTx);
expect(backingManager.beginTransaction(capture(txConfigCapture))).andReturn(consistentTx);
// 3. Open a database
backingLocker = ctrl.createMock(Locker.class);
backingStore = ctrl.createMock(KeyColumnValueStore.class);
expect(backingManager.openDatabase(STORE_NAME)).andReturn(backingStore);
expect(backingStore.getName()).andReturn(STORE_NAME);
expect(lockerProvider.getLocker(LOCKER_NAME)).andReturn(backingLocker);
// Carry out setup behavior against mocks
ctrl.replay();
// 1. Construct manager
expectManager = new ExpectedValueCheckingStoreManager(backingManager, LOCK_SUFFIX, lockerProvider, new StandardDuration(1L, TimeUnit.SECONDS));
// 2. Begin transaction
expectTx = expectManager.beginTransaction(defaultTxConfig);
// 3. Open a database
expectStore = expectManager.openDatabase(STORE_NAME);
// Verify behavior and reset the mocks for test methods to use
ctrl.verify();
ctrl.reset();
}
@After
public void verifyMocks() {
ctrl.verify();
ctrl.reset();
// Check capture created in the @Before method
assertTrue(txConfigCapture.hasCaptured());
List<BaseTransactionConfig> txCfgs = txConfigCapture.getValues();
assertEquals(2, txCfgs.size());
// First backing store transaction should use default tx config
assertEquals("default", txCfgs.get(0).getCustomOption(GraphDatabaseConfiguration.UNIQUE_INSTANCE_ID));
// Second backing store transaction should use global strong consistency config
assertEquals("global", txCfgs.get(1).getCustomOption(GraphDatabaseConfiguration.UNIQUE_INSTANCE_ID));
// The order in which these transactions are opened isn't really significant;
// testing them in order is kind of overspecifying the impl's behavior.
// Could probably relax the ordering selectively here with some thought, but
// I want to keep order checking on in general for the EasyMock control.
}
@Test
public void testMutateWithLockUsesConsistentTx() throws BackendException {
final ImmutableList<Entry> adds = ImmutableList.of(StaticArrayEntry.of(DATA_COL, DATA_VAL));
final ImmutableList<StaticBuffer> dels = ImmutableList.<StaticBuffer>of();
final KeyColumn kc = new KeyColumn(LOCK_KEY, LOCK_COL);
// 1. Acquire a lock
backingLocker.writeLock(kc, consistentTx);
// 2. Run a mutation
// N.B. mutation coordinates do not overlap with the lock, but consistentTx should be used anyway
// 2.1. Check locks & expected values before mutating data
backingLocker.checkLocks(consistentTx);
StaticBuffer nextBuf = BufferUtil.nextBiggerBuffer(kc.getColumn());
KeySliceQuery expectedValueQuery = new KeySliceQuery(kc.getKey(), kc.getColumn(), nextBuf);
expect(backingStore.getSlice(expectedValueQuery, consistentTx)) // expected value read must use strong consistency
.andReturn(StaticArrayEntryList.of(StaticArrayEntry.of(LOCK_COL, LOCK_VAL)));
// 2.2. Mutate data
backingStore.mutate(DATA_KEY, adds, dels, consistentTx); // writes by txs with locks must use strong consistency
ctrl.replay();
// 1. Lock acquisition
expectStore.acquireLock(LOCK_KEY, LOCK_COL, LOCK_VAL, expectTx);
// 2. Mutate
expectStore.mutate(DATA_KEY, adds, dels, expectTx);
}
@Test
public void testMutateWithoutLockUsesInconsistentTx() throws BackendException {
// Run a mutation
final ImmutableList<Entry> adds = ImmutableList.of(StaticArrayEntry.of(DATA_COL, DATA_VAL));
final ImmutableList<StaticBuffer> dels = ImmutableList.<StaticBuffer>of();
backingStore.mutate(DATA_KEY, adds, dels, inconsistentTx); // consistency level is unconstrained w/o locks
ctrl.replay();
expectStore.mutate(DATA_KEY, adds, dels, expectTx);
}
@Test
public void testMutateManyWithLockUsesConsistentTx() throws BackendException {
final ImmutableList<Entry> adds = ImmutableList.of(StaticArrayEntry.of(DATA_COL, DATA_VAL));
final ImmutableList<StaticBuffer> dels = ImmutableList.<StaticBuffer>of();
Map<String, Map<StaticBuffer, KCVMutation>> mutations =
ImmutableMap.<String, Map<StaticBuffer, KCVMutation>>of(STORE_NAME,
ImmutableMap.<StaticBuffer, KCVMutation>of(DATA_KEY, new KCVMutation(adds, dels)));
final KeyColumn kc = new KeyColumn(LOCK_KEY, LOCK_COL);
// Acquire a lock
backingLocker.writeLock(kc, consistentTx);
// 2. Run mutateMany
// 2.1. Check locks & expected values before mutating data
backingLocker.checkLocks(consistentTx);
StaticBuffer nextBuf = BufferUtil.nextBiggerBuffer(kc.getColumn());
KeySliceQuery expectedValueQuery = new KeySliceQuery(kc.getKey(), kc.getColumn(), nextBuf);
expect(backingStore.getSlice(expectedValueQuery, consistentTx)) // expected value read must use strong consistency
.andReturn(StaticArrayEntryList.of(StaticArrayEntry.of(LOCK_COL, LOCK_VAL)));
// 2.2. Run mutateMany on backing manager to modify data
backingManager.mutateMany(mutations, consistentTx); // writes by txs with locks must use strong consistency
ctrl.replay();
// Lock acquisition
expectStore.acquireLock(LOCK_KEY, LOCK_COL, LOCK_VAL, expectTx);
// Mutate
expectManager.mutateMany(mutations, expectTx);
}
@Test
public void testMutateManyWithoutLockUsesInconsistentTx() throws BackendException {
final ImmutableList<Entry> adds = ImmutableList.of(StaticArrayEntry.of(DATA_COL, DATA_VAL));
final ImmutableList<StaticBuffer> dels = ImmutableList.<StaticBuffer>of();
Map<String, Map<StaticBuffer, KCVMutation>> mutations =
ImmutableMap.<String, Map<StaticBuffer, KCVMutation>>of(STORE_NAME,
ImmutableMap.<StaticBuffer, KCVMutation>of(DATA_KEY, new KCVMutation(adds, dels)));
// Run mutateMany
backingManager.mutateMany(mutations, inconsistentTx); // consistency level is unconstrained w/o locks
ctrl.replay();
// Run mutateMany
expectManager.mutateMany(mutations, expectTx);
}
} | 0true
| titan-test_src_main_java_com_thinkaurelius_titan_diskstorage_ExpectedValueCheckingTest.java |
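The test above leans on EasyMock's strict record/replay/verify lifecycle. A minimal, self-contained sketch of that lifecycle with a plain `List` mock; assumes EasyMock 3.x on the classpath, and the class name is illustrative:

```java
import static org.easymock.EasyMock.*;
import java.util.List;

public class RecordReplayVerifyExample {
    public static void main(String[] args) {
        // record phase: declare the calls we expect and their return values
        List<String> mock = createStrictMock(List.class);
        expect(mock.get(0)).andReturn("first");
        replay(mock);
        // exercise phase: the code under test interacts with the mock
        System.out.println(mock.get(0)); // "first"
        // verify phase: fails if an expected call never happened
        verify(mock);
    }
}
```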
1,270 | public class OStorageLocal extends OStorageLocalAbstract {
private final int DELETE_MAX_RETRIES;
private final int DELETE_WAIT_TIME;
private final Map<String, OCluster> clusterMap = new LinkedHashMap<String, OCluster>();
private OCluster[] clusters = new OCluster[0];
private ODataLocal[] dataSegments = new ODataLocal[0];
private final OStorageLocalTxExecuter txManager;
private String storagePath;
private final OStorageVariableParser variableParser;
private int defaultClusterId = -1;
private static String[] ALL_FILE_EXTENSIONS = { "ocf", ".och", ".ocl", ".oda", ".odh", ".otx", ".ocs",
".oef", ".oem", OWriteAheadLog.MASTER_RECORD_EXTENSION, OWriteAheadLog.WAL_SEGMENT_EXTENSION,
OLocalHashTableIndexEngine.BUCKET_FILE_EXTENSION, OLocalHashTableIndexEngine.METADATA_FILE_EXTENSION,
OLocalHashTableIndexEngine.TREE_FILE_EXTENSION, OSBTreeIndexEngine.DATA_FILE_EXTENSION };
private long positionGenerator = 1;
private OModificationLock modificationLock = new OModificationLock();
private final Set<String> clustersToSyncImmediately = new HashSet<String>();
public OStorageLocal(final String iName, final String iFilePath, final String iMode) throws IOException {
super(iName, iFilePath, iMode);
File f = new File(url);
if (f.exists() || !exists(f.getParent())) {
// ALREADY EXISTS OR NOT LEGACY
storagePath = OSystemVariableResolver.resolveSystemVariables(OFileUtils.getPath(new File(url).getPath()));
} else {
// LEGACY DB
storagePath = OSystemVariableResolver.resolveSystemVariables(OFileUtils.getPath(new File(url).getParent()));
}
storagePath = OIOUtils.getPathFromDatabaseName(storagePath);
variableParser = new OStorageVariableParser(storagePath);
configuration = new OStorageConfigurationSegment(this);
txManager = new OStorageLocalTxExecuter(this, configuration.txSegment);
DELETE_MAX_RETRIES = OGlobalConfiguration.FILE_MMAP_FORCE_RETRY.getValueAsInteger();
DELETE_WAIT_TIME = OGlobalConfiguration.FILE_MMAP_FORCE_DELAY.getValueAsInteger();
final String[] clustersToSync = OGlobalConfiguration.NON_TX_CLUSTERS_SYNC_IMMEDIATELY.getValueAsString().trim()
.split("\\s*,\\s*");
clustersToSyncImmediately.addAll(Arrays.asList(clustersToSync));
installProfilerHooks();
long diskCacheSize = OGlobalConfiguration.DISK_CACHE_SIZE.getValueAsLong() * 1024 * 1024;
long writeCacheSize = (long) Math.floor((((double) OGlobalConfiguration.DISK_WRITE_CACHE_PART.getValueAsInteger()) / 100.0)
* diskCacheSize);
long readCacheSize = diskCacheSize - writeCacheSize;
diskCache = new OReadWriteDiskCache(name, readCacheSize, writeCacheSize,
OGlobalConfiguration.DISK_CACHE_PAGE_SIZE.getValueAsInteger() * 1024,
OGlobalConfiguration.DISK_WRITE_CACHE_PAGE_TTL.getValueAsLong() * 1000,
OGlobalConfiguration.DISK_WRITE_CACHE_PAGE_FLUSH_INTERVAL.getValueAsInteger(), this, null, false, true);
}
public synchronized void open(final String iUserName, final String iUserPassword, final Map<String, Object> iProperties) {
final long timer = Orient.instance().getProfiler().startChrono();
lock.acquireExclusiveLock();
try {
addUser();
if (status != STATUS.CLOSED)
// ALREADY OPENED: THIS IS THE CASE WHEN A STORAGE INSTANCE IS
// REUSED
return;
if (!exists())
throw new OStorageException("Cannot open the storage '" + name + "' because it does not exist in path: " + url);
status = STATUS.OPEN;
// OPEN BASIC SEGMENTS
int pos;
pos = registerDataSegment(new OStorageDataConfiguration(configuration, OStorage.DATA_DEFAULT_NAME, 0, getStoragePath()));
dataSegments[pos].open();
addDefaultClusters();
// REGISTER DATA SEGMENT
for (int i = 0; i < configuration.dataSegments.size(); ++i) {
final OStorageDataConfiguration dataConfig = configuration.dataSegments.get(i);
if (dataConfig == null)
continue;
pos = registerDataSegment(dataConfig);
if (pos == -1) {
// CLOSE AND REOPEN TO BE SURE ALL THE FILE SEGMENTS ARE
// OPENED
dataSegments[i].close();
dataSegments[i] = new ODataLocal(this, dataConfig, i);
dataSegments[i].open();
} else
dataSegments[pos].open();
}
// REGISTER CLUSTER
for (int i = 0; i < configuration.clusters.size(); ++i) {
final OStorageClusterConfiguration clusterConfig = configuration.clusters.get(i);
if (clusterConfig != null) {
pos = createClusterFromConfig(clusterConfig);
try {
if (pos == -1) {
// CLOSE AND REOPEN TO BE SURE ALL THE FILE SEGMENTS ARE
// OPENED
if (clusters[i] != null && clusters[i] instanceof OClusterLocal)
clusters[i].close();
clusters[i] = Orient.instance().getClusterFactory().createCluster(OClusterLocal.TYPE);
clusters[i].configure(this, clusterConfig);
clusterMap.put(clusters[i].getName(), clusters[i]);
clusters[i].open();
} else {
if (clusterConfig.getName().equals(CLUSTER_DEFAULT_NAME))
defaultClusterId = pos;
clusters[pos].open();
}
} catch (FileNotFoundException e) {
OLogManager.instance().warn(
this,
"Error on loading cluster '" + clusters[i].getName() + "' (" + i
+ "): file not found. It will be excluded from current database '" + getName() + "'.");
clusterMap.remove(clusters[i].getName());
clusters[i] = null;
}
} else {
clusters = Arrays.copyOf(clusters, clusters.length + 1);
clusters[i] = null;
}
}
if (OGlobalConfiguration.USE_WAL.getValueAsBoolean())
writeAheadLog = new OWriteAheadLog(this);
txManager.open();
} catch (Exception e) {
close(true);
throw new OStorageException("Cannot open local storage '" + url + "' with mode=" + mode, e);
} finally {
lock.releaseExclusiveLock();
Orient.instance().getProfiler().stopChrono("db." + name + ".open", "Open a database", timer, "db.*.open");
}
}
public ODiskCache getDiskCache() {
return diskCache;
}
private void addDefaultClusters() throws IOException {
createClusterFromConfig(new OStoragePhysicalClusterConfigurationLocal(configuration, clusters.length, 0,
OMetadataDefault.CLUSTER_INTERNAL_NAME));
configuration.load();
createClusterFromConfig(new OStoragePhysicalClusterConfigurationLocal(configuration, clusters.length, 0,
OMetadataDefault.CLUSTER_INDEX_NAME));
createClusterFromConfig(new OStoragePhysicalClusterConfigurationLocal(configuration, clusters.length, 0,
OMetadataDefault.CLUSTER_MANUAL_INDEX_NAME));
defaultClusterId = createClusterFromConfig(new OStoragePhysicalClusterConfigurationLocal(configuration, clusters.length, 0,
CLUSTER_DEFAULT_NAME));
}
public void create(final Map<String, Object> iProperties) {
final long timer = Orient.instance().getProfiler().startChrono();
lock.acquireExclusiveLock();
try {
if (status != STATUS.CLOSED)
throw new OStorageException("Cannot create new storage '" + name + "' because it is not closed (status:" + status + ")");
addUser();
final File storageFolder = new File(storagePath);
if (!storageFolder.exists())
storageFolder.mkdir();
if (exists())
throw new OStorageException("Cannot create new storage '" + name + "' because it already exists");
status = STATUS.OPEN;
addDataSegment(OStorage.DATA_DEFAULT_NAME);
addDataSegment(OMetadataDefault.DATASEGMENT_INDEX_NAME);
// ADD THE METADATA CLUSTER TO STORE INTERNAL STUFF
addCluster(OStorage.CLUSTER_TYPE.PHYSICAL.toString(), OMetadataDefault.CLUSTER_INTERNAL_NAME, null, null, true);
// ADD THE INDEX CLUSTER TO STORE, BY DEFAULT, ALL THE RECORDS OF INDEXING IN THE INDEX DATA SEGMENT
addCluster(OStorage.CLUSTER_TYPE.PHYSICAL.toString(), OMetadataDefault.CLUSTER_INDEX_NAME, null,
OMetadataDefault.DATASEGMENT_INDEX_NAME, true);
// ADD THE INDEX CLUSTER TO STORE, BY DEFAULT, ALL THE RECORDS OF INDEXING
addCluster(OStorage.CLUSTER_TYPE.PHYSICAL.toString(), OMetadataDefault.CLUSTER_MANUAL_INDEX_NAME, null, null, true);
// ADD THE DEFAULT CLUSTER
defaultClusterId = addCluster(OStorage.CLUSTER_TYPE.PHYSICAL.toString(), CLUSTER_DEFAULT_NAME, null, null, false);
configuration.create();
writeAheadLog = new OWriteAheadLog(this);
txManager.create();
} catch (OStorageException e) {
close();
throw e;
} catch (IOException e) {
close();
throw new OStorageException("Error on creation of storage '" + name + "'", e);
} finally {
lock.releaseExclusiveLock();
Orient.instance().getProfiler().stopChrono("db." + name + ".create", "Create a database", timer, "db.*.create");
}
}
public void reload() {
}
public boolean exists() {
return exists(storagePath);
}
private boolean exists(String path) {
return new File(path + "/" + OStorage.DATA_DEFAULT_NAME + ".0" + ODataLocal.DEF_EXTENSION).exists();
}
@Override
public void close(final boolean iForce) {
final long timer = Orient.instance().getProfiler().startChrono();
lock.acquireExclusiveLock();
try {
if (!checkForClose(iForce))
return;
status = STATUS.CLOSING;
for (OCluster cluster : clusters)
if (cluster != null)
cluster.close();
clusters = new OCluster[0];
clusterMap.clear();
for (ODataLocal data : dataSegments)
if (data != null)
data.close();
dataSegments = new ODataLocal[0];
txManager.close();
if (configuration != null)
configuration.close();
level2Cache.shutdown();
OMMapManagerLocator.getInstance().flush();
super.close(iForce);
uninstallProfilerHooks();
if (diskCache != null)
diskCache.close();
if (writeAheadLog != null)
writeAheadLog.delete();
Orient.instance().unregisterStorage(this);
status = STATUS.CLOSED;
} catch (IOException e) {
OLogManager.instance().error(this, "Error on closing of storage '" + name, e, OStorageException.class);
} finally {
lock.releaseExclusiveLock();
Orient.instance().getProfiler().stopChrono("db." + name + ".close", "Close a database", timer, "db.*.close");
}
}
/**
   * Deletes physically all the database files (those ending with ".och", ".ocl", ".oda", ".odh", ".otx"). Also tries to delete
   * the container folder if the directory is empty. If files are locked, it retries up to 10 times before raising an exception.
*/
public void delete() {
// CLOSE THE DATABASE BY REMOVING THE CURRENT USER
if (status != STATUS.CLOSED) {
if (getUsers() > 0) {
while (removeUser() > 0)
;
}
}
close(true);
try {
Orient.instance().unregisterStorage(this);
} catch (Exception e) {
OLogManager.instance().error(this, "Cannot unregister storage", e);
}
final long timer = Orient.instance().getProfiler().startChrono();
// GET REAL DIRECTORY
File dbDir = new File(OIOUtils.getPathFromDatabaseName(OSystemVariableResolver.resolveSystemVariables(url)));
if (!dbDir.exists() || !dbDir.isDirectory())
dbDir = dbDir.getParentFile();
lock.acquireExclusiveLock();
try {
if (diskCache != null)
diskCache.delete();
// RETRIES
for (int i = 0; i < DELETE_MAX_RETRIES; ++i) {
if (dbDir.exists() && dbDir.isDirectory()) {
int notDeletedFiles = 0;
// TRY TO DELETE ALL THE FILES
File[] files = dbDir.listFiles();
if (files != null) {
for (File f : files) {
// DELETE ONLY THE SUPPORTED FILES
for (String ext : ALL_FILE_EXTENSIONS)
if (f.getPath().endsWith(ext)) {
if (!f.delete()) {
notDeletedFiles++;
}
break;
}
}
}
if (notDeletedFiles == 0) {
// TRY TO DELETE ALSO THE DIRECTORY IF IT'S EMPTY
dbDir.delete();
return;
}
} else
return;
OLogManager
.instance()
.debug(
this,
"Cannot delete database files because they are still locked by the OrientDB process: waiting %d ms and retrying %d/%d...",
DELETE_WAIT_TIME, i, DELETE_MAX_RETRIES);
// FORCE FINALIZATION TO COLLECT ALL THE PENDING BUFFERS
OMemoryWatchDog.freeMemoryForResourceCleanup(DELETE_WAIT_TIME);
}
throw new OStorageException("Cannot delete database '" + name + "' located in: " + dbDir + ". Database files seem locked");
} catch (IOException ioe) {
throw new OStorageException("Cannot delete database '" + name + "' located in: " + dbDir + ".", ioe);
} finally {
lock.releaseExclusiveLock();
Orient.instance().getProfiler().stopChrono("db." + name + ".drop", "Drop a database", timer, "db.*.drop");
}
}
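  // The delete() retry pattern above, shown in isolation (a sketch;
  // tryDeleteAllFiles() is a hypothetical helper standing in for the
  // file-deletion loop):
  //
  //   for (int i = 0; i < DELETE_MAX_RETRIES; ++i) {
  //     if (tryDeleteAllFiles())
  //       return;
  //     // force finalization so pending buffers release their file handles
  //     OMemoryWatchDog.freeMemoryForResourceCleanup(DELETE_WAIT_TIME);
  //   }
  //   throw new OStorageException("Database files seem locked");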
public boolean check(final boolean iVerbose, final OCommandOutputListener iListener) {
int errors = 0;
int warnings = 0;
lock.acquireSharedLock();
try {
      long totalRecords = 0;
final long start = System.currentTimeMillis();
formatMessage(iVerbose, iListener, "\nChecking database '" + getName() + "'...\n");
formatMessage(iVerbose, iListener, "\n(1) Checking data-clusters. This activity checks if pointers to data are coherent.");
final OPhysicalPosition ppos = new OPhysicalPosition();
// BROWSE ALL THE CLUSTERS
for (OCluster c : clusters) {
if (!(c instanceof OClusterLocal))
continue;
formatMessage(iVerbose, iListener, "\n- data-cluster #%-5d %s -> ", c.getId(), c.getName());
// BROWSE ALL THE RECORDS
for (final OClusterEntryIterator it = c.absoluteIterator(); it.hasNext();) {
final OPhysicalPosition physicalPosition = it.next();
          totalRecords++;
try {
if (physicalPosition.dataSegmentId >= dataSegments.length) {
formatMessage(iVerbose, iListener, "WARN: Found wrong data segment %d ", physicalPosition.dataSegmentId);
warnings++;
}
if (physicalPosition.recordSize < 0) {
formatMessage(iVerbose, iListener, "WARN: Found wrong record size %d ", physicalPosition.recordSize);
warnings++;
}
if (physicalPosition.recordSize >= 1000000) {
formatMessage(iVerbose, iListener, "WARN: Found suspected big record size %d. Is it corrupted? ",
physicalPosition.recordSize);
warnings++;
}
if (physicalPosition.dataSegmentPos > dataSegments[physicalPosition.dataSegmentId].getFilledUpTo()) {
formatMessage(iVerbose, iListener, "WARN: Found wrong pointer to data chunk %d out of data segment size (%d) ",
physicalPosition.dataSegmentPos, dataSegments[physicalPosition.dataSegmentId].getFilledUpTo());
warnings++;
}
if (physicalPosition.recordVersion.isTombstone() && (c instanceof OClusterLocal)) {
// CHECK IF THE HOLE EXISTS
boolean found = false;
int tot = ((OClusterLocal) c).holeSegment.getHoles();
for (int i = 0; i < tot; ++i) {
final long recycledPosition = ((OClusterLocal) c).holeSegment.getEntryPosition(i) / OClusterLocal.RECORD_SIZE;
if (recycledPosition == physicalPosition.clusterPosition.longValue()) {
// FOUND
found = true;
break;
}
}
if (!found) {
formatMessage(iVerbose, iListener, "WARN: Cannot find hole for deleted record %d:%d ", c.getId(),
physicalPosition.clusterPosition);
warnings++;
}
}
} catch (IOException e) {
formatMessage(iVerbose, iListener, "WARN: Error while reading record #%d:%d ", e, c.getId(), ppos.clusterPosition);
warnings++;
}
}
if (c instanceof OClusterLocal) {
final int totalHoles = ((OClusterLocal) c).holeSegment.getHoles();
if (totalHoles > 0) {
formatMessage(iVerbose, iListener, " [found " + totalHoles + " hole(s)]");
// CHECK HOLES
for (int i = 0; i < totalHoles; ++i) {
long recycledPosition = -1;
try {
ppos.clusterPosition = OClusterPositionFactory.INSTANCE.valueOf(((OClusterLocal) c).holeSegment.getEntryPosition(i)
/ OClusterLocal.RECORD_SIZE);
OPhysicalPosition physicalPosition = c.getPhysicalPosition(ppos);
if (physicalPosition != null && !physicalPosition.recordVersion.isTombstone()) {
formatMessage(iVerbose, iListener,
"WARN: Found wrong hole %d/%d for deleted record %d:%d. The record seems good ", i, totalHoles - 1,
c.getId(), recycledPosition);
warnings++;
}
} catch (Exception e) {
formatMessage(iVerbose, iListener, "WARN: Found wrong hole %d/%d for deleted record %d:%d. The record not exists ",
i, totalHoles - 1, c.getId(), recycledPosition);
warnings++;
}
}
}
}
formatMessage(iVerbose, iListener, "OK");
}
int totalChunks = 0;
formatMessage(iVerbose, iListener,
"\n\n(2) Checking data chunks integrity. In this phase data segments are scanned to check the back reference into the clusters.");
for (ODataLocal d : dataSegments) {
if (d == null)
continue;
formatMessage(iVerbose, iListener, "\n- data-segment %s (id=%d) size=%d/%d...", d.getName(), d.getId(), d.getFilledUpTo(),
d.getSize(), d.getHoles());
int nextPos = 0;
// GET DATA-SEGMENT HOLES
final List<ODataHoleInfo> holes = d.getHolesList();
if (iVerbose) {
formatMessage(iVerbose, iListener, "\n-- found %d holes:", holes.size());
for (ODataHoleInfo hole : holes)
formatMessage(iVerbose, iListener, "\n--- hole #%-7d offset=%-10d size=%-7d", hole.holeOffset, hole.dataOffset,
hole.size);
}
// CHECK CHUNKS
formatMessage(iVerbose, iListener, "\n-- checking chunks:");
int pos;
do {
try {
pos = nextPos;
            // CHECK WHETHER THE RECORD FALLS INSIDE A HOLE
ODataHoleInfo foundHole = null;
for (ODataHoleInfo hole : holes) {
if (hole.dataOffset == pos) {
// HOLE FOUND!
foundHole = hole;
break;
}
}
int recordSize = d.getRecordSize(pos);
formatMessage(iVerbose, iListener, "\n--- chunk #%-7d offset=%-10d size=%-7d -> ", totalChunks, pos, recordSize);
if (recordSize < 0) {
recordSize *= -1;
// HOLE: CHECK HOLE PRESENCE
if (foundHole != null) {
if (foundHole.size != recordSize) {
formatMessage(iVerbose, iListener,
"WARN: Chunk %s:%d (offset=%d size=%d) differs in size with the hole size %d ", d.getName(), totalChunks,
pos, recordSize, foundHole.size);
warnings++;
}
nextPos = pos + foundHole.size;
} else {
formatMessage(iVerbose, iListener, "WARN: Chunk %s:%d (offset=%d size=%d) has no hole for deleted chunk ",
d.getName(), totalChunks, pos, recordSize);
warnings++;
nextPos = pos + recordSize;
}
} else {
if (foundHole != null) {
formatMessage(
iVerbose,
iListener,
"WARN: Chunk %s:%d (offset=%d size=%d) it's between the holes (hole #%d) even if has no negative recordSize. Jump the content ",
d.getName(), totalChunks, pos, recordSize, foundHole.holeOffset);
warnings++;
nextPos = pos + foundHole.size;
} else {
// REGULAR DATA CHUNK
nextPos = pos + ODataLocal.RECORD_FIX_SIZE + recordSize;
final byte[] buffer = d.getRecord(pos);
if (buffer.length != recordSize) {
formatMessage(iVerbose, iListener,
"WARN: Chunk %s:%d (offset=%d size=%d) has wrong record size because the record length is %d ", d.getName(),
totalChunks, pos, recordSize, buffer.length);
warnings++;
}
final ORecordId rid = d.getRecordRid(pos);
if (!rid.isValid()) {
formatMessage(iVerbose, iListener, "WARN: Chunk %s:%d (offset=%d size=%d) points to invalid RID %s ",
d.getName(), totalChunks, pos, recordSize, rid);
warnings++;
} else {
if (rid.clusterId >= clusters.length) {
formatMessage(
iVerbose,
iListener,
"WARN: Chunk %s:%d (offset=%d size=%d) has invalid RID because points to %s but configured clusters are %d in total ",
d.getName(), totalChunks, pos, recordSize, rid, clusters.length);
warnings++;
} else if (clusters[rid.clusterId] == null) {
formatMessage(
iVerbose,
iListener,
"WARN: Chunk %s:%d (offset=%d size=%d) has invalid RID because points to %s but the cluster %d not exists ",
d.getName(), totalChunks, pos, recordSize, rid, rid.clusterId);
warnings++;
} else {
ppos.clusterPosition = rid.clusterPosition;
clusters[rid.clusterId].getPhysicalPosition(ppos);
if (ppos.dataSegmentId != d.getId()) {
formatMessage(
iVerbose,
iListener,
"WARN: Chunk %s:%d (offset=%d size=%d) point to the RID %d but it doesn't point to current data segment %d but to %d ",
d.getName(), totalChunks, pos, recordSize, rid, d.getId(), ppos.dataSegmentId);
warnings++;
}
if (ppos.dataSegmentPos != pos) {
formatMessage(
iVerbose,
iListener,
"WARN: Chunk %s:%d (offset=%d size=%d) point to the RID %d but it doesn't point to current chunk %d but to %d ",
d.getName(), totalChunks, pos, recordSize, rid, ppos.dataSegmentPos, pos);
warnings++;
}
}
}
}
}
totalChunks++;
formatMessage(iVerbose, iListener, "OK");
} catch (Exception e) {
iListener.onMessage("ERROR: " + e.toString());
// OLogManager.instance().warn(this, "ERROR: Chunk %s:%d (offset=%d) error: %s", e, d.getName(),
// totalChunks, pos, e.toString());
errors++;
}
} while (nextPos < d.getFilledUpTo());
formatMessage(iVerbose, iListener, "\n");
}
iListener.onMessage("\nCheck of database completed in " + (System.currentTimeMillis() - start)
+ "ms:\n- Total records checked: " + totalRecors + "\n- Total chunks checked.: " + totalChunks
+ "\n- Warnings.............: " + warnings + "\n- Errors...............: " + errors + "\n");
} finally {
lock.releaseSharedLock();
}
return errors == 0;
}
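  // Example invocation of the consistency check (a sketch; OCommandOutputListener
  // is OrientDB's output callback with a single onMessage(String) method):
  //
  //   boolean ok = storage.check(true, new OCommandOutputListener() {
  //     public void onMessage(final String iText) {
  //       System.out.print(iText);
  //     }
  //   });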
public ODataLocal getDataSegmentById(final int iDataSegmentId) {
checkOpeness();
lock.acquireSharedLock();
try {
if (iDataSegmentId >= dataSegments.length)
throw new IllegalArgumentException("Data segment #" + iDataSegmentId + " does not exist in database '" + name + "'");
return dataSegments[iDataSegmentId];
} finally {
lock.releaseSharedLock();
}
}
public int getDataSegmentIdByName(final String iDataSegmentName) {
if (iDataSegmentName == null)
return 0;
checkOpeness();
lock.acquireSharedLock();
try {
for (ODataLocal d : dataSegments) {
if (d != null && d.getName().equalsIgnoreCase(iDataSegmentName))
return d.getId();
}
throw new IllegalArgumentException("Data segment '" + iDataSegmentName + "' does not exist in database '" + name + "'");
} finally {
lock.releaseSharedLock();
}
}
/**
   * Adds a new data segment in the default segment directory, with a filename equal to the segment name.
*/
public int addDataSegment(final String iDataSegmentName) {
return addDataSegment(iDataSegmentName, null);
}
public int addDataSegment(String iSegmentName, final String iDirectory) {
checkOpeness();
iSegmentName = iSegmentName.toLowerCase();
lock.acquireExclusiveLock();
try {
final OStorageDataConfiguration conf = new OStorageDataConfiguration(configuration, iSegmentName, -1, iDirectory);
final int pos = registerDataSegment(conf);
if (pos == -1)
throw new OConfigurationException("Cannot add segment " + conf.name + " because it is already part of storage '" + name
+ "'");
dataSegments[pos].create(-1);
// UPDATE CONFIGURATION
conf.id = pos;
if (pos == configuration.dataSegments.size())
configuration.dataSegments.add(conf);
else
configuration.dataSegments.set(pos, conf);
configuration.update();
return pos;
} catch (Throwable e) {
OLogManager.instance().error(this, "Error on creation of new data segment '" + iSegmentName + "' in: " + iDirectory, e,
OStorageException.class);
return -1;
} finally {
lock.releaseExclusiveLock();
}
}
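  // Usage sketch ("blobs" and the directory path are illustrative values):
  //
  //   int segId = storage.addDataSegment("blobs");            // default directory
  //   int other = storage.addDataSegment("blobs2", "/data");  // explicit directory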
/**
   * Adds a new cluster to the storage. Type can be: "physical" or "logical".
*/
public int addCluster(final String iClusterType, String iClusterName, final String iLocation, final String iDataSegmentName,
boolean forceListBased, final Object... iParameters) {
checkOpeness();
lock.acquireExclusiveLock();
try {
final OCluster cluster;
if (iClusterName != null) {
iClusterName = iClusterName.toLowerCase();
// FIND THE FIRST AVAILABLE CLUSTER ID
int clusterPos = clusters.length;
for (int i = 0; i < clusters.length; ++i)
if (clusters[i] == null) {
clusterPos = i;
break;
}
cluster = Orient.instance().getClusterFactory().createCluster(iClusterType);
cluster.configure(this, clusterPos, iClusterName, iLocation, getDataSegmentIdByName(iDataSegmentName), iParameters);
} else
cluster = null;
final int clusterId = registerCluster(cluster);
if (cluster != null) {
cluster.create(-1);
configuration.update();
}
return clusterId;
} catch (Exception e) {
OLogManager.instance().exception("Error in creation of new cluster '" + iClusterName + "' of type: " + iClusterType, e,
OStorageException.class);
} finally {
lock.releaseExclusiveLock();
}
return -1;
}
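  // Usage sketch (cluster name "invoice" is illustrative; null location and
  // data segment select the defaults):
  //
  //   int clusterId = storage.addCluster(OStorage.CLUSTER_TYPE.PHYSICAL.toString(),
  //       "invoice", null, null, false);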
public int addCluster(String iClusterType, String iClusterName, int iRequestedId, String iLocation, String iDataSegmentName,
boolean forceListBased, Object... iParameters) {
throw new UnsupportedOperationException("This operation is unsupported for " + getType()
+ " storage. If you are doing import please use parameter -preserveClusterIDs=false .");
}
public ODataLocal[] getDataSegments() {
return dataSegments;
}
public OStorageLocalTxExecuter getTxManager() {
return txManager;
}
public boolean dropCluster(final int iClusterId, final boolean iTruncate) {
lock.acquireExclusiveLock();
try {
if (iClusterId < 0 || iClusterId >= clusters.length)
throw new IllegalArgumentException("Cluster id '" + iClusterId + "' is outside the of range of configured clusters (0-"
+ (clusters.length - 1) + ") in database '" + name + "'");
final OCluster cluster = clusters[iClusterId];
if (cluster == null)
return false;
getLevel2Cache().freeCluster(iClusterId);
if (iTruncate)
cluster.truncate();
cluster.delete();
clusterMap.remove(cluster.getName());
clusters[iClusterId] = null;
// UPDATE CONFIGURATION
configuration.dropCluster(iClusterId);
return true;
} catch (Exception e) {
OLogManager.instance().exception("Error while removing cluster '" + iClusterId + "'", e, OStorageException.class);
} finally {
lock.releaseExclusiveLock();
}
return false;
}
public boolean dropDataSegment(final String iName) {
lock.acquireExclusiveLock();
try {
final int id = getDataSegmentIdByName(iName);
final ODataLocal data = dataSegments[id];
if (data == null)
return false;
data.drop();
dataSegments[id] = null;
// UPDATE CONFIGURATION
configuration.dropDataSegment(id);
return true;
} catch (Exception e) {
OLogManager.instance().exception("Error while removing data segment '" + iName + "'", e, OStorageException.class);
} finally {
lock.releaseExclusiveLock();
}
return false;
}
public long count(final int[] iClusterIds) {
return count(iClusterIds, false);
}
@Override
public long count(int[] iClusterIds, boolean countTombstones) {
checkOpeness();
lock.acquireSharedLock();
try {
long tot = 0;
for (int iClusterId : iClusterIds) {
if (iClusterId >= clusters.length)
throw new OConfigurationException("Cluster id " + iClusterId + " was not found in database '" + name + "'");
if (iClusterId > -1) {
final OCluster c = clusters[iClusterId];
if (c != null)
tot += c.getEntries() - (countTombstones ? 0L : c.getTombstonesCount());
}
}
return tot;
} finally {
lock.releaseSharedLock();
}
}
public OClusterPosition[] getClusterDataRange(final int iClusterId) {
if (iClusterId == -1)
return new OClusterPosition[] { OClusterPosition.INVALID_POSITION, OClusterPosition.INVALID_POSITION };
checkOpeness();
lock.acquireSharedLock();
try {
return clusters[iClusterId] != null ? new OClusterPosition[] { clusters[iClusterId].getFirstPosition(),
clusters[iClusterId].getLastPosition() } : new OClusterPosition[0];
} catch (IOException ioe) {
throw new OStorageException("Can not retrieve information about data range", ioe);
} finally {
lock.releaseSharedLock();
}
}
public long count(final int iClusterId) {
return count(iClusterId, false);
}
@Override
public long count(int iClusterId, boolean countTombstones) {
if (iClusterId == -1)
throw new OStorageException("Cluster Id " + iClusterId + " is invalid in database '" + name + "'");
// COUNT PHYSICAL CLUSTER IF ANY
checkOpeness();
lock.acquireSharedLock();
try {
final OCluster cluster = clusters[iClusterId];
if (cluster == null)
return 0;
if (countTombstones)
return cluster.getEntries();
return cluster.getEntries() - cluster.getTombstonesCount();
} finally {
lock.releaseSharedLock();
}
}
public OStorageOperationResult<OPhysicalPosition> createRecord(int iDataSegmentId, final ORecordId iRid, final byte[] iContent,
final ORecordVersion iRecordVersion, final byte iRecordType, final int iMode, ORecordCallback<OClusterPosition> iCallback) {
checkOpeness();
final OCluster cluster = getClusterById(iRid.clusterId);
final ODataLocal dataSegment = getDataSegmentById(iDataSegmentId);
final OPhysicalPosition ppos;
modificationLock.requestModificationLock();
try {
lock.acquireExclusiveLock();
try {
if (txManager.isCommitting()) {
ppos = txManager.createRecord(txManager.getCurrentTransaction().getId(), dataSegment, cluster, iRid, iContent,
iRecordVersion, iRecordType);
} else {
ppos = createRecord(dataSegment, cluster, iContent, iRecordType, iRid, iRecordVersion);
if (OGlobalConfiguration.NON_TX_RECORD_UPDATE_SYNCH.getValueAsBoolean()
|| clustersToSyncImmediately.contains(cluster.getName()))
synchRecordUpdate(cluster, ppos);
if (iCallback != null)
iCallback.call(iRid, ppos.clusterPosition);
}
} finally {
lock.releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
return new OStorageOperationResult<OPhysicalPosition>(ppos);
}
public boolean updateReplica(final int dataSegmentId, final ORecordId rid, final byte[] content,
final ORecordVersion recordVersion, final byte recordType) throws IOException {
if (rid.isNew())
throw new OStorageException("Passed record with id " + rid + " is new and can not be treated as replica.");
checkOpeness();
final OCluster cluster = getClusterById(rid.clusterId);
final ODataLocal dataSegment = getDataSegmentById(dataSegmentId);
modificationLock.requestModificationLock();
try {
lock.acquireExclusiveLock();
try {
lockManager.acquireLock(Thread.currentThread(), rid, LOCK.EXCLUSIVE);
try {
OPhysicalPosition ppos = cluster.getPhysicalPosition(new OPhysicalPosition(rid.clusterPosition));
if (ppos == null) {
if (!cluster.isHashBased())
throw new OStorageException("Cluster with LH support is required.");
ppos = new OPhysicalPosition(rid.clusterPosition, recordVersion);
ppos.recordType = recordType;
ppos.dataSegmentId = dataSegment.getId();
if (!recordVersion.isTombstone()) {
ppos.dataSegmentPos = dataSegment.addRecord(rid, content);
}
cluster.addPhysicalPosition(ppos);
return true;
} else {
if (ppos.recordType != recordType)
throw new OStorageException("Record types of provided and stored replicas are different " + recordType + ":"
+ ppos.recordType + ".");
if (ppos.recordVersion.compareTo(recordVersion) < 0) {
cluster.updateVersion(ppos.clusterPosition, recordVersion);
if (!recordVersion.isTombstone() && !ppos.recordVersion.isTombstone()) {
ppos.dataSegmentPos = dataSegment.setRecord(ppos.dataSegmentPos, rid, content);
cluster.updateDataSegmentPosition(ppos.clusterPosition, dataSegmentId, ppos.dataSegmentPos);
} else if (!recordVersion.isTombstone() && ppos.recordVersion.isTombstone()) {
ppos.dataSegmentPos = dataSegment.addRecord(rid, content);
cluster.updateDataSegmentPosition(ppos.clusterPosition, dataSegmentId, ppos.dataSegmentPos);
} else if (recordVersion.isTombstone() && !ppos.recordVersion.isTombstone()) {
dataSegment.deleteRecord(ppos.dataSegmentPos);
}
return true;
}
}
} finally {
lockManager.releaseLock(Thread.currentThread(), rid, LOCK.EXCLUSIVE);
}
} finally {
lock.releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
return false;
}
@Override
public <V> V callInLock(Callable<V> iCallable, boolean iExclusiveLock) {
if (iExclusiveLock) {
modificationLock.requestModificationLock();
try {
return super.callInLock(iCallable, iExclusiveLock);
} finally {
modificationLock.releaseModificationLock();
}
} else {
return super.callInLock(iCallable, iExclusiveLock);
}
}
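  // Usage sketch: run a read-only task under the storage-level shared lock
  // (the lock is assumed to be reentrant for calls made inside the task):
  //
  //   Integer clusterCount = storage.callInLock(new Callable<Integer>() {
  //     public Integer call() {
  //       return storage.getClusters();
  //     }
  //   }, false); // false = shared lock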
@Override
public <V> V callInRecordLock(Callable<V> callable, ORID rid, boolean exclusiveLock) {
if (exclusiveLock)
modificationLock.requestModificationLock();
try {
if (exclusiveLock)
lock.acquireExclusiveLock();
else
lock.acquireSharedLock();
try {
lockManager.acquireLock(Thread.currentThread(), rid, exclusiveLock ? LOCK.EXCLUSIVE : LOCK.SHARED);
try {
return callable.call();
} finally {
lockManager.releaseLock(Thread.currentThread(), rid, exclusiveLock ? LOCK.EXCLUSIVE : LOCK.SHARED);
}
} catch (RuntimeException e) {
throw e;
} catch (Exception e) {
throw new OException("Error on nested call in lock", e);
} finally {
if (exclusiveLock)
lock.releaseExclusiveLock();
else
lock.releaseSharedLock();
}
} finally {
if (exclusiveLock)
modificationLock.releaseModificationLock();
}
}
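  // Usage sketch: run a task while holding an exclusive lock on a single
  // record id (rid is an ORID pointing at an existing record):
  //
  //   storage.callInRecordLock(new Callable<Void>() {
  //     public Void call() throws Exception {
  //       // inspect or repair the record here; both the storage lock and the
  //       // per-record lock are held for the duration of call()
  //       return null;
  //     }
  //   }, rid, true); // true = exclusive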
public OStorageOperationResult<ORawBuffer> readRecord(final ORecordId iRid, final String iFetchPlan, boolean iIgnoreCache,
ORecordCallback<ORawBuffer> iCallback, boolean loadTombstones) {
checkOpeness();
return new OStorageOperationResult<ORawBuffer>(readRecord(getClusterById(iRid.clusterId), iRid, true, loadTombstones));
}
public OStorageOperationResult<ORecordVersion> updateRecord(final ORecordId iRid, final byte[] iContent,
final ORecordVersion iVersion, final byte iRecordType, final int iMode, ORecordCallback<ORecordVersion> iCallback) {
checkOpeness();
modificationLock.requestModificationLock();
try {
lock.acquireExclusiveLock();
try {
final OCluster cluster = getClusterById(iRid.clusterId);
if (txManager.isCommitting()) {
return new OStorageOperationResult<ORecordVersion>(txManager.updateRecord(txManager.getCurrentTransaction().getId(),
cluster, iRid, iContent, iVersion, iRecordType));
} else {
final OPhysicalPosition ppos = updateRecord(cluster, iRid, iContent, iVersion, iRecordType);
if (ppos != null
&& (OGlobalConfiguration.NON_TX_RECORD_UPDATE_SYNCH.getValueAsBoolean() || clustersToSyncImmediately.contains(cluster
.getName())))
synchRecordUpdate(cluster, ppos);
final ORecordVersion returnValue = (ppos != null ? ppos.recordVersion : OVersionFactory.instance()
.createUntrackedVersion());
if (iCallback != null)
iCallback.call(iRid, returnValue);
return new OStorageOperationResult<ORecordVersion>(returnValue);
}
} finally {
lock.releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
public OStorageOperationResult<Boolean> deleteRecord(final ORecordId iRid, final ORecordVersion iVersion, final int iMode,
ORecordCallback<Boolean> iCallback) {
checkOpeness();
final OCluster cluster = getClusterById(iRid.clusterId);
modificationLock.requestModificationLock();
try {
lock.acquireExclusiveLock();
try {
if (txManager.isCommitting()) {
return new OStorageOperationResult<Boolean>(txManager.deleteRecord(txManager.getCurrentTransaction().getId(), cluster,
iRid.clusterPosition, iVersion));
} else {
final OPhysicalPosition ppos = deleteRecord(cluster, iRid, iVersion,
OGlobalConfiguration.STORAGE_USE_TOMBSTONES.getValueAsBoolean());
if (ppos != null
&& (OGlobalConfiguration.NON_TX_RECORD_UPDATE_SYNCH.getValueAsBoolean() || clustersToSyncImmediately.contains(cluster
.getName())))
synchRecordUpdate(cluster, ppos);
final boolean returnValue = ppos != null;
if (iCallback != null)
iCallback.call(iRid, returnValue);
return new OStorageOperationResult<Boolean>(returnValue);
}
} finally {
lock.releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
public Set<String> getClusterNames() {
checkOpeness();
lock.acquireSharedLock();
try {
return clusterMap.keySet();
} finally {
lock.releaseSharedLock();
}
}
public int getClusterIdByName(final String iClusterName) {
checkOpeness();
if (iClusterName == null)
throw new IllegalArgumentException("Cluster name is null");
if (iClusterName.length() == 0)
throw new IllegalArgumentException("Cluster name is empty");
if (Character.isDigit(iClusterName.charAt(0)))
return Integer.parseInt(iClusterName);
// SEARCH IT BETWEEN PHYSICAL CLUSTERS
lock.acquireSharedLock();
try {
final OCluster segment = clusterMap.get(iClusterName.toLowerCase());
if (segment != null)
return segment.getId();
} finally {
lock.releaseSharedLock();
}
return -1;
}
public String getClusterTypeByName(final String iClusterName) {
checkOpeness();
if (iClusterName == null)
throw new IllegalArgumentException("Cluster name is null");
// SEARCH IT BETWEEN PHYSICAL CLUSTERS
lock.acquireSharedLock();
try {
final OCluster segment = clusterMap.get(iClusterName.toLowerCase());
if (segment != null)
return segment.getType();
} finally {
lock.releaseSharedLock();
}
return null;
}
public void commit(final OTransaction iTx, Runnable callback) {
modificationLock.requestModificationLock();
try {
lock.acquireExclusiveLock();
try {
try {
startStorageTx(iTx);
txManager.clearLogEntries(iTx);
txManager.commitAllPendingRecords(iTx);
if (callback != null)
callback.run();
if (OGlobalConfiguration.TX_COMMIT_SYNCH.getValueAsBoolean())
synch();
endStorageTx();
} catch (Exception e) {
// WE NEED TO CALL ROLLBACK HERE, IN THE LOCK
OLogManager.instance().debug(this, "Error during transaction commit, transaction will be rolled back (tx-id=%d)", e,
iTx.getId());
rollback(iTx);
if (e instanceof OException)
throw ((OException) e);
else
throw new OStorageException("Error during transaction commit.", e);
} finally {
try {
txManager.clearLogEntries(iTx);
if (writeAheadLog != null)
writeAheadLog.shrinkTill(writeAheadLog.end());
} catch (Exception e) {
// XXX WHAT CAN WE DO HERE ? ROLLBACK IS NOT POSSIBLE
// IF WE THROW EXCEPTION, A ROLLBACK WILL BE DONE AT DB LEVEL BUT NOT AT STORAGE LEVEL
OLogManager.instance().error(this, "Clear tx log entries failed", e);
}
}
} finally {
transaction = null;
lock.releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
public void rollback(final OTransaction iTx) {
modificationLock.requestModificationLock();
try {
lock.acquireExclusiveLock();
try {
txManager.getTxSegment().rollback(iTx);
rollbackStorageTx();
if (OGlobalConfiguration.TX_COMMIT_SYNCH.getValueAsBoolean())
synch();
} catch (IOException ioe) {
OLogManager.instance().error(this,
"Error executing rollback for transaction with id '" + iTx.getId() + "' cause: " + ioe.getMessage(), ioe);
} finally {
transaction = null;
lock.releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
public void synch() {
checkOpeness();
final long timer = Orient.instance().getProfiler().startChrono();
lock.acquireExclusiveLock();
try {
for (OCluster cluster : clusters)
if (cluster != null)
cluster.synch();
for (ODataLocal data : dataSegments)
if (data != null)
data.synch();
if (configuration != null)
configuration.synch();
} catch (IOException e) {
throw new OStorageException("Error on synch storage '" + name + "'", e);
} finally {
lock.releaseExclusiveLock();
Orient.instance().getProfiler().stopChrono("db." + name + ".synch", "Synch a database", timer, "db.*.synch");
}
}
protected void synchRecordUpdate(final OCluster cluster, final OPhysicalPosition ppos) {
checkOpeness();
final long timer = Orient.instance().getProfiler().startChrono();
lock.acquireExclusiveLock();
try {
cluster.synch();
final ODataLocal data = getDataSegmentById(ppos.dataSegmentId);
data.synch();
if (configuration != null)
configuration.synch();
} catch (IOException e) {
throw new OStorageException("Error on synch storage '" + name + "'", e);
} finally {
lock.releaseExclusiveLock();
      Orient.instance().getProfiler()
          .stopChrono("db." + name + ".record.synch", "Synch a record to database", timer, "db.*.record.synch");
}
}
/**
   * Returns the list of holes across all the data segments as ODataHoleInfo entries.
*
*/
public List<ODataHoleInfo> getHolesList() {
final List<ODataHoleInfo> holes = new ArrayList<ODataHoleInfo>();
lock.acquireSharedLock();
try {
for (ODataLocal d : dataSegments)
if (d != null)
holes.addAll(d.getHolesList());
return holes;
} finally {
lock.releaseSharedLock();
}
}
/**
* Returns the total number of holes.
*
*/
public long getHoles() {
lock.acquireSharedLock();
try {
long holes = 0;
for (ODataLocal d : dataSegments)
if (d != null)
holes += d.getHoles();
return holes;
} finally {
lock.releaseSharedLock();
}
}
/**
* Returns the total size used by holes
*
*/
public long getHoleSize() {
lock.acquireSharedLock();
try {
final List<ODataHoleInfo> holes = getHolesList();
long size = 0;
for (ODataHoleInfo h : holes)
if (h.dataOffset > -1 && h.size > 0)
size += h.size;
return size;
} finally {
lock.releaseSharedLock();
}
}
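  // Sketch: a rough fragmentation indicator derived from the hole statistics
  // (getSize() counts record bytes only, so this is an approximation):
  //
  //   long holeBytes = storage.getHoleSize();
  //   long recordBytes = storage.getSize();
  //   double fragmentation = recordBytes > 0 ? (double) holeBytes / recordBytes : 0d;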
public void setDefaultClusterId(final int defaultClusterId) {
this.defaultClusterId = defaultClusterId;
}
public String getPhysicalClusterNameById(final int iClusterId) {
checkOpeness();
if (iClusterId < 0)
return null;
lock.acquireSharedLock();
try {
if (iClusterId >= clusters.length)
return null;
return clusters[iClusterId] != null ? clusters[iClusterId].getName() : null;
} finally {
lock.releaseSharedLock();
}
}
@Override
public OStorageConfiguration getConfiguration() {
return configuration;
}
public int getDefaultClusterId() {
return defaultClusterId;
}
public OCluster getClusterById(int iClusterId) {
lock.acquireSharedLock();
try {
if (iClusterId == ORID.CLUSTER_ID_INVALID)
// GET THE DEFAULT CLUSTER
iClusterId = defaultClusterId;
checkClusterSegmentIndexRange(iClusterId);
final OCluster cluster = clusters[iClusterId];
if (cluster == null)
throw new IllegalArgumentException("Cluster " + iClusterId + " is null");
return cluster;
} finally {
lock.releaseSharedLock();
}
}
@Override
public OCluster getClusterByName(final String iClusterName) {
lock.acquireSharedLock();
try {
final OCluster cluster = clusterMap.get(iClusterName.toLowerCase());
if (cluster == null)
throw new IllegalArgumentException("Cluster " + iClusterName + " does not exist in database '" + name + "'");
return cluster;
} finally {
lock.releaseSharedLock();
}
}
@Override
public String getURL() {
return OEngineLocal.NAME + ":" + url;
}
public long getSize() {
lock.acquireSharedLock();
try {
long size = 0;
for (OCluster c : clusters)
if (c != null)
size += c.getRecordsSize();
return size;
} catch (IOException ioe) {
throw new OStorageException("Can not calculate records size");
} finally {
lock.releaseSharedLock();
}
}
public String getStoragePath() {
return storagePath;
}
public String getMode() {
return mode;
}
public OStorageVariableParser getVariableParser() {
return variableParser;
}
public int getClusters() {
lock.acquireSharedLock();
try {
return clusterMap.size();
} finally {
lock.releaseSharedLock();
}
}
public Set<OCluster> getClusterInstances() {
final Set<OCluster> result = new HashSet<OCluster>();
lock.acquireSharedLock();
try {
// ADD ALL THE CLUSTERS
for (OCluster c : clusters)
if (c != null)
result.add(c);
} finally {
lock.releaseSharedLock();
}
return result;
}
/**
* Method that completes the cluster rename operation. <strong>IT WILL NOT RENAME A CLUSTER, IT JUST CHANGES THE NAME IN THE
* INTERNAL MAPPING</strong>
*/
public void renameCluster(final String iOldName, final String iNewName) {
clusterMap.put(iNewName, clusterMap.remove(iOldName));
}
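  // Usage sketch: the caller is expected to have renamed the underlying files
  // already; this call only swaps the key in the in-memory map:
  //
  //   storage.renameCluster("invoice", "invoice_old");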
protected int registerDataSegment(final OStorageDataConfiguration iConfig) throws IOException {
checkOpeness();
// CHECK FOR DUPLICATION OF NAMES
for (ODataLocal data : dataSegments)
if (data != null && data.getName().equals(iConfig.name)) {
// OVERWRITE CONFIG
data.config = iConfig;
return -1;
}
int pos = -1;
for (int i = 0; i < dataSegments.length; ++i)
if (dataSegments[i] == null) {
// RECYCLE POSITION
pos = i;
break;
}
if (pos == -1)
// ASSIGN LATEST
pos = dataSegments.length;
// CREATE AND ADD THE NEW REF SEGMENT
final ODataLocal segment = new ODataLocal(this, iConfig, pos);
if (pos == dataSegments.length)
dataSegments = OArrays.copyOf(dataSegments, dataSegments.length + 1);
dataSegments[pos] = segment;
return pos;
}
/**
   * Creates the cluster by reading the configuration received as argument and registers it, assigning it the next serial id.
*
* @param iConfig
* A OStorageClusterConfiguration implementation, namely physical or logical
* @return The id (physical position into the array) of the new cluster just created. First is 0.
* @throws IOException
*/
private int createClusterFromConfig(final OStorageClusterConfiguration iConfig) throws IOException {
OCluster cluster = clusterMap.get(iConfig.getName());
if (cluster instanceof OClusterLocal && iConfig instanceof OStorageEHClusterConfiguration)
clusterMap.remove(iConfig.getName());
else if (cluster != null) {
if (cluster instanceof OClusterLocal) {
// ALREADY CONFIGURED, JUST OVERWRITE CONFIG
cluster.configure(this, iConfig);
}
return -1;
}
cluster = Orient.instance().getClusterFactory().createCluster(iConfig);
cluster.configure(this, iConfig);
return registerCluster(cluster);
}
/**
* Register the cluster internally.
*
* @param iCluster
* OCluster implementation
* @return The id (physical position into the array) of the new cluster just created. First is 0.
* @throws IOException
*/
private int registerCluster(final OCluster iCluster) throws IOException {
final int id;
if (iCluster != null) {
// CHECK FOR DUPLICATION OF NAMES
if (clusterMap.containsKey(iCluster.getName()))
throw new OConfigurationException("Cannot add segment '" + iCluster.getName()
+ "' because it is already registered in database '" + name + "'");
// CREATE AND ADD THE NEW REF SEGMENT
clusterMap.put(iCluster.getName(), iCluster);
id = iCluster.getId();
} else
id = clusters.length;
clusters = OArrays.copyOf(clusters, clusters.length + 1);
clusters[id] = iCluster;
return id;
}
private void checkClusterSegmentIndexRange(final int iClusterId) {
if (iClusterId > clusters.length - 1)
throw new IllegalArgumentException("Cluster segment #" + iClusterId + " does not exist in database '" + name + "'");
}
protected OPhysicalPosition createRecord(final ODataLocal dataSegment, final OCluster cluster, final byte[] content,
final byte recordType, final ORecordId rid, final ORecordVersion recordVersion) {
assert (lock.assertExclusiveLockHold());
checkOpeness();
if (content == null)
throw new IllegalArgumentException("Record is null");
final long timer = Orient.instance().getProfiler().startChrono();
final OPhysicalPosition ppos = new OPhysicalPosition(-1, -1, recordType);
if (cluster.isHashBased()) {
if (rid.isNew()) {
if (OGlobalConfiguration.USE_NODE_ID_CLUSTER_POSITION.getValueAsBoolean()) {
ppos.clusterPosition = OClusterPositionFactory.INSTANCE.generateUniqueClusterPosition();
} else {
ppos.clusterPosition = OClusterPositionFactory.INSTANCE.valueOf(positionGenerator++);
}
} else {
ppos.clusterPosition = rid.clusterPosition;
}
}
try {
if (!cluster.addPhysicalPosition(ppos))
throw new OStorageException("Record with given id " + new ORecordId(rid.clusterId, ppos.clusterPosition)
+ " already exists.");
rid.clusterPosition = ppos.clusterPosition;
lockManager.acquireLock(Thread.currentThread(), rid, LOCK.EXCLUSIVE);
try {
ppos.dataSegmentId = dataSegment.getId();
ppos.dataSegmentPos = dataSegment.addRecord(rid, content);
cluster.updateDataSegmentPosition(ppos.clusterPosition, ppos.dataSegmentId, ppos.dataSegmentPos);
if (recordVersion.getCounter() > 0 && recordVersion.compareTo(ppos.recordVersion) != 0) {
// OVERWRITE THE VERSION
cluster.updateVersion(rid.clusterPosition, recordVersion);
ppos.recordVersion = recordVersion;
}
return ppos;
} finally {
lockManager.releaseLock(Thread.currentThread(), rid, LOCK.EXCLUSIVE);
}
} catch (IOException ioe) {
try {
if (ppos.clusterPosition != null && ppos.clusterPosition.compareTo(OClusterPosition.INVALID_POSITION) != 0)
cluster.removePhysicalPosition(ppos.clusterPosition);
} catch (IOException e) {
OLogManager.instance().error(this, "Error on removing physical position in cluster: " + cluster, e);
}
OLogManager.instance().error(this, "Error on creating record in cluster: " + cluster, ioe);
return null;
} finally {
Orient.instance().getProfiler().stopChrono(PROFILER_CREATE_RECORD, "Create a record in database", timer, "db.*.createRecord");
}
}
@Override
protected ORawBuffer readRecord(final OCluster iClusterSegment, final ORecordId iRid, boolean iAtomicLock, boolean loadTombstones) {
if (!iRid.isPersistent())
throw new IllegalArgumentException("Cannot read record " + iRid + " since the position is invalid in database '" + name
+ '\'');
// NOT FOUND: SEARCH IT IN THE STORAGE
final long timer = Orient.instance().getProfiler().startChrono();
// GET LOCK ONLY IF IT'S IN ATOMIC-MODE (SEE THE PARAMETER iAtomicLock)
// USUALLY BROWSING OPERATIONS (QUERY) AVOID ATOMIC LOCKING
// TO IMPROVE PERFORMANCES BY LOCKING THE ENTIRE CLUSTER FROM THE
// OUTSIDE.
if (iAtomicLock)
lock.acquireSharedLock();
try {
lockManager.acquireLock(Thread.currentThread(), iRid, LOCK.SHARED);
try {
final OPhysicalPosition ppos = iClusterSegment.getPhysicalPosition(new OPhysicalPosition(iRid.clusterPosition));
if (ppos != null && loadTombstones && ppos.recordVersion.isTombstone())
return new ORawBuffer(null, ppos.recordVersion, ppos.recordType);
if (ppos == null || !checkForRecordValidity(ppos))
// DELETED
return null;
final ODataLocal data = getDataSegmentById(ppos.dataSegmentId);
return new ORawBuffer(data.getRecord(ppos.dataSegmentPos), ppos.recordVersion, ppos.recordType);
} finally {
lockManager.releaseLock(Thread.currentThread(), iRid, LOCK.SHARED);
}
} catch (IOException e) {
OLogManager.instance().error(this, "Error on reading record " + iRid + " (cluster: " + iClusterSegment + ')', e);
return null;
} finally {
if (iAtomicLock)
lock.releaseSharedLock();
Orient.instance().getProfiler().stopChrono(PROFILER_READ_RECORD, "Read a record from database", timer, "db.*.readRecord");
}
}
protected OPhysicalPosition updateRecord(final OCluster iClusterSegment, final ORecordId rid, final byte[] recordContent,
final ORecordVersion recordVersion, final byte iRecordType) {
assert (lock.assertExclusiveLockHold());
if (iClusterSegment == null)
throw new OStorageException("Cluster not defined for record: " + rid);
final long timer = Orient.instance().getProfiler().startChrono();
try {
      // ACQUIRE AN EXCLUSIVE LOCK AGAINST THE RECORD
lockManager.acquireLock(Thread.currentThread(), rid, LOCK.EXCLUSIVE);
try {
// UPDATE IT
final OPhysicalPosition ppos = iClusterSegment.getPhysicalPosition(new OPhysicalPosition(rid.clusterPosition));
if (!checkForRecordValidity(ppos))
return null;
// VERSION CONTROL CHECK
switch (recordVersion.getCounter()) {
// DOCUMENT UPDATE, NO VERSION CONTROL
case -1:
ppos.recordVersion.increment();
iClusterSegment.updateVersion(rid.clusterPosition, ppos.recordVersion);
break;
// DOCUMENT UPDATE, NO VERSION CONTROL, NO VERSION UPDATE
case -2:
break;
default:
// MVCC CONTROL AND RECORD UPDATE OR WRONG VERSION VALUE
if (recordVersion.getCounter() > -1) {
// MVCC TRANSACTION: CHECK IF VERSION IS THE SAME
if (!recordVersion.equals(ppos.recordVersion))
if (OFastConcurrentModificationException.enabled())
throw OFastConcurrentModificationException.instance();
else
throw new OConcurrentModificationException(rid, ppos.recordVersion, recordVersion, ORecordOperation.UPDATED);
ppos.recordVersion.increment();
iClusterSegment.updateVersion(rid.clusterPosition, ppos.recordVersion);
} else {
          // DOCUMENT ROLLED BACK
recordVersion.clearRollbackMode();
ppos.recordVersion.copyFrom(recordVersion);
iClusterSegment.updateVersion(rid.clusterPosition, ppos.recordVersion);
}
}
if (ppos.recordType != iRecordType)
iClusterSegment.updateRecordType(rid.clusterPosition, iRecordType);
final long newDataSegmentOffset;
if (ppos.dataSegmentPos == -1)
// WAS EMPTY FIRST TIME, CREATE IT NOW
newDataSegmentOffset = getDataSegmentById(ppos.dataSegmentId).addRecord(rid, recordContent);
else
newDataSegmentOffset = getDataSegmentById(ppos.dataSegmentId).setRecord(ppos.dataSegmentPos, rid, recordContent);
if (newDataSegmentOffset != ppos.dataSegmentPos) {
// UPDATE DATA SEGMENT OFFSET WITH THE NEW PHYSICAL POSITION
iClusterSegment.updateDataSegmentPosition(ppos.clusterPosition, ppos.dataSegmentId, newDataSegmentOffset);
ppos.dataSegmentPos = newDataSegmentOffset;
}
return ppos;
} finally {
lockManager.releaseLock(Thread.currentThread(), rid, LOCK.EXCLUSIVE);
}
} catch (IOException e) {
OLogManager.instance().error(this, "Error on updating record " + rid + " (cluster: " + iClusterSegment + ")", e);
} finally {
Orient.instance().getProfiler().stopChrono(PROFILER_UPDATE_RECORD, "Update a record to database", timer, "db.*.updateRecord");
}
return null;
}
protected OPhysicalPosition deleteRecord(final OCluster iClusterSegment, final ORecordId iRid, final ORecordVersion iVersion,
boolean useTombstones) {
assert (lock.assertExclusiveLockHold());
final long timer = Orient.instance().getProfiler().startChrono();
try {
lockManager.acquireLock(Thread.currentThread(), iRid, LOCK.EXCLUSIVE);
try {
final OPhysicalPosition ppos = iClusterSegment.getPhysicalPosition(new OPhysicalPosition(iRid.clusterPosition));
if (ppos == null || ppos.dataSegmentId < 0 || (useTombstones && ppos.recordVersion.isTombstone()))
// ALREADY DELETED
return null;
// MVCC TRANSACTION: CHECK IF VERSION IS THE SAME
if (iVersion.getCounter() > -1 && !ppos.recordVersion.equals(iVersion))
if (OFastConcurrentModificationException.enabled())
throw OFastConcurrentModificationException.instance();
else
throw new OConcurrentModificationException(iRid, ppos.recordVersion, iVersion, ORecordOperation.DELETED);
if (!ppos.recordVersion.isTombstone() && ppos.dataSegmentPos > -1) {
try {
getDataSegmentById(ppos.dataSegmentId).deleteRecord(ppos.dataSegmentPos);
} catch (OIOException e) {
OLogManager.instance().error(this, "Cannot remove the record in data segment, however remove it from cluster", e);
}
}
if (useTombstones && iClusterSegment.hasTombstonesSupport())
iClusterSegment.convertToTombstone(iRid.clusterPosition);
else
iClusterSegment.removePhysicalPosition(iRid.clusterPosition);
return ppos;
} finally {
lockManager.releaseLock(Thread.currentThread(), iRid, LOCK.EXCLUSIVE);
}
} catch (IOException e) {
OLogManager.instance().error(this, "Error on deleting record " + iRid + "( cluster: " + iClusterSegment + ")", e);
} finally {
Orient.instance().getProfiler()
.stopChrono(PROFILER_DELETE_RECORD, "Delete a record from database", timer, "db.*.deleteRecord");
}
return null;
}
@Override
public boolean cleanOutRecord(ORecordId recordId, ORecordVersion recordVersion, int iMode, ORecordCallback<Boolean> callback) {
final OCluster cluster = getClusterById(recordId.clusterId);
modificationLock.requestModificationLock();
try {
final OPhysicalPosition ppos = deleteRecord(cluster, recordId, recordVersion, false);
if (ppos != null
&& (OGlobalConfiguration.NON_TX_RECORD_UPDATE_SYNCH.getValueAsBoolean() || clustersToSyncImmediately.contains(cluster
.getName())))
synchRecordUpdate(cluster, ppos);
final boolean returnValue = ppos != null;
if (callback != null)
callback.call(recordId, returnValue);
return returnValue;
} finally {
modificationLock.releaseModificationLock();
}
}
private void installProfilerHooks() {
Orient
.instance()
.getProfiler()
.registerHookValue("db." + name + ".data.holes", "Number of the holes in database", METRIC_TYPE.COUNTER,
new OProfilerHookValue() {
public Object getValue() {
return getHoles();
}
}, "db.*.data.holes");
Orient
.instance()
.getProfiler()
.registerHookValue("db." + name + ".data.holeSize", "Size of the holes in database", METRIC_TYPE.SIZE,
new OProfilerHookValue() {
public Object getValue() {
return getHoleSize();
}
}, "db.*.data.holeSize");
}
private void uninstallProfilerHooks() {
Orient.instance().getProfiler().unregisterHookValue("db." + name + ".data.holes");
Orient.instance().getProfiler().unregisterHookValue("db." + name + ".data.holeSize");
}
private void formatMessage(final boolean iVerbose, final OCommandOutputListener iListener, final String iMessage,
final Object... iArgs) {
if (iVerbose)
iListener.onMessage(String.format(iMessage, iArgs));
}
public void freeze(boolean throwException) {
modificationLock.prohibitModifications(throwException);
synch();
try {
for (OCluster cluster : clusters)
if (cluster != null)
cluster.setSoftlyClosed(true);
for (ODataLocal data : dataSegments)
if (data != null)
data.setSoftlyClosed(true);
if (configuration != null)
configuration.setSoftlyClosed(true);
} catch (IOException e) {
throw new OStorageException("Error on freeze storage '" + name + "'", e);
}
}
public void release() {
try {
for (OCluster cluster : clusters)
if (cluster != null)
cluster.setSoftlyClosed(false);
for (ODataLocal data : dataSegments)
if (data != null)
data.setSoftlyClosed(false);
if (configuration != null)
configuration.setSoftlyClosed(false);
} catch (IOException e) {
throw new OStorageException("Error on release storage '" + name + "'", e);
}
modificationLock.allowModifications();
}
public boolean wasClusterSoftlyClosed(String clusterName) {
final OCluster indexCluster = clusterMap.get(clusterName);
return !(indexCluster instanceof OClusterLocal) || ((OClusterLocal) indexCluster).isSoftlyClosed();
}
@Override
public String getType() {
return OEngineLocal.NAME;
}
} | 1no label
| core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_OStorageLocal.java |
373 | public class OClassTrigger extends ODocumentHookAbstract {
public static final String CLASSNAME = "OTriggered";
public static final String METHOD_SEPARATOR = ".";
// Class Level Trigger (class custom attribute)
public static final String ONBEFORE_CREATED = "onBeforeCreate";
public static final String ONAFTER_CREATED = "onAfterCreate";
public static final String ONBEFORE_READ = "onBeforeRead";
public static final String ONAFTER_READ = "onAfterRead";
public static final String ONBEFORE_UPDATED = "onBeforeUpdate";
public static final String ONAFTER_UPDATED = "onAfterUpdate";
public static final String ONBEFORE_DELETE = "onBeforeDelete";
public static final String ONAFTER_DELETE = "onAfterDelete";
// Record Level Trigger (property name)
public static final String PROP_BEFORE_CREATE = ONBEFORE_CREATED;
public static final String PROP_AFTER_CREATE = ONAFTER_CREATED;
public static final String PROP_BEFORE_READ = ONBEFORE_READ;
public static final String PROP_AFTER_READ = ONAFTER_READ;
public static final String PROP_BEFORE_UPDATE = ONBEFORE_UPDATED;
public static final String PROP_AFTER_UPDATE = ONAFTER_UPDATED;
public static final String PROP_BEFORE_DELETE = ONBEFORE_DELETE;
public static final String PROP_AFTER_DELETE = ONAFTER_DELETE;
public OClassTrigger() {
}
public DISTRIBUTED_EXECUTION_MODE getDistributedExecutionMode() {
return DISTRIBUTED_EXECUTION_MODE.SOURCE_NODE;
}
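  // Configuration sketch: a class-level trigger is wired through a custom
  // attribute on a class that extends OTriggered, e.g. via SQL (class and
  // function names are illustrative):
  //
  //   CREATE CLASS Account EXTENDS OTriggered
  //   ALTER CLASS Account CUSTOM onBeforeCreate=validateAccount
  //
  // where "validateAccount" is either a server-side OFunction name or a
  // "fully.qualified.Class.method" reference resolved by checkMethod() below.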
@Override
public RESULT onRecordBeforeCreate(final ODocument iDocument) {
Object func = this.checkClzAttribute(iDocument, ONBEFORE_CREATED);
if (func != null) {
if (func instanceof OFunction)
return this.executeFunction(iDocument, (OFunction) func);
else if (func instanceof Object[])
return this.executeMethod(iDocument, (Object[]) func);
}
return RESULT.RECORD_NOT_CHANGED;
}
@Override
public void onRecordAfterCreate(final ODocument iDocument) {
Object func = this.checkClzAttribute(iDocument, ONAFTER_CREATED);
if (func != null) {
if (func instanceof OFunction)
this.executeFunction(iDocument, (OFunction) func);
else if (func instanceof Object[])
this.executeMethod(iDocument, (Object[]) func);
}
}
@Override
public RESULT onRecordBeforeRead(final ODocument iDocument) {
Object func = this.checkClzAttribute(iDocument, ONBEFORE_READ);
if (func != null) {
if (func instanceof OFunction)
return this.executeFunction(iDocument, (OFunction) func);
else if (func instanceof Object[])
return this.executeMethod(iDocument, (Object[]) func);
}
return RESULT.RECORD_NOT_CHANGED;
}
@Override
public void onRecordAfterRead(final ODocument iDocument) {
Object func = this.checkClzAttribute(iDocument, ONAFTER_READ);
if (func != null) {
if (func instanceof OFunction)
this.executeFunction(iDocument, (OFunction) func);
else if (func instanceof Object[])
this.executeMethod(iDocument, (Object[]) func);
}
}
@Override
public RESULT onRecordBeforeUpdate(final ODocument iDocument) {
Object func = this.checkClzAttribute(iDocument, ONBEFORE_UPDATED);
if (func != null) {
if (func instanceof OFunction)
return this.executeFunction(iDocument, (OFunction) func);
else if (func instanceof Object[])
return this.executeMethod(iDocument, (Object[]) func);
}
return RESULT.RECORD_NOT_CHANGED;
}
@Override
public void onRecordAfterUpdate(final ODocument iDocument) {
Object func = this.checkClzAttribute(iDocument, ONAFTER_UPDATED);
if (func != null) {
if (func instanceof OFunction)
this.executeFunction(iDocument, (OFunction) func);
else if (func instanceof Object[])
this.executeMethod(iDocument, (Object[]) func);
}
}
@Override
public RESULT onRecordBeforeDelete(final ODocument iDocument) {
Object func = this.checkClzAttribute(iDocument, ONBEFORE_DELETE);
if (func != null) {
if (func instanceof OFunction)
return this.executeFunction(iDocument, (OFunction) func);
else if (func instanceof Object[])
return this.executeMethod(iDocument, (Object[]) func);
}
return RESULT.RECORD_NOT_CHANGED;
}
@Override
public void onRecordAfterDelete(final ODocument iDocument) {
Object func = this.checkClzAttribute(iDocument, ONAFTER_DELETE);
if (func != null) {
if (func instanceof OFunction)
this.executeFunction(iDocument, (OFunction) func);
else if (func instanceof Object[])
this.executeMethod(iDocument, (Object[]) func);
}
}
private Object checkClzAttribute(final ODocument iDocument, String attr) {
OClass clz = iDocument.getSchemaClass();
if (clz != null && clz.isSubClassOf(CLASSNAME)) {
OFunction func = null;
String fieldName = ((OClassImpl) clz).getCustom(attr);
OClass superClz = clz.getSuperClass();
while (fieldName == null || fieldName.length() == 0) {
if (superClz == null || superClz.getName().equals(CLASSNAME))
break;
fieldName = ((OClassImpl) superClz).getCustom(attr);
superClz = superClz.getSuperClass();
}
if (fieldName != null && fieldName.length() > 0) {
// check if it is reflection or not
Object[] clzMethod = this.checkMethod(fieldName);
if (clzMethod != null)
return clzMethod;
func = ODatabaseRecordThreadLocal.INSTANCE.get().getMetadata().getFunctionLibrary().getFunction(fieldName);
if (func == null) { // check if it is rid
if (OStringSerializerHelper.contains(fieldName, ORID.SEPARATOR)) {
try {
ODocument funcDoc = ODatabaseRecordThreadLocal.INSTANCE.get().load(new ORecordId(fieldName));
if (funcDoc != null) {
func = ODatabaseRecordThreadLocal.INSTANCE.get().getMetadata().getFunctionLibrary()
.getFunction((String) funcDoc.field("name"));
}
} catch (Exception ex) {
OLogManager.instance().error(this, "illegal record id : ", ex.getMessage());
}
}
}
} else {
ODocument funcDoc = iDocument.field(attr);
if (funcDoc != null) {
func = ODatabaseRecordThreadLocal.INSTANCE.get().getMetadata().getFunctionLibrary()
.getFunction((String) funcDoc.field("name"));
}
}
return func;
}
return null;
}
private Object[] checkMethod(String fieldName) {
String clzName = null;
String methodName = null;
if (fieldName.contains(METHOD_SEPARATOR)) {
clzName = fieldName.substring(0, fieldName.lastIndexOf(METHOD_SEPARATOR));
methodName = fieldName.substring(fieldName.lastIndexOf(METHOD_SEPARATOR) + 1);
}
if (clzName == null || methodName == null)
return null;
try {
Class clz = ClassLoader.getSystemClassLoader().loadClass(clzName);
Method method = clz.getMethod(methodName, ODocument.class);
return new Object[] { clz, method };
} catch (Exception ex) {
OLogManager.instance().error(this, "illegal class or method : " + clzName + "/" + methodName);
return null;
}
}
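  // A method referenced as "com.example.AccountHooks.beforeCreate"
  // (hypothetical) must be loadable by the system class loader, have a public
  // no-arg constructor on its class, accept a single ODocument and return the
  // name of a RESULT constant, or null for RECORD_NOT_CHANGED:
  //
  //   public class AccountHooks {
  //     public String beforeCreate(ODocument doc) {
  //       if (doc.field("name") == null) {
  //         doc.field("name", "unnamed");
  //         return "RECORD_CHANGED";
  //       }
  //       return null;
  //     }
  //   }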
public RESULT onTrigger(final TYPE iType, final ORecord<?> iRecord) {
if (ODatabaseRecordThreadLocal.INSTANCE.isDefined() && ODatabaseRecordThreadLocal.INSTANCE.get().getStatus() != STATUS.OPEN)
return RESULT.RECORD_NOT_CHANGED;
if (!(iRecord instanceof ODocument))
return RESULT.RECORD_NOT_CHANGED;
final ODocument document = (ODocument) iRecord;
if (document.getSchemaClass() != null && document.getSchemaClass().isSubClassOf(CLASSNAME))
return super.onTrigger(iType, iRecord);
return RESULT.RECORD_NOT_CHANGED;
}
private RESULT executeMethod(final ODocument iDocument, final Object[] clzMethod) {
if (clzMethod[0] instanceof Class && clzMethod[1] instanceof Method) {
Method method = (Method) clzMethod[1];
Class clz = (Class) clzMethod[0];
String result = null;
try {
result = (String) method.invoke(clz.newInstance(), iDocument);
} catch (Exception ex) {
throw new OException("Failed to invoke method " + method.getName(), ex);
}
if (result == null) {
return RESULT.RECORD_NOT_CHANGED;
}
return RESULT.valueOf(result);
}
return RESULT.RECORD_NOT_CHANGED;
}
private RESULT executeFunction(final ODocument iDocument, final OFunction func) {
if (func == null)
return RESULT.RECORD_NOT_CHANGED;
ODatabaseRecord db = ODatabaseRecordThreadLocal.INSTANCE.getIfDefined();
if (db != null && !(db instanceof ODatabaseRecordTx))
db = db.getUnderlying();
// final OFunction f = db.getMetadata().getFunctionLibrary().getFunction(funcName);
final OScriptManager scriptManager = Orient.instance().getScriptManager();
final ScriptEngine scriptEngine = scriptManager.getEngine(func.getLanguage());
// final ScriptEngine scriptEngine = new ScriptEngineManager().getEngineByName("javascript");
final Bindings binding = scriptEngine.getBindings(ScriptContext.ENGINE_SCOPE);
// final Bindings binding = scriptEngine.createBindings();
for (OScriptInjection i : scriptManager.getInjections())
i.bind(binding);
binding.put("doc", iDocument);
if (db != null) {
binding.put("db", new OScriptDocumentDatabaseWrapper((ODatabaseRecordTx) db));
binding.put("orient", new OScriptOrientWrapper(db));
} else
binding.put("orient", new OScriptOrientWrapper());
// scriptEngine.setBindings(binding, ScriptContext.ENGINE_SCOPE);
String result = null;
try {
if (func.getLanguage() == null)
throw new OConfigurationException("Database function '" + func.getName() + "' has no language");
final String funcStr = scriptManager.getFunctionDefinition(func);
if (funcStr != null) {
try {
scriptEngine.eval(funcStr);
} catch (ScriptException e) {
scriptManager.getErrorMessage(e, funcStr);
}
}
if (scriptEngine instanceof Invocable) {
final Invocable invocableEngine = (Invocable) scriptEngine;
Object[] EMPTY = new Object[0];
result = (String) invocableEngine.invokeFunction(func.getName(), EMPTY);
}
} catch (ScriptException e) {
throw new OCommandScriptException("Error on execution of the script", func.getName(), e.getColumnNumber(), e);
} catch (NoSuchMethodException e) {
throw new OCommandScriptException("Error on execution of the script", func.getName(), 0, e);
} catch (OCommandScriptException e) {
// PASS THROUGH
throw e;
} finally {
scriptManager.unbind(binding);
}
if (result == null) {
return RESULT.RECORD_NOT_CHANGED;
}
    return RESULT.valueOf(result);
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_db_record_OClassTrigger.java |
2,158 | public class BitsDocIdSetIterator extends MatchDocIdSetIterator {
private final Bits bits;
public BitsDocIdSetIterator(Bits bits) {
super(bits.length());
this.bits = bits;
}
public BitsDocIdSetIterator(int maxDoc, Bits bits) {
super(maxDoc);
this.bits = bits;
}
@Override
protected boolean matchDoc(int doc) {
return bits.get(doc);
}
public static class FilteredIterator extends FilteredDocIdSetIterator {
private final Bits bits;
FilteredIterator(DocIdSetIterator innerIter, Bits bits) {
super(innerIter);
this.bits = bits;
}
@Override
protected boolean match(int doc) {
return bits.get(doc);
}
}
@Override
public long cost() {
return this.bits.length();
}
} | 0true
| src_main_java_org_elasticsearch_common_lucene_docset_BitsDocIdSetIterator.java |
2,590 | private static class NodeComparator implements Comparator<DiscoveryNode> {
@Override
public int compare(DiscoveryNode o1, DiscoveryNode o2) {
return o1.id().compareTo(o2.id());
}
} | 1no label
| src_main_java_org_elasticsearch_discovery_zen_elect_ElectMasterService.java |
4,476 | public class RecoverySource extends AbstractComponent {
public static class Actions {
public static final String START_RECOVERY = "index/shard/recovery/startRecovery";
}
private final TransportService transportService;
private final IndicesService indicesService;
private final RecoverySettings recoverySettings;
private final ClusterService clusterService;
private final TimeValue internalActionTimeout;
private final TimeValue internalActionLongTimeout;
@Inject
public RecoverySource(Settings settings, TransportService transportService, IndicesService indicesService,
RecoverySettings recoverySettings, ClusterService clusterService) {
super(settings);
this.transportService = transportService;
this.indicesService = indicesService;
this.clusterService = clusterService;
this.recoverySettings = recoverySettings;
transportService.registerHandler(Actions.START_RECOVERY, new StartRecoveryTransportRequestHandler());
this.internalActionTimeout = componentSettings.getAsTime("internal_action_timeout", TimeValue.timeValueMinutes(15));
this.internalActionLongTimeout = new TimeValue(internalActionTimeout.millis() * 2);
}
private RecoveryResponse recover(final StartRecoveryRequest request) {
final InternalIndexShard shard = (InternalIndexShard) indicesService.indexServiceSafe(request.shardId().index().name()).shardSafe(request.shardId().id());
// verify that our (the source) shard state is marking the shard to be in recovery mode as well, otherwise
// the index operations will not be routed to it properly
RoutingNode node = clusterService.state().readOnlyRoutingNodes().node(request.targetNode().id());
if (node == null) {
throw new DelayRecoveryException("source node does not have the node [" + request.targetNode() + "] in its state yet..");
}
ShardRouting targetShardRouting = null;
for (ShardRouting shardRouting : node) {
if (shardRouting.shardId().equals(request.shardId())) {
targetShardRouting = shardRouting;
break;
}
}
if (targetShardRouting == null) {
throw new DelayRecoveryException("source node does not have the shard listed in its state as allocated on the node");
}
if (!targetShardRouting.initializing()) {
throw new DelayRecoveryException("source node has the state of the target shard to be [" + targetShardRouting.state() + "], expecting to be [initializing]");
}
logger.trace("[{}][{}] starting recovery to {}, mark_as_relocated {}", request.shardId().index().name(), request.shardId().id(), request.targetNode(), request.markAsRelocated());
final RecoveryResponse response = new RecoveryResponse();
shard.recover(new Engine.RecoveryHandler() {
@Override
public void phase1(final SnapshotIndexCommit snapshot) throws ElasticsearchException {
long totalSize = 0;
long existingTotalSize = 0;
try {
StopWatch stopWatch = new StopWatch().start();
for (String name : snapshot.getFiles()) {
StoreFileMetaData md = shard.store().metaData(name);
boolean useExisting = false;
if (request.existingFiles().containsKey(name)) {
// we don't compute checksum for segments, so always recover them
if (!name.startsWith("segments") && md.isSame(request.existingFiles().get(name))) {
response.phase1ExistingFileNames.add(name);
response.phase1ExistingFileSizes.add(md.length());
existingTotalSize += md.length();
useExisting = true;
if (logger.isTraceEnabled()) {
logger.trace("[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}], size [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name, md.checksum(), md.length());
}
}
}
if (!useExisting) {
if (request.existingFiles().containsKey(name)) {
logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote [{}], local [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name, request.existingFiles().get(name), md);
} else {
logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], does not exist in remote", request.shardId().index().name(), request.shardId().id(), request.targetNode(), name);
}
response.phase1FileNames.add(name);
response.phase1FileSizes.add(md.length());
}
totalSize += md.length();
}
response.phase1TotalSize = totalSize;
response.phase1ExistingTotalSize = existingTotalSize;
logger.trace("[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize));
RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(request.recoveryId(), request.shardId(), response.phase1FileNames, response.phase1FileSizes,
response.phase1ExistingFileNames, response.phase1ExistingFileSizes, response.phase1TotalSize, response.phase1ExistingTotalSize);
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILES_INFO, recoveryInfoFilesRequest, TransportRequestOptions.options().withTimeout(internalActionTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
final CountDownLatch latch = new CountDownLatch(response.phase1FileNames.size());
final AtomicReference<Throwable> lastException = new AtomicReference<Throwable>();
int fileIndex = 0;
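// Stream each file's chunks on the concurrent (or small-file) recovery pool; the latch waits for every file to finish.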
for (final String name : response.phase1FileNames) {
ThreadPoolExecutor pool;
long fileSize = response.phase1FileSizes.get(fileIndex);
if (fileSize > recoverySettings.SMALL_FILE_CUTOFF_BYTES) {
pool = recoverySettings.concurrentStreamPool();
} else {
pool = recoverySettings.concurrentSmallFileStreamPool();
}
pool.execute(new Runnable() {
@Override
public void run() {
IndexInput indexInput = null;
try {
final int BUFFER_SIZE = (int) recoverySettings.fileChunkSize().bytes();
byte[] buf = new byte[BUFFER_SIZE];
StoreFileMetaData md = shard.store().metaData(name);
// TODO: maybe use IOContext.READONCE?
indexInput = shard.store().openInputRaw(name, IOContext.READ);
boolean shouldCompressRequest = recoverySettings.compress();
if (CompressorFactory.isCompressed(indexInput)) {
shouldCompressRequest = false;
}
long len = indexInput.length();
long readCount = 0;
while (readCount < len) {
if (shard.state() == IndexShardState.CLOSED) { // check if the shard got closed on us
throw new IndexShardClosedException(shard.shardId());
}
int toRead = readCount + BUFFER_SIZE > len ? (int) (len - readCount) : BUFFER_SIZE;
long position = indexInput.getFilePointer();
if (recoverySettings.rateLimiter() != null) {
recoverySettings.rateLimiter().pause(toRead);
}
indexInput.readBytes(buf, 0, toRead, false);
BytesArray content = new BytesArray(buf, 0, toRead);
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILE_CHUNK, new RecoveryFileChunkRequest(request.recoveryId(), request.shardId(), name, position, len, md.checksum(), content),
TransportRequestOptions.options().withCompress(shouldCompressRequest).withType(TransportRequestOptions.Type.RECOVERY).withTimeout(internalActionTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
readCount += toRead;
}
} catch (Throwable e) {
lastException.set(e);
} finally {
IOUtils.closeWhileHandlingException(indexInput);
latch.countDown();
}
}
});
fileIndex++;
}
latch.await();
if (lastException.get() != null) {
throw lastException.get();
}
// now, set the clean files request
Set<String> snapshotFiles = Sets.newHashSet(snapshot.getFiles());
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.CLEAN_FILES, new RecoveryCleanFilesRequest(request.recoveryId(), shard.shardId(), snapshotFiles), TransportRequestOptions.options().withTimeout(internalActionTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
stopWatch.stop();
logger.trace("[{}][{}] recovery [phase1] to {}: took [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), stopWatch.totalTime());
response.phase1Time = stopWatch.totalTime().millis();
} catch (Throwable e) {
throw new RecoverFilesRecoveryException(request.shardId(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), e);
}
}
@Override
public void phase2(Translog.Snapshot snapshot) throws ElasticsearchException {
if (shard.state() == IndexShardState.CLOSED) {
throw new IndexShardClosedException(request.shardId());
}
logger.trace("[{}][{}] recovery [phase2] to {}: start", request.shardId().index().name(), request.shardId().id(), request.targetNode());
StopWatch stopWatch = new StopWatch().start();
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.PREPARE_TRANSLOG, new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId()), TransportRequestOptions.options().withTimeout(internalActionTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
stopWatch.stop();
response.startTime = stopWatch.totalTime().millis();
logger.trace("[{}][{}] recovery [phase2] to {}: start took [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), stopWatch.totalTime());
logger.trace("[{}][{}] recovery [phase2] to {}: sending transaction log operations", request.shardId().index().name(), request.shardId().id(), request.targetNode());
stopWatch = new StopWatch().start();
int totalOperations = sendSnapshot(snapshot);
stopWatch.stop();
logger.trace("[{}][{}] recovery [phase2] to {}: took [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), stopWatch.totalTime());
response.phase2Time = stopWatch.totalTime().millis();
response.phase2Operations = totalOperations;
}
@Override
public void phase3(Translog.Snapshot snapshot) throws ElasticsearchException {
if (shard.state() == IndexShardState.CLOSED) {
throw new IndexShardClosedException(request.shardId());
}
logger.trace("[{}][{}] recovery [phase3] to {}: sending transaction log operations", request.shardId().index().name(), request.shardId().id(), request.targetNode());
StopWatch stopWatch = new StopWatch().start();
int totalOperations = sendSnapshot(snapshot);
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FINALIZE, new RecoveryFinalizeRecoveryRequest(request.recoveryId(), request.shardId()), TransportRequestOptions.options().withTimeout(internalActionLongTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
if (request.markAsRelocated()) {
// TODO what happens if the recovery process fails afterwards, we need to mark this back to started
try {
shard.relocated("to " + request.targetNode());
} catch (IllegalIndexShardStateException e) {
// we can ignore this exception since, on the other node, when it moved to phase3
// it will also send shard started, which might cause the index shard we work against
// to be closed by the time we get to the relocated method
}
}
stopWatch.stop();
logger.trace("[{}][{}] recovery [phase3] to {}: took [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode(), stopWatch.totalTime());
response.phase3Time = stopWatch.totalTime().millis();
response.phase3Operations = totalOperations;
}
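// Streams translog operations to the target in batches, flushing whenever the configured operation count or byte size threshold is reached.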
private int sendSnapshot(Translog.Snapshot snapshot) throws ElasticsearchException {
int ops = 0;
long size = 0;
int totalOperations = 0;
List<Translog.Operation> operations = Lists.newArrayList();
while (snapshot.hasNext()) {
if (shard.state() == IndexShardState.CLOSED) {
throw new IndexShardClosedException(request.shardId());
}
Translog.Operation operation = snapshot.next();
operations.add(operation);
ops += 1;
size += operation.estimateSize();
totalOperations++;
if (ops >= recoverySettings.translogOps() || size >= recoverySettings.translogSize().bytes()) {
// don't throttle translog, since we lock for phase3 indexing, so we need to move it as
// fast as possible. Note, since we index docs to replicas while the index files are recovered
// the lock can potentially be removed, in which case, it might make sense to re-enable
// throttling in this phase
// if (recoverySettings.rateLimiter() != null) {
// recoverySettings.rateLimiter().pause(size);
// }
RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(request.recoveryId(), request.shardId(), operations);
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS, translogOperationsRequest, TransportRequestOptions.options().withCompress(recoverySettings.compress()).withType(TransportRequestOptions.Type.RECOVERY).withTimeout(internalActionLongTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
ops = 0;
size = 0;
operations.clear();
}
}
// send the leftover
if (!operations.isEmpty()) {
RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(request.recoveryId(), request.shardId(), operations);
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS, translogOperationsRequest, TransportRequestOptions.options().withCompress(recoverySettings.compress()).withType(TransportRequestOptions.Type.RECOVERY).withTimeout(internalActionLongTimeout), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
}
return totalOperations;
}
});
return response;
}
class StartRecoveryTransportRequestHandler extends BaseTransportRequestHandler<StartRecoveryRequest> {
@Override
public StartRecoveryRequest newInstance() {
return new StartRecoveryRequest();
}
@Override
public String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
public void messageReceived(final StartRecoveryRequest request, final TransportChannel channel) throws Exception {
RecoveryResponse response = recover(request);
channel.sendResponse(response);
}
}
} | 1no label
| src_main_java_org_elasticsearch_indices_recovery_RecoverySource.java |
1,521 | public class ReplicaAllocatedAfterPrimaryTests extends ElasticsearchAllocationTestCase {
private final ESLogger logger = Loggers.getLogger(ReplicaAllocatedAfterPrimaryTests.class);
@Test
public void testBackupIsAllocatedAfterPrimary() {
AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
logger.info("Building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
assertThat(routingTable.index("test").shards().size(), equalTo(1));
assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
assertThat(routingTable.index("test").shard(0).shards().get(1).state(), equalTo(UNASSIGNED));
assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
assertThat(routingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue());
logger.info("Adding one node and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
final String nodeHoldingPrimary = routingTable.index("test").shard(0).primaryShard().currentNodeId();
assertThat(prevRoutingTable != routingTable, equalTo(true));
assertThat(routingTable.index("test").shards().size(), equalTo(1));
assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue());
logger.info("Start all the primary shards");
RoutingNodes routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.node(nodeHoldingPrimary).shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
final String nodeHoldingReplica = routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId();
assertThat(nodeHoldingPrimary, not(equalTo(nodeHoldingReplica)));
assertThat(prevRoutingTable != routingTable, equalTo(true));
assertThat(routingTable.index("test").shards().size(), equalTo(1));
assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo(nodeHoldingReplica));
}
} | 0true
| src_test_java_org_elasticsearch_cluster_routing_allocation_ReplicaAllocatedAfterPrimaryTests.java |
140 | public static class Name {
public static final String Description = "StructuredContentImpl_Description";
public static final String Internal = "StructuredContentImpl_Internal";
public static final String Rules = "StructuredContentImpl_Rules";
} | 0true
| admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_domain_StructuredContentImpl.java |
1,669 | interface ReadBlobListener {
void onPartial(byte[] data, int offset, int size) throws IOException;
void onCompleted();
void onFailure(Throwable t);
} | 0true
| src_main_java_org_elasticsearch_common_blobstore_BlobContainer.java |
2,730 | public class TransportNodesListGatewayMetaState extends TransportNodesOperationAction<TransportNodesListGatewayMetaState.Request, TransportNodesListGatewayMetaState.NodesLocalGatewayMetaState, TransportNodesListGatewayMetaState.NodeRequest, TransportNodesListGatewayMetaState.NodeLocalGatewayMetaState> {
private LocalGatewayMetaState metaState;
@Inject
public TransportNodesListGatewayMetaState(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService) {
super(settings, clusterName, threadPool, clusterService, transportService);
}
TransportNodesListGatewayMetaState init(LocalGatewayMetaState metaState) {
this.metaState = metaState;
return this;
}
public ActionFuture<NodesLocalGatewayMetaState> list(String[] nodesIds, @Nullable TimeValue timeout) {
return execute(new Request(nodesIds).timeout(timeout));
}
@Override
protected String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
protected String transportAction() {
return "/gateway/local/meta-state";
}
@Override
protected boolean transportCompress() {
return true; // compress since the metadata can become large
}
@Override
protected Request newRequest() {
return new Request();
}
@Override
protected NodeRequest newNodeRequest() {
return new NodeRequest();
}
@Override
protected NodeRequest newNodeRequest(String nodeId, Request request) {
return new NodeRequest(nodeId, request);
}
@Override
protected NodeLocalGatewayMetaState newNodeResponse() {
return new NodeLocalGatewayMetaState();
}
@Override
protected NodesLocalGatewayMetaState newResponse(Request request, AtomicReferenceArray responses) {
final List<NodeLocalGatewayMetaState> nodesList = Lists.newArrayList();
final List<FailedNodeException> failures = Lists.newArrayList();
for (int i = 0; i < responses.length(); i++) {
Object resp = responses.get(i);
if (resp instanceof NodeLocalGatewayMetaState) { // will also filter out null response for unallocated ones
nodesList.add((NodeLocalGatewayMetaState) resp);
} else if (resp instanceof FailedNodeException) {
failures.add((FailedNodeException) resp);
}
}
return new NodesLocalGatewayMetaState(clusterName, nodesList.toArray(new NodeLocalGatewayMetaState[nodesList.size()]),
failures.toArray(new FailedNodeException[failures.size()]));
}
@Override
protected NodeLocalGatewayMetaState nodeOperation(NodeRequest request) throws ElasticsearchException {
try {
return new NodeLocalGatewayMetaState(clusterService.localNode(), metaState.loadMetaState());
} catch (Exception e) {
throw new ElasticsearchException("failed to load metadata", e);
}
}
@Override
protected boolean accumulateExceptions() {
return true;
}
static class Request extends NodesOperationRequest<Request> {
public Request() {
}
public Request(String... nodesIds) {
super(nodesIds);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
}
}
public static class NodesLocalGatewayMetaState extends NodesOperationResponse<NodeLocalGatewayMetaState> {
private FailedNodeException[] failures;
NodesLocalGatewayMetaState() {
}
public NodesLocalGatewayMetaState(ClusterName clusterName, NodeLocalGatewayMetaState[] nodes, FailedNodeException[] failures) {
super(clusterName, nodes);
this.failures = failures;
}
public FailedNodeException[] failures() {
return failures;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
nodes = new NodeLocalGatewayMetaState[in.readVInt()];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = new NodeLocalGatewayMetaState();
nodes[i].readFrom(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(nodes.length);
for (NodeLocalGatewayMetaState response : nodes) {
response.writeTo(out);
}
}
}
static class NodeRequest extends NodeOperationRequest {
NodeRequest() {
}
NodeRequest(String nodeId, TransportNodesListGatewayMetaState.Request request) {
super(request, nodeId);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
}
}
public static class NodeLocalGatewayMetaState extends NodeOperationResponse {
private MetaData metaData;
NodeLocalGatewayMetaState() {
}
public NodeLocalGatewayMetaState(DiscoveryNode node, MetaData metaData) {
super(node);
this.metaData = metaData;
}
public MetaData metaData() {
return metaData;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
if (in.readBoolean()) {
metaData = MetaData.Builder.readFrom(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
if (metaData == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
MetaData.Builder.writeTo(metaData, out);
}
}
}
} | 0true
| src_main_java_org_elasticsearch_gateway_local_state_meta_TransportNodesListGatewayMetaState.java |
622 | indexEngine.getValuesMajor(iRangeFrom, isInclusive, null, new OIndexEngine.ValuesResultListener() {
@Override
public boolean addResult(OIdentifiable identifiable) {
return resultListener.addResult(identifiable);
}
}); | 0true
| core_src_main_java_com_orientechnologies_orient_core_index_OIndexOneValue.java |
5,776 | public class MatchedQueriesFetchSubPhase implements FetchSubPhase {
@Override
public Map<String, ? extends SearchParseElement> parseElements() {
return ImmutableMap.of();
}
@Override
public boolean hitsExecutionNeeded(SearchContext context) {
return false;
}
@Override
public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException {
}
@Override
public boolean hitExecutionNeeded(SearchContext context) {
return !context.parsedQuery().namedFilters().isEmpty()
|| (context.parsedPostFilter() != null && !context.parsedPostFilter().namedFilters().isEmpty());
}
@Override
public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException {
List<String> matchedQueries = Lists.newArrayListWithCapacity(2);
addMatchedQueries(hitContext, context.parsedQuery().namedFilters(), matchedQueries);
if (context.parsedPostFilter() != null) {
addMatchedQueries(hitContext, context.parsedPostFilter().namedFilters(), matchedQueries);
}
hitContext.hit().matchedQueries(matchedQueries.toArray(new String[matchedQueries.size()]));
}
private void addMatchedQueries(HitContext hitContext, ImmutableMap<String, Filter> namedFiltersAndQueries, List<String> matchedQueries) {
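// A hit matches a named filter/query when its doc id is present in the filter's DocIdSet for the current segment.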
for (Map.Entry<String, Filter> entry : namedFiltersAndQueries.entrySet()) {
String name = entry.getKey();
Filter filter = entry.getValue();
try {
DocIdSet docIdSet = filter.getDocIdSet(hitContext.readerContext(), null); // null is fine, since we filter by hitContext.docId()
if (!DocIdSets.isEmpty(docIdSet)) {
Bits bits = docIdSet.bits();
if (bits != null) {
if (bits.get(hitContext.docId())) {
matchedQueries.add(name);
}
} else {
DocIdSetIterator iterator = docIdSet.iterator();
if (iterator != null) {
if (iterator.advance(hitContext.docId()) == hitContext.docId()) {
matchedQueries.add(name);
}
}
}
}
} catch (IOException e) {
// ignore
} finally {
SearchContext.current().clearReleasables();
}
}
}
} | 1no label
| src_main_java_org_elasticsearch_search_fetch_matchedqueries_MatchedQueriesFetchSubPhase.java |
92 | public class TTYConsoleReader implements OConsoleReader {
private static final String HISTORY_FILE_NAME = ".orientdb_history";
private static int MAX_HISTORY_ENTRIES = 50;
public static int END_CHAR = 70;
public static int BEGIN_CHAR = 72;
public static int DEL_CHAR = 126;
public static int DOWN_CHAR = 66;
public static int UP_CHAR = 65;
public static int RIGHT_CHAR = 67;
public static int LEFT_CHAR = 68;
public static int HORIZONTAL_TAB_CHAR = 9;
public static int VERTICAL_TAB_CHAR = 11;
public static int BACKSPACE_CHAR = 127;
public static int NEW_LINE_CHAR = 10;
public static int UNIT_SEPARATOR_CHAR = 31;
protected int currentPos = 0;
protected List<String> history = new ArrayList<String>();
protected String historyBuffer;
protected Reader inStream;
protected PrintStream outStream;
public TTYConsoleReader() {
File file = getHistoryFile(true);
BufferedReader reader;
try {
reader = new BufferedReader(new FileReader(file));
String historyEntry = reader.readLine();
while (historyEntry != null) {
history.add(historyEntry);
historyEntry = reader.readLine();
}
if (System.getProperty("file.encoding") != null) {
inStream = new InputStreamReader(System.in, System.getProperty("file.encoding"));
outStream = new PrintStream(System.out, false, System.getProperty("file.encoding"));
} else {
inStream = new InputStreamReader(System.in);
outStream = System.out;
}
} catch (FileNotFoundException fnfe) {
OLogManager.instance().error(this, "History file not found", fnfe, "");
} catch (IOException ioe) {
OLogManager.instance().error(this, "Error reading history file.", ioe, "");
}
}
protected OConsoleApplication console;
public String readLine() {
String consoleInput = "";
try {
StringBuffer buffer = new StringBuffer();
currentPos = 0;
historyBuffer = null;
int historyNum = history.size();
boolean hintedHistory = false;
while (true) {
boolean escape = false;
boolean ctrl = false;
int next = inStream.read();
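// ESC (27) introduces an ANSI escape sequence: skip the '[' and read the command byte.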
if (next == 27) {
escape = true;
inStream.read();
next = inStream.read();
}
if (escape) {
if (next == 49) {
inStream.read();
next = inStream.read();
}
if (next == 53) {
ctrl = true;
next = inStream.read();
}
if (ctrl) {
if (next == RIGHT_CHAR) {
currentPos = buffer.indexOf(" ", currentPos) + 1;
if (currentPos == 0)
currentPos = buffer.length();
StringBuffer cleaner = new StringBuffer();
for (int i = 0; i < buffer.length(); i++) {
cleaner.append(" ");
}
rewriteConsole(cleaner, true);
rewriteConsole(buffer, false);
} else if (next == LEFT_CHAR) {
if (currentPos > 1 && currentPos < buffer.length() && buffer.charAt(currentPos - 1) == ' ') {
currentPos = buffer.lastIndexOf(" ", (currentPos - 2)) + 1;
} else {
currentPos = buffer.lastIndexOf(" ", currentPos) + 1;
}
if (currentPos < 0)
currentPos = 0;
StringBuffer cleaner = new StringBuffer();
for (int i = 0; i < buffer.length(); i++) {
cleaner.append(" ");
}
rewriteConsole(cleaner, true);
rewriteConsole(buffer, false);
} else {
}
} else {
if (next == UP_CHAR && !history.isEmpty()) {
if (history.size() > 0) { // UP
StringBuffer cleaner = new StringBuffer();
for (int i = 0; i < buffer.length(); i++) {
cleaner.append(" ");
}
rewriteConsole(cleaner, true);
if (!hintedHistory && (historyNum == history.size() || !buffer.toString().equals(history.get(historyNum)))) {
if (buffer.length() > 0) {
hintedHistory = true;
historyBuffer = buffer.toString();
} else {
historyBuffer = null;
}
}
historyNum = getHintedHistoryIndexUp(historyNum);
if (historyNum > -1) {
buffer = new StringBuffer(history.get(historyNum));
} else {
buffer = new StringBuffer(historyBuffer);
}
currentPos = buffer.length();
rewriteConsole(buffer, false);
// writeHistory(historyNum);
}
} else if (next == DOWN_CHAR && !history.isEmpty()) { // DOWN
if (history.size() > 0) {
StringBuffer cleaner = new StringBuffer();
for (int i = 0; i < buffer.length(); i++) {
cleaner.append(" ");
}
rewriteConsole(cleaner, true);
historyNum = getHintedHistoryIndexDown(historyNum);
if (historyNum == history.size()) {
if (historyBuffer != null) {
buffer = new StringBuffer(historyBuffer);
} else {
buffer = new StringBuffer("");
}
} else {
buffer = new StringBuffer(history.get(historyNum));
}
currentPos = buffer.length();
rewriteConsole(buffer, false);
// writeHistory(historyNum);
}
} else if (next == RIGHT_CHAR) {
if (currentPos < buffer.length()) {
currentPos++;
StringBuffer cleaner = new StringBuffer();
for (int i = 0; i < buffer.length(); i++) {
cleaner.append(" ");
}
rewriteConsole(cleaner, true);
rewriteConsole(buffer, false);
}
} else if (next == LEFT_CHAR) {
if (currentPos > 0) {
currentPos--;
StringBuffer cleaner = new StringBuffer();
for (int i = 0; i < buffer.length(); i++) {
cleaner.append(" ");
}
rewriteConsole(cleaner, true);
rewriteConsole(buffer, false);
}
} else if (next == END_CHAR) {
currentPos = buffer.length();
StringBuffer cleaner = new StringBuffer();
for (int i = 0; i < buffer.length(); i++) {
cleaner.append(" ");
}
rewriteConsole(cleaner, true);
rewriteConsole(buffer, false);
} else if (next == BEGIN_CHAR) {
currentPos = 0;
StringBuffer cleaner = new StringBuffer();
for (int i = 0; i < buffer.length(); i++) {
cleaner.append(" ");
}
rewriteConsole(cleaner, true);
rewriteConsole(buffer, false);
} else {
}
}
} else {
if (next == NEW_LINE_CHAR) {
outStream.println();
break;
} else if (next == BACKSPACE_CHAR) {
if (buffer.length() > 0 && currentPos > 0) {
StringBuffer cleaner = new StringBuffer();
for (int i = 0; i < buffer.length(); i++) {
cleaner.append(" ");
}
buffer.deleteCharAt(currentPos - 1);
currentPos--;
rewriteConsole(cleaner, true);
rewriteConsole(buffer, false);
}
} else if (next == DEL_CHAR) {
if (buffer.length() > 0 && currentPos >= 0 && currentPos < buffer.length()) {
StringBuffer cleaner = new StringBuffer();
for (int i = 0; i < buffer.length(); i++) {
cleaner.append(" ");
}
buffer.deleteCharAt(currentPos);
rewriteConsole(cleaner, true);
rewriteConsole(buffer, false);
}
} else if (next == HORIZONTAL_TAB_CHAR) {
StringBuffer cleaner = new StringBuffer();
for (int i = 0; i < buffer.length(); i++) {
cleaner.append(" ");
}
buffer = writeHint(buffer);
rewriteConsole(cleaner, true);
rewriteConsole(buffer, false);
currentPos = buffer.length();
} else {
if ((next > UNIT_SEPARATOR_CHAR && next < BACKSPACE_CHAR) || next > BACKSPACE_CHAR) {
StringBuffer cleaner = new StringBuffer();
for (int i = 0; i < buffer.length(); i++) {
cleaner.append(" ");
}
if (currentPos == buffer.length()) {
buffer.append((char) next);
} else {
buffer.insert(currentPos, (char) next);
}
currentPos++;
rewriteConsole(cleaner, true);
rewriteConsole(buffer, false);
} else {
outStream.println();
outStream.print(buffer);
}
}
historyNum = history.size();
hintedHistory = false;
}
}
consoleInput = buffer.toString();
history.remove(consoleInput);
history.add(consoleInput);
historyNum = history.size();
writeHistory(historyNum);
} catch (IOException e) {
return null;
}
if (consoleInput.equals("clear")) {
outStream.flush();
for (int i = 0; i < 150; i++) {
outStream.println();
}
outStream.print("\r");
outStream.print("orientdb> ");
return readLine();
} else {
return consoleInput;
}
}
private void writeHistory(int historyNum) throws IOException {
if (historyNum <= MAX_HISTORY_ENTRIES) {
File historyFile = getHistoryFile(false);
BufferedWriter writer = new BufferedWriter(new FileWriter(historyFile));
try {
for (String historyEntry : history) {
writer.write(historyEntry);
writer.newLine();
}
} finally {
writer.flush();
writer.close();
}
} else {
File historyFile = getHistoryFile(false);
BufferedWriter writer = new BufferedWriter(new FileWriter(historyFile));
try {
for (String historyEntry : history.subList(historyNum - MAX_HISTORY_ENTRIES - 1, historyNum - 1)) {
writer.write(historyEntry);
writer.newLine();
}
} finally {
writer.flush();
writer.close();
}
}
}
private StringBuffer writeHint(StringBuffer buffer) {
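// Tab completion: collect console commands that start with the current buffer; a single match is completed in place, multiple matches are printed as hints.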
List<String> suggestions = new ArrayList<String>();
for (Method method : console.getConsoleMethods().keySet()) {
String command = OConsoleApplication.getClearName(method.getName());
if (command.startsWith(buffer.toString())) {
suggestions.add(command);
}
}
if (suggestions.size() > 1) {
StringBuffer hintBuffer = new StringBuffer();
String[] bufferComponents = buffer.toString().split(" ");
String[] suggestionComponents;
Set<String> bufferPart = new HashSet<String>();
String suggestionPart = null;
boolean appendSpace = true;
for (String suggestion : suggestions) {
suggestionComponents = suggestion.split(" ");
hintBuffer.append("* " + suggestion + " ");
hintBuffer.append("\n");
suggestionPart = "";
if (bufferComponents.length == 0 || buffer.length() == 0) {
suggestionPart = null;
} else if (bufferComponents.length == 1) {
bufferPart.add(suggestionComponents[0]);
if (bufferPart.size() > 1) {
suggestionPart = bufferComponents[0];
appendSpace = false;
} else {
suggestionPart = suggestionComponents[0];
}
} else {
bufferPart.add(suggestionComponents[bufferComponents.length - 1]);
if (bufferPart.size() > 1) {
for (int i = 0; i < bufferComponents.length; i++) {
suggestionPart += bufferComponents[i];
if (i < (bufferComponents.length - 1)) {
suggestionPart += " ";
}
appendSpace = false;
}
} else {
for (int i = 0; i < suggestionComponents.length; i++) {
suggestionPart += suggestionComponents[i] + " ";
}
}
}
}
if (suggestionPart != null) {
buffer = new StringBuffer();
buffer.append(suggestionPart);
if (appendSpace) {
buffer.append(" ");
}
}
hintBuffer.append("-----------------------------\n");
rewriteHintConsole(hintBuffer);
} else if (suggestions.size() > 0) {
buffer = new StringBuffer();
buffer.append(suggestions.get(0));
buffer.append(" ");
}
return buffer;
}
public void setConsole(OConsoleApplication iConsole) {
console = iConsole;
}
public OConsoleApplication getConsole() {
return console;
}
private void rewriteConsole(StringBuffer buffer, boolean cleaner) {
outStream.print("\r");
outStream.print("orientdb> ");
if (currentPos < buffer.length() && buffer.length() > 0 && !cleaner) {
outStream.print("\033[0m" + buffer.substring(0, currentPos) + "\033[0;30;47m" + buffer.substring(currentPos, currentPos + 1)
+ "\033[0m" + buffer.substring(currentPos + 1) + "\033[0m");
} else {
outStream.print(buffer);
}
}
private void rewriteHintConsole(StringBuffer buffer) {
outStream.print("\r");
outStream.print(buffer);
}
private int getHintedHistoryIndexUp(int historyNum) {
if (historyBuffer != null && !historyBuffer.equals("")) {
for (int i = (historyNum - 1); i >= 0; i--) {
if (history.get(i).startsWith(historyBuffer)) {
return i;
}
}
return -1;
}
return historyNum > 0 ? (historyNum - 1) : 0;
}
private int getHintedHistoryIndexDown(int historyNum) throws IOException {
if (historyBuffer != null && !historyBuffer.equals("")) {
for (int i = historyNum + 1; i < history.size(); i++) {
if (history.get(i).startsWith(historyBuffer)) {
return i;
}
}
return history.size();
}
return historyNum < history.size() ? (historyNum + 1) : history.size();
}
private File getHistoryFile(boolean read) {
File file = new File(HISTORY_FILE_NAME);
if (!file.exists()) {
try {
file.createNewFile();
} catch (IOException ioe) {
OLogManager.instance().error(this, "Error creating history file.", ioe, "");
}
} else if (!read) {
file.delete();
try {
file.createNewFile();
} catch (IOException ioe) {
OLogManager.instance().error(this, "Error creating history file.", ioe, "");
}
}
return file;
}
} | 0true
| commons_src_main_java_com_orientechnologies_common_console_TTYConsoleReader.java |
736 | public static final class SBTreeEntry<K, V> implements Comparable<SBTreeEntry<K, V>> {
private final Comparator<? super K> comparator = ODefaultComparator.INSTANCE;
public final long leftChild;
public final long rightChild;
public final K key;
public final OSBTreeValue<V> value;
public SBTreeEntry(long leftChild, long rightChild, K key, OSBTreeValue<V> value) {
this.leftChild = leftChild;
this.rightChild = rightChild;
this.key = key;
this.value = value;
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
final SBTreeEntry<?, ?> that = (SBTreeEntry<?, ?>) o;
if (leftChild != that.leftChild)
return false;
if (rightChild != that.rightChild)
return false;
if (!key.equals(that.key))
return false;
if (value != null ? !value.equals(that.value) : that.value != null)
return false;
return true;
}
@Override
public int hashCode() {
int result = (int) (leftChild ^ (leftChild >>> 32));
result = 31 * result + (int) (rightChild ^ (rightChild >>> 32));
result = 31 * result + key.hashCode();
result = 31 * result + (value != null ? value.hashCode() : 0);
return result;
}
@Override
public String toString() {
return "SBTreeEntry{" + "leftChild=" + leftChild + ", rightChild=" + rightChild + ", key=" + key + ", value=" + value + '}';
}
@Override
public int compareTo(SBTreeEntry<K, V> other) {
return comparator.compare(key, other.key);
}
} | 1no label
| core_src_main_java_com_orientechnologies_orient_core_index_sbtree_local_OSBTreeBucket.java |
3,135 | awaitBusy(new Predicate<Object>() {
public boolean apply(Object o) {
IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).get();
long newSegmentsMemoryWithoutBloom = stats.getTotal().getSegments().getMemoryInBytes();
logger.info("trying segments without bloom: {}", newSegmentsMemoryWithoutBloom);
return newSegmentsMemoryWithoutBloom == segmentsMemoryWithoutBloom;
}
}); | 0true
| src_test_java_org_elasticsearch_index_engine_internal_InternalEngineIntegrationTest.java |
1,120 | public class WanReplicationConfig {
String name;
List<WanTargetClusterConfig> targetClusterConfigs;
public List<WanTargetClusterConfig> getTargetClusterConfigs() {
return targetClusterConfigs;
}
public WanReplicationConfig addTargetClusterConfig(WanTargetClusterConfig wanTargetClusterConfig) {
if (targetClusterConfigs == null) {
targetClusterConfigs = new ArrayList<WanTargetClusterConfig>(2);
}
targetClusterConfigs.add(wanTargetClusterConfig);
return this;
}
public WanReplicationConfig setTargetClusterConfigs(List<WanTargetClusterConfig> list) {
targetClusterConfigs = list;
return this;
}
public String getName() {
return name;
}
public WanReplicationConfig setName(String name) {
this.name = name;
return this;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("WanReplicationConfig");
sb.append("{name='").append(name).append('\'');
sb.append(", targetClusterConfigs=").append(targetClusterConfigs);
sb.append('}');
return sb.toString();
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_config_WanReplicationConfig.java |
528 | public class DimensionUnitOfMeasureType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, DimensionUnitOfMeasureType> TYPES = new LinkedHashMap<String, DimensionUnitOfMeasureType>();
public static final DimensionUnitOfMeasureType CENTIMETERS = new DimensionUnitOfMeasureType("CENTIMETERS", "Centimeters");
public static final DimensionUnitOfMeasureType METERS = new DimensionUnitOfMeasureType("METERS", "Meters");
public static final DimensionUnitOfMeasureType INCHES = new DimensionUnitOfMeasureType("INCHES", "Inches");
public static final DimensionUnitOfMeasureType FEET = new DimensionUnitOfMeasureType("FEET", "Feet");
public static DimensionUnitOfMeasureType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public DimensionUnitOfMeasureType() {
//do nothing
}
public DimensionUnitOfMeasureType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
private void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)){
TYPES.put(type, this);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
DimensionUnitOfMeasureType other = (DimensionUnitOfMeasureType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
} | 1no label
| common_src_main_java_org_broadleafcommerce_common_util_DimensionUnitOfMeasureType.java |
56 | public final class DiffApplyingPrimitiveIntIterator extends AbstractPrimitiveIntIterator
{
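// The iterator advances through phases: the filtered source first, then the diff's added elements, then end-of-iteration.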
private enum Phase
{
FILTERED_SOURCE
{
@Override
void computeNext( DiffApplyingPrimitiveIntIterator self )
{
self.computeNextFromSourceAndFilter();
}
},
ADDED_ELEMENTS
{
@Override
void computeNext( DiffApplyingPrimitiveIntIterator self )
{
self.computeNextFromAddedElements();
}
},
NO_ADDED_ELEMENTS
{
@Override
void computeNext( DiffApplyingPrimitiveIntIterator self )
{
self.endReached();
}
};
abstract void computeNext( DiffApplyingPrimitiveIntIterator self );
}
private final PrimitiveIntIterator source;
private final Iterator<?> addedElementsIterator;
private final Set<?> addedElements;
private final Set<?> removedElements;
Phase phase;
public DiffApplyingPrimitiveIntIterator( PrimitiveIntIterator source,
Set<?> addedElements, Set<?> removedElements )
{
this.source = source;
this.addedElements = addedElements;
this.addedElementsIterator = addedElements.iterator();
this.removedElements = removedElements;
phase = Phase.FILTERED_SOURCE;
computeNext();
}
@Override
protected void computeNext()
{
phase.computeNext( this );
}
private void computeNextFromSourceAndFilter()
{
for ( boolean hasNext = source.hasNext(); hasNext; hasNext = source.hasNext() )
{
int value = source.next();
next( value );
if ( !removedElements.contains( value ) && !addedElements.contains( value ) )
{
return;
}
}
transitionToAddedElements();
}
private void transitionToAddedElements()
{
phase = !addedElementsIterator.hasNext() ? Phase.NO_ADDED_ELEMENTS : Phase.ADDED_ELEMENTS;
computeNext();
}
private void computeNextFromAddedElements()
{
if ( addedElementsIterator.hasNext() )
{
next( (Integer) addedElementsIterator.next() );
}
else
{
endReached();
}
}
} | 1no label
| community_kernel_src_main_java_org_neo4j_kernel_impl_util_DiffApplyingPrimitiveIntIterator.java |
309 | public class MergeFileSystemAndClassPathXMLApplicationContext extends AbstractMergeXMLApplicationContext {
public MergeFileSystemAndClassPathXMLApplicationContext(ApplicationContext parent) {
super(parent);
}
public MergeFileSystemAndClassPathXMLApplicationContext(String[] classPathLocations, String[] fileSystemLocations) throws BeansException {
this(classPathLocations, fileSystemLocations, null);
}
public MergeFileSystemAndClassPathXMLApplicationContext(LinkedHashMap<String, ResourceType> locations, ApplicationContext parent) throws BeansException {
this(parent);
ResourceInputStream[] resources = new ResourceInputStream[locations.size()];
int j = 0;
for (Map.Entry<String, ResourceType> entry : locations.entrySet()) {
switch (entry.getValue()) {
case CLASSPATH:
resources[j] = new ResourceInputStream(getClassLoader(parent).getResourceAsStream(entry.getKey()), entry.getKey());
break;
case FILESYSTEM:
try {
File temp = new File(entry.getKey());
resources[j] = new ResourceInputStream(new BufferedInputStream(new FileInputStream(temp)), entry.getKey());
} catch (FileNotFoundException e) {
throw new FatalBeanException("Unable to merge context files", e);
}
break;
}
j++;
}
ImportProcessor importProcessor = new ImportProcessor(this);
try {
resources = importProcessor.extract(resources);
} catch (MergeException e) {
throw new FatalBeanException("Unable to merge source and patch locations", e);
}
this.configResources = new MergeApplicationContextXmlConfigResource().getConfigResources(resources, null);
refresh();
}
public MergeFileSystemAndClassPathXMLApplicationContext(String[] classPathLocations, String[] fileSystemLocations, ApplicationContext parent) throws BeansException {
this(parent);
ResourceInputStream[] classPathSources;
ResourceInputStream[] fileSystemSources;
try {
classPathSources = new ResourceInputStream[classPathLocations.length];
for (int j=0;j<classPathLocations.length;j++){
classPathSources[j] = new ResourceInputStream(getClassLoader(parent).getResourceAsStream(classPathLocations[j]), classPathLocations[j]);
}
fileSystemSources = new ResourceInputStream[fileSystemLocations.length];
for (int j=0;j<fileSystemSources.length;j++){
File temp = new File(fileSystemLocations[j]);
fileSystemSources[j] = new ResourceInputStream(new BufferedInputStream(new FileInputStream(temp)), fileSystemLocations[j]);
}
} catch (FileNotFoundException e) {
throw new FatalBeanException("Unable to merge context files", e);
}
ImportProcessor importProcessor = new ImportProcessor(this);
try {
classPathSources = importProcessor.extract(classPathSources);
fileSystemSources = importProcessor.extract(fileSystemSources);
} catch (MergeException e) {
throw new FatalBeanException("Unable to merge source and patch locations", e);
}
this.configResources = new MergeApplicationContextXmlConfigResource().getConfigResources(classPathSources, fileSystemSources);
refresh();
}
/**
* This could be advantageous for subclasses to override in order to utilize the parent application context. By default,
* this utilizes the class loader for the current class.
*/
protected ClassLoader getClassLoader(ApplicationContext parent) {
return MergeFileSystemAndClassPathXMLApplicationContext.class.getClassLoader();
}
public enum ResourceType {
FILESYSTEM,CLASSPATH
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_extensibility_context_MergeFileSystemAndClassPathXMLApplicationContext.java |
1,555 | public interface ProcessContextFactory<T> {
public ProcessContext createContext(T preSeedData) throws WorkflowException;
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_workflow_ProcessContextFactory.java |
1,418 | public class DummyProperty {
private long id;
private int version;
private String key;
private DummyEntity entity;
public DummyProperty() {
}
public DummyProperty(String key) {
super();
this.key = key;
}
public DummyProperty(String key, DummyEntity entity) {
super();
this.key = key;
this.entity = entity;
}
public DummyProperty(long id, String key, DummyEntity entity) {
super();
this.id = id;
this.key = key;
this.entity = entity;
}
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
public int getVersion() {
return version;
}
public void setVersion(int version) {
this.version = version;
}
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public DummyEntity getEntity() {
return entity;
}
public void setEntity(DummyEntity entity) {
this.entity = entity;
}
} | 0true
| hazelcast-hibernate_hazelcast-hibernate4_src_test_java_com_hazelcast_hibernate_entity_DummyProperty.java |
3,414 | public interface IndexGateway extends IndexComponent, CloseableIndexComponent {
String type();
Class<? extends IndexShardGateway> shardGatewayClass();
} | 0true
| src_main_java_org_elasticsearch_index_gateway_IndexGateway.java |
2,228 | static class CustomBoostFactorScorer extends Scorer {
private final float subQueryBoost;
private final Scorer scorer;
private final FilterFunction[] filterFunctions;
private final ScoreMode scoreMode;
private final float maxBoost;
private final Bits[] docSets;
private final CombineFunction scoreCombiner;
private CustomBoostFactorScorer(CustomBoostFactorWeight w, Scorer scorer, ScoreMode scoreMode, FilterFunction[] filterFunctions,
float maxBoost, Bits[] docSets, CombineFunction scoreCombiner) throws IOException {
super(w);
this.subQueryBoost = w.getQuery().getBoost();
this.scorer = scorer;
this.scoreMode = scoreMode;
this.filterFunctions = filterFunctions;
this.maxBoost = maxBoost;
this.docSets = docSets;
this.scoreCombiner = scoreCombiner;
}
@Override
public int docID() {
return scorer.docID();
}
@Override
public int advance(int target) throws IOException {
return scorer.advance(target);
}
@Override
public int nextDoc() throws IOException {
return scorer.nextDoc();
}
@Override
public float score() throws IOException {
int docId = scorer.docID();
double factor = 1.0f;
float subQueryScore = scorer.score();
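// Combine the scores of all matching filter functions according to the score mode (First, Max, Min, Multiply, Sum/Avg).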
if (scoreMode == ScoreMode.First) {
for (int i = 0; i < filterFunctions.length; i++) {
if (docSets[i].get(docId)) {
factor = filterFunctions[i].function.score(docId, subQueryScore);
break;
}
}
} else if (scoreMode == ScoreMode.Max) {
double maxFactor = Double.NEGATIVE_INFINITY;
for (int i = 0; i < filterFunctions.length; i++) {
if (docSets[i].get(docId)) {
maxFactor = Math.max(filterFunctions[i].function.score(docId, subQueryScore), maxFactor);
}
}
if (maxFactor != Float.NEGATIVE_INFINITY) {
factor = maxFactor;
}
} else if (scoreMode == ScoreMode.Min) {
double minFactor = Double.POSITIVE_INFINITY;
for (int i = 0; i < filterFunctions.length; i++) {
if (docSets[i].get(docId)) {
minFactor = Math.min(filterFunctions[i].function.score(docId, subQueryScore), minFactor);
}
}
if (minFactor != Float.POSITIVE_INFINITY) {
factor = minFactor;
}
} else if (scoreMode == ScoreMode.Multiply) {
for (int i = 0; i < filterFunctions.length; i++) {
if (docSets[i].get(docId)) {
factor *= filterFunctions[i].function.score(docId, subQueryScore);
}
}
} else { // Avg / Total
double totalFactor = 0.0f;
int count = 0;
for (int i = 0; i < filterFunctions.length; i++) {
if (docSets[i].get(docId)) {
totalFactor += filterFunctions[i].function.score(docId, subQueryScore);
count++;
}
}
if (count != 0) {
factor = totalFactor;
if (scoreMode == ScoreMode.Avg) {
factor /= count;
}
}
}
return scoreCombiner.combine(subQueryBoost, subQueryScore, factor, maxBoost);
}
@Override
public int freq() throws IOException {
return scorer.freq();
}
@Override
public long cost() {
return scorer.cost();
}
} | 0true
| src_main_java_org_elasticsearch_common_lucene_search_function_FiltersFunctionScoreQuery.java |
1,647 | public class VersionMismatchLogRequest implements ConsoleRequest {
private String manCenterVersion;
public VersionMismatchLogRequest() {
}
public VersionMismatchLogRequest(String manCenterVersion) {
this.manCenterVersion = manCenterVersion;
}
@Override
public int getType() {
return ConsoleRequestConstants.REQUEST_TYPE_LOG_VERSION_MISMATCH;
}
@Override
public Object readResponse(ObjectDataInput in) throws IOException {
return "SUCCESS";
}
@Override
public void writeResponse(ManagementCenterService managementCenterService, ObjectDataOutput dos) throws Exception {
managementCenterService.signalVersionMismatch();
Node node = managementCenterService.getHazelcastInstance().node;
ILogger logger = node.getLogger(VersionMismatchLogRequest.class);
// TODO: does this message make sense? To the user it just displays version information we already know;
// it gives no clue that the management center version does not match their own.
logger.severe("The version of the management center is " + manCenterVersion);
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeUTF(manCenterVersion);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
manCenterVersion = in.readUTF();
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_management_request_VersionMismatchLogRequest.java |
532 | public class PomEvaluator {
private static String SEPARATOR = "============================================================";
private static Map<String, Category> knownLibraries = new HashMap<String,Category>();
private static Map<LicenseType, List<Dependency>> licenseDependencyMap = new HashMap<LicenseType, List<Dependency>>();
private static Category SPRING = new Category("Spring Framework", LicenseType.APACHE2, FrameworkType.GENERAL);
private static Category HIBERNATE = new Category("Hibernate Framework", LicenseType.LGPL, FrameworkType.PERSISTENCE);
private static Category GOOGLE = new Category("Google", LicenseType.APACHE2, FrameworkType.GENERAL);
private static Category BROADLEAF_OPEN_SOURCE = new Category("Broadleaf Framework Open Source", LicenseType.APACHE2, FrameworkType.ECOMMERCE);
private static Category BROADLEAF_COMMERCIAL = new Category("Broadleaf Framework Commercial", LicenseType.APACHE2, FrameworkType.ECOMMERCE);
private static Category APACHE_FOUNDATION = new Category("Apache 2.0", LicenseType.APACHE2, FrameworkType.GENERAL);
private static Category JAVAX = new Category("javax", LicenseType.JAVA_EXTENSION, FrameworkType.OTHER);
private static Category THYMELEAF = new Category("thymeleaf", LicenseType.APACHE2, FrameworkType.UI);
private static Category SLF4J = new Category("slf4j", LicenseType.MIT, FrameworkType.LOGGING);
private static Category LOG4J = new Category("log4j", LicenseType.APACHE2, FrameworkType.LOGGING);
private static Category OTHER = new Category("Other", LicenseType.OTHER, FrameworkType.OTHER);
private static Category SMART_GWT = new Category("Smart GWT UI Toolkit", LicenseType.LGPL, FrameworkType.UI);
// CODEHAUS is used by Apache and Spring Framework
private static Category JACKSON = new Category("Codehaus Jackson Library", LicenseType.APACHE2, FrameworkType.XML, SPRING, APACHE_FOUNDATION);
private static Category PLEXUS = new Category("Codehaus Plexus Library", LicenseType.APACHE2, FrameworkType.XML, SPRING, APACHE_FOUNDATION);
private static Category ASM = new Category("OW2 ASM libraries", LicenseType.OW2, FrameworkType.GENERAL, APACHE_FOUNDATION, GOOGLE);
private static Category CGLIB = new Category("CGLIB libraries", LicenseType.APACHE2, FrameworkType.GENERAL, SPRING, HIBERNATE);
private static Category JERSEY = new Category("Jersey Libraries", LicenseType.LGPL, FrameworkType.XML);
private static Category XSTREAM = new Category("Codehaus XML parsing library", LicenseType.XSTREAM_BSD, FrameworkType.XML);
private static Category JODA_TIME = new Category("Date and time utilities", LicenseType.APACHE2, FrameworkType.UTILITY, APACHE_FOUNDATION);
private static Category TRANSMORPH = new Category("Entropy Transmorph - SalesForce.com", LicenseType.APACHE2, FrameworkType.UTILITY);
private static Category QUARTZ = new Category("Teracotta Quartz", LicenseType.APACHE2, FrameworkType.SCHEDULER);
private static Category EHCACHE = new Category("Teracotta ehCache", LicenseType.APACHE2, FrameworkType.CACHE);
private static Category ANTLR = new Category("Antlr Runtime", LicenseType.ANTLR_BSD, FrameworkType.UTILITY, APACHE_FOUNDATION);
private static Category ASPECTJ = new Category("Aspect J", LicenseType.ECLIPSE_PUBLIC, FrameworkType.GENERAL, SPRING);
private static Category MVEL = new Category("MVEL rules evaluation", LicenseType.APACHE2, FrameworkType.RULES);
private static Category ORO = new Category("ORO regular expressions", LicenseType.APACHE2, FrameworkType.RULES);
private static Category JAVA_ASSIST = new Category("Java Assist", LicenseType.APACHE2, FrameworkType.GENERAL);
private static Category ANTISAMMY = new Category("Anti-Sammy", LicenseType.ANTISAMMY_BSD, FrameworkType.GENERAL);
private static void initializeKnownLibraries() {
// Spring
knownLibraries.put("org.springframework", SPRING);
knownLibraries.put("org.springframework.security", SPRING);
knownLibraries.put("org.springframework.social", SPRING);
// Hibernate
knownLibraries.put("org.hibernate", HIBERNATE);
knownLibraries.put("org.hibernate.javax.persistence", HIBERNATE);
// Broadleaf
knownLibraries.put("org.broadleafcommerce", BROADLEAF_OPEN_SOURCE);
knownLibraries.put("com.broadleafcommerce", BROADLEAF_COMMERCIAL);
// Thymeleaf
knownLibraries.put("org.thymeleaf", THYMELEAF);
// JavaX
knownLibraries.put("javax.xml.bind", JAVAX);
knownLibraries.put("javax.mail", JAVAX);
knownLibraries.put("javax.servlet", JAVAX);
knownLibraries.put("javax.servlet.jsp", JAVAX);
knownLibraries.put("jstl", JAVAX);
// Logging
knownLibraries.put("org.slf4j", SLF4J);
knownLibraries.put("log4j", LOG4J);
// Apache
knownLibraries.put("commons-validator", APACHE_FOUNDATION);
knownLibraries.put("commons-collections", APACHE_FOUNDATION);
knownLibraries.put("commons-beanutils", APACHE_FOUNDATION);
knownLibraries.put("commons-cli", APACHE_FOUNDATION);
knownLibraries.put("commons-fileupload", APACHE_FOUNDATION);
knownLibraries.put("commons-dbcp", APACHE_FOUNDATION);
knownLibraries.put("commons-codec", APACHE_FOUNDATION);
knownLibraries.put("org.apache.commons", APACHE_FOUNDATION);
knownLibraries.put("commons-lang", APACHE_FOUNDATION);
knownLibraries.put("commons-digester", APACHE_FOUNDATION);
knownLibraries.put("commons-logging", APACHE_FOUNDATION);
knownLibraries.put("commons-pool", APACHE_FOUNDATION);
knownLibraries.put("org.apache.geronimo.specs", APACHE_FOUNDATION);
knownLibraries.put("org.apache.solr", APACHE_FOUNDATION);
knownLibraries.put("org.apache.velocity", APACHE_FOUNDATION);
knownLibraries.put("org.apache.xmlbeans", APACHE_FOUNDATION);
knownLibraries.put("taglibs", APACHE_FOUNDATION);
knownLibraries.put("jakarta-regexp", APACHE_FOUNDATION);
// Google - Will retire
knownLibraries.put("com.google.gwt", GOOGLE);
knownLibraries.put("com.google.code.gwt-math", GOOGLE);
knownLibraries.put("com.google.code.findbugs", GOOGLE);
knownLibraries.put("net.sf.gwt-widget", GOOGLE);
// SmartGWT - Will retire with 3.0
knownLibraries.put("com.smartgwt", SMART_GWT);
// CodeHaus - JSON / XML processing
knownLibraries.put("org.codehaus.jackson", JACKSON);
knownLibraries.put("org.codehaus.plexus", PLEXUS);
// ASM
knownLibraries.put("asm", ASM);
// CGLIB
knownLibraries.put("cglib", CGLIB);
// Jersey - used for REST services
knownLibraries.put("com.sun.jersey", JERSEY);
knownLibraries.put("com.sun.jersey.contribs", JERSEY);
// XStream - used for REST services
knownLibraries.put("com.thoughtworks.xstream", JERSEY);
// Joda-Time
knownLibraries.put("joda-time", JODA_TIME);
// Cache
knownLibraries.put("net.sf.jsr107cache", JAVAX);
// Transmorph - Will retire with 3.0
knownLibraries.put("net.sf.transmorph", TRANSMORPH);
// Teracotta software
knownLibraries.put("net.sf.ehcache", EHCACHE);
knownLibraries.put("org.opensymphony.quartz", QUARTZ);
// Antlr
knownLibraries.put("org.antlr", ANTLR);
// Aspect J
knownLibraries.put("org.aspectj", ASPECTJ);
// MVEL
knownLibraries.put("org.mvel", MVEL);
// ORO
knownLibraries.put("oro", ORO);
// Java Assist
knownLibraries.put("org.javassist", JAVA_ASSIST);
// OWASP
knownLibraries.put("org.owasp.antisamy", ANTISAMMY);
}
/**
* @param args
*/
public static void main(String[] args) {
initializeKnownLibraries();
BufferedReader br = null;
try {
String fileName = "/Users/brianpolster/blc-workspace/BroadleafCommerce/pom.xml";
if (args.length > 0) {
fileName = args[0];
}
br = new BufferedReader(new FileReader(fileName));
forwardToTag("<dependencies>", br);
List<Dependency> dependencies = populateDependencies(br);
for (Dependency dependency : dependencies) {
Category category = knownLibraries.get(dependency.groupId);
if (category != null) {
category.dependencyList.add(dependency);
List<Dependency> licenseDependencyList = licenseDependencyMap.get(category.licenseType);
if (licenseDependencyList == null) {
licenseDependencyList = new ArrayList<Dependency>();
licenseDependencyMap.put(category.licenseType, licenseDependencyList);
}
licenseDependencyList.add(dependency);
} else {
if ("test".equals(dependency.scope) ||
"provided".equals(dependency.scope)) {
continue;
}
OTHER.dependencyList.add(dependency);
}
}
Set<Category> categoryList = new HashSet<Category>(knownLibraries.values());
System.out.println("Related Software Report\r");
for (Category category : categoryList) {
printOutDependencies(category, category.dependencyList);
}
if (OTHER.dependencyList.size() > 0) {
printOutDependencies(OTHER, OTHER.dependencyList);
}
} catch (IOException e) {
e.printStackTrace();
} finally {
try {
if (br != null) br.close();
} catch (IOException ex) {
ex.printStackTrace();
}
}
}
public static void printOutDependencies(Category category, List<Dependency> dependencies) {
List<String> dependencyNames = new ArrayList<String>();
for (Dependency d : dependencies) {
dependencyNames.add(d.toString());
}
Collections.sort(dependencyNames);
System.out.println(category);
System.out.println(SEPARATOR);
for (String name : dependencyNames) {
System.out.println(name);
}
System.out.println("Total count for category " + category.categoryName + ": " + dependencyNames.size() + "\r\r");
}
public static List<Dependency> populateDependencies(BufferedReader br) throws IOException {
String currentLine;
List<Dependency> dependencyList = new ArrayList<Dependency>();
while (forwardToTag("<dependency", br)) {
Dependency current = new Dependency();
while ((currentLine = br.readLine()) != null) {
if (currentLine.contains("</dependency>")) {
break;
}
current.scope = getTagValue("<scope>", currentLine, current.scope);
current.groupId = getTagValue("<groupId>", currentLine, current.groupId);
current.artifactId = getTagValue("<artifactId>", currentLine, current.artifactId);
current.version = getTagValue("<version>", currentLine, current.version);
}
dependencyList.add(current);
}
return dependencyList;
}
public static String getTagValue(String tagName, String line, String currentValue) {
int startPos = line.indexOf(tagName);
if (startPos >= 0) {
int endPos = line.indexOf("</", startPos + 1);
if (endPos >= 0) {
return line.substring(startPos + tagName.length(), endPos);
}
}
return currentValue;
}
public static boolean forwardToTag(String tagName, BufferedReader br) throws IOException {
String sCurrentLine;
while ((sCurrentLine = br.readLine()) != null) {
String lowerCaseLine = sCurrentLine.toLowerCase();
if (lowerCaseLine.indexOf(tagName) >= 0) {
return true;
}
}
return false;
}
static class Dependency {
String groupId;
String artifactId;
String version;
String scope;
List<Category> categoriesThatDependOnThis = new ArrayList<Category>();
public String toString() {
return groupId + "." + artifactId + "." + version + " [" + scope + "]";
}
}
static class LicenseType {
private String name;
private String url;
public static LicenseType APACHE2 = new LicenseType("APACHE2", "http://www.apache.org/licenses/LICENSE-2.0.html");
public static LicenseType LGPL = new LicenseType("LGPL", "http://www.gnu.org/licenses/lgpl-3.0.html, http://www.gnu.org/licenses/lgpl-2.1.html");
public static LicenseType MIT = new LicenseType("MIT", "http://opensource.org/licenses/MIT");
public static LicenseType JAVA_EXTENSION = new LicenseType("JAVA_EXTENSION", "n/a");
public static LicenseType OW2 = new LicenseType("OW2", "http://asm.ow2.org/license.html");
public static LicenseType XSTREAM_BSD = new LicenseType("XSTREAM_BSD", "http://xstream.codehaus.org/license.html");
public static LicenseType ANTLR_BSD = new LicenseType("ANTLR_BSD", "http://www.antlr.org/license.html");
public static LicenseType ANTISAMMY_BSD = new LicenseType("ANTISAMMY_BSD", "http://opensource.org/licenses/bsd-license.php");
public static LicenseType OTHER = new LicenseType("OTHER", "Unknown");
public static LicenseType ECLIPSE_PUBLIC = new LicenseType("ECLIPSE PUBLIC", "http://www.eclipse.org/legal/epl-v10.html");
public LicenseType(String name, String url) {
this.name = name;
this.url = url;
}
public String toString() {
return name.toString() + ":" + url;
}
}
static enum FrameworkType {
PERSISTENCE,
GENERAL,
LOGGING,
UI,
XML,
UTILITY,
SCHEDULER,
CACHE,
RULES,
ECOMMERCE,
OTHER
}
static class Category {
String categoryName;
LicenseType licenseType;
FrameworkType frameworkType;
List<Dependency> dependencyList = new ArrayList<Dependency>();
Category[] usedByCategories;
public Category(String categoryName, LicenseType type, FrameworkType frameworkType) {
this.categoryName = categoryName;
this.licenseType = type;
this.frameworkType = frameworkType;
}
public Category(String categoryName, LicenseType type, FrameworkType frameworkType, Category... usedByCategories) {
this(categoryName, type, frameworkType);
this.usedByCategories = usedByCategories;
}
public String toString() {
return "Category Name : " + categoryName +
"\rLicense Type : " + licenseType.name +
"\rLicense URL : " + licenseType.url;
}
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_util_PomEvaluator.java |
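A minimal, self-contained sketch of the line-oriented tag scanning that getTagValue performs in the record above; the class name TagScanDemo and the sample POM text are illustrative assumptions, not part of the record.

import java.io.BufferedReader;
import java.io.IOException;
import java.io.StringReader;

public class TagScanDemo {

    // Same contract as PomEvaluator.getTagValue: return the text between the opening
    // tag and the next closing tag on the same line, otherwise keep currentValue.
    static String getTagValue(String tagName, String line, String currentValue) {
        int startPos = line.indexOf(tagName);
        if (startPos >= 0) {
            int endPos = line.indexOf("</", startPos + 1);
            if (endPos >= 0) {
                return line.substring(startPos + tagName.length(), endPos);
            }
        }
        return currentValue;
    }

    public static void main(String[] args) throws IOException {
        String pom = "<dependency>\n"
                + "  <groupId>org.slf4j</groupId>\n"
                + "  <artifactId>slf4j-api</artifactId>\n"
                + "  <version>1.7.36</version>\n"
                + "</dependency>\n";
        BufferedReader br = new BufferedReader(new StringReader(pom));
        String groupId = null, artifactId = null, version = null, line;
        while ((line = br.readLine()) != null) {
            groupId = getTagValue("<groupId>", line, groupId);
            artifactId = getTagValue("<artifactId>", line, artifactId);
            version = getTagValue("<version>", line, version);
        }
        System.out.println(groupId + "." + artifactId + "." + version); // org.slf4j.slf4j-api.1.7.36
    }
}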
2,303 | public class SoftThreadLocalRecyclerTests extends AbstractRecyclerTests {
@Override
protected Recycler<byte[]> newRecycler() {
return Recyclers.threadLocal(Recyclers.softFactory(Recyclers.dequeFactory(RECYCLER_C, 10)));
}
} | 0true
| src_test_java_org_elasticsearch_common_recycler_SoftThreadLocalRecyclerTests.java |
2,736 | public class ShardStateInfo {
public final long version;
// can be null if we don't know...
@Nullable
public final Boolean primary;
public ShardStateInfo(long version, Boolean primary) {
this.version = version;
this.primary = primary;
}
} | 0true
| src_main_java_org_elasticsearch_gateway_local_state_shards_ShardStateInfo.java |
1,232 | addOperation(operations, new Runnable() {
public void run() {
IMap map = hazelcast.getMap("myMap");
map.containsValue(new Customer(random.nextInt(100), String.valueOf(random.nextInt(100000))));
}
}, 2); | 0true
| hazelcast_src_main_java_com_hazelcast_examples_AllTest.java |
926 | while (!found && makeDbCall(iOtherDb, new ODbRelatedCall<Boolean>() {
public Boolean call() {
return otherIterator.hasNext();
}
})) { | 0true
| core_src_main_java_com_orientechnologies_orient_core_record_impl_ODocumentHelper.java |
1,671 | public class BlobPath implements Iterable<String> {
private final ImmutableList<String> paths;
public BlobPath() {
this.paths = ImmutableList.of();
}
public static BlobPath cleanPath() {
return new BlobPath();
}
private BlobPath(ImmutableList<String> paths) {
this.paths = paths;
}
@Override
public Iterator<String> iterator() {
return paths.iterator();
}
public String[] toArray() {
return paths.toArray(new String[paths.size()]);
}
public BlobPath add(String path) {
ImmutableList.Builder<String> builder = ImmutableList.builder();
return new BlobPath(builder.addAll(paths).add(path).build());
}
public String buildAsString(String separator) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < paths.size(); i++) {
sb.append(paths.get(i));
if (i < (paths.size() - 1)) {
sb.append(separator);
}
}
return sb.toString();
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
for (String path : paths) {
sb.append('[').append(path).append(']');
}
return sb.toString();
}
} | 0true
| src_main_java_org_elasticsearch_common_blobstore_BlobPath.java |
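A brief usage sketch for the immutable BlobPath above: each add() returns a new instance, so earlier paths remain valid. It assumes the class and its Guava ImmutableList dependency are on the classpath; BlobPathDemo is a hypothetical driver class.

import org.elasticsearch.common.blobstore.BlobPath;

public class BlobPathDemo {
    public static void main(String[] args) {
        BlobPath root = BlobPath.cleanPath();
        BlobPath index = root.add("indices").add("my_index");
        BlobPath shard = index.add("0"); // derives a new path; index is untouched
        System.out.println(shard.buildAsString("/")); // indices/my_index/0
        System.out.println(index.buildAsString("/")); // indices/my_index (unchanged)
        System.out.println(shard); // [indices][my_index][0]
    }
}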
95 | public class NamedOperationManagerImpl implements NamedOperationManager {
protected List<NamedOperationComponent> namedOperationComponents = new ArrayList<NamedOperationComponent>();
@Override
public Map<String, String> manageNamedParameters(Map<String, String> parameterMap) {
List<String> utilizedNames = new ArrayList<String>();
Map<String, String> derivedMap = new LinkedHashMap<String, String>();
for (NamedOperationComponent namedOperationComponent : namedOperationComponents) {
utilizedNames.addAll(namedOperationComponent.setOperationValues(parameterMap, derivedMap));
}
for (String utilizedName : utilizedNames) {
parameterMap.remove(utilizedName);
}
derivedMap.putAll(parameterMap);
return derivedMap;
}
public List<NamedOperationComponent> getNamedOperationComponents() {
return namedOperationComponents;
}
public void setNamedOperationComponents(List<NamedOperationComponent> namedOperationComponents) {
this.namedOperationComponents = namedOperationComponents;
}
} | 0true
| admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_file_service_operation_NamedOperationManagerImpl.java |
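A self-contained sketch of the derivation flow in manageNamedParameters above: a component consumes named parameters into a derived map, consumed names are removed from the input, and the remainder passes through. The smallAdminThumbnail operation and its expansion are hypothetical values for illustration.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class NamedOperationDemo {
    public static void main(String[] args) {
        Map<String, String> params = new HashMap<String, String>();
        params.put("smallAdminThumbnail", "true");
        params.put("cacheSeconds", "60");

        List<String> utilized = new ArrayList<String>();
        Map<String, String> derived = new LinkedHashMap<String, String>();
        // one hypothetical component: expands a named operation into concrete values
        if ("true".equals(params.get("smallAdminThumbnail"))) {
            derived.put("resize-width", "50");
            derived.put("resize-height", "50");
            utilized.add("smallAdminThumbnail");
        }
        for (String name : utilized) {
            params.remove(name); // consumed names never reach the output directly
        }
        derived.putAll(params); // unconsumed parameters pass through
        System.out.println(derived); // {resize-width=50, resize-height=50, cacheSeconds=60}
    }
}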
905 | @RunWith(HazelcastParallelClassRunner.class)
@Category(SlowTest.class)
public class ConditionTest extends HazelcastTestSupport {
@Test(expected = UnsupportedOperationException.class)
public void testNewConditionWithoutNameIsNotSupported() {
HazelcastInstance instance = createHazelcastInstance();
ILock lock = instance.getLock(randomString());
lock.newCondition();
}
@Test(timeout = 60000, expected = NullPointerException.class)
public void testNewCondition_whenNullName() {
HazelcastInstance instance = createHazelcastInstance();
ILock lock = instance.getLock(randomString());
lock.newCondition(null);
}
@Test(timeout = 60000)
@Ignore
public void testMultipleConditionsForSameLock() {
}
@Test(timeout = 60000)
@Ignore
public void testSameConditionRetrievedMultipleTimesForSameLock() {
}
@Test(timeout = 60000)
@Ignore
public void testConditionsWithSameNameButDifferentLocksAreIndependent() {
}
@Test(timeout = 60000)
public void testSignalWithSingleWaiter() throws InterruptedException {
HazelcastInstance instance = createHazelcastInstance();
String lockName = randomString();
String conditionName = randomString();
final ILock lock = instance.getLock(lockName);
final ICondition condition = lock.newCondition(conditionName);
final AtomicInteger count = new AtomicInteger(0);
Thread t = new Thread(new Runnable() {
public void run() {
try {
lock.lock();
if (lock.isLockedByCurrentThread()) {
count.incrementAndGet();
}
condition.await();
if (lock.isLockedByCurrentThread()) {
count.incrementAndGet();
}
} catch (InterruptedException ignored) {
} finally {
lock.unlock();
}
}
});
t.start();
Thread.sleep(1000);
assertEquals(false, lock.isLocked());
lock.lock();
assertEquals(true, lock.isLocked());
condition.signal();
lock.unlock();
t.join();
assertEquals(2, count.get());
}
@Test(timeout = 60000)
public void testSignalAllWithSingleWaiter() throws InterruptedException {
HazelcastInstance instance = createHazelcastInstance();
String lockName = randomString();
String conditionName = randomString();
final ILock lock = instance.getLock(lockName);
final ICondition condition = lock.newCondition(conditionName);
final AtomicInteger count = new AtomicInteger(0);
final int k = 50;
final CountDownLatch awaitLatch = new CountDownLatch(k);
final CountDownLatch finalLatch = new CountDownLatch(k);
for (int i = 0; i < k; i++) {
new Thread(new Runnable() {
public void run() {
try {
lock.lock();
if (lock.isLockedByCurrentThread()) {
count.incrementAndGet();
}
awaitLatch.countDown();
condition.await();
if (lock.isLockedByCurrentThread()) {
count.incrementAndGet();
}
} catch (InterruptedException ignored) {
} finally {
lock.unlock();
finalLatch.countDown();
}
}
}).start();
}
awaitLatch.await(1, TimeUnit.MINUTES);
lock.lock();
condition.signalAll();
lock.unlock();
finalLatch.await(1, TimeUnit.MINUTES);
assertEquals(k * 2, count.get());
}
@Test(timeout = 60000)
@Ignore
public void testInterruptionDuringWaiting() {
}
//if there are multiple waiters, then only 1 waiter should be notified.
@Test(timeout = 60000)
@Ignore
public void testSignalWithMultipleWaiters() {
}
//a signal is sent to wake up waiting threads, but it is not stored as a flag on the condition, so future waiters
//will not receive it
@Test(timeout = 60000)
@Ignore
public void testSignalIsNotStored() {
}
@Test(timeout = 60000, expected = IllegalMonitorStateException.class)
public void testAwaitOnConditionOfFreeLock() throws InterruptedException {
HazelcastInstance instance = createHazelcastInstance();
ILock lock = instance.getLock(randomString());
ICondition condition = lock.newCondition("condition");
condition.await();
}
@Test(timeout = 60000, expected = IllegalMonitorStateException.class)
public void testSignalOnConditionOfFreeLock() {
HazelcastInstance instance = createHazelcastInstance();
ILock lock = instance.getLock(randomString());
ICondition condition = lock.newCondition("condition");
condition.signal();
}
@Test(timeout = 60000)
@Ignore
public void testAwaitOnConditionOwnedByOtherThread() {
}
@Test(timeout = 60000)
@Ignore
public void testSignalOnConditionOwnedByOtherThread() {
}
@Test(timeout = 60000)
@Ignore
public void testAwait_whenTimeout() {
}
@Test(timeout = 60000)
@Ignore
public void testAwait_whenNegativeTimeout() {
}
@Test(timeout = 60000)
@Ignore
public void testAwait_nullTimeout() {
}
// ====================== tests to make sure the condition can deal with cluster member failure ====================
@Test(timeout = 100000)
public void testKeyOwnerDiesOnCondition() throws Exception {
final TestHazelcastInstanceFactory nodeFactory = createHazelcastInstanceFactory(3);
final HazelcastInstance keyOwner = nodeFactory.newHazelcastInstance();
final HazelcastInstance instance1 = nodeFactory.newHazelcastInstance();
final HazelcastInstance instance2 = nodeFactory.newHazelcastInstance();
final AtomicInteger signalCounter = new AtomicInteger(0);
final String key = generateKeyOwnedBy(instance1);
final ILock lock1 = instance1.getLock(key);
final String conditionName = randomString();
final ICondition condition1 = lock1.newCondition(conditionName);
Thread t = new Thread(new Runnable() {
public void run() {
ILock lock = instance2.getLock(key);
ICondition condition = lock.newCondition(conditionName);
lock.lock();
try {
condition.await();
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
lock.unlock();
}
signalCounter.incrementAndGet();
}
});
t.start();
Thread.sleep(1000);
lock1.lock();
keyOwner.shutdown();
condition1.signal();
lock1.unlock();
Thread.sleep(1000);
t.join();
assertEquals(1, signalCounter.get());
}
@Test(timeout = 60000, expected = DistributedObjectDestroyedException.class)
public void testDestroyLockWhenOtherWaitingOnConditionAwait() throws InterruptedException {
final TestHazelcastInstanceFactory nodeFactory = createHazelcastInstanceFactory(2);
final HazelcastInstance instance = nodeFactory.newHazelcastInstance();
final ILock lock = instance.getLock(randomString());
final ICondition condition = lock.newCondition("condition");
final CountDownLatch latch = new CountDownLatch(1);
new Thread(new Runnable() {
public void run() {
try {
latch.await(30, TimeUnit.SECONDS);
Thread.sleep(5000);
} catch (InterruptedException e) {
e.printStackTrace();
}
lock.destroy();
}
}).start();
lock.lock();
latch.countDown();
condition.await();
lock.unlock();
}
@Test(timeout = 60000, expected = HazelcastInstanceNotActiveException.class)
public void testShutDownNodeWhenOtherWaitingOnConditionAwait() throws InterruptedException {
final TestHazelcastInstanceFactory nodeFactory = createHazelcastInstanceFactory(2);
final HazelcastInstance instance = nodeFactory.newHazelcastInstance();
nodeFactory.newHazelcastInstance();
final String name = randomString();
final ILock lock = instance.getLock(name);
final ICondition condition = lock.newCondition("condition");
final CountDownLatch latch = new CountDownLatch(1);
new Thread(new Runnable() {
public void run() {
try {
latch.await(1, TimeUnit.MINUTES);
Thread.sleep(5000);
} catch (InterruptedException e) {
e.printStackTrace();
}
instance.shutdown();
}
}).start();
lock.lock();
try {
latch.countDown();
condition.await();
} catch (InterruptedException e) {
}
lock.unlock();
}
@Test
@Category(ProblematicTest.class)
public void testLockConditionSignalAllShutDownKeyOwner() throws InterruptedException {
final TestHazelcastInstanceFactory nodeFactory = createHazelcastInstanceFactory(2);
final String name = randomString();
final HazelcastInstance instance = nodeFactory.newHazelcastInstance();
final AtomicInteger count = new AtomicInteger(0);
final int size = 50;
final HazelcastInstance keyOwner = nodeFactory.newHazelcastInstance();
warmUpPartitions(instance, keyOwner);
final String key = generateKeyOwnedBy(keyOwner);
final ILock lock = instance.getLock(key);
final ICondition condition = lock.newCondition(name);
final CountDownLatch awaitLatch = new CountDownLatch(size);
final CountDownLatch finalLatch = new CountDownLatch(size);
for (int i = 0; i < size; i++) {
new Thread(new Runnable() {
public void run() {
lock.lock();
try {
awaitLatch.countDown();
condition.await();
if (lock.isLockedByCurrentThread()) {
count.incrementAndGet();
}
} catch (InterruptedException ignored) {
} finally {
lock.unlock();
finalLatch.countDown();
}
}
}).start();
}
ILock lock1 = keyOwner.getLock(key);
ICondition condition1 = lock1.newCondition(name);
awaitLatch.await(1, TimeUnit.MINUTES);
lock1.lock();
condition1.signalAll();
lock1.unlock();
keyOwner.shutdown();
finalLatch.await(2, TimeUnit.MINUTES);
assertEquals(size, count.get());
}
} | 0true
| hazelcast_src_test_java_com_hazelcast_concurrent_lock_ConditionTest.java |
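The tests above exercise Hazelcast's distributed ICondition; the await/signal contract mirrors java.util.concurrent. A runnable single-JVM analogue with ReentrantLock (ConditionDemo is a hypothetical class) shows why both await() and signal() must be called while holding the lock.

import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

public class ConditionDemo {
    public static void main(String[] args) throws InterruptedException {
        final ReentrantLock lock = new ReentrantLock();
        final Condition condition = lock.newCondition();
        Thread waiter = new Thread(new Runnable() {
            public void run() {
                lock.lock();
                try {
                    condition.await(); // atomically releases the lock while parked
                    System.out.println("signalled, lock re-acquired: " + lock.isHeldByCurrentThread());
                } catch (InterruptedException ignored) {
                } finally {
                    lock.unlock();
                }
            }
        });
        waiter.start();
        Thread.sleep(1000); // crude, as in the tests above: let the waiter park first
        lock.lock();
        try {
            condition.signal(); // without the lock this throws IllegalMonitorStateException
        } finally {
            lock.unlock();
        }
        waiter.join();
    }
}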
654 | constructors[LIST_INDEX_OF] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
public IdentifiedDataSerializable createNew(Integer arg) {
return new ListIndexOfOperation();
}
}; | 0true
| hazelcast_src_main_java_com_hazelcast_collection_CollectionDataSerializerHook.java |
496 | return scheduledExecutor.scheduleAtFixedRate(new Runnable() {
public void run() {
executeInternal(command);
}
}, initialDelay, period, unit); | 1no label
| hazelcast-client_src_main_java_com_hazelcast_client_spi_impl_ClientExecutionServiceImpl.java |
3,143 | engine.recover(new Engine.RecoveryHandler() {
@Override
public void phase1(SnapshotIndexCommit snapshot) throws EngineException {
}
@Override
public void phase2(Translog.Snapshot snapshot) throws EngineException {
assertThat(snapshot.hasNext(), equalTo(true));
Translog.Create create = (Translog.Create) snapshot.next();
assertThat(create.source().toBytesArray(), equalTo(B_2));
assertThat(snapshot.hasNext(), equalTo(false));
}
@Override
public void phase3(Translog.Snapshot snapshot) throws EngineException {
MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
}
}); | 0true
| src_test_java_org_elasticsearch_index_engine_internal_InternalEngineTests.java |
66 | public interface OCloseable {
public void close();
} | 0true
| commons_src_main_java_com_orientechnologies_common_concur_resource_OCloseable.java |
331 | public interface ODatabase extends OBackupable, Closeable {
public static enum OPTIONS {
SECURITY
}
public static enum STATUS {
OPEN, CLOSED, IMPORTING
}
public static enum ATTRIBUTES {
TYPE, STATUS, DEFAULTCLUSTERID, DATEFORMAT, DATETIMEFORMAT, TIMEZONE, LOCALECOUNTRY, LOCALELANGUAGE, CHARSET, CUSTOM
}
/**
* Opens a database using the user and password received as arguments.
*
* @param iUserName
* Username to login
* @param iUserPassword
* Password associated to the user
* @return The Database instance itself giving a "fluent interface". Useful to call multiple methods in chain.
*/
public <DB extends ODatabase> DB open(final String iUserName, final String iUserPassword);
/**
* Creates a new database.
*
* @return The Database instance itself giving a "fluent interface". Useful to call multiple methods in chain.
*/
public <DB extends ODatabase> DB create();
/**
* Reloads the database information like the cluster list.
*/
public void reload();
/**
* Drops a database.
*
*/
public void drop();
/**
* Declares an intent to the database. Intents aim to optimize common use cases.
*
* @param iIntent
* The intent
*/
public boolean declareIntent(final OIntent iIntent);
/**
* Checks if the database exists.
*
* @return True if already exists, otherwise false.
*/
public boolean exists();
/**
* Closes an opened database.
*/
public void close();
/**
* Returns the current status of database.
*/
public STATUS getStatus();
/**
* Returns the total size of database as the real used space.
*/
public long getSize();
/**
* Returns the current status of database.
*/
public <DB extends ODatabase> DB setStatus(STATUS iStatus);
/**
* Returns the database name.
*
* @return Name of the database
*/
public String getName();
/**
* Returns the database URL.
*
* @return URL of the database
*/
public String getURL();
/**
* Returns the underlying storage implementation.
*
* @return The underlying storage implementation
* @see OStorage
*/
public OStorage getStorage();
/**
* Internal only: replace the storage with a new one.
*
* @param iNewStorage
* The new storage to use. Usually it's a wrapped instance of the current cluster.
*/
public void replaceStorage(OStorage iNewStorage);
/**
* Returns the level1 cache. Cannot be null.
*
* @return Current cache.
*/
public OLevel1RecordCache getLevel1Cache();
/**
* Returns the level2 cache. Cannot be null.
*
* @return Current cache.
*/
public OLevel2RecordCache getLevel2Cache();
/**
* Returns the data segment id by name.
*
* @param iDataSegmentName
* Data segment name
* @return The id of searched data segment.
*/
public int getDataSegmentIdByName(String iDataSegmentName);
public String getDataSegmentNameById(int dataSegmentId);
/**
* Returns the default cluster id. If not specified all the new entities will be stored in the default cluster.
*
* @return The default cluster id
*/
public int getDefaultClusterId();
/**
* Returns the number of clusters.
*
* @return Number of the clusters
*/
public int getClusters();
/**
* Returns true if the cluster exists, otherwise false.
*
* @param iClusterName
* Cluster name
* @return true if the cluster exists, otherwise false
*/
public boolean existsCluster(String iClusterName);
/**
* Returns all the names of the clusters.
*
* @return Collection of cluster names.
*/
public Collection<String> getClusterNames();
/**
* Returns the cluster id by name.
*
* @param iClusterName
* Cluster name
* @return The id of searched cluster.
*/
public int getClusterIdByName(String iClusterName);
/**
* Returns the cluster type.
*
* @param iClusterName
* Cluster name
* @return The cluster type as string
*/
public String getClusterType(String iClusterName);
/**
* Returns the cluster name by id.
*
* @param iClusterId
* Cluster id
* @return The name of searched cluster.
*/
public String getClusterNameById(int iClusterId);
/**
* Returns the total size of records contained in the cluster defined by its name.
*
* @param iClusterName
* Cluster name
* @return Total size of records contained.
*/
public long getClusterRecordSizeByName(String iClusterName);
/**
* Returns the total size of records contained in the cluster defined by its id.
*
* @param iClusterId
* Cluster id
* @return Total size of records contained.
*/
public long getClusterRecordSizeById(int iClusterId);
/**
* Checks if the database is closed.
*
* @return true if is closed, otherwise false.
*/
public boolean isClosed();
/**
* Counts all the entities in the specified cluster id.
*
* @param iCurrentClusterId
* Cluster id
* @return Total number of entities contained in the specified cluster
*/
public long countClusterElements(int iCurrentClusterId);
public long countClusterElements(int iCurrentClusterId, boolean countTombstones);
/**
* Counts all the entities in the specified cluster ids.
*
* @param iClusterIds
* Array of cluster ids
* @return Total number of entities contained in the specified clusters
*/
public long countClusterElements(int[] iClusterIds);
public long countClusterElements(int[] iClusterIds, boolean countTombstones);
/**
* Counts all the entities in the specified cluster name.
*
* @param iClusterName
* Cluster name
* @return Total number of entities contained in the specified cluster
*/
public long countClusterElements(String iClusterName);
/**
* Adds a new cluster.
*
* @param iClusterName
* Cluster name
* @param iType
* Cluster type between the defined ones
* @param iParameters
* Additional parameters to pass to the factories
* @return Cluster id
*/
public int addCluster(String iClusterName, CLUSTER_TYPE iType, Object... iParameters);
/**
* Adds a new cluster.
*
* @param iType
* Cluster type between the defined ones
* @param iClusterName
* Cluster name
* @param iDataSegmentName
* Data segment where to store record of this cluster. null means 'default'
* @param iParameters
* Additional parameters to pass to the factories
*
* @return Cluster id
*/
public int addCluster(String iType, String iClusterName, String iLocation, final String iDataSegmentName, Object... iParameters);
/**
* Adds a new cluster.
*
* @param iType
* Cluster type between the defined ones
* @param iClusterName
* Cluster name
* @param iRequestedId
* requested id of the cluster
* @param iDataSegmentName
* Data segment where to store record of this cluster. null means 'default'
* @param iParameters
* Additional parameters to pass to the factories
*
* @return Cluster id
*/
public int addCluster(String iType, String iClusterName, int iRequestedId, String iLocation, final String iDataSegmentName,
Object... iParameters);
/**
*
* Drops a cluster by its name. Physical clusters will be completely deleted
*
* @param iClusterName
* @return
*/
public boolean dropCluster(String iClusterName, final boolean iTruncate);
/**
* Drops a cluster by its id. Physical clusters will be completely deleted.
*
* @param iClusterId
* @return true if has been removed, otherwise false
*/
public boolean dropCluster(int iClusterId, final boolean iTruncate);
/**
* Adds a data segment where to store record content. Data segments contain the content of records. Cluster segments contain the
* pointer to them.
*/
public int addDataSegment(String iSegmentName, String iLocation);
/**
* Drop a data segment and all the contained data.
*
* @param name
* segment name
* @return true if the segment has been removed, otherwise false
*/
public boolean dropDataSegment(String name);
/**
* Sets a property value
*
* @param iName
* Property name
* @param iValue
* new value to set
* @return The previous value if any, otherwise null
*/
public Object setProperty(String iName, Object iValue);
/**
* Gets the property value.
*
* @param iName
* Property name
* @return The property value if any, otherwise null
*/
public Object getProperty(String iName);
/**
* Returns an iterator of the property entries
*/
public Iterator<Map.Entry<String, Object>> getProperties();
/**
* Returns a database attribute value
*
* @param iAttribute
* Attributes between #ATTRIBUTES enum
* @return The attribute value
*/
public Object get(ATTRIBUTES iAttribute);
/**
* Sets a database attribute value
*
* @param iAttribute
* Attributes between #ATTRIBUTES enum
* @param iValue
* Value to set
* @return The database instance itself, to allow fluent call chaining
*/
public <DB extends ODatabase> DB set(ATTRIBUTES iAttribute, Object iValue);
/**
* Registers a listener to the database events.
*
* @param iListener
*/
public void registerListener(ODatabaseListener iListener);
/**
* Unregisters a listener to the database events.
*
* @param iListener
*/
public void unregisterListener(ODatabaseListener iListener);
public <V> V callInLock(Callable<V> iCallable, boolean iExclusiveLock);
public <V> V callInRecordLock(Callable<V> iCallable, ORID rid, boolean iExclusiveLock);
public ORecordMetadata getRecordMetadata(final ORID rid);
/**
* Flush cached storage content to the disk.
*
* After this call users can perform only select queries. All write-related commands will be queued until the {@link #release()}
* command is called.
*
* This command waits until all ongoing modifications of indexes or the database are finished.
*
* IMPORTANT: This command is not reentrant.
*/
public void freeze();
/**
* Allows write-related commands to be executed on the DB again. Called after the {@link #freeze()} command.
*/
public void release();
/**
* Flush cached storage content to the disk.
*
* After this call users can perform only select queries. All write-related commands will be queued until the {@link #release()}
* command is called, or an exception will be thrown on any attempt to modify DB data. The concrete behaviour depends on the
* <code>throwException</code> parameter.
*
* IMPORTANT: This command is not reentrant.
*
* @param throwException
* If <code>true</code>, a {@link com.orientechnologies.common.concur.lock.OModificationOperationProhibitedException}
* exception will be thrown if a write command is performed.
*/
public void freeze(boolean throwException);
/**
* Flush cached cluster content to the disk.
*
* After this call users can perform only select queries. All write-related commands will be queued until the
* {@link #releaseCluster(int)} command is called.
*
* This command waits until all ongoing modifications of indexes or the database are finished.
*
* IMPORTANT: This command is not reentrant.
*
* @param iClusterId
* Id of the cluster that must be frozen
*/
public void freezeCluster(int iClusterId);
/**
* Allows write-related commands to be executed on the cluster again
*
* @param iClusterId
* that must be released
*/
public void releaseCluster(int iClusterId);
/**
* Flush cached cluster content to the disk.
*
* After this call users can perform only select queries. All write-related commands will be queued until the
* {@link #releaseCluster(int)} command is called.
*
* This command waits until all ongoing modifications of indexes or the database are finished.
*
* IMPORTANT: This command is not reentrant.
*
* @param iClusterId
* Id of the cluster that must be frozen
* @param throwException
* If <code>true</code>, a {@link com.orientechnologies.common.concur.lock.OModificationOperationProhibitedException}
* exception will be thrown if a write command is performed.
*/
public void freezeCluster(int iClusterId, boolean throwException);
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_db_ODatabase.java |
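A hedged usage sketch of the freeze()/release() contract documented in the interface above: freeze, do read-only work, and always release in a finally block because freeze() is not reentrant. FreezeDemo and the caller that supplies the db instance are hypothetical.

import com.orientechnologies.orient.core.db.ODatabase;

public class FreezeDemo {
    static void readOnlyWork(ODatabase db) {
        db.freeze(true); // per the javadoc: throw instead of queueing if a write is attempted
        try {
            System.out.println("frozen database size: " + db.getSize()); // select-style work only
        } finally {
            db.release(); // mandatory pairing: freeze() is not reentrant
        }
    }
}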
1,533 | public class ThrottlingAllocationTests extends ElasticsearchAllocationTestCase {
private final ESLogger logger = Loggers.getLogger(ThrottlingAllocationTests.class);
@Test
public void testPrimaryRecoveryThrottling() {
AllocationService strategy = createAllocationService(settingsBuilder()
.put("cluster.routing.allocation.node_concurrent_recoveries", 3)
.put("cluster.routing.allocation.node_initial_primaries_recoveries", 3)
.build());
logger.info("Building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(10).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("start one node, do reroute, only 3 should initialize");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(0));
assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(17));
logger.info("start initializing, another 3 should initialize");
routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(3));
assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(14));
logger.info("start initializing, another 3 should initialize");
routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(6));
assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(11));
logger.info("start initializing, another 1 should initialize");
routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(9));
assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(1));
assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(10));
logger.info("start initializing, all primaries should be started");
routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(10));
assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0));
assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(10));
}
@Test
public void testReplicaAndPrimaryRecoveryThrottling() {
AllocationService strategy = createAllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 3)
.put("cluster.routing.allocation.node_initial_primaries_recoveries", 3)
.build());
logger.info("Building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("start one node, do reroute, only 3 should initialize");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(0));
assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(7));
logger.info("start initializing, another 2 should initialize");
routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(3));
assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(2));
assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(5));
logger.info("start initializing, all primaries should be started");
routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0));
assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(5));
logger.info("start another node, replicas should start being allocated");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(2));
logger.info("start initializing replicas");
routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(8));
assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(2));
assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0));
logger.info("start initializing replicas, all should be started");
routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(10));
assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0));
assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0));
}
} | 0true
| src_test_java_org_elasticsearch_cluster_routing_allocation_ThrottlingAllocationTests.java |
88 | {
@Override
public void run()
{
assertEquals( 1, count( node.getRelationships() ) );
}
}; | 0true
| community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_ReadTransactionLogWritingTest.java |
267 | private final class EditorPartListener implements IPartListener {
@Override
public void partActivated(IWorkbenchPart part) {
if (part instanceof CeylonEditor) {
setUpActiveEditor((CeylonEditor) part);
if (documentProvider!=null) {
retrieveOccurrenceAnnotations();
if (!markingEnabled) {
unregisterListeners();
removeExistingOccurrenceAnnotations();
}
}
}
}
@Override
public void partClosed(IWorkbenchPart part) {
if (part == activeEditor) {
unregisterListeners();
activeEditor = null;
documentProvider = null;
document = null;
parseController = null;
occurrenceAnnotations = null;
DocumentationView documentationView = DocumentationView.getInstance();
if (documentationView!=null) {
documentationView.update(null, -1, -1);
}
}
}
@Override
public void partBroughtToTop(IWorkbenchPart part) { }
@Override
public void partDeactivated(IWorkbenchPart part) { }
@Override
public void partOpened(IWorkbenchPart part) { }
} | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_editor_MarkOccurrencesAction.java |
949 | clusterService.add(request.masterNodeTimeout(), new TimeoutClusterStateListener() {
@Override
public void postAdded() {
ClusterBlockException blockException = checkBlock(request, clusterService.state());
if (blockException == null || !blockException.retryable()) {
clusterService.remove(this);
innerExecute(request, listener, false);
}
}
@Override
public void onClose() {
clusterService.remove(this);
listener.onFailure(blockException);
}
@Override
public void onTimeout(TimeValue timeout) {
clusterService.remove(this);
listener.onFailure(blockException);
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
ClusterBlockException blockException = checkBlock(request, event.state());
if (blockException == null || !blockException.retryable()) {
clusterService.remove(this);
innerExecute(request, listener, false);
}
}
}); | 0true
| src_main_java_org_elasticsearch_action_support_master_TransportMasterNodeOperationAction.java |
368 | return new RecordIterator<Entry>() {
private final Iterator<Map.Entry<byte[], NavigableMap<Long, byte[]>>> kv = currentRow.getMap().get(columnFamilyBytes).entrySet().iterator();
@Override
public boolean hasNext() {
ensureOpen();
return kv.hasNext();
}
@Override
public Entry next() {
ensureOpen();
return StaticArrayEntry.ofBytes(kv.next(), entryGetter);
}
@Override
public void close() {
isClosed = true;
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
}; | 0true
| titan-hbase-parent_titan-hbase-core_src_main_java_com_thinkaurelius_titan_diskstorage_hbase_HBaseKeyColumnValueStore.java |
401 | @SuppressWarnings({ "serial" })
public class ORecordTrackedList extends OTrackedList<OIdentifiable> {
public ORecordTrackedList(final ORecordInternal<?> iSourceRecord) {
super(iSourceRecord);
}
public Iterator<OIdentifiable> rawIterator() {
return iterator();
}
/**
* The item's identity does not affect anything.
*/
public void onBeforeIdentityChanged(final ORID iRID) {
}
/**
* The item's identity does not affect anything.
*/
public void onAfterIdentityChanged(final ORecord<?> iRecord) {
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_db_record_ORecordTrackedList.java |
610 | workThreads[threadNum] = new Thread(new Runnable() {
public void run() {
while (doingWork.get()) {
final HazelcastInstance hz = instances[threadNum];
Set<Member> members = new HashSet<Member>(hz.getCluster().getMembers());
members.remove(hz.getCluster().getLocalMember());
final Map<Member, Future<String>> futures = hz.getExecutorService("test")
.submitToMembers(new PingCallable(), members);
for (Future<String> f : futures.values()) {
try {
f.get();
} catch (MemberLeftException ignored) {
} catch (Exception e) {
e.printStackTrace();
}
}
try {
Thread.sleep(2000);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_cluster_MemberListTest.java |
277 | @RunWith(HazelcastSerialClassRunner.class)
@Category(SlowTest.class)
public class ClientExecutionPoolSizeLowTest {
static final int COUNT = 1000;
static HazelcastInstance server1;
static HazelcastInstance server2;
static HazelcastInstance client;
static IMap map;
@Before
public void init() {
Config config = new Config();
server1 = Hazelcast.newHazelcastInstance(config);
ClientConfig clientConfig = new ClientConfig();
clientConfig.setExecutorPoolSize(1);
clientConfig.getNetworkConfig().setRedoOperation(true);
client = HazelcastClient.newHazelcastClient(clientConfig);
server2 = Hazelcast.newHazelcastInstance(config);
map = client.getMap(randomString());
}
@After
public void destroy() {
HazelcastClient.shutdownAll();
Hazelcast.shutdownAll();
}
@Test
public void testNodeTerminate() throws InterruptedException, ExecutionException {
for (int i = 0; i < COUNT; i++) {
map.put(i, i);
if (i == COUNT / 2) {
server2.getLifecycleService().terminate();
}
}
assertEquals(COUNT, map.size());
}
@Test
public void testOwnerNodeTerminate() throws InterruptedException, ExecutionException {
for (int i = 0; i < COUNT; i++) {
map.put(i, i);
if (i == COUNT / 2) {
server1.getLifecycleService().terminate();
}
}
assertEquals(COUNT, map.size());
}
@Test
public void testNodeTerminateWithAsyncOperations() throws InterruptedException, ExecutionException {
for (int i = 0; i < COUNT; i++) {
map.putAsync(i, i);
if (i == COUNT / 2) {
server2.getLifecycleService().terminate();
}
}
assertTrueEventually(new AssertTask() {
@Override
public void run() throws Exception {
assertEquals(COUNT, map.size());
}
});
}
@Test
public void testOwnerNodeTerminateWithAsyncOperations() throws InterruptedException, ExecutionException {
for (int i = 0; i < COUNT; i++) {
map.putAsync(i, i);
if (i == COUNT / 2) {
server1.getLifecycleService().terminate();
}
}
assertTrueEventually(new AssertTask() {
@Override
public void run() throws Exception {
assertEquals(COUNT, map.size());
}
});
}
} | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_io_ClientExecutionPoolSizeLowTest.java |
1,829 | public static class DummyObject implements Serializable {
} | 0true
| hazelcast_src_test_java_com_hazelcast_map_MapInstanceSharingTest.java |
1,468 | public class OSQLFunctionDijkstra extends OSQLFunctionPathFinder<Float> {
public static final String NAME = "dijkstra";
private static final Float MIN = new Float(0f);
private String paramWeightFieldName;
public OSQLFunctionDijkstra() {
super(NAME, 3, 4);
}
public Object execute(OIdentifiable iCurrentRecord, Object iCurrentResult, final Object[] iParameters, OCommandContext iContext) {
final OrientBaseGraph graph = OGraphCommandExecutorSQLFactory.getGraph();
final ORecordInternal<?> record = (ORecordInternal<?>) (iCurrentRecord != null ? iCurrentRecord.getRecord() : null);
Object source = iParameters[0];
if (OMultiValue.isMultiValue(source)) {
if (OMultiValue.getSize(source) > 1)
throw new IllegalArgumentException("Only one sourceVertex is allowed");
source = OMultiValue.getFirstValue(source);
}
paramSourceVertex = graph.getVertex((OIdentifiable) OSQLHelper.getValue(source, record, iContext));
Object dest = iParameters[1];
if (OMultiValue.isMultiValue(dest)) {
if (OMultiValue.getSize(dest) > 1)
throw new IllegalArgumentException("Only one destinationVertex is allowed");
dest = OMultiValue.getFirstValue(dest);
}
paramDestinationVertex = graph.getVertex((OIdentifiable) OSQLHelper.getValue(dest, record, iContext));
paramWeightFieldName = (String) OSQLHelper.getValue(iParameters[2], record, iContext);
if (iParameters.length > 3)
paramDirection = Direction.valueOf(iParameters[3].toString().toUpperCase());
return super.execute(iParameters, iContext);
}
public String getSyntax() {
return "Syntax error: dijkstra(<sourceVertex>, <destinationVertex>, <weightEdgeFieldName>, [<direction>])";
}
@Override
protected Float getShortestDistance(final Vertex destination) {
if (destination == null)
return Float.MAX_VALUE;
final Float d = distance.get(destination);
return d == null ? Float.MAX_VALUE : d;
}
@Override
protected Float getMinimumDistance() {
return MIN;
}
protected Float getDistance(final Vertex node, final Vertex target) {
final Iterator<Edge> edges = ((OrientVertex) node).getEdges((OrientVertex) target, paramDirection).iterator();
if (edges.hasNext()) {
final Edge e = edges.next();
if (e != null) {
final Object fieldValue = e.getProperty(paramWeightFieldName);
if (fieldValue != null)
if (fieldValue instanceof Float)
return (Float) fieldValue;
else if (fieldValue instanceof Number)
return ((Number) fieldValue).floatValue();
}
}
return MIN;
}
@Override
protected Float sumDistances(final Float iDistance1, final Float iDistance2) {
return iDistance1.floatValue() + iDistance2.floatValue();
}
} | 1no label
| graphdb_src_main_java_com_orientechnologies_orient_graph_sql_functions_OSQLFunctionDijkstra.java |
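The function above supplies edge weights to a generic path finder; a compact, self-contained JDK Dijkstra over an adjacency map (hypothetical DijkstraDemo class with illustrative graph data) shows the same hooks: best-known distance (getShortestDistance), edge weight (getDistance), and accumulation (sumDistances).

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class DijkstraDemo {
    public static void main(String[] args) {
        Map<String, Map<String, Float>> graph = new HashMap<String, Map<String, Float>>();
        graph.put("A", new HashMap<String, Float>());
        graph.put("B", new HashMap<String, Float>());
        graph.put("C", new HashMap<String, Float>());
        graph.get("A").put("B", 1f);
        graph.get("A").put("C", 4f);
        graph.get("B").put("C", 2f);

        Map<String, Float> distance = new HashMap<String, Float>();
        distance.put("A", 0f); // source vertex
        Set<String> settled = new HashSet<String>();

        while (settled.size() < graph.size()) {
            // pick the unsettled node with the smallest known distance (getShortestDistance)
            String node = null;
            for (Map.Entry<String, Float> e : distance.entrySet()) {
                if (!settled.contains(e.getKey()) && (node == null || e.getValue() < distance.get(node))) {
                    node = e.getKey();
                }
            }
            if (node == null) break; // remaining nodes are unreachable
            settled.add(node);
            for (Map.Entry<String, Float> edge : graph.get(node).entrySet()) {
                float candidate = distance.get(node) + edge.getValue(); // sumDistances over getDistance
                Float known = distance.get(edge.getKey());
                if (known == null || candidate < known) {
                    distance.put(edge.getKey(), candidate);
                }
            }
        }
        System.out.println(distance); // A=0.0, B=1.0, C=3.0
    }
}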
830 | return new DataSerializableFactory() {
@Override
public IdentifiedDataSerializable create(int typeId) {
switch (typeId) {
case ALTER_AND_GET:
return new AlterAndGetOperation();
case ALTER:
return new AlterOperation();
case APPLY:
return new ApplyOperation();
case COMPARE_AND_SET:
return new CompareAndSetOperation();
case CONTAINS:
return new ContainsOperation();
case GET_AND_ALTER:
return new GetAndAlterOperation();
case GET_AND_SET:
return new GetAndSetOperation();
case GET:
return new GetOperation();
case IS_NULL:
return new IsNullOperation();
case SET_AND_GET:
return new SetAndGetOperation();
case SET_BACKUP:
return new SetBackupOperation();
case SET:
return new SetOperation();
case REPLICATION:
return new AtomicReferenceReplicationOperation();
default:
return null;
}
}
}; | 0true
| hazelcast_src_main_java_com_hazelcast_concurrent_atomicreference_AtomicReferenceDataSerializerHook.java |
1,832 | @Target({METHOD, CONSTRUCTOR, FIELD})
@Retention(RUNTIME)
@Documented
public @interface Inject {
/**
* If true, and the appropriate binding is not found,
* the Injector will skip injection of this method or field rather than
* produce an error. When applied to a field, any default value already
* assigned to the field will remain (guice will not actively null out the
* field). When applied to a method, the method will only be invoked if
* bindings for <i>all</i> parameters are found. When applied to a
* constructor, an error will result upon Injector creation.
*/
boolean optional() default false;
} | 0true
| src_main_java_org_elasticsearch_common_inject_Inject.java |
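A brief sketch of the optional() flag documented above: if no matching binding exists, Guice skips the field and the default assignment survives. ReportPrinter and the String header binding are hypothetical.

import org.elasticsearch.common.inject.Inject;

public class ReportPrinter {
    @Inject(optional = true)
    private String header = "default header"; // kept if no String binding is found

    public void print(String body) {
        System.out.println(header);
        System.out.println(body);
    }
}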
888 | threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
Tuple<String, Long>[] context1 = scrollId.getContext();
for (int i = 0; i < context1.length; i++) {
Tuple<String, Long> target = context1[i];
DiscoveryNode node = nodes.get(target.v1());
if (node != null && nodes.localNodeId().equals(node.id())) {
executeQueryPhase(i, counter, node, target.v2());
}
}
}
}); | 1no label
| src_main_java_org_elasticsearch_action_search_type_TransportSearchScrollQueryThenFetchAction.java |
501 | private class RefreshTask implements Runnable {
public void run() {
if (updating.compareAndSet(false, true)) {
try {
final ClientClusterService clusterService = client.getClientClusterService();
final Address master = clusterService.getMasterAddress();
final PartitionsResponse response = getPartitionsFrom(master);
if (response != null) {
processPartitionResponse(response);
}
} catch (HazelcastInstanceNotActiveException ignored) {
} finally {
updating.set(false);
}
}
}
} | 0true
| hazelcast-client_src_main_java_com_hazelcast_client_spi_impl_ClientPartitionServiceImpl.java |
1,374 | public class OTransactionOptimistic extends OTransactionRealAbstract {
private static final boolean useSBTree = OGlobalConfiguration.INDEX_USE_SBTREE_BY_DEFAULT.getValueAsBoolean();
private boolean usingLog;
private static AtomicInteger txSerial = new AtomicInteger();
private int autoRetries = OGlobalConfiguration.TX_AUTO_RETRY.getValueAsInteger();
public OTransactionOptimistic(final ODatabaseRecordTx iDatabase) {
super(iDatabase, txSerial.incrementAndGet());
usingLog = OGlobalConfiguration.TX_USE_LOG.getValueAsBoolean();
}
public void begin() {
status = TXSTATUS.BEGUN;
}
public void commit() {
checkTransaction();
status = TXSTATUS.COMMITTING;
if (OScenarioThreadLocal.INSTANCE.get() != RUN_MODE.RUNNING_DISTRIBUTED && !(database.getStorage() instanceof OStorageEmbedded))
database.getStorage().commit(this, null);
else {
final List<String> involvedIndexes = getInvolvedIndexes();
if (involvedIndexes != null)
Collections.sort(involvedIndexes);
for (int retry = 1; retry <= autoRetries; ++retry) {
try {
// LOCK INVOLVED INDEXES
List<OIndexAbstract<?>> lockedIndexes = null;
try {
if (involvedIndexes != null)
for (String indexName : involvedIndexes) {
final OIndexAbstract<?> index = (OIndexAbstract<?>) database.getMetadata().getIndexManager()
.getIndexInternal(indexName);
if (lockedIndexes == null)
lockedIndexes = new ArrayList<OIndexAbstract<?>>();
index.acquireModificationLock();
lockedIndexes.add(index);
}
if (!useSBTree) {
// SEARCH FOR INDEX BASED ON DOCUMENT TOUCHED
final Collection<? extends OIndex<?>> indexes = database.getMetadata().getIndexManager().getIndexes();
List<? extends OIndex<?>> indexesToLock = null;
if (indexes != null) {
indexesToLock = new ArrayList<OIndex<?>>(indexes);
Collections.sort(indexesToLock, new Comparator<OIndex<?>>() {
public int compare(final OIndex<?> indexOne, final OIndex<?> indexTwo) {
return indexOne.getName().compareTo(indexTwo.getName());
}
});
}
if (indexesToLock != null && !indexesToLock.isEmpty()) {
if (lockedIndexes == null)
lockedIndexes = new ArrayList<OIndexAbstract<?>>();
for (OIndex<?> index : indexesToLock) {
for (Entry<ORID, ORecordOperation> entry : recordEntries.entrySet()) {
final ORecord<?> record = entry.getValue().record.getRecord();
if (record instanceof ODocument) {
ODocument doc = (ODocument) record;
if (!lockedIndexes.contains(index.getInternal()) && doc.getSchemaClass() != null
&& index.getDefinition() != null
&& doc.getSchemaClass().isSubClassOf(index.getDefinition().getClassName())) {
index.getInternal().acquireModificationLock();
lockedIndexes.add((OIndexAbstract<?>) index.getInternal());
}
}
}
}
for (OIndexAbstract<?> index : lockedIndexes)
index.acquireExclusiveLock();
}
}
final Map<String, OIndex> indexes = new HashMap<String, OIndex>();
for (OIndex index : database.getMetadata().getIndexManager().getIndexes())
indexes.put(index.getName(), index);
final Runnable callback = new Runnable() {
@Override
public void run() {
final ODocument indexEntries = getIndexChanges();
if (indexEntries != null) {
final Map<String, OIndexInternal<?>> indexesToCommit = new HashMap<String, OIndexInternal<?>>();
for (Entry<String, Object> indexEntry : indexEntries) {
final OIndexInternal<?> index = indexes.get(indexEntry.getKey()).getInternal();
indexesToCommit.put(index.getName(), index.getInternal());
}
for (OIndexInternal<?> indexInternal : indexesToCommit.values())
indexInternal.preCommit();
for (Entry<String, Object> indexEntry : indexEntries) {
final OIndexInternal<?> index = indexesToCommit.get(indexEntry.getKey());
if (index == null) {
OLogManager.instance().error(this, "Index with name " + indexEntry.getKey() + " was not found.");
throw new OIndexException("Index with name " + indexEntry.getKey() + " was not found.");
} else
index.addTxOperation((ODocument) indexEntry.getValue());
}
try {
for (OIndexInternal<?> indexInternal : indexesToCommit.values())
indexInternal.commit();
} finally {
for (OIndexInternal<?> indexInternal : indexesToCommit.values())
indexInternal.postCommit();
}
}
}
};
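// LOCAL/PAGINATED ENGINES RUN THE INDEX-COMMIT CALLBACK INSIDE THE STORAGE COMMIT ITSELF;
// OTHER ENGINES COMMIT FIRST AND THEN APPLY THE INDEX CHANGES UNDER THE STORAGE LOCK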
final String storageType = database.getStorage().getType();
if (storageType.equals(OEngineLocal.NAME) || storageType.equals(OEngineLocalPaginated.NAME))
database.getStorage().commit(OTransactionOptimistic.this, callback);
else {
database.getStorage().callInLock(new Callable<Object>() {
@Override
public Object call() throws Exception {
database.getStorage().commit(OTransactionOptimistic.this, null);
callback.run();
return null;
}
}, true);
}
// OK
break;
} finally {
// RELEASE INDEX LOCKS IF ANY
if (lockedIndexes != null) {
if (!useSBTree) {
for (OIndexAbstract<?> index : lockedIndexes)
index.releaseExclusiveLock();
}
for (OIndexAbstract<?> index : lockedIndexes)
index.releaseModificationLock();
}
}
} catch (OTimeoutException e) {
if (autoRetries == 0) {
OLogManager.instance().debug(this, "Caught timeout exception during commit, but no automatic retry has been set", e);
throw e;
} else if (retry == autoRetries) {
OLogManager.instance().debug(this, "Caught timeout exception during %d/%d. Retry limit is exceeded.", retry,
autoRetries);
throw e;
} else {
OLogManager.instance().debug(this, "Caught timeout exception during commit retrying %d/%d...", retry, autoRetries);
}
}
}
}
}
public void rollback() {
checkTransaction();
status = TXSTATUS.ROLLBACKING;
database.getStorage().callInLock(new Callable<Void>() {
public Void call() throws Exception {
database.getStorage().rollback(OTransactionOptimistic.this);
return null;
}
}, true);
// CLEAR THE CACHE MOVING GOOD RECORDS TO LEVEL-2 CACHE
database.getLevel1Cache().clear();
// REMOVE ALL THE ENTRIES AND INVALIDATE THE DOCUMENTS SO THEY ARE NOT RE-USED DIRTY AT USER LEVEL. IN THIS WAY A RE-LOAD MUST BE
// EXECUTED
for (ORecordOperation v : recordEntries.values())
v.getRecord().unload();
for (ORecordOperation v : allEntries.values())
v.getRecord().unload();
indexEntries.clear();
}
public ORecordInternal<?> loadRecord(final ORID iRid, final ORecordInternal<?> iRecord, final String iFetchPlan,
boolean ignoreCache, boolean loadTombstone) {
checkTransaction();
final ORecordInternal<?> txRecord = getRecord(iRid);
if (txRecord == OTransactionRealAbstract.DELETED_RECORD)
// DELETED IN TX
return null;
if (txRecord != null) {
if (iRecord != null && txRecord != iRecord)
OLogManager.instance().warn(
this,
"Found record in transaction with the same RID %s but different instance. "
+ "Probably the record has been loaded from another transaction and reused on the current one: reload it "
+ "from current transaction before to update or delete it", iRecord.getIdentity());
return txRecord;
}
if (iRid.isTemporary())
return null;
// DELEGATE TO THE STORAGE, NO TOMBSTONES SUPPORT IN TX MODE
final ORecordInternal<?> record = database.executeReadRecord((ORecordId) iRid, iRecord, iFetchPlan, ignoreCache, false);
if (record != null)
addRecord(record, ORecordOperation.LOADED, null);
return record;
}
public void deleteRecord(final ORecordInternal<?> iRecord, final OPERATION_MODE iMode) {
if (!iRecord.getIdentity().isValid())
return;
addRecord(iRecord, ORecordOperation.DELETED, null);
}
public void saveRecord(final ORecordInternal<?> iRecord, final String iClusterName, final OPERATION_MODE iMode,
boolean iForceCreate, final ORecordCallback<? extends Number> iRecordCreatedCallback,
ORecordCallback<ORecordVersion> iRecordUpdatedCallback) {
if (iRecord == null)
return;
final byte operation = iForceCreate ? ORecordOperation.CREATED : iRecord.getIdentity().isValid() ? ORecordOperation.UPDATED
: ORecordOperation.CREATED;
addRecord(iRecord, operation, iClusterName);
}
protected void addRecord(final ORecordInternal<?> iRecord, final byte iStatus, final String iClusterName) {
checkTransaction();
switch (iStatus) {
case ORecordOperation.CREATED:
database.callbackHooks(TYPE.BEFORE_CREATE, iRecord);
break;
case ORecordOperation.LOADED:
/**
* Read hooks already invoked in {@link com.orientechnologies.orient.core.db.record.ODatabaseRecordAbstract#executeReadRecord}
* .
*/
break;
case ORecordOperation.UPDATED:
database.callbackHooks(TYPE.BEFORE_UPDATE, iRecord);
break;
case ORecordOperation.DELETED:
database.callbackHooks(TYPE.BEFORE_DELETE, iRecord);
break;
}
try {
if (iRecord.getIdentity().isTemporary())
temp2persistent.put(iRecord.getIdentity().copy(), iRecord);
if ((status == OTransaction.TXSTATUS.COMMITTING) && database.getStorage() instanceof OStorageEmbedded) {
// I'M COMMITTING: BYPASS LOCAL BUFFER
switch (iStatus) {
case ORecordOperation.CREATED:
case ORecordOperation.UPDATED:
final ORID oldRid = iRecord.getIdentity().copy();
database.executeSaveRecord(iRecord, iClusterName, iRecord.getRecordVersion(), iRecord.getRecordType(), false,
OPERATION_MODE.SYNCHRONOUS, false, null, null);
updateIdentityAfterCommit(oldRid, iRecord.getIdentity());
break;
case ORecordOperation.DELETED:
database.executeDeleteRecord(iRecord, iRecord.getRecordVersion(), false, false, OPERATION_MODE.SYNCHRONOUS, false);
break;
}
final ORecordOperation txRecord = getRecordEntry(iRecord.getIdentity());
if (txRecord == null) {
// NOT IN TX, SAVE IT ANYWAY
allEntries.put(iRecord.getIdentity(), new ORecordOperation(iRecord, iStatus));
} else if (txRecord.record != iRecord) {
// UPDATE LOCAL RECORDS TO AVOID MISMATCH OF VERSION/CONTENT
final String clusterName = getDatabase().getClusterNameById(iRecord.getIdentity().getClusterId());
if (!clusterName.equals(OMetadataDefault.CLUSTER_MANUAL_INDEX_NAME)
&& !clusterName.equals(OMetadataDefault.CLUSTER_INDEX_NAME))
OLogManager
.instance()
.warn(
this,
"Found record in transaction with the same RID %s but different instance. Probably the record has been loaded from another transaction and reused on the current one: reload it from current transaction before to update or delete it",
iRecord.getIdentity());
txRecord.record = iRecord;
txRecord.type = iStatus;
}
} else {
final ORecordId rid = (ORecordId) iRecord.getIdentity();
if (!rid.isValid()) {
iRecord.onBeforeIdentityChanged(rid);
// ASSIGN A UNIQUE SERIAL TEMPORARY ID
if (rid.clusterId == ORID.CLUSTER_ID_INVALID)
rid.clusterId = iClusterName != null ? database.getClusterIdByName(iClusterName) : database.getDefaultClusterId();
rid.clusterPosition = OClusterPositionFactory.INSTANCE.valueOf(newObjectCounter--);
iRecord.onAfterIdentityChanged(iRecord);
} else
// REMOVE FROM THE DB'S CACHE
database.getLevel1Cache().freeRecord(rid);
ORecordOperation txEntry = getRecordEntry(rid);
if (txEntry == null) {
if (!(rid.isTemporary() && iStatus != ORecordOperation.CREATED)) {
// NEW ENTRY: JUST REGISTER IT
txEntry = new ORecordOperation(iRecord, iStatus);
recordEntries.put(rid, txEntry);
}
} else {
// UPDATE PREVIOUS STATUS
txEntry.record = iRecord;
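// MERGE THE NEW OPERATION WITH THE ONE PREVIOUSLY RECORDED FOR THE SAME RID:
// LOADED+UPDATED=UPDATED, LOADED+DELETED=DELETED, CREATED+DELETED=DROP THE ENTRY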
switch (txEntry.type) {
case ORecordOperation.LOADED:
switch (iStatus) {
case ORecordOperation.UPDATED:
txEntry.type = ORecordOperation.UPDATED;
break;
case ORecordOperation.DELETED:
txEntry.type = ORecordOperation.DELETED;
break;
}
break;
case ORecordOperation.UPDATED:
switch (iStatus) {
case ORecordOperation.DELETED:
txEntry.type = ORecordOperation.DELETED;
break;
}
break;
case ORecordOperation.DELETED:
break;
case ORecordOperation.CREATED:
switch (iStatus) {
case ORecordOperation.DELETED:
recordEntries.remove(rid);
break;
}
break;
}
}
}
switch (iStatus) {
case ORecordOperation.CREATED:
database.callbackHooks(TYPE.AFTER_CREATE, iRecord);
break;
case ORecordOperation.LOADED:
/**
* Read hooks already invoked in
* {@link com.orientechnologies.orient.core.db.record.ODatabaseRecordAbstract#executeReadRecord}.
*/
break;
case ORecordOperation.UPDATED:
database.callbackHooks(TYPE.AFTER_UPDATE, iRecord);
break;
case ORecordOperation.DELETED:
database.callbackHooks(TYPE.AFTER_DELETE, iRecord);
break;
}
} catch (Throwable t) {
switch (iStatus) {
case ORecordOperation.CREATED:
database.callbackHooks(TYPE.CREATE_FAILED, iRecord);
break;
case ORecordOperation.UPDATED:
database.callbackHooks(TYPE.UPDATE_FAILED, iRecord);
break;
case ORecordOperation.DELETED:
database.callbackHooks(TYPE.DELETE_FAILED, iRecord);
break;
}
if (t instanceof RuntimeException)
throw (RuntimeException) t;
else
throw new ODatabaseException("Error on saving record " + iRecord.getIdentity(), t);
}
}
@Override
public boolean updateReplica(ORecordInternal<?> iRecord) {
throw new UnsupportedOperationException("updateReplica()");
}
@Override
public String toString() {
return "OTransactionOptimistic [id=" + id + ", status=" + status + ", recEntries=" + recordEntries.size() + ", idxEntries="
+ indexEntries.size() + ']';
}
public boolean isUsingLog() {
return usingLog;
}
public void setUsingLog(final boolean useLog) {
this.usingLog = useLog;
}
public int getAutoRetries() {
return autoRetries;
}
public void setAutoRetries(final int autoRetries) {
this.autoRetries = autoRetries;
}
} | 1no label
| core_src_main_java_com_orientechnologies_orient_core_tx_OTransactionOptimistic.java |
1,125 | public class XmlConfigBuilder extends AbstractXmlConfigHelper implements ConfigBuilder {
private final static ILogger logger = Logger.getLogger(XmlConfigBuilder.class);
private Config config;
private InputStream in;
private File configurationFile;
private URL configurationUrl;
private Properties properties = System.getProperties();
boolean usingSystemConfig = false;
/**
* Constructs an XmlConfigBuilder that reads from the provided file.
*
* @param xmlFileName the name of the XML file
* @throws FileNotFoundException if the file can't be found.
*/
public XmlConfigBuilder(String xmlFileName) throws FileNotFoundException {
this(new FileInputStream(xmlFileName));
}
/**
* Constructs an XmlConfigBuilder that reads from the given InputStream.
*
* @param inputStream the InputStream containing the XML configuration.
* @throws IllegalArgumentException if inputStream is null.
*/
public XmlConfigBuilder(InputStream inputStream) {
if (inputStream == null) {
throw new IllegalArgumentException("inputStream can't be null");
}
this.in = inputStream;
}
/**
* Constructs an XmlConfigBuilder that tries to find a usable XML configuration file.
*/
public XmlConfigBuilder() {
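// RESOLUTION ORDER: 1) the file named by the 'hazelcast.config' system property, 2) hazelcast.xml
// in the working directory, 3) hazelcast.xml on the classpath, 4) the bundled hazelcast-default.xml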
String configFile = System.getProperty("hazelcast.config");
try {
if (configFile != null) {
configurationFile = new File(configFile);
logger.info("Using configuration file at " + configurationFile.getAbsolutePath());
if (!configurationFile.exists()) {
String msg = "Config file at '" + configurationFile.getAbsolutePath() + "' doesn't exist.";
msg += "\nHazelcast will try to use the hazelcast.xml config file in the working directory.";
logger.warning(msg);
configurationFile = null;
}
}
if (configurationFile == null) {
configFile = "hazelcast.xml";
configurationFile = new File("hazelcast.xml");
if (!configurationFile.exists()) {
configurationFile = null;
}
}
if (configurationFile != null) {
logger.info("Using configuration file at " + configurationFile.getAbsolutePath());
try {
in = new FileInputStream(configurationFile);
configurationUrl = configurationFile.toURI().toURL();
usingSystemConfig = true;
} catch (final Exception e) {
String msg = "Having problem reading config file at '" + configFile + "'.";
msg += "\nException message: " + e.getMessage();
msg += "\nHazelcast will try to use the hazelcast.xml config file in classpath.";
logger.warning(msg);
in = null;
}
}
if (in == null) {
logger.info("Looking for hazelcast.xml config file in classpath.");
configurationUrl = Config.class.getClassLoader().getResource("hazelcast.xml");
if (configurationUrl == null) {
configurationUrl = Config.class.getClassLoader().getResource("hazelcast-default.xml");
logger.warning(
"Could not find hazelcast.xml in classpath.\nHazelcast will use hazelcast-default.xml config file in jar.");
if (configurationUrl == null) {
logger.warning("Could not find hazelcast-default.xml in the classpath!"
+ "\nThis may be due to a wrong-packaged or corrupted jar file.");
return;
}
}
logger.info("Using configuration file " + configurationUrl.getFile() + " in the classpath.");
in = configurationUrl.openStream();
if (in == null) {
String msg = "Having problem reading config file hazelcast-default.xml in the classpath.";
msg += "\nHazelcast will start with default configuration.";
logger.warning(msg);
}
}
} catch (final Throwable e) {
logger.severe("Error while creating configuration:" + e.getMessage(), e);
}
}
/**
* Gets the properties currently in use. Defaults to the system properties; can be null only if explicitly set to null.
*
* @return the used properties.
* @see #setProperties(java.util.Properties)
*/
public Properties getProperties() {
return properties;
}
/**
* Sets the properties to use. Pass null if no properties should be used.
* <p/>
* Properties are used to resolve ${variable} occurrences in the XML file.
*
* @param properties the new properties.
* @return the XmlConfigBuilder
*/
public XmlConfigBuilder setProperties(Properties properties) {
this.properties = properties;
return this;
}
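// Example (hypothetical values): for <port>${my.port}</port> in the XML,
// Properties props = new Properties();
// props.setProperty("my.port", "5801");
// new XmlConfigBuilder("hazelcast.xml").setProperties(props).build()
// yields a Config whose network port is 5801.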
public Config build() {
Config config = new Config();
config.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
return build(config);
}
Config build(Config config) {
try {
parse(config);
} catch (Exception e) {
throw new HazelcastException(e);
}
config.setConfigurationFile(configurationFile);
config.setConfigurationUrl(configurationUrl);
return config;
}
private void parse(final Config config) throws Exception {
this.config = config;
final DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
Document doc;
try {
doc = builder.parse(in);
} catch (final Exception e) {
String msgPart = "config file '" + config.getConfigurationFile() + "' set as a system property.";
if (!usingSystemConfig) {
msgPart = "hazelcast-default.xml config file in the classpath.";
}
String msg = "Having problem parsing the " + msgPart;
msg += "\nException: " + e.getMessage();
msg += "\nHazelcast startup interrupted.";
logger.severe(msg);
throw e;
} finally {
IOUtil.closeResource(in);
}
Element element = doc.getDocumentElement();
try {
element.getTextContent();
} catch (final Throwable e) {
domLevel3 = false;
}
preprocess(element);
handleConfig(element);
}
private void preprocess(Node root) {
NamedNodeMap attributes = root.getAttributes();
if (attributes != null) {
for (int k = 0; k < attributes.getLength(); k++) {
Node attribute = attributes.item(k);
replaceVariables(attribute);
}
}
if (root.getNodeValue() != null) {
replaceVariables(root);
}
final NodeList childNodes = root.getChildNodes();
for (int k = 0; k < childNodes.getLength(); k++) {
Node child = childNodes.item(k);
preprocess(child);
}
}
private void replaceVariables(Node node) {
String value = node.getNodeValue();
StringBuilder sb = new StringBuilder();
int copiedUpTo = 0;
int startIndex = value.indexOf("${");
while (startIndex > -1) {
int endIndex = value.indexOf('}', startIndex);
if (endIndex == -1) {
logger.warning("Bad variable syntax. Could not find a closing curly bracket '}' on node: " + node.getLocalName());
break;
}
// KEEP THE LITERAL TEXT THAT PRECEDES THIS ${...} OCCURRENCE, OTHERWISE VALUES
// LIKE "prefix${var}suffix" WOULD LOSE THEIR PREFIX
sb.append(value.substring(copiedUpTo, startIndex));
String variable = value.substring(startIndex + 2, endIndex);
String variableReplacement = properties.getProperty(variable);
if (variableReplacement != null) {
sb.append(variableReplacement);
} else {
sb.append(value.substring(startIndex, endIndex + 1));
logger.warning("Could not find a value for property '" + variable + "' on node: " + node.getLocalName());
}
copiedUpTo = endIndex + 1;
startIndex = value.indexOf("${", endIndex);
}
// APPEND WHATEVER FOLLOWS THE LAST CLOSED VARIABLE (OR THE WHOLE VALUE IF THERE WAS NONE)
sb.append(value.substring(copiedUpTo));
node.setNodeValue(sb.toString());
}
private void handleConfig(final Element docElement) throws Exception {
for (org.w3c.dom.Node node : new IterableNodeList(docElement.getChildNodes())) {
final String nodeName = cleanNodeName(node.getNodeName());
if ("network".equals(nodeName)) {
handleNetwork(node);
} else if ("group".equals(nodeName)) {
handleGroup(node);
} else if ("properties".equals(nodeName)) {
fillProperties(node, config.getProperties());
} else if ("wan-replication".equals(nodeName)) {
handleWanReplication(node);
} else if ("executor-service".equals(nodeName)) {
handleExecutor(node);
} else if ("services".equals(nodeName)) {
handleServices(node);
} else if ("queue".equals(nodeName)) {
handleQueue(node);
} else if ("map".equals(nodeName)) {
handleMap(node);
} else if ("multimap".equals(nodeName)) {
handleMultiMap(node);
} else if ("replicatedmap".equals(nodeName)) {
handleReplicatedMap(node);
} else if ("list".equals(nodeName)) {
handleList(node);
} else if ("set".equals(nodeName)) {
handleSet(node);
} else if ("topic".equals(nodeName)) {
handleTopic(node);
} else if ("jobtracker".equals(nodeName)) {
handleJobTracker(node);
} else if ("semaphore".equals(nodeName)) {
handleSemaphore(node);
} else if ("listeners".equals(nodeName)) {
handleListeners(node);
} else if ("partition-group".equals(nodeName)) {
handlePartitionGroup(node);
} else if ("serialization".equals(nodeName)) {
handleSerialization(node);
} else if ("security".equals(nodeName)) {
handleSecurity(node);
} else if ("license-key".equals(nodeName)) {
config.setLicenseKey(getTextContent(node));
} else if ("management-center".equals(nodeName)) {
handleManagementCenterConfig(node);
}
}
}
private void handleServices(final Node node) {
final Node attDefaults = node.getAttributes().getNamedItem("enable-defaults");
final boolean enableDefaults = attDefaults == null || checkTrue(getTextContent(attDefaults));
ServicesConfig servicesConfig = config.getServicesConfig();
servicesConfig.setEnableDefaults(enableDefaults);
for (Node child : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(child.getNodeName());
if ("service".equals(nodeName)) {
ServiceConfig serviceConfig = new ServiceConfig();
String enabledValue = getAttribute(child, "enabled");
boolean enabled = checkTrue(enabledValue);
serviceConfig.setEnabled(enabled);
for (org.w3c.dom.Node n : new IterableNodeList(child.getChildNodes())) {
final String value = cleanNodeName(n.getNodeName());
if ("name".equals(value)) {
String name = getTextContent(n);
serviceConfig.setName(name);
} else if ("class-name".equals(value)) {
String className = getTextContent(n);
serviceConfig.setClassName(className);
} else if ("properties".equals(value)) {
fillProperties(n, serviceConfig.getProperties());
} else if ("configuration".equals(value)) {
Node parserNode = n.getAttributes().getNamedItem("parser");
String parserClass;
if (parserNode == null || (parserClass = getTextContent(parserNode)) == null) {
throw new IllegalArgumentException("Parser is required!");
}
try {
ServiceConfigurationParser parser = ClassLoaderUtil.newInstance(config.getClassLoader(), parserClass);
Object obj = parser.parse((Element) n);
serviceConfig.setConfigObject(obj);
} catch (Exception e) {
ExceptionUtil.sneakyThrow(e);
}
}
}
servicesConfig.addServiceConfig(serviceConfig);
}
}
}
private void handleWanReplication(final Node node) throws Exception {
final Node attName = node.getAttributes().getNamedItem("name");
final String name = getTextContent(attName);
final WanReplicationConfig wanReplicationConfig = new WanReplicationConfig();
wanReplicationConfig.setName(name);
for (org.w3c.dom.Node nodeTarget : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(nodeTarget.getNodeName());
if ("target-cluster".equals(nodeName)) {
WanTargetClusterConfig wanTarget = new WanTargetClusterConfig();
String groupName = getAttribute(nodeTarget, "group-name");
String groupPassword = getAttribute(nodeTarget, "group-password");
if (groupName != null) {
wanTarget.setGroupName(groupName);
}
if (groupPassword != null) {
wanTarget.setGroupPassword(groupPassword);
}
for (org.w3c.dom.Node targetChild : new IterableNodeList(nodeTarget.getChildNodes())) {
final String targetChildName = cleanNodeName(targetChild.getNodeName());
if ("replication-impl".equals(targetChildName)) {
wanTarget.setReplicationImpl(getTextContent(targetChild));
} else if ("end-points".equals(targetChildName)) {
for (org.w3c.dom.Node address : new IterableNodeList(targetChild.getChildNodes())) {
final String addressNodeName = cleanNodeName(address.getNodeName());
if ("address".equals(addressNodeName)) {
String addressStr = getTextContent(address);
wanTarget.addEndpoint(addressStr);
}
}
}
}
wanReplicationConfig.addTargetClusterConfig(wanTarget);
}
}
config.addWanReplicationConfig(wanReplicationConfig);
}
private void handleNetwork(final org.w3c.dom.Node node) throws Exception {
for (org.w3c.dom.Node child : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(child.getNodeName());
if ("port".equals(nodeName)) {
handlePort(child);
} else if ("outbound-ports".equals(nodeName)) {
handleOutboundPorts(child);
} else if ("public-address".equals(nodeName)) {
final String address = getTextContent(child);
config.getNetworkConfig().setPublicAddress(address);
} else if ("join".equals(nodeName)) {
handleJoin(child);
} else if ("interfaces".equals(nodeName)) {
handleInterfaces(child);
} else if ("symmetric-encryption".equals(nodeName)) {
handleViaReflection(child, config.getNetworkConfig(), new SymmetricEncryptionConfig());
} else if ("ssl".equals(nodeName)) {
handleSSLConfig(child);
} else if ("socket-interceptor".equals(nodeName)) {
handleSocketInterceptorConfig(child);
}
}
}
private void handleExecutor(final org.w3c.dom.Node node) throws Exception {
final ExecutorConfig executorConfig = new ExecutorConfig();
handleViaReflection(node, config, executorConfig);
}
private void handleGroup(final org.w3c.dom.Node node) {
for (org.w3c.dom.Node n : new IterableNodeList(node.getChildNodes())) {
final String value = getTextContent(n).trim();
final String nodeName = cleanNodeName(n.getNodeName());
if ("name".equals(nodeName)) {
config.getGroupConfig().setName(value);
} else if ("password".equals(nodeName)) {
config.getGroupConfig().setPassword(value);
}
}
}
private void handleInterfaces(final org.w3c.dom.Node node) {
final NamedNodeMap atts = node.getAttributes();
final InterfacesConfig interfaces = config.getNetworkConfig().getInterfaces();
for (int a = 0; a < atts.getLength(); a++) {
final org.w3c.dom.Node att = atts.item(a);
if ("enabled".equals(att.getNodeName())) {
final String value = att.getNodeValue();
interfaces.setEnabled(checkTrue(value));
}
}
for (org.w3c.dom.Node n : new IterableNodeList(node.getChildNodes())) {
if ("interface".equalsIgnoreCase(cleanNodeName(n.getNodeName()))) {
final String value = getTextContent(n).trim();
interfaces.addInterface(value);
}
}
}
private void handleViaReflection(final org.w3c.dom.Node node, Object parent, Object target) throws Exception {
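// GENERIC XML-TO-BEAN MAPPING: EACH ATTRIBUTE AND CHILD ELEMENT NAME IS CAMEL-CASED
// (SEE getMethodName) AND DISPATCHED TO A MATCHING setXxx METHOD ON 'target'; THE FILLED
// 'target' IS THEN ATTACHED TO 'parent' VIA A setXxx OR, FAILING THAT, AN addXxx METHOD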
final NamedNodeMap atts = node.getAttributes();
if (atts != null) {
for (int a = 0; a < atts.getLength(); a++) {
final org.w3c.dom.Node att = atts.item(a);
String methodName = "set" + getMethodName(att.getNodeName());
Method method = getMethod(target, methodName);
final String value = att.getNodeValue();
invoke(target, method, value);
}
}
for (org.w3c.dom.Node n : new IterableNodeList(node.getChildNodes())) {
final String value = getTextContent(n).trim();
String methodName = "set" + getMethodName(cleanNodeName(n.getNodeName()));
Method method = getMethod(target, methodName);
invoke(target, method, value);
}
String mName = "set" + target.getClass().getSimpleName();
Method method = getMethod(parent, mName);
if (method == null) {
mName = "add" + target.getClass().getSimpleName();
method = getMethod(parent, mName);
}
method.invoke(parent, target);
}
private void invoke(Object target, Method method, String value) {
if (method == null)
return;
Class<?>[] args = method.getParameterTypes();
if (args == null || args.length == 0)
return;
Class<?> arg = method.getParameterTypes()[0];
try {
if (arg == String.class) {
method.invoke(target, value);
} else if (arg == int.class) {
method.invoke(target, Integer.parseInt(value));
} else if (arg == long.class) {
method.invoke(target, Long.parseLong(value));
} else if (arg == boolean.class) {
method.invoke(target, Boolean.parseBoolean(value));
}
} catch (Exception e) {
logger.warning(e);
}
}
private Method getMethod(Object target, String methodName) {
Method[] methods = target.getClass().getMethods();
for (Method method : methods) {
if (method.getName().equalsIgnoreCase(methodName)) {
return method;
}
}
return null;
}
private String getMethodName(String element) {
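// CONVERTS DASH/UNDERSCORE/DOT SEPARATED XML NAMES TO CAMEL CASE,
// E.G. "connection-timeout-seconds" BECOMES "ConnectionTimeoutSeconds"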
StringBuilder sb = new StringBuilder();
char[] chars = element.toCharArray();
boolean upper = true;
for (char c : chars) {
if (c == '_' || c == '-' || c == '.') {
upper = true;
} else {
if (upper) {
sb.append(Character.toUpperCase(c));
upper = false;
} else {
sb.append(c);
}
}
}
return sb.toString();
}
private void handleJoin(final org.w3c.dom.Node node) {
for (org.w3c.dom.Node child : new IterableNodeList(node.getChildNodes())) {
final String name = cleanNodeName(child.getNodeName());
if ("multicast".equals(name)) {
handleMulticast(child);
} else if ("tcp-ip".equals(name)) {
handleTcpIp(child);
} else if ("aws".equals(name)) {
handleAWS(child);
}
}
}
private void handleAWS(Node node) {
final JoinConfig join = config.getNetworkConfig().getJoin();
final NamedNodeMap atts = node.getAttributes();
final AwsConfig awsConfig = join.getAwsConfig();
for (int a = 0; a < atts.getLength(); a++) {
final Node att = atts.item(a);
final String value = getTextContent(att).trim();
if ("enabled".equalsIgnoreCase(att.getNodeName())) {
awsConfig.setEnabled(checkTrue(value));
} else if (att.getNodeName().equals("connection-timeout-seconds")) {
awsConfig.setConnectionTimeoutSeconds(getIntegerValue("connection-timeout-seconds", value, 5));
}
}
for (Node n : new IterableNodeList(node.getChildNodes())) {
final String value = getTextContent(n).trim();
if ("secret-key".equals(cleanNodeName(n.getNodeName()))) {
awsConfig.setSecretKey(value);
} else if ("access-key".equals(cleanNodeName(n.getNodeName()))) {
awsConfig.setAccessKey(value);
} else if ("region".equals(cleanNodeName(n.getNodeName()))) {
awsConfig.setRegion(value);
} else if ("host-header".equals(cleanNodeName(n.getNodeName()))) {
awsConfig.setHostHeader(value);
} else if ("security-group-name".equals(cleanNodeName(n.getNodeName()))) {
awsConfig.setSecurityGroupName(value);
} else if ("tag-key".equals(cleanNodeName(n.getNodeName()))) {
awsConfig.setTagKey(value);
} else if ("tag-value".equals(cleanNodeName(n.getNodeName()))) {
awsConfig.setTagValue(value);
}
}
}
private void handleMulticast(final org.w3c.dom.Node node) {
final JoinConfig join = config.getNetworkConfig().getJoin();
final NamedNodeMap atts = node.getAttributes();
final MulticastConfig multicastConfig = join.getMulticastConfig();
for (int a = 0; a < atts.getLength(); a++) {
final org.w3c.dom.Node att = atts.item(a);
final String value = getTextContent(att).trim();
if ("enabled".equalsIgnoreCase(att.getNodeName())) {
multicastConfig.setEnabled(checkTrue(value));
}
}
for (Node n : new IterableNodeList(node.getChildNodes())) {
final String value = getTextContent(n).trim();
if ("multicast-group".equals(cleanNodeName(n.getNodeName()))) {
multicastConfig.setMulticastGroup(value);
} else if ("multicast-port".equals(cleanNodeName(n.getNodeName()))) {
multicastConfig.setMulticastPort(Integer.parseInt(value));
} else if ("multicast-timeout-seconds".equals(cleanNodeName(n.getNodeName()))) {
multicastConfig.setMulticastTimeoutSeconds(Integer.parseInt(value));
} else if ("multicast-time-to-live-seconds".equals(cleanNodeName(n.getNodeName()))) {
//we need this line for the time being to prevent not reading the multicast-time-to-live-seconds property
//for more info see: https://github.com/hazelcast/hazelcast/issues/752
multicastConfig.setMulticastTimeToLive(Integer.parseInt(value));
} else if ("multicast-time-to-live".equals(cleanNodeName(n.getNodeName()))) {
multicastConfig.setMulticastTimeToLive(Integer.parseInt(value));
} else if ("trusted-interfaces".equals(cleanNodeName(n.getNodeName()))) {
for (org.w3c.dom.Node child : new IterableNodeList(n.getChildNodes())) {
if ("interface".equalsIgnoreCase(cleanNodeName(child.getNodeName()))) {
multicastConfig.addTrustedInterface(getTextContent(child).trim());
}
}
}
}
}
private void handleTcpIp(final org.w3c.dom.Node node) {
final NamedNodeMap atts = node.getAttributes();
final JoinConfig join = config.getNetworkConfig().getJoin();
final TcpIpConfig tcpIpConfig = join.getTcpIpConfig();
for (int a = 0; a < atts.getLength(); a++) {
final org.w3c.dom.Node att = atts.item(a);
final String value = getTextContent(att).trim();
if (att.getNodeName().equals("enabled")) {
tcpIpConfig.setEnabled(checkTrue(value));
} else if (att.getNodeName().equals("connection-timeout-seconds")) {
tcpIpConfig.setConnectionTimeoutSeconds(getIntegerValue("connection-timeout-seconds", value, 5));
}
}
final NodeList nodelist = node.getChildNodes();
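// "interface", "member" and "members" are all accepted as equivalent tags for a member address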
final Set<String> memberTags = new HashSet<String>(Arrays.asList("interface", "member", "members"));
for (int i = 0; i < nodelist.getLength(); i++) {
final org.w3c.dom.Node n = nodelist.item(i);
final String value = getTextContent(n).trim();
if (cleanNodeName(n.getNodeName()).equals("required-member")) {
tcpIpConfig.setRequiredMember(value);
} else if (memberTags.contains(cleanNodeName(n.getNodeName()))) {
tcpIpConfig.addMember(value);
}
}
}
private void handlePort(final Node node) {
final String portStr = getTextContent(node).trim();
final NetworkConfig networkConfig = config.getNetworkConfig();
if (portStr != null && portStr.length() > 0) {
networkConfig.setPort(Integer.parseInt(portStr));
}
final NamedNodeMap atts = node.getAttributes();
for (int a = 0; a < atts.getLength(); a++) {
final org.w3c.dom.Node att = atts.item(a);
final String value = getTextContent(att).trim();
if ("auto-increment".equals(att.getNodeName())) {
networkConfig.setPortAutoIncrement(checkTrue(value));
} else if ("port-count".equals(att.getNodeName())) {
int portCount = Integer.parseInt(value);
networkConfig.setPortCount(portCount);
}
}
}
private void handleOutboundPorts(final Node child) {
final NetworkConfig networkConfig = config.getNetworkConfig();
for (Node n : new IterableNodeList(child.getChildNodes())) {
final String nodeName = cleanNodeName(n.getNodeName());
if ("ports".equals(nodeName)) {
final String value = getTextContent(n);
networkConfig.addOutboundPortDefinition(value);
}
}
}
private void handleQueue(final org.w3c.dom.Node node) {
final Node attName = node.getAttributes().getNamedItem("name");
final String name = getTextContent(attName);
final QueueConfig qConfig = new QueueConfig();
qConfig.setName(name);
for (org.w3c.dom.Node n : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(n.getNodeName());
final String value = getTextContent(n).trim();
if ("max-size".equals(nodeName)) {
qConfig.setMaxSize(getIntegerValue("max-size", value, QueueConfig.DEFAULT_MAX_SIZE));
} else if ("backup-count".equals(nodeName)) {
qConfig.setBackupCount(getIntegerValue("backup-count", value, QueueConfig.DEFAULT_SYNC_BACKUP_COUNT));
} else if ("async-backup-count".equals(nodeName)) {
qConfig.setAsyncBackupCount(getIntegerValue("async-backup-count", value, QueueConfig.DEFAULT_ASYNC_BACKUP_COUNT));
} else if ("item-listeners".equals(nodeName)) {
for (org.w3c.dom.Node listenerNode : new IterableNodeList(n.getChildNodes())) {
if ("item-listener".equals(cleanNodeName(listenerNode))) {
final NamedNodeMap attrs = listenerNode.getAttributes();
boolean incValue = checkTrue(getTextContent(attrs.getNamedItem("include-value")));
String listenerClass = getTextContent(listenerNode);
qConfig.addItemListenerConfig(new ItemListenerConfig(listenerClass, incValue));
}
}
} else if ("statistics-enabled".equals(nodeName)) {
qConfig.setStatisticsEnabled(checkTrue(value));
} else if ("queue-store".equals(nodeName)) {
final QueueStoreConfig queueStoreConfig = createQueueStoreConfig(n);
qConfig.setQueueStoreConfig(queueStoreConfig);
} else if ("empty-queue-ttl".equals(nodeName)) {
qConfig.setEmptyQueueTtl(getIntegerValue("empty-queue-ttl", value, QueueConfig.DEFAULT_EMPTY_QUEUE_TTL));
}
}
this.config.addQueueConfig(qConfig);
}
private void handleList(final org.w3c.dom.Node node) {
final Node attName = node.getAttributes().getNamedItem("name");
final String name = getTextContent(attName);
final ListConfig lConfig = new ListConfig();
lConfig.setName(name);
for (org.w3c.dom.Node n : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(n.getNodeName());
final String value = getTextContent(n).trim();
if ("max-size".equals(nodeName)) {
lConfig.setMaxSize(getIntegerValue("max-size", value, ListConfig.DEFAULT_MAX_SIZE));
} else if ("backup-count".equals(nodeName)) {
lConfig.setBackupCount(getIntegerValue("backup-count", value, ListConfig.DEFAULT_SYNC_BACKUP_COUNT));
} else if ("async-backup-count".equals(nodeName)) {
lConfig.setAsyncBackupCount(getIntegerValue("async-backup-count", value, ListConfig.DEFAULT_ASYNC_BACKUP_COUNT));
} else if ("item-listeners".equals(nodeName)) {
for (org.w3c.dom.Node listenerNode : new IterableNodeList(n.getChildNodes())) {
if ("item-listener".equals(cleanNodeName(listenerNode))) {
final NamedNodeMap attrs = listenerNode.getAttributes();
boolean incValue = checkTrue(getTextContent(attrs.getNamedItem("include-value")));
String listenerClass = getTextContent(listenerNode);
lConfig.addItemListenerConfig(new ItemListenerConfig(listenerClass, incValue));
}
}
} else if ("statistics-enabled".equals(nodeName)) {
lConfig.setStatisticsEnabled(checkTrue(value));
}
}
this.config.addListConfig(lConfig);
}
private void handleSet(final org.w3c.dom.Node node) {
final Node attName = node.getAttributes().getNamedItem("name");
final String name = getTextContent(attName);
final SetConfig sConfig = new SetConfig();
sConfig.setName(name);
for (org.w3c.dom.Node n : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(n.getNodeName());
final String value = getTextContent(n).trim();
if ("max-size".equals(nodeName)) {
sConfig.setMaxSize(getIntegerValue("max-size", value, SetConfig.DEFAULT_MAX_SIZE));
} else if ("backup-count".equals(nodeName)) {
sConfig.setBackupCount(getIntegerValue("backup-count", value, SetConfig.DEFAULT_SYNC_BACKUP_COUNT));
} else if ("async-backup-count".equals(nodeName)) {
sConfig.setAsyncBackupCount(getIntegerValue("async-backup-count", value, SetConfig.DEFAULT_ASYNC_BACKUP_COUNT));
} else if ("item-listeners".equals(nodeName)) {
for (org.w3c.dom.Node listenerNode : new IterableNodeList(n.getChildNodes())) {
if ("item-listener".equals(cleanNodeName(listenerNode))) {
final NamedNodeMap attrs = listenerNode.getAttributes();
boolean incValue = checkTrue(getTextContent(attrs.getNamedItem("include-value")));
String listenerClass = getTextContent(listenerNode);
sConfig.addItemListenerConfig(new ItemListenerConfig(listenerClass, incValue));
}
}
} else if ("statistics-enabled".equals(nodeName)) {
sConfig.setStatisticsEnabled(checkTrue(value));
}
}
this.config.addSetConfig(sConfig);
}
private void handleMultiMap(final org.w3c.dom.Node node) {
final Node attName = node.getAttributes().getNamedItem("name");
final String name = getTextContent(attName);
final MultiMapConfig multiMapConfig = new MultiMapConfig();
multiMapConfig.setName(name);
for (org.w3c.dom.Node n : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(n.getNodeName());
final String value = getTextContent(n).trim();
if ("value-collection-type".equals(nodeName)) {
multiMapConfig.setValueCollectionType(value);
} else if ("backup-count".equals(nodeName)) {
multiMapConfig.setBackupCount(getIntegerValue("backup-count", value, MultiMapConfig.DEFAULT_SYNC_BACKUP_COUNT));
} else if ("async-backup-count".equals(nodeName)) {
multiMapConfig.setAsyncBackupCount(getIntegerValue("async-backup-count", value, MultiMapConfig.DEFAULT_ASYNC_BACKUP_COUNT));
} else if ("entry-listeners".equals(nodeName)) {
for (org.w3c.dom.Node listenerNode : new IterableNodeList(n.getChildNodes())) {
if ("entry-listener".equals(cleanNodeName(listenerNode))) {
final NamedNodeMap attrs = listenerNode.getAttributes();
boolean incValue = checkTrue(getTextContent(attrs.getNamedItem("include-value")));
boolean local = checkTrue(getTextContent(attrs.getNamedItem("local")));
String listenerClass = getTextContent(listenerNode);
multiMapConfig.addEntryListenerConfig(new EntryListenerConfig(listenerClass, local, incValue));
}
}
} else if ("statistics-enabled".equals(nodeName)) {
multiMapConfig.setStatisticsEnabled(checkTrue(value));
// } else if ("partition-strategy".equals(nodeName)) {
// multiMapConfig.setPartitioningStrategyConfig(new PartitioningStrategyConfig(value));
}
}
this.config.addMultiMapConfig(multiMapConfig);
}
private void handleReplicatedMap(final org.w3c.dom.Node node) {
final Node attName = node.getAttributes().getNamedItem("name");
final String name = getTextContent(attName);
final ReplicatedMapConfig replicatedMapConfig = new ReplicatedMapConfig();
replicatedMapConfig.setName(name);
for (org.w3c.dom.Node n : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(n.getNodeName());
final String value = getTextContent(n).trim();
if ("concurrency-level".equals(nodeName)) {
replicatedMapConfig.setConcurrencyLevel(getIntegerValue("concurrency-level", value, ReplicatedMapConfig.DEFAULT_CONCURRENCY_LEVEL));
} else if ("in-memory-format".equals(nodeName)) {
replicatedMapConfig.setInMemoryFormat(InMemoryFormat.valueOf(upperCaseInternal(value)));
} else if ("replication-delay-millis".equals(nodeName)) {
replicatedMapConfig.setReplicationDelayMillis(getIntegerValue("replication-delay-millis", value, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
} else if ("async-fillup".equals(nodeName)) {
replicatedMapConfig.setAsyncFillup(checkTrue(value));
} else if ("statistics-enabled".equals(nodeName)) {
replicatedMapConfig.setStatisticsEnabled(checkTrue(value));
} else if ("entry-listeners".equals(nodeName)) {
for (org.w3c.dom.Node listenerNode : new IterableNodeList(n.getChildNodes())) {
if ("entry-listener".equals(cleanNodeName(listenerNode))) {
final NamedNodeMap attrs = listenerNode.getAttributes();
boolean incValue = checkTrue(getTextContent(attrs.getNamedItem("include-value")));
boolean local = checkTrue(getTextContent(attrs.getNamedItem("local")));
String listenerClass = getTextContent(listenerNode);
replicatedMapConfig.addEntryListenerConfig(new EntryListenerConfig(listenerClass, local, incValue));
}
}
}
}
this.config.addReplicatedMapConfig(replicatedMapConfig);
}
private void handleMap(final org.w3c.dom.Node node) throws Exception {
final String name = getAttribute(node, "name");
final MapConfig mapConfig = new MapConfig();
mapConfig.setName(name);
for (org.w3c.dom.Node n : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(n.getNodeName());
final String value = getTextContent(n).trim();
if ("backup-count".equals(nodeName)) {
mapConfig.setBackupCount(getIntegerValue("backup-count", value, MapConfig.DEFAULT_BACKUP_COUNT));
} else if ("in-memory-format".equals(nodeName)) {
mapConfig.setInMemoryFormat(InMemoryFormat.valueOf(upperCaseInternal(value)));
} else if ("async-backup-count".equals(nodeName)) {
mapConfig.setAsyncBackupCount(getIntegerValue("async-backup-count", value, MapConfig.MIN_BACKUP_COUNT));
} else if ("eviction-policy".equals(nodeName)) {
mapConfig.setEvictionPolicy(MapConfig.EvictionPolicy.valueOf(upperCaseInternal(value)));
} else if ("max-size".equals(nodeName)) {
final MaxSizeConfig msc = mapConfig.getMaxSizeConfig();
final Node maxSizePolicy = n.getAttributes().getNamedItem("policy");
if (maxSizePolicy != null) {
msc.setMaxSizePolicy(MaxSizeConfig.MaxSizePolicy.valueOf(upperCaseInternal(getTextContent(maxSizePolicy))));
}
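// OPTIONAL SIZE SUFFIX: A TRAILING 'g'/'G' MULTIPLIES THE NUMERIC PART BY 1000,
// A TRAILING 'm'/'M' STRIPS THE SUFFIX AND KEEPS THE NUMBER AS-IS,
// ANY OTHER VALUE IS PARSED AS A PLAIN INTEGER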
int size;
if (value.length() < 2) {
size = Integer.parseInt(value);
} else {
char last = value.charAt(value.length() - 1);
int type = 0;
if (last == 'g' || last == 'G') {
type = 1;
} else if (last == 'm' || last == 'M') {
type = 2;
}
if (type == 0) {
size = Integer.parseInt(value);
} else if (type == 1) {
size = Integer.parseInt(value.substring(0, value.length() - 1)) * 1000;
} else {
size = Integer.parseInt(value.substring(0, value.length() - 1));
}
}
msc.setSize(size);
} else if ("eviction-percentage".equals(nodeName)) {
mapConfig.setEvictionPercentage(getIntegerValue("eviction-percentage", value,
MapConfig.DEFAULT_EVICTION_PERCENTAGE));
} else if ("time-to-live-seconds".equals(nodeName)) {
mapConfig.setTimeToLiveSeconds(getIntegerValue("time-to-live-seconds", value,
MapConfig.DEFAULT_TTL_SECONDS));
} else if ("max-idle-seconds".equals(nodeName)) {
mapConfig.setMaxIdleSeconds(getIntegerValue("max-idle-seconds", value,
MapConfig.DEFAULT_MAX_IDLE_SECONDS));
} else if ("map-store".equals(nodeName)) {
MapStoreConfig mapStoreConfig = createMapStoreConfig(n);
mapConfig.setMapStoreConfig(mapStoreConfig);
} else if ("near-cache".equals(nodeName)) {
handleViaReflection(n, mapConfig, new NearCacheConfig());
} else if ("merge-policy".equals(nodeName)) {
mapConfig.setMergePolicy(value);
} else if ("read-backup-data".equals(nodeName)) {
mapConfig.setReadBackupData(checkTrue(value));
} else if ("statistics-enabled".equals(nodeName)) {
mapConfig.setStatisticsEnabled(checkTrue(value));
} else if ("wan-replication-ref".equals(nodeName)) {
WanReplicationRef wanReplicationRef = new WanReplicationRef();
final String wanName = getAttribute(n, "name");
wanReplicationRef.setName(wanName);
for (org.w3c.dom.Node wanChild : new IterableNodeList(n.getChildNodes())) {
final String wanChildName = cleanNodeName(wanChild.getNodeName());
final String wanChildValue = getTextContent(wanChild);
if ("merge-policy".equals(wanChildName)) {
wanReplicationRef.setMergePolicy(wanChildValue);
}
}
mapConfig.setWanReplicationRef(wanReplicationRef);
} else if ("indexes".equals(nodeName)) {
for (org.w3c.dom.Node indexNode : new IterableNodeList(n.getChildNodes())) {
if ("index".equals(cleanNodeName(indexNode))) {
final NamedNodeMap attrs = indexNode.getAttributes();
boolean ordered = checkTrue(getTextContent(attrs.getNamedItem("ordered")));
String attribute = getTextContent(indexNode);
mapConfig.addMapIndexConfig(new MapIndexConfig(attribute, ordered));
}
}
} else if ("entry-listeners".equals(nodeName)) {
for (org.w3c.dom.Node listenerNode : new IterableNodeList(n.getChildNodes())) {
if ("entry-listener".equals(cleanNodeName(listenerNode))) {
final NamedNodeMap attrs = listenerNode.getAttributes();
boolean incValue = checkTrue(getTextContent(attrs.getNamedItem("include-value")));
boolean local = checkTrue(getTextContent(attrs.getNamedItem("local")));
String listenerClass = getTextContent(listenerNode);
mapConfig.addEntryListenerConfig(new EntryListenerConfig(listenerClass, local, incValue));
}
}
} else if ("partition-strategy".equals(nodeName)) {
mapConfig.setPartitioningStrategyConfig(new PartitioningStrategyConfig(value));
}
}
this.config.addMapConfig(mapConfig);
}
private MapStoreConfig createMapStoreConfig(final org.w3c.dom.Node node) {
MapStoreConfig mapStoreConfig = new MapStoreConfig();
final NamedNodeMap atts = node.getAttributes();
for (int a = 0; a < atts.getLength(); a++) {
final org.w3c.dom.Node att = atts.item(a);
final String value = getTextContent(att).trim();
if ("enabled".equals(att.getNodeName())) {
mapStoreConfig.setEnabled(checkTrue(value));
} else if ("initial-mode".equals(att.getNodeName())) {
final InitialLoadMode mode = InitialLoadMode.valueOf(upperCaseInternal(getTextContent(att)));
mapStoreConfig.setInitialLoadMode(mode);
}
}
for (org.w3c.dom.Node n : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(n.getNodeName());
if ("class-name".equals(nodeName)) {
mapStoreConfig.setClassName(getTextContent(n).trim());
} else if ("factory-class-name".equals(nodeName)) {
mapStoreConfig.setFactoryClassName(getTextContent(n).trim());
} else if ("write-delay-seconds".equals(nodeName)) {
mapStoreConfig.setWriteDelaySeconds(getIntegerValue("write-delay-seconds", getTextContent(n).trim(),
MapStoreConfig.DEFAULT_WRITE_DELAY_SECONDS));
} else if ("properties".equals(nodeName)) {
fillProperties(n, mapStoreConfig.getProperties());
}
}
return mapStoreConfig;
}
private QueueStoreConfig createQueueStoreConfig(final org.w3c.dom.Node node) {
QueueStoreConfig queueStoreConfig = new QueueStoreConfig();
final NamedNodeMap atts = node.getAttributes();
for (int a = 0; a < atts.getLength(); a++) {
final org.w3c.dom.Node att = atts.item(a);
final String value = getTextContent(att).trim();
if (att.getNodeName().equals("enabled")) {
queueStoreConfig.setEnabled(checkTrue(value));
}
}
for (org.w3c.dom.Node n : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(n.getNodeName());
if ("class-name".equals(nodeName)) {
queueStoreConfig.setClassName(getTextContent(n).trim());
} else if ("factory-class-name".equals(nodeName)) {
queueStoreConfig.setFactoryClassName(getTextContent(n).trim());
} else if ("properties".equals(nodeName)) {
fillProperties(n, queueStoreConfig.getProperties());
}
}
return queueStoreConfig;
}
private void handleSSLConfig(final org.w3c.dom.Node node) {
SSLConfig sslConfig = new SSLConfig();
final NamedNodeMap atts = node.getAttributes();
final Node enabledNode = atts.getNamedItem("enabled");
final boolean enabled = enabledNode != null && checkTrue(getTextContent(enabledNode).trim());
sslConfig.setEnabled(enabled);
for (org.w3c.dom.Node n : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(n.getNodeName());
if ("factory-class-name".equals(nodeName)) {
sslConfig.setFactoryClassName(getTextContent(n).trim());
} else if ("properties".equals(nodeName)) {
fillProperties(n, sslConfig.getProperties());
}
}
config.getNetworkConfig().setSSLConfig(sslConfig);
}
private void handleSocketInterceptorConfig(final org.w3c.dom.Node node) {
SocketInterceptorConfig socketInterceptorConfig = parseSocketInterceptorConfig(node);
config.getNetworkConfig().setSocketInterceptorConfig(socketInterceptorConfig);
}
private void handleTopic(final org.w3c.dom.Node node) {
final Node attName = node.getAttributes().getNamedItem("name");
final String name = getTextContent(attName);
final TopicConfig tConfig = new TopicConfig();
tConfig.setName(name);
for (org.w3c.dom.Node n : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(n.getNodeName());
if (nodeName.equals("global-ordering-enabled")) {
tConfig.setGlobalOrderingEnabled(checkTrue(getTextContent(n)));
} else if ("message-listeners".equals(nodeName)) {
for (org.w3c.dom.Node listenerNode : new IterableNodeList(n.getChildNodes())) {
if ("message-listener".equals(cleanNodeName(listenerNode))) {
tConfig.addMessageListenerConfig(new ListenerConfig(getTextContent(listenerNode)));
}
}
} else if ("statistics-enabled".equals(nodeName)) {
tConfig.setStatisticsEnabled(checkTrue(getTextContent(n)));
}
}
config.addTopicConfig(tConfig);
}
private void handleJobTracker(final Node node) {
final Node attName = node.getAttributes().getNamedItem("name");
final String name = getTextContent(attName);
final JobTrackerConfig jConfig = new JobTrackerConfig();
jConfig.setName(name);
for (org.w3c.dom.Node n : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(n.getNodeName());
final String value = getTextContent(n).trim();
if ("max-thread-size".equals(nodeName)) {
jConfig.setMaxThreadSize(getIntegerValue("max-thread-size", value, JobTrackerConfig.DEFAULT_MAX_THREAD_SIZE));
} else if ("queue-size".equals(nodeName)) {
jConfig.setQueueSize(getIntegerValue("queue-size", value, JobTrackerConfig.DEFAULT_QUEUE_SIZE));
} else if ("retry-count".equals(nodeName)) {
jConfig.setRetryCount(getIntegerValue("retry-count", value, JobTrackerConfig.DEFAULT_RETRY_COUNT));
} else if ("chunk-size".equals(nodeName)) {
jConfig.setChunkSize(getIntegerValue("chunk-size", value, JobTrackerConfig.DEFAULT_CHUNK_SIZE));
} else if ("communicate-stats".equals(nodeName)) {
jConfig.setCommunicateStats(value == null || value.length() == 0 ?
JobTrackerConfig.DEFAULT_COMMUNICATE_STATS : Boolean.parseBoolean(value));
} else if ("topology-changed-stategy".equals(nodeName)) {
TopologyChangedStrategy topologyChangedStrategy = JobTrackerConfig.DEFAULT_TOPOLOGY_CHANGED_STRATEGY;
for (TopologyChangedStrategy temp : TopologyChangedStrategy.values()) {
if (temp.name().equals(value)) {
topologyChangedStrategy = temp;
}
}
jConfig.setTopologyChangedStrategy(topologyChangedStrategy);
}
}
config.addJobTrackerConfig(jConfig);
}
private void handleSemaphore(final org.w3c.dom.Node node) {
final Node attName = node.getAttributes().getNamedItem("name");
final String name = getTextContent(attName);
final SemaphoreConfig sConfig = new SemaphoreConfig();
sConfig.setName(name);
for (org.w3c.dom.Node n : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(n.getNodeName());
final String value = getTextContent(n).trim();
if ("initial-permits".equals(nodeName)) {
sConfig.setInitialPermits(getIntegerValue("initial-permits", value, 0));
} else if ("backup-count".equals(nodeName)) {
sConfig.setBackupCount(getIntegerValue("backup-count", value, SemaphoreConfig.DEFAULT_SYNC_BACKUP_COUNT));
} else if ("async-backup-count".equals(nodeName)) {
sConfig.setAsyncBackupCount(getIntegerValue("async-backup-count", value, SemaphoreConfig.DEFAULT_ASYNC_BACKUP_COUNT));
}
}
config.addSemaphoreConfig(sConfig);
}
private void handleListeners(final org.w3c.dom.Node node) throws Exception {
for (org.w3c.dom.Node child : new IterableNodeList(node.getChildNodes())) {
if ("listener".equals(cleanNodeName(child))) {
String listenerClass = getTextContent(child);
config.addListenerConfig(new ListenerConfig(listenerClass));
}
}
}
private void handlePartitionGroup(Node node) {
final NamedNodeMap atts = node.getAttributes();
final Node enabledNode = atts.getNamedItem("enabled");
final boolean enabled = enabledNode != null && checkTrue(getTextContent(enabledNode));
config.getPartitionGroupConfig().setEnabled(enabled);
final Node groupTypeNode = atts.getNamedItem("group-type");
final MemberGroupType groupType = groupTypeNode != null
? MemberGroupType.valueOf(upperCaseInternal(getTextContent(groupTypeNode)))
: MemberGroupType.PER_MEMBER;
config.getPartitionGroupConfig().setGroupType(groupType);
for (org.w3c.dom.Node child : new IterableNodeList(node.getChildNodes())) {
if ("member-group".equals(cleanNodeName(child))) {
handleMemberGroup(child);
}
}
}
private void handleMemberGroup(Node node) {
MemberGroupConfig memberGroupConfig = new MemberGroupConfig();
for (org.w3c.dom.Node child : new IterableNodeList(node.getChildNodes())) {
if ("interface".equals(cleanNodeName(child))) {
String value = getTextContent(child);
memberGroupConfig.addInterface(value);
}
}
config.getPartitionGroupConfig().addMemberGroupConfig(memberGroupConfig);
}
private void handleSerialization(final Node node) {
SerializationConfig serializationConfig = parseSerialization(node);
config.setSerializationConfig(serializationConfig);
}
private void handleManagementCenterConfig(final Node node) {
NamedNodeMap attrs = node.getAttributes();
final Node enabledNode = attrs.getNamedItem("enabled");
boolean enabled = enabledNode != null && checkTrue(getTextContent(enabledNode));
final Node intervalNode = attrs.getNamedItem("update-interval");
final int interval = intervalNode != null ? getIntegerValue("update-interval",
getTextContent(intervalNode), 5) : 5;
final Node securityTokenNode = attrs.getNamedItem("security-token");
final String securityToken = getTextContent(securityTokenNode);
if ((securityToken != null && !"".equals(securityToken)) && enabledNode == null) {
enabled = true;
}
final Node clusterIdNode = attrs.getNamedItem("cluster-id");
final String clusterId = getTextContent(clusterIdNode);
final String url = getTextContent(node);
ManagementCenterConfig managementCenterConfig = config.getManagementCenterConfig();
managementCenterConfig.setEnabled(enabled);
managementCenterConfig.setUpdateInterval(interval);
managementCenterConfig.setSecurityToken("".equals(securityToken) ? null : securityToken);
managementCenterConfig.setClusterId("".equals(clusterId) ? null : clusterId);
managementCenterConfig.setUrl("".equals(url) ? null : url);
}
private void handleSecurity(final org.w3c.dom.Node node) throws Exception {
final NamedNodeMap atts = node.getAttributes();
final Node enabledNode = atts.getNamedItem("enabled");
final boolean enabled = enabledNode != null && checkTrue(getTextContent(enabledNode));
config.getSecurityConfig().setEnabled(enabled);
for (org.w3c.dom.Node child : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(child.getNodeName());
if ("member-credentials-factory".equals(nodeName)) {
handleCredentialsFactory(child);
} else if ("member-login-modules".equals(nodeName)) {
handleLoginModules(child, true);
} else if ("client-login-modules".equals(nodeName)) {
handleLoginModules(child, false);
} else if ("client-permission-policy".equals(nodeName)) {
handlePermissionPolicy(child);
} else if ("client-permissions".equals(nodeName)) { //listener-permission
handleSecurityPermissions(child);
}
}
}
private void handleCredentialsFactory(final org.w3c.dom.Node node) throws Exception {
final NamedNodeMap attrs = node.getAttributes();
Node classNameNode = attrs.getNamedItem("class-name");
String className = getTextContent(classNameNode);
final SecurityConfig cfg = config.getSecurityConfig();
final CredentialsFactoryConfig credentialsFactoryConfig = new CredentialsFactoryConfig(className);
cfg.setMemberCredentialsConfig(credentialsFactoryConfig);
for (org.w3c.dom.Node child : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(child.getNodeName());
if ("properties".equals(nodeName)) {
fillProperties(child, credentialsFactoryConfig.getProperties());
break;
}
}
}
private void handleLoginModules(final org.w3c.dom.Node node, boolean member) throws Exception {
final SecurityConfig cfg = config.getSecurityConfig();
for (org.w3c.dom.Node child : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(child.getNodeName());
if ("login-module".equals(nodeName)) {
LoginModuleConfig lm = handleLoginModule(child);
if (member) {
cfg.addMemberLoginModuleConfig(lm);
} else {
cfg.addClientLoginModuleConfig(lm);
}
}
}
}
private LoginModuleConfig handleLoginModule(final org.w3c.dom.Node node) throws Exception {
final NamedNodeMap attrs = node.getAttributes();
Node classNameNode = attrs.getNamedItem("class-name");
String className = getTextContent(classNameNode);
Node usageNode = attrs.getNamedItem("usage");
LoginModuleUsage usage = usageNode != null ? LoginModuleUsage.get(getTextContent(usageNode))
: LoginModuleUsage.REQUIRED;
final LoginModuleConfig moduleConfig = new LoginModuleConfig(className, usage);
for (org.w3c.dom.Node child : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(child.getNodeName());
if ("properties".equals(nodeName)) {
fillProperties(child, moduleConfig.getProperties());
break;
}
}
return moduleConfig;
}
private void handlePermissionPolicy(final org.w3c.dom.Node node) throws Exception {
final NamedNodeMap attrs = node.getAttributes();
Node classNameNode = attrs.getNamedItem("class-name");
String className = getTextContent(classNameNode);
final SecurityConfig cfg = config.getSecurityConfig();
final PermissionPolicyConfig policyConfig = new PermissionPolicyConfig(className);
cfg.setClientPolicyConfig(policyConfig);
for (org.w3c.dom.Node child : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(child.getNodeName());
if ("properties".equals(nodeName)) {
fillProperties(child, policyConfig.getProperties());
break;
}
}
}
private void handleSecurityPermissions(final org.w3c.dom.Node node) throws Exception {
for (org.w3c.dom.Node child : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(child.getNodeName());
PermissionType type;
if ("map-permission".equals(nodeName)) {
type = PermissionType.MAP;
} else if ("queue-permission".equals(nodeName)) {
type = PermissionType.QUEUE;
} else if ("multimap-permission".equals(nodeName)) {
type = PermissionType.MULTIMAP;
} else if ("topic-permission".equals(nodeName)) {
type = PermissionType.TOPIC;
} else if ("list-permission".equals(nodeName)) {
type = PermissionType.LIST;
} else if ("set-permission".equals(nodeName)) {
type = PermissionType.SET;
} else if ("lock-permission".equals(nodeName)) {
type = PermissionType.LOCK;
} else if ("atomic-long-permission".equals(nodeName)) {
type = PermissionType.ATOMIC_LONG;
} else if ("countdown-latch-permission".equals(nodeName)) {
type = PermissionType.COUNTDOWN_LATCH;
} else if ("semaphore-permission".equals(nodeName)) {
type = PermissionType.SEMAPHORE;
} else if ("id-generator-permission".equals(nodeName)) {
type = PermissionType.ID_GENERATOR;
} else if ("executor-service-permission".equals(nodeName)) {
type = PermissionType.EXECUTOR_SERVICE;
} else if ("transaction-permission".equals(nodeName)) {
type = PermissionType.TRANSACTION;
} else if ("all-permissions".equals(nodeName)) {
type = PermissionType.ALL;
} else {
continue;
}
handleSecurityPermission(child, type);
}
}
private void handleSecurityPermission(final org.w3c.dom.Node node, PermissionType type) throws Exception {
final SecurityConfig cfg = config.getSecurityConfig();
final NamedNodeMap attrs = node.getAttributes();
Node nameNode = attrs.getNamedItem("name");
String name = nameNode != null ? getTextContent(nameNode) : "*";
Node principalNode = attrs.getNamedItem("principal");
String principal = principalNode != null ? getTextContent(principalNode) : "*";
final PermissionConfig permConfig = new PermissionConfig(type, name, principal);
cfg.addClientPermissionConfig(permConfig);
for (org.w3c.dom.Node child : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(child.getNodeName());
if ("endpoints".equals(nodeName)) {
handleSecurityPermissionEndpoints(child, permConfig);
} else if ("actions".equals(nodeName)) {
handleSecurityPermissionActions(child, permConfig);
}
}
}
private void handleSecurityPermissionEndpoints(final org.w3c.dom.Node node, PermissionConfig permConfig)
throws Exception {
for (org.w3c.dom.Node child : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(child.getNodeName());
if ("endpoint".equals(nodeName)) {
permConfig.addEndpoint(getTextContent(child).trim());
}
}
}
private void handleSecurityPermissionActions(final org.w3c.dom.Node node, PermissionConfig permConfig)
throws Exception {
for (org.w3c.dom.Node child : new IterableNodeList(node.getChildNodes())) {
final String nodeName = cleanNodeName(child.getNodeName());
if ("action".equals(nodeName)) {
permConfig.addAction(getTextContent(child).trim());
}
}
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_config_XmlConfigBuilder.java |
951 | clusterService.add(request.masterNodeTimeout(), new TimeoutClusterStateListener() {
@Override
public void postAdded() {
ClusterState clusterStateV2 = clusterService.state();
if (clusterStateV2.nodes().masterNodeId() != null) {
// now we have a master, try and execute it...
clusterService.remove(this);
innerExecute(request, listener, true);
}
}
@Override
public void onClose() {
clusterService.remove(this);
listener.onFailure(new NodeClosedException(clusterService.localNode()));
}
@Override
public void onTimeout(TimeValue timeout) {
clusterService.remove(this);
listener.onFailure(new MasterNotDiscoveredException("waited for [" + timeout + "]"));
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
if (event.nodesDelta().masterNodeChanged()) {
clusterService.remove(this);
innerExecute(request, listener, true);
}
}
}); | 0true
| src_main_java_org_elasticsearch_action_support_master_TransportMasterNodeOperationAction.java |
122 | private static final class RecoveredBranchInfo
{
final byte[] branchId;
private RecoveredBranchInfo( byte[] branchId )
{
this.branchId = branchId;
}
@Override
public int hashCode()
{
return Arrays.hashCode( branchId );
}
@Override
public boolean equals( Object obj )
{
if ( obj == null || obj.getClass() != RecoveredBranchInfo.class )
{
return false;
}
return Arrays.equals( branchId, ( ( RecoveredBranchInfo )obj ).branchId );
}
} | 0true
| community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_TxManager.java |
34 | public abstract class Adapter
implements ClusterListener
{
@Override
public void enteredCluster( ClusterConfiguration clusterConfiguration )
{
}
@Override
public void joinedCluster( InstanceId instanceId, URI member )
{
}
@Override
public void leftCluster( InstanceId instanceId )
{
}
@Override
public void leftCluster()
{
}
@Override
public void elected( String role, InstanceId instanceId, URI electedMember )
{
}
@Override
public void unelected( String role, InstanceId instanceId, URI electedMember )
{
}
} | 1no label
| enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_cluster_ClusterListener.java |
74 | public class CeylonCorrectionProcessor extends QuickAssistAssistant
implements IQuickAssistProcessor {
private CeylonEditor editor; //may only be used for quick assists!!!
private Tree.CompilationUnit model;
private IFile file; //may only be used for markers!
public CeylonCorrectionProcessor(CeylonEditor editor) {
this.editor = editor;
setQuickAssistProcessor(this);
}
public CeylonCorrectionProcessor(IMarker marker) {
IFileEditorInput input = MarkerUtils.getInput(marker);
if (input!=null) {
file = input.getFile();
IProject project = file.getProject();
IJavaProject javaProject = JavaCore.create(project);
TypeChecker tc = getProjectTypeChecker(project);
if (tc!=null) {
try {
for (IPackageFragmentRoot pfr: javaProject.getPackageFragmentRoots()) {
if (pfr.getPath().isPrefixOf(file.getFullPath())) {
IPath relPath = file.getFullPath().makeRelativeTo(pfr.getPath());
model = tc.getPhasedUnitFromRelativePath(relPath.toString())
.getCompilationUnit();
}
}
}
catch (JavaModelException e) {
e.printStackTrace();
}
}
}
setQuickAssistProcessor(this);
}
private IFile getFile() {
if (editor!=null &&
editor.getEditorInput() instanceof FileEditorInput) {
FileEditorInput input = (FileEditorInput) editor.getEditorInput();
if (input!=null) {
return input.getFile();
}
}
return file;
}
private Tree.CompilationUnit getRootNode() {
if (editor!=null) {
return editor.getParseController().getRootNode();
}
else if (model!=null) {
return (Tree.CompilationUnit) model;
}
else {
return null;
}
}
@Override
public String getErrorMessage() {
return null;
}
private void collectProposals(IQuickAssistInvocationContext context,
IAnnotationModel model, Collection<Annotation> annotations,
boolean addQuickFixes, boolean addQuickAssists,
Collection<ICompletionProposal> proposals) {
ArrayList<ProblemLocation> problems = new ArrayList<ProblemLocation>();
// collect problem locations and corrections from marker annotations
for (Annotation curr: annotations) {
if (curr instanceof CeylonAnnotation) {
ProblemLocation problemLocation =
getProblemLocation((CeylonAnnotation) curr, model);
if (problemLocation != null) {
problems.add(problemLocation);
}
}
}
if (problems.isEmpty() && addQuickFixes) {
for (Annotation curr: annotations) {
if (curr instanceof SimpleMarkerAnnotation) {
collectMarkerProposals((SimpleMarkerAnnotation) curr, proposals);
}
}
}
ProblemLocation[] problemLocations =
problems.toArray(new ProblemLocation[problems.size()]);
Arrays.sort(problemLocations);
if (addQuickFixes) {
collectCorrections(context, problemLocations, proposals);
}
if (addQuickAssists) {
collectAssists(context, problemLocations, proposals);
}
}
private static ProblemLocation getProblemLocation(CeylonAnnotation annotation,
IAnnotationModel model) {
int problemId = annotation.getId();
if (problemId != -1) {
Position pos = model.getPosition((Annotation) annotation);
if (pos != null) {
return new ProblemLocation(pos.getOffset(), pos.getLength(),
annotation); // java problems all handled by the quick assist processors
}
}
return null;
}
private void collectAssists(IQuickAssistInvocationContext context,
ProblemLocation[] locations, Collection<ICompletionProposal> proposals) {
if (proposals.isEmpty()) {
addProposals(context, editor, proposals);
}
}
private static void collectMarkerProposals(SimpleMarkerAnnotation annotation,
Collection<ICompletionProposal> proposals) {
IMarker marker = annotation.getMarker();
IMarkerResolution[] res = IDE.getMarkerHelpRegistry().getResolutions(marker);
if (res.length > 0) {
for (int i = 0; i < res.length; i++) {
proposals.add(new CeylonMarkerResolutionProposal(res[i], marker));
}
}
}
@Override
public ICompletionProposal[] computeQuickAssistProposals(IQuickAssistInvocationContext context) {
ArrayList<ICompletionProposal> proposals = new ArrayList<ICompletionProposal>();
ISourceViewer viewer = context.getSourceViewer();
List<Annotation> annotations =
getAnnotationsForLine(viewer, getLine(context, viewer));
collectProposals(context, viewer.getAnnotationModel(),
annotations, true, true, proposals);
return proposals.toArray(new ICompletionProposal[proposals.size()]);
}
private int getLine(IQuickAssistInvocationContext context, ISourceViewer viewer) {
try {
return viewer.getDocument().getLineOfOffset(context.getOffset());
}
catch (BadLocationException e) {
e.printStackTrace();
return 0;
}
}
public void collectCorrections(IQuickAssistInvocationContext context,
ProblemLocation location, Collection<ICompletionProposal> proposals) {
Tree.CompilationUnit rootNode = getRootNode();
if (rootNode!=null) {
addProposals(context, location, getFile(),
rootNode, proposals);
}
}
private void collectCorrections(IQuickAssistInvocationContext context,
ProblemLocation[] locations, Collection<ICompletionProposal> proposals) {
ISourceViewer viewer = context.getSourceViewer();
Tree.CompilationUnit rootNode = getRootNode();
for (int i=locations.length-1; i>=0; i--) {
ProblemLocation loc = locations[i];
if (loc.getOffset()<=viewer.getSelectedRange().x) {
for (int j=i; j>=0; j--) {
ProblemLocation location = locations[j];
if (location.getOffset()!=loc.getOffset()) {
break;
}
addProposals(context, location, getFile(),
rootNode, proposals);
}
if (!proposals.isEmpty()) {
viewer.setSelectedRange(loc.getOffset(),
loc.getLength());
return;
}
}
}
for (int i=0; i<locations.length; i++) {
ProblemLocation loc = locations[i];
for (int j=i; j<locations.length; j++) {
ProblemLocation location = locations[j];
if (location.getOffset()!=loc.getOffset()) break;
addProposals(context, location, getFile(),
rootNode, proposals);
}
if (!proposals.isEmpty()) {
viewer.setSelectedRange(loc.getOffset(),
loc.getLength());
return;
}
}
}
public static boolean canFix(IMarker marker) {
try {
if (marker.getType().equals(PROBLEM_MARKER_ID)) {
return marker.getAttribute(MarkerCreator.ERROR_CODE_KEY,0)>0;
}
else {
return false;
}
}
catch (CoreException e) {
return false;
}
}
@Override
public boolean canFix(Annotation annotation) {
if (annotation instanceof CeylonAnnotation) {
return ((CeylonAnnotation) annotation).getId()>0;
}
else if (annotation instanceof MarkerAnnotation) {
return canFix(((MarkerAnnotation) annotation).getMarker());
}
else {
return false;
}
}
@Override
public boolean canAssist(IQuickAssistInvocationContext context) {
//oops, all this is totally useless, because
//this method never gets called :-/
/*Tree.CompilationUnit cu = (CompilationUnit) context.getModel()
.getAST(new NullMessageHandler(), new NullProgressMonitor());
return CeylonSourcePositionLocator.findNode(cu, context.getOffset(),
context.getOffset()+context.getLength()) instanceof Tree.Term;*/
return true;
}
private void addProposals(IQuickAssistInvocationContext context,
ProblemLocation problem, IFile file,
Tree.CompilationUnit rootNode,
Collection<ICompletionProposal> proposals) {
if (file==null) return;
IProject project = file.getProject();
TypeChecker tc = getProjectTypeChecker(project);
int offset = problem.getOffset();
Node node = Nodes.findNode(rootNode, offset,
offset + problem.getLength());
switch ( problem.getProblemId() ) {
case 100:
addDeclareLocalProposal(rootNode, node, proposals, file, editor);
//fall through:
case 102:
if (tc!=null) {
addImportProposals(rootNode, node, proposals, file);
}
addCreateEnumProposal(rootNode, node, problem, proposals,
project, tc, file);
addCreationProposals(rootNode, node, problem, proposals,
project, tc, file);
if (tc!=null) {
addChangeReferenceProposals(rootNode, node, problem, proposals, file);
}
break;
case 101:
addCreateParameterProposals(rootNode, node, problem, proposals,
project, tc, file);
if (tc!=null) {
addChangeReferenceProposals(rootNode, node, problem, proposals, file);
}
break;
case 200:
addSpecifyTypeProposal(rootNode, node, proposals, null);
break;
case 300:
addRefineFormalMembersProposal(proposals, node, rootNode, false);
addMakeAbstractDecProposal(proposals, project, node);
break;
case 350:
addRefineFormalMembersProposal(proposals, node, rootNode, true);
addMakeAbstractDecProposal(proposals, project, node);
break;
case 310:
addMakeAbstractDecProposal(proposals, project, node);
break;
case 320:
addRemoveAnnotationProposal(node, "formal", proposals, project);
break;
case 400:
addMakeSharedProposal(proposals, project, node);
break;
case 500:
case 510:
addMakeDefaultProposal(proposals, project, node);
break;
case 600:
addMakeActualDecProposal(proposals, project, node);
break;
case 701:
addMakeSharedDecProposal(proposals, project, node);
addRemoveAnnotationDecProposal(proposals, "actual", project, node);
break;
case 702:
addMakeSharedDecProposal(proposals, project, node);
addRemoveAnnotationDecProposal(proposals, "formal", project, node);
break;
case 703:
addMakeSharedDecProposal(proposals, project, node);
addRemoveAnnotationDecProposal(proposals, "default", project, node);
break;
case 710:
case 711:
addMakeSharedProposal(proposals, project, node);
break;
case 712:
addExportModuleImportProposal(proposals, project, node);
break;
case 713:
addMakeSharedProposalForSupertypes(proposals, project, node);
break;
case 714:
addExportModuleImportProposalForSupertypes(proposals, project, node, rootNode);
break;
case 800:
case 804:
addMakeVariableProposal(proposals, project, node);
break;
case 803:
addMakeVariableProposal(proposals, project, node);
break;
case 801:
addMakeVariableDecProposal(proposals, project, rootNode, node);
break;
case 802:
break;
case 905:
addMakeContainerAbstractProposal(proposals, project, node);
break;
case 1100:
addMakeContainerAbstractProposal(proposals, project, node);
addRemoveAnnotationDecProposal(proposals, "formal", project, node);
break;
case 1101:
addRemoveAnnotationDecProposal(proposals, "formal", project, node);
//TODO: replace body with ;
break;
case 1000:
addAddParenthesesProposal(problem, file, proposals, node);
addChangeDeclarationProposal(problem, file, proposals, node);
break;
case 1050:
addFixAliasProposal(proposals, file, problem);
break;
case 1200:
case 1201:
addRemoveAnnotationDecProposal(proposals, "shared", project, node);
break;
case 1300:
case 1301:
addMakeRefinedSharedProposal(proposals, project, node);
addRemoveAnnotationDecProposal(proposals, "actual", project, node);
break;
case 1302:
case 1312:
case 1307:
addRemoveAnnotationDecProposal(proposals, "formal", project, node);
break;
case 1303:
case 1313:
addRemoveAnnotationDecProposal(proposals, "default", project, node);
break;
case 1400:
case 1401:
addMakeFormalDecProposal(proposals, project, node);
break;
case 1450:
addMakeFormalDecProposal(proposals, project, node);
addParameterProposals(proposals, file, rootNode, node, null);
addInitializerProposals(proposals, file, rootNode, node);
break;
case 1500:
addRemoveAnnotationDecProposal(proposals, "variable", project, node);
break;
case 1600:
addRemoveAnnotationDecProposal(proposals, "abstract", project, node);
break;
case 2000:
addCreateParameterProposals(rootNode, node, problem, proposals,
project, tc, file);
break;
case 2100:
addChangeTypeProposals(rootNode, node, problem, proposals, project);
addSatisfiesProposals(rootNode, node, proposals, project);
break;
case 2102:
addChangeTypeArgProposals(rootNode, node, problem, proposals, project);
addSatisfiesProposals(rootNode, node, proposals, project);
break;
case 2101:
addEllipsisToSequenceParameterProposal(rootNode, node, proposals, file);
break;
case 3000:
addAssignToLocalProposal(rootNode, proposals, node, offset);
addAssignToForProposal(rootNode, proposals, node, offset);
addAssignToIfExistsProposal(rootNode, proposals, node, offset);
addAssignToIfNonemptyProposal(rootNode, proposals, node, offset);
addAssignToTryProposal(rootNode, proposals, node, offset);
addAssignToIfIsProposal(rootNode, proposals, node, offset);
addPrintProposal(rootNode, proposals, node, offset);
break;
case 3100:
addShadowReferenceProposal(file, rootNode, proposals, node);
break;
case 3101:
case 3102:
addShadowSwitchReferenceProposal(file, rootNode, proposals, node);
break;
case 5001:
case 5002:
addChangeIdentifierCaseProposal(node, proposals, file);
break;
case 6000:
addFixMultilineStringIndentation(proposals, file, rootNode, node);
break;
case 7000:
addModuleImportProposals(proposals, project, tc, node);
break;
case 8000:
addRenameDescriptorProposal(rootNode, context, problem, proposals, file);
//TODO: figure out some other way to get a Shell!
if (context.getSourceViewer()!=null) {
addMoveDirProposal(file, rootNode, project, proposals,
context.getSourceViewer().getTextWidget().getShell());
}
break;
case 9000:
addChangeRefiningTypeProposal(file, rootNode, proposals, node);
break;
case 9100:
case 9200:
addChangeRefiningParametersProposal(file, rootNode, proposals, node);
break;
}
}
private void addProposals(IQuickAssistInvocationContext context,
CeylonEditor editor, Collection<ICompletionProposal> proposals) {
if (editor==null) return;
IDocument doc = context.getSourceViewer().getDocument();
IProject project = EditorUtil.getProject(editor.getEditorInput());
IFile file = EditorUtil.getFile(editor.getEditorInput());
Tree.CompilationUnit rootNode = editor.getParseController().getRootNode();
if (rootNode!=null) {
Node node = Nodes.findNode(rootNode, context.getOffset(),
context.getOffset() + context.getLength());
int currentOffset = editor.getSelection().getOffset();
RenameProposal.add(proposals, editor);
InlineDeclarationProposal.add(proposals, editor);
ChangeParametersProposal.add(proposals, editor);
ExtractValueProposal.add(proposals, editor, node);
ExtractFunctionProposal.add(proposals, editor, node);
ExtractParameterProposal.add(proposals, editor, node);
CollectParametersProposal.add(proposals, editor);
MoveOutProposal.add(proposals, editor, node);
MakeReceiverProposal.add(proposals, editor, node);
InvertBooleanProposal.add(proposals, editor);
addAssignToLocalProposal(rootNode, proposals, node, currentOffset);
addAssignToForProposal(rootNode, proposals, node, currentOffset);
addAssignToIfExistsProposal(rootNode, proposals, node, currentOffset);
addAssignToIfNonemptyProposal(rootNode, proposals, node, currentOffset);
addAssignToTryProposal(rootNode, proposals, node, currentOffset);
addAssignToIfIsProposal(rootNode, proposals, node, currentOffset);
addPrintProposal(rootNode, proposals, node, currentOffset);
addConvertToNamedArgumentsProposal(proposals, file, rootNode,
editor, currentOffset);
addConvertToPositionalArgumentsProposal(proposals, file, rootNode,
editor, currentOffset);
Tree.Statement statement = findStatement(rootNode, node);
Tree.Declaration declaration = findDeclaration(rootNode, node);
Tree.NamedArgument argument = findArgument(rootNode, node);
Tree.ImportMemberOrType imp = findImport(rootNode, node);
addVerboseRefinementProposal(proposals, file, statement, rootNode);
addAnnotationProposals(proposals, project, declaration,
doc, currentOffset);
addTypingProposals(proposals, file, rootNode, node, declaration, editor);
addAnonymousFunctionProposals(editor, proposals, doc, file, rootNode,
currentOffset);
addDeclarationProposals(editor, proposals, doc, file, rootNode,
declaration, currentOffset);
addConvertToClassProposal(proposals, declaration, editor);
addAssertExistsDeclarationProposals(proposals, doc, file, rootNode, declaration);
addSplitDeclarationProposals(proposals, doc, file, rootNode, declaration);
addJoinDeclarationProposal(proposals, rootNode, statement, file);
addParameterProposals(proposals, file, rootNode, declaration, editor);
addArgumentProposals(proposals, doc, file, argument);
addUseAliasProposal(imp, proposals, editor);
addRenameAliasProposal(imp, proposals, editor);
addRemoveAliasProposal(imp, proposals, file, editor);
addRenameVersionProposals(node, proposals, rootNode, editor);
addConvertToIfElseProposal(doc, proposals, file, statement);
addConvertToThenElseProposal(rootNode, doc, proposals, file, statement);
addReverseIfElseProposal(doc, proposals, file, statement, rootNode);
addConvertGetterToMethodProposal(proposals, editor, file, statement);
addConvertMethodToGetterProposal(proposals, editor, file, statement);
addThrowsAnnotationProposal(proposals, statement, rootNode, file, doc);
MoveToNewUnitProposal.add(proposals, editor);
MoveToUnitProposal.add(proposals, editor);
addRefineFormalMembersProposal(proposals, node, rootNode, false);
addConvertToVerbatimProposal(proposals, file, rootNode, node, doc);
addConvertFromVerbatimProposal(proposals, file, rootNode, node, doc);
addConvertToConcatenationProposal(proposals, file, rootNode, node, doc);
addConvertToInterpolationProposal(proposals, file, rootNode, node, doc);
addExpandTypeProposal(editor, statement, file, doc, proposals);
}
}
private void addAnnotationProposals(Collection<ICompletionProposal> proposals,
IProject project, Tree.Declaration decNode, IDocument doc, int offset) {
if (decNode!=null) {
try {
Node in = Nodes.getIdentifyingNode(decNode);
if (in==null ||
doc.getLineOfOffset(in.getStartIndex())!=
doc.getLineOfOffset(offset)) {
return;
}
}
catch (BadLocationException e) {
e.printStackTrace();
}
Declaration d = decNode.getDeclarationModel();
if (d!=null) {
if (decNode instanceof Tree.AttributeDeclaration) {
addMakeVariableDecProposal(proposals, project, decNode);
}
if ((d.isClassOrInterfaceMember()||d.isToplevel()) &&
!d.isShared()) {
addMakeSharedDecProposal(proposals, project, decNode);
}
if (d.isClassOrInterfaceMember() &&
!d.isDefault() && !d.isFormal()) {
if (decNode instanceof Tree.AnyClass) {
addMakeDefaultDecProposal(proposals, project, decNode);
}
else if (decNode instanceof Tree.AnyAttribute) {
addMakeDefaultDecProposal(proposals, project, decNode);
}
else if (decNode instanceof Tree.AnyMethod) {
addMakeDefaultDecProposal(proposals, project, decNode);
}
if (decNode instanceof Tree.ClassDefinition) {
addMakeFormalDecProposal(proposals, project, decNode);
}
else if (decNode instanceof Tree.AttributeDeclaration) {
if (((Tree.AttributeDeclaration) decNode).getSpecifierOrInitializerExpression()==null) {
addMakeFormalDecProposal(proposals, project, decNode);
}
}
else if (decNode instanceof Tree.MethodDeclaration) {
if (((Tree.MethodDeclaration) decNode).getSpecifierExpression()==null) {
addMakeFormalDecProposal(proposals, project, decNode);
}
}
}
}
}
}
private static void addAnonymousFunctionProposals(CeylonEditor editor,
Collection<ICompletionProposal> proposals, IDocument doc,
IFile file, Tree.CompilationUnit cu,
final int currentOffset) {
class FindAnonFunctionVisitor extends Visitor {
Tree.FunctionArgument result;
public void visit(Tree.FunctionArgument that) {
if (currentOffset>=that.getStartIndex() &&
currentOffset<=that.getStopIndex()+1) {
result = that;
}
super.visit(that);
}
}
FindAnonFunctionVisitor v = new FindAnonFunctionVisitor();
v.visit(cu);
Tree.FunctionArgument fun = v.result;
if (fun!=null) {
if (fun.getExpression()!=null) {
addConvertToBlockProposal(doc, proposals, file, fun);
}
if (fun.getBlock()!=null) {
addConvertToSpecifierProposal(doc, proposals, file,
fun.getBlock(), true);
}
}
}
private static void addDeclarationProposals(CeylonEditor editor,
Collection<ICompletionProposal> proposals, IDocument doc,
IFile file, Tree.CompilationUnit cu,
Tree.Declaration decNode, int currentOffset) {
if (decNode==null) return;
if (decNode.getAnnotationList()!=null) {
Integer stopIndex = decNode.getAnnotationList().getStopIndex();
if (stopIndex!=null && currentOffset<=stopIndex+1) {
return;
}
}
if (decNode instanceof Tree.TypedDeclaration) {
Tree.TypedDeclaration tdn = (Tree.TypedDeclaration) decNode;
if (tdn.getType()!=null) {
Integer stopIndex = tdn.getType().getStopIndex();
if (stopIndex!=null && currentOffset<=stopIndex+1) {
return;
}
}
}
if (decNode instanceof Tree.AttributeDeclaration) {
Tree.AttributeDeclaration attDecNode = (Tree.AttributeDeclaration) decNode;
Tree.SpecifierOrInitializerExpression se =
attDecNode.getSpecifierOrInitializerExpression();
if (se instanceof Tree.LazySpecifierExpression) {
addConvertToBlockProposal(doc, proposals, file, decNode);
}
else {
addConvertToGetterProposal(doc, proposals, file, attDecNode);
}
}
if (decNode instanceof Tree.MethodDeclaration) {
Tree.SpecifierOrInitializerExpression se =
((Tree.MethodDeclaration) decNode).getSpecifierExpression();
if (se instanceof Tree.LazySpecifierExpression) {
addConvertToBlockProposal(doc, proposals, file, decNode);
}
}
if (decNode instanceof Tree.AttributeSetterDefinition) {
Tree.SpecifierOrInitializerExpression se =
((Tree.AttributeSetterDefinition) decNode).getSpecifierExpression();
if (se instanceof Tree.LazySpecifierExpression) {
addConvertToBlockProposal(doc, proposals, file, decNode);
}
Tree.Block b = ((Tree.AttributeSetterDefinition) decNode).getBlock();
if (b!=null) {
addConvertToSpecifierProposal(doc, proposals, file, b);
}
}
if (decNode instanceof Tree.AttributeGetterDefinition) {
Tree.Block b = ((Tree.AttributeGetterDefinition) decNode).getBlock();
if (b!=null) {
addConvertToSpecifierProposal(doc, proposals, file, b);
}
}
if (decNode instanceof Tree.MethodDefinition) {
Tree.Block b = ((Tree.MethodDefinition) decNode).getBlock();
if (b!=null) {
addConvertToSpecifierProposal(doc, proposals, file, b);
}
}
}
private void addArgumentProposals(Collection<ICompletionProposal> proposals,
IDocument doc, IFile file, Tree.StatementOrArgument node) {
if (node instanceof Tree.MethodArgument) {
Tree.MethodArgument ma = (Tree.MethodArgument) node;
Tree.SpecifierOrInitializerExpression se =
ma.getSpecifierExpression();
if (se instanceof Tree.LazySpecifierExpression) {
addConvertToBlockProposal(doc, proposals, file, node);
}
Tree.Block b = ma.getBlock();
if (b!=null) {
addConvertToSpecifierProposal(doc, proposals, file, b);
}
}
if (node instanceof Tree.AttributeArgument) {
Tree.AttributeArgument aa = (Tree.AttributeArgument) node;
Tree.SpecifierOrInitializerExpression se =
aa.getSpecifierExpression();
if (se instanceof Tree.LazySpecifierExpression) {
addConvertToBlockProposal(doc, proposals, file, node);
}
Tree.Block b = aa.getBlock();
if (b!=null) {
addConvertToSpecifierProposal(doc, proposals, file, b);
}
}
if (node instanceof Tree.SpecifiedArgument) {
Tree.SpecifiedArgument sa = (Tree.SpecifiedArgument) node;
addFillInArgumentNameProposal(proposals, doc, file, sa);
}
}
private void addCreationProposals(Tree.CompilationUnit cu, final Node node,
ProblemLocation problem, Collection<ICompletionProposal> proposals,
IProject project, TypeChecker tc, IFile file) {
if (node instanceof Tree.MemberOrTypeExpression) {
addCreateProposals(cu, node, proposals, project, file);
}
else if (node instanceof Tree.SimpleType) {
class FindExtendedTypeExpressionVisitor extends Visitor {
Tree.InvocationExpression invocationExpression;
@Override
public void visit(Tree.ExtendedType that) {
super.visit(that);
if (that.getType()==node) {
invocationExpression = that.getInvocationExpression();
}
}
}
FindExtendedTypeExpressionVisitor v = new FindExtendedTypeExpressionVisitor();
v.visit(cu);
if (v.invocationExpression!=null) {
addCreateProposals(cu, v.invocationExpression.getPrimary(),
proposals, project, file);
}
}
//TODO: should we add this stuff back in??
/*else if (node instanceof Tree.BaseType) {
Tree.BaseType bt = (Tree.BaseType) node;
String brokenName = bt.getIdentifier().getText();
String idef = "interface " + brokenName + " {}";
String idesc = "interface '" + brokenName + "'";
String cdef = "class " + brokenName + "() {}";
String cdesc = "class '" + brokenName + "()'";
//addCreateLocalProposals(proposals, project, idef, idesc, INTERFACE, cu, bt);
addCreateLocalProposals(proposals, project, cdef, cdesc, CLASS, cu, bt, null, null);
addCreateToplevelProposals(proposals, project, idef, idesc, INTERFACE, cu, bt, null, null);
addCreateToplevelProposals(proposals, project, cdef, cdesc, CLASS, cu, bt, null, null);
CreateInNewUnitProposal.addCreateToplevelProposal(proposals, idef, idesc,
INTERFACE, file, brokenName, null, null);
CreateInNewUnitProposal.addCreateToplevelProposal(proposals, cdef, cdesc,
CLASS, file, brokenName, null, null);
}*/
if (node instanceof Tree.BaseType) {
Tree.BaseType bt = (Tree.BaseType) node;
String brokenName = bt.getIdentifier().getText();
addCreateTypeParameterProposal(proposals, project, cu, bt, brokenName);
}
}
} | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_CeylonCorrectionProcessor.java |
1,386 | @XmlRootElement(name = "BroadleafEnumerationTypeWrapper")
@XmlAccessorType(value = XmlAccessType.FIELD)
public class BroadleafEnumerationTypeWrapper extends BaseWrapper implements APIWrapper<BroadleafEnumerationType> {
@XmlElement
protected String friendlyName;
@XmlElement
protected String type;
@Override
public void wrapDetails(BroadleafEnumerationType model, HttpServletRequest request) {
if (model == null) return;
this.friendlyName = model.getFriendlyType();
this.type = model.getType();
}
@Override
public void wrapSummary(BroadleafEnumerationType model, HttpServletRequest request) {
wrapDetails(model, request);
}
} | 0true
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_api_wrapper_BroadleafEnumerationTypeWrapper.java |
1,378 | public final class CacheEnvironment {
public static final String CONFIG_FILE_PATH_LEGACY = Environment.CACHE_PROVIDER_CONFIG;
public static final String CONFIG_FILE_PATH = "hibernate.cache.hazelcast.configuration_file_path";
public static final String USE_NATIVE_CLIENT = "hibernate.cache.hazelcast.use_native_client";
public static final String NATIVE_CLIENT_ADDRESS = "hibernate.cache.hazelcast.native_client_address";
public static final String NATIVE_CLIENT_GROUP = "hibernate.cache.hazelcast.native_client_group";
public static final String NATIVE_CLIENT_PASSWORD = "hibernate.cache.hazelcast.native_client_password";
public static final String SHUTDOWN_ON_STOP = "hibernate.cache.hazelcast.shutdown_on_session_factory_close";
public static final String LOCK_TIMEOUT = "hibernate.cache.hazelcast.lock_timeout";
public static final String HAZELCAST_INSTANCE_NAME = "hibernate.cache.hazelcast.instance_name";
private static final int MAXIMUM_LOCK_TIMEOUT = 10000; // milliseconds
private final static int DEFAULT_CACHE_TIMEOUT = (3600 * 1000); // one hour in milliseconds
public static final String EXPLICIT_VERSION_CHECK = "hibernate.cache.hazelcast.explicit_version_check";
private CacheEnvironment() {
}
public static String getConfigFilePath(Properties props) {
String configResourcePath = ConfigurationHelper.getString(CacheEnvironment.CONFIG_FILE_PATH_LEGACY, props, null);
if (StringHelper.isEmpty(configResourcePath)) {
configResourcePath = ConfigurationHelper.getString(CacheEnvironment.CONFIG_FILE_PATH, props, null);
}
return configResourcePath;
}
public static String getInstanceName(Properties props) {
return ConfigurationHelper.getString(HAZELCAST_INSTANCE_NAME, props, null);
}
public static boolean isNativeClient(Properties props) {
return ConfigurationHelper.getBoolean(CacheEnvironment.USE_NATIVE_CLIENT, props, false);
}
public static int getDefaultCacheTimeoutInMillis() {
return DEFAULT_CACHE_TIMEOUT;
}
public static int getLockTimeoutInMillis(Properties props) {
int timeout = -1;
try {
timeout = ConfigurationHelper.getInt(LOCK_TIMEOUT, props, -1);
} catch (Exception ignored) {
}
if (timeout < 0) {
timeout = MAXIMUM_LOCK_TIMEOUT;
}
return timeout;
}
public static boolean shutdownOnStop(Properties props, boolean defaultValue) {
return ConfigurationHelper.getBoolean(CacheEnvironment.SHUTDOWN_ON_STOP, props, defaultValue);
}
public static boolean isExplicitVersionCheckEnabled(Properties props) {
return ConfigurationHelper.getBoolean(CacheEnvironment.EXPLICIT_VERSION_CHECK, props, false);
}
} | 0true
| hazelcast-hibernate_hazelcast-hibernate4_src_main_java_com_hazelcast_hibernate_CacheEnvironment.java |
2,024 | @Service("blUserDetailsService")
public class UserDetailsServiceImpl implements UserDetailsService {
@Resource(name = "blCustomerService")
protected CustomerService customerService;
@Resource(name = "blRoleService")
protected RoleService roleService;
@Override
public UserDetails loadUserByUsername(String username) throws UsernameNotFoundException, DataAccessException {
Customer customer = customerService.readCustomerByUsername(username);
if (customer == null) {
throw new UsernameNotFoundException("The customer was not found");
}
List<GrantedAuthority> grantedAuthorities = createGrantedAuthorities(roleService.findCustomerRolesByCustomerId(customer.getId()));
return new CustomerUserDetails(customer.getId(), username, customer.getPassword(), !customer.isDeactivated(), true, !customer.isPasswordChangeRequired(), true, grantedAuthorities);
}
protected List<GrantedAuthority> createGrantedAuthorities(List<CustomerRole> customerRoles) {
boolean roleUserFound = false;
List<GrantedAuthority> grantedAuthorities = new ArrayList<GrantedAuthority>();
for (CustomerRole role : customerRoles) {
grantedAuthorities.add(new SimpleGrantedAuthority(role.getRoleName()));
if (role.getRoleName().equals("ROLE_USER")) {
roleUserFound = true;
}
}
if (!roleUserFound) {
grantedAuthorities.add(new SimpleGrantedAuthority("ROLE_USER"));
}
return grantedAuthorities;
}
} | 1no label
| core_broadleaf-profile_src_main_java_org_broadleafcommerce_profile_core_service_UserDetailsServiceImpl.java |
920 | public class QuerySourceBuilder implements ToXContent {
private QueryBuilder queryBuilder;
private BytesReference queryBinary;
public QuerySourceBuilder setQuery(QueryBuilder query) {
this.queryBuilder = query;
return this;
}
public QuerySourceBuilder setQuery(BytesReference queryBinary) {
this.queryBinary = queryBinary;
return this;
}
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
if (queryBuilder != null) {
builder.field("query");
queryBuilder.toXContent(builder, params);
}
if (queryBinary != null) {
if (XContentFactory.xContentType(queryBinary) == builder.contentType()) {
builder.rawField("query", queryBinary);
} else {
builder.field("query_binary", queryBinary);
}
}
builder.endObject();
return builder;
}
public BytesReference buildAsBytes(XContentType contentType) throws SearchSourceBuilderException {
try {
XContentBuilder builder = XContentFactory.contentBuilder(contentType);
toXContent(builder, ToXContent.EMPTY_PARAMS);
return builder.bytes();
} catch (Exception e) {
throw new SearchSourceBuilderException("Failed to build search source", e);
}
}
} | 0true
| src_main_java_org_elasticsearch_action_support_QuerySourceBuilder.java |
3,735 | public class RootObjectMapper extends ObjectMapper {
public static class Defaults {
public static final FormatDateTimeFormatter[] DYNAMIC_DATE_TIME_FORMATTERS =
new FormatDateTimeFormatter[]{
DateFieldMapper.Defaults.DATE_TIME_FORMATTER,
Joda.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd")
};
public static final boolean DATE_DETECTION = true;
public static final boolean NUMERIC_DETECTION = false;
}
public static class Builder extends ObjectMapper.Builder<Builder, RootObjectMapper> {
protected final List<DynamicTemplate> dynamicTemplates = newArrayList();
// we use this to filter out seen date formats, because we might get duplicates during merging
protected Set<String> seenDateFormats = Sets.newHashSet();
protected List<FormatDateTimeFormatter> dynamicDateTimeFormatters = newArrayList();
protected boolean dateDetection = Defaults.DATE_DETECTION;
protected boolean numericDetection = Defaults.NUMERIC_DETECTION;
public Builder(String name) {
super(name);
this.builder = this;
}
public Builder noDynamicDateTimeFormatter() {
this.dynamicDateTimeFormatters = null;
return builder;
}
public Builder dynamicDateTimeFormatter(Iterable<FormatDateTimeFormatter> dateTimeFormatters) {
for (FormatDateTimeFormatter dateTimeFormatter : dateTimeFormatters) {
if (!seenDateFormats.contains(dateTimeFormatter.format())) {
seenDateFormats.add(dateTimeFormatter.format());
this.dynamicDateTimeFormatters.add(dateTimeFormatter);
}
}
return builder;
}
public Builder add(DynamicTemplate dynamicTemplate) {
this.dynamicTemplates.add(dynamicTemplate);
return this;
}
public Builder add(DynamicTemplate... dynamicTemplate) {
for (DynamicTemplate template : dynamicTemplate) {
this.dynamicTemplates.add(template);
}
return this;
}
@Override
protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, ContentPath.Type pathType, Map<String, Mapper> mappers) {
assert !nested.isNested();
FormatDateTimeFormatter[] dates = null;
if (dynamicDateTimeFormatters == null) {
dates = new FormatDateTimeFormatter[0];
} else if (dynamicDateTimeFormatters.isEmpty()) {
// add the default one
dates = Defaults.DYNAMIC_DATE_TIME_FORMATTERS;
} else {
dates = dynamicDateTimeFormatters.toArray(new FormatDateTimeFormatter[dynamicDateTimeFormatters.size()]);
}
return new RootObjectMapper(name, enabled, dynamic, pathType, mappers,
dates,
dynamicTemplates.toArray(new DynamicTemplate[dynamicTemplates.size()]),
dateDetection, numericDetection);
}
}
public static class TypeParser extends ObjectMapper.TypeParser {
@Override
protected ObjectMapper.Builder createBuilder(String name) {
return new Builder(name);
}
@Override
protected void processField(ObjectMapper.Builder builder, String fieldName, Object fieldNode) {
if (fieldName.equals("date_formats") || fieldName.equals("dynamic_date_formats")) {
List<FormatDateTimeFormatter> dateTimeFormatters = newArrayList();
if (fieldNode instanceof List) {
for (Object node1 : (List) fieldNode) {
dateTimeFormatters.add(parseDateTimeFormatter(fieldName, node1));
}
} else if ("none".equals(fieldNode.toString())) {
dateTimeFormatters = null;
} else {
dateTimeFormatters.add(parseDateTimeFormatter(fieldName, fieldNode));
}
if (dateTimeFormatters == null) {
((Builder) builder).noDynamicDateTimeFormatter();
} else {
((Builder) builder).dynamicDateTimeFormatter(dateTimeFormatters);
}
} else if (fieldName.equals("dynamic_templates")) {
// "dynamic_templates" : [
// {
// "template_1" : {
// "match" : "*_test",
// "match_mapping_type" : "string",
// "mapping" : { "type" : "string", "store" : "yes" }
// }
// }
// ]
List tmplNodes = (List) fieldNode;
for (Object tmplNode : tmplNodes) {
Map<String, Object> tmpl = (Map<String, Object>) tmplNode;
if (tmpl.size() != 1) {
throw new MapperParsingException("A dynamic template must be defined with a name");
}
Map.Entry<String, Object> entry = tmpl.entrySet().iterator().next();
((Builder) builder).add(DynamicTemplate.parse(entry.getKey(), (Map<String, Object>) entry.getValue()));
}
} else if (fieldName.equals("date_detection")) {
((Builder) builder).dateDetection = nodeBooleanValue(fieldNode);
} else if (fieldName.equals("numeric_detection")) {
((Builder) builder).numericDetection = nodeBooleanValue(fieldNode);
}
}
}
private final FormatDateTimeFormatter[] dynamicDateTimeFormatters;
private final boolean dateDetection;
private final boolean numericDetection;
private volatile DynamicTemplate dynamicTemplates[];
RootObjectMapper(String name, boolean enabled, Dynamic dynamic, ContentPath.Type pathType, Map<String, Mapper> mappers,
FormatDateTimeFormatter[] dynamicDateTimeFormatters, DynamicTemplate dynamicTemplates[], boolean dateDetection, boolean numericDetection) {
super(name, name, enabled, Nested.NO, dynamic, pathType, mappers);
this.dynamicTemplates = dynamicTemplates;
this.dynamicDateTimeFormatters = dynamicDateTimeFormatters;
this.dateDetection = dateDetection;
this.numericDetection = numericDetection;
}
public boolean dateDetection() {
return this.dateDetection;
}
public boolean numericDetection() {
return this.numericDetection;
}
public FormatDateTimeFormatter[] dynamicDateTimeFormatters() {
return dynamicDateTimeFormatters;
}
public Mapper.Builder findTemplateBuilder(ParseContext context, String name, String dynamicType) {
return findTemplateBuilder(context, name, dynamicType, dynamicType);
}
public Mapper.Builder findTemplateBuilder(ParseContext context, String name, String dynamicType, String matchType) {
DynamicTemplate dynamicTemplate = findTemplate(context.path(), name, matchType);
if (dynamicTemplate == null) {
return null;
}
Mapper.TypeParser.ParserContext parserContext = context.docMapperParser().parserContext();
String mappingType = dynamicTemplate.mappingType(dynamicType);
Mapper.TypeParser typeParser = parserContext.typeParser(mappingType);
if (typeParser == null) {
throw new MapperParsingException("failed to find type parsed [" + mappingType + "] for [" + name + "]");
}
return typeParser.parse(name, dynamicTemplate.mappingForName(name, dynamicType), parserContext);
}
public DynamicTemplate findTemplate(ContentPath path, String name, String matchType) {
for (DynamicTemplate dynamicTemplate : dynamicTemplates) {
if (dynamicTemplate.match(path, name, matchType)) {
return dynamicTemplate;
}
}
return null;
}
@Override
protected boolean allowValue() {
return true;
}
@Override
protected void doMerge(ObjectMapper mergeWith, MergeContext mergeContext) {
RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith;
if (!mergeContext.mergeFlags().simulate()) {
// merge them
List<DynamicTemplate> mergedTemplates = Lists.newArrayList(Arrays.asList(this.dynamicTemplates));
for (DynamicTemplate template : mergeWithObject.dynamicTemplates) {
boolean replaced = false;
for (int i = 0; i < mergedTemplates.size(); i++) {
if (mergedTemplates.get(i).name().equals(template.name())) {
mergedTemplates.set(i, template);
replaced = true;
}
}
if (!replaced) {
mergedTemplates.add(template);
}
}
this.dynamicTemplates = mergedTemplates.toArray(new DynamicTemplate[mergedTemplates.size()]);
}
}
@Override
protected void doXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
if (dynamicDateTimeFormatters != Defaults.DYNAMIC_DATE_TIME_FORMATTERS) {
if (dynamicDateTimeFormatters.length > 0) {
builder.startArray("dynamic_date_formats");
for (FormatDateTimeFormatter dateTimeFormatter : dynamicDateTimeFormatters) {
builder.value(dateTimeFormatter.format());
}
builder.endArray();
}
}
if (dynamicTemplates != null && dynamicTemplates.length > 0) {
builder.startArray("dynamic_templates");
for (DynamicTemplate dynamicTemplate : dynamicTemplates) {
builder.startObject();
builder.field(dynamicTemplate.name());
builder.map(dynamicTemplate.conf());
builder.endObject();
}
builder.endArray();
}
if (dateDetection != Defaults.DATE_DETECTION) {
builder.field("date_detection", dateDetection);
}
if (numericDetection != Defaults.NUMERIC_DETECTION) {
builder.field("numeric_detection", numericDetection);
}
}
} | 1no label
| src_main_java_org_elasticsearch_index_mapper_object_RootObjectMapper.java |
366 | private class RowIterator implements KeyIterator {
private final HTableInterface table;
private final Iterator<Result> rows;
private Result currentRow;
private boolean isClosed;
public RowIterator(HTableInterface table, ResultScanner rows) {
this.table = table;
this.rows = Iterators.filter(rows.iterator(), new Predicate<Result>() {
@Override
public boolean apply(@Nullable Result result) {
if (result == null)
return false;
try {
StaticBuffer id = StaticArrayBuffer.of(result.getRow());
id.getLong(0);
} catch (NumberFormatException e) {
return false;
}
return true;
}
});
}
@Override
public RecordIterator<Entry> getEntries() {
ensureOpen();
return new RecordIterator<Entry>() {
private final Iterator<Map.Entry<byte[], NavigableMap<Long, byte[]>>> kv = currentRow.getMap().get(columnFamilyBytes).entrySet().iterator();
@Override
public boolean hasNext() {
ensureOpen();
return kv.hasNext();
}
@Override
public Entry next() {
ensureOpen();
return StaticArrayEntry.ofBytes(kv.next(), entryGetter);
}
@Override
public void close() {
isClosed = true;
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
@Override
public boolean hasNext() {
ensureOpen();
return rows.hasNext();
}
@Override
public StaticBuffer next() {
ensureOpen();
currentRow = rows.next();
return StaticArrayBuffer.of(currentRow.getRow());
}
@Override
public void close() {
IOUtils.closeQuietly(table);
isClosed = true;
logger.debug("RowIterator closed table {}", table);
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
private void ensureOpen() {
if (isClosed)
throw new IllegalStateException("Iterator has been closed.");
}
} | 0true
| titan-hbase-parent_titan-hbase-core_src_main_java_com_thinkaurelius_titan_diskstorage_hbase_HBaseKeyColumnValueStore.java |
417 | trackedList.addChangeListener(new OMultiValueChangeListener<Integer, String>() {
public void onAfterRecordChanged(final OMultiValueChangeEvent<Integer, String> event) {
firedEvents.add(event);
}
}); | 0true
| core_src_test_java_com_orientechnologies_orient_core_db_record_TrackedListTest.java |
31 | public class ClusterClient extends LifecycleAdapter
implements ClusterMonitor, Cluster, AtomicBroadcast, Snapshot, Election, BindingNotifier
{
public static final Setting<Long> clusterJoinTimeout = Settings.setting("ha.cluster_join_timeout", Settings.DURATION, "0s");
public interface Configuration
{
int getServerId();
HostnamePort getAddress();
List<HostnamePort> getInitialHosts();
String getClusterName();
boolean isAllowedToCreateCluster();
// Cluster timeout settings
long defaultTimeout(); // default is 5s
long heartbeatInterval(); // inherits defaultTimeout
long heartbeatTimeout(); // heartbeatInterval * 2 by default
long broadcastTimeout(); // default is 30s
long learnTimeout(); // inherits defaultTimeout
long paxosTimeout(); // inherits defaultTimeout
long phase1Timeout(); // inherits paxosTimeout
long phase2Timeout(); // inherits paxosTimeout
long joinTimeout(); // inherits defaultTimeout
long configurationTimeout(); // inherits defaultTimeout
long leaveTimeout(); // inherits paxosTimeout
long electionTimeout(); // inherits paxosTimeout
long clusterJoinTimeout(); // Whether to timeout the whole process or not
String name(); // Cluster client name, if any
}
public static Configuration adapt( final Config config )
{
return new Configuration()
{
@Override
public int getServerId()
{
return config.get( ClusterSettings.server_id );
}
@Override
public List<HostnamePort> getInitialHosts()
{
return config.get( ClusterSettings.initial_hosts );
}
@Override
public String getClusterName()
{
return config.get( ClusterSettings.cluster_name );
}
@Override
public HostnamePort getAddress()
{
return config.get( ClusterSettings.cluster_server );
}
@Override
public boolean isAllowedToCreateCluster()
{
return config.get( ClusterSettings.allow_init_cluster );
}
// Timeouts
@Override
public long defaultTimeout()
{
return config.get( ClusterSettings.default_timeout );
}
@Override
public long heartbeatTimeout()
{
return config.get( ClusterSettings.heartbeat_timeout );
}
@Override
public long heartbeatInterval()
{
return config.get( ClusterSettings.heartbeat_interval );
}
@Override
public long joinTimeout()
{
return config.get( ClusterSettings.join_timeout );
}
@Override
public long configurationTimeout()
{
return config.get( ClusterSettings.configuration_timeout );
}
@Override
public long leaveTimeout()
{
return config.get( ClusterSettings.leave_timeout );
}
@Override
public long electionTimeout()
{
return config.get( ClusterSettings.election_timeout );
}
@Override
public long broadcastTimeout()
{
return config.get( ClusterSettings.broadcast_timeout );
}
@Override
public long paxosTimeout()
{
return config.get( ClusterSettings.paxos_timeout );
}
@Override
public long phase1Timeout()
{
return config.get( ClusterSettings.phase1_timeout );
}
@Override
public long phase2Timeout()
{
return config.get( ClusterSettings.phase2_timeout );
}
@Override
public long learnTimeout()
{
return config.get( ClusterSettings.learn_timeout );
}
@Override
public long clusterJoinTimeout()
{
return config.get(clusterJoinTimeout);
}
@Override
public String name()
{
return config.get( ClusterSettings.instance_name );
}
};
}
private final LifeSupport life = new LifeSupport();
private final Cluster cluster;
private final AtomicBroadcast broadcast;
private final Heartbeat heartbeat;
private final Snapshot snapshot;
private final Election election;
private final ProtocolServer server;
public ClusterClient( final Configuration config, final Logging logging,
ElectionCredentialsProvider electionCredentialsProvider,
ObjectInputStreamFactory objectInputStreamFactory,
ObjectOutputStreamFactory objectOutputStreamFactory )
{
MessageTimeoutStrategy timeoutStrategy = new MessageTimeoutStrategy(
new FixedTimeoutStrategy( config.defaultTimeout() ) )
.timeout( HeartbeatMessage.sendHeartbeat, config.heartbeatInterval() )
.timeout( HeartbeatMessage.timed_out, config.heartbeatTimeout() )
.timeout( AtomicBroadcastMessage.broadcastTimeout, config.broadcastTimeout() )
.timeout( LearnerMessage.learnTimedout, config.learnTimeout() )
.timeout( ProposerMessage.phase1Timeout, config.phase1Timeout() )
.timeout( ProposerMessage.phase2Timeout, config.phase2Timeout() )
.timeout( ClusterMessage.joiningTimeout, config.joinTimeout() )
.timeout( ClusterMessage.configurationTimeout, config.configurationTimeout() )
.timeout( ClusterMessage.leaveTimedout, config.leaveTimeout() )
.timeout( ElectionMessage.electionTimeout, config.electionTimeout() );
MultiPaxosServerFactory protocolServerFactory = new MultiPaxosServerFactory(
new ClusterConfiguration( config
.getClusterName(), logging.getMessagesLog( ClusterConfiguration.class ) ), logging );
InMemoryAcceptorInstanceStore acceptorInstanceStore = new InMemoryAcceptorInstanceStore();
InternalLoggerFactory.setDefaultFactory( new NettyLoggerFactory(logging) );
NetworkReceiver receiver = new NetworkReceiver( new NetworkReceiver.Configuration()
{
@Override
public HostnamePort clusterServer()
{
return config.getAddress();
}
@Override
public int defaultPort()
{
return 5001;
}
@Override
public String name()
{
return config.name();
}
}, logging );
NetworkSender sender = new NetworkSender(new NetworkSender.Configuration()
{
@Override
public int defaultPort()
{
return 5001;
}
@Override
public int port()
{
return config.getAddress().getPort();
}
}, receiver, logging);
ExecutorLifecycleAdapter stateMachineExecutor = new ExecutorLifecycleAdapter( new Factory<ExecutorService>()
{
@Override
public ExecutorService newInstance()
{
return Executors.newSingleThreadExecutor( new NamedThreadFactory( "State machine" ) );
}
} );
server = protocolServerFactory.newProtocolServer( new InstanceId( config.getServerId() ), timeoutStrategy,
receiver, sender,
acceptorInstanceStore, electionCredentialsProvider, stateMachineExecutor, objectInputStreamFactory, objectOutputStreamFactory );
receiver.addNetworkChannelsListener( new NetworkReceiver.NetworkChannelsListener()
{
volatile private StateTransitionLogger logger = null;
@Override
public void listeningAt( URI me )
{
server.listeningAt( me );
if (logger == null)
{
logger = new StateTransitionLogger( logging );
server.addStateTransitionListener( logger );
}
}
@Override
public void channelOpened( URI to )
{
}
@Override
public void channelClosed( URI to )
{
}
} );
life.add( sender );
life.add( stateMachineExecutor );
life.add( receiver );
// Timeout timer - triggers every 10 ms
life.add( new TimeoutTrigger() );
life.add( new ClusterJoin( new ClusterJoin.Configuration()
{
@Override
public List<HostnamePort> getInitialHosts()
{
return config.getInitialHosts();
}
@Override
public String getClusterName()
{
return config.getClusterName();
}
@Override
public boolean isAllowedToCreateCluster()
{
return config.isAllowedToCreateCluster();
}
@Override
public long getClusterJoinTimeout()
{
return config.clusterJoinTimeout();
}
}, server, logging ) );
cluster = server.newClient( Cluster.class );
broadcast = server.newClient( AtomicBroadcast.class );
heartbeat = server.newClient( Heartbeat.class );
snapshot = server.newClient( Snapshot.class );
election = server.newClient( Election.class );
}
@Override
public void init() throws Throwable
{
life.init();
}
@Override
public void start() throws Throwable
{
life.start();
}
@Override
public void stop() throws Throwable
{
life.stop();
}
@Override
public void broadcast( Payload payload )
{
broadcast.broadcast( payload );
}
@Override
public void addAtomicBroadcastListener( AtomicBroadcastListener listener )
{
broadcast.addAtomicBroadcastListener( listener );
}
@Override
public void removeAtomicBroadcastListener( AtomicBroadcastListener listener )
{
broadcast.removeAtomicBroadcastListener( listener );
}
@Override
public void create( String clusterName )
{
cluster.create( clusterName );
}
@Override
public Future<ClusterConfiguration> join( String clusterName, URI... otherServerUrls )
{
return cluster.join( clusterName, otherServerUrls );
}
@Override
public void leave()
{
cluster.leave();
}
@Override
public void addClusterListener( ClusterListener listener )
{
cluster.addClusterListener( listener );
}
@Override
public void removeClusterListener( ClusterListener listener )
{
cluster.removeClusterListener( listener );
}
@Override
public void addHeartbeatListener( HeartbeatListener listener )
{
heartbeat.addHeartbeatListener( listener );
}
@Override
public void removeHeartbeatListener( HeartbeatListener listener )
{
heartbeat.removeHeartbeatListener( listener );
}
@Override
public void demote( InstanceId node )
{
election.demote( node );
}
@Override
public void performRoleElections()
{
election.performRoleElections();
}
@Override
public void promote( InstanceId node, String role )
{
election.promote( node, role );
}
@Override
public void setSnapshotProvider( SnapshotProvider snapshotProvider )
{
snapshot.setSnapshotProvider( snapshotProvider );
}
@Override
public void refreshSnapshot()
{
snapshot.refreshSnapshot();
}
public void addBindingListener( BindingListener bindingListener )
{
server.addBindingListener( bindingListener );
}
@Override
public void removeBindingListener( BindingListener listener )
{
server.removeBindingListener( listener );
}
public void dumpDiagnostics( StringBuilder appendTo )
{
StateMachines stateMachines = server.getStateMachines();
for ( StateMachine stateMachine : stateMachines.getStateMachines() )
{
appendTo.append( " " ).append( stateMachine.getMessageType().getSimpleName() ).append( ":" )
.append( stateMachine.getState().toString() ).append( "\n" );
}
appendTo.append( "Current timeouts:\n" );
for ( Map.Entry<Object, Timeouts.Timeout> objectTimeoutEntry : stateMachines.getTimeouts().getTimeouts()
.entrySet() )
{
appendTo.append( objectTimeoutEntry.getKey().toString() ).append( ":" )
.append( objectTimeoutEntry.getValue().getTimeoutMessage().toString() );
}
}
public InstanceId getServerId()
{
return server.getServerId();
}
public URI getClusterServer()
{
return server.boundAt();
}
public class TimeoutTrigger implements Lifecycle
{
private ScheduledExecutorService scheduler;
private ScheduledFuture<?> tickFuture;
@Override
public void init() throws Throwable
{
server.getTimeouts().tick( System.currentTimeMillis() );
}
@Override
public void start() throws Throwable
{
scheduler = Executors.newSingleThreadScheduledExecutor(
new DaemonThreadFactory( "timeout-clusterClient" ) );
tickFuture = scheduler.scheduleWithFixedDelay( new Runnable()
{
@Override
public void run()
{
long now = System.currentTimeMillis();
server.getTimeouts().tick( now );
}
}, 0, 10, TimeUnit.MILLISECONDS );
}
@Override
public void stop() throws Throwable
{
tickFuture.cancel( true );
scheduler.shutdownNow();
}
@Override
public void shutdown() throws Throwable
{
}
}
} | 1no label
| enterprise_cluster_src_main_java_org_neo4j_cluster_client_ClusterClient.java |
777 | public interface OObjectIteratorClassInterface<T> extends Iterator<T>, Iterable<T> {
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_iterator_object_OObjectIteratorClassInterface.java |
64 | soa.visit(new Visitor() {
@Override
public void visit(Tree.SimpleType that) {
super.visit(that);
determineSatisfiedTypesTypeParams(typeDec, that, stTypeParams);
}
@Override
public void visit(Tree.StaticMemberOrTypeExpression that) {
super.visit(that);
determineSatisfiedTypesTypeParams(typeDec, that, stTypeParams);
}
}); | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_AddSatisfiesProposal.java |
117 | private static class VerificationLogHook extends LogHookAdapter<LogEntry>
{
private final Set<Xid> startXids = new HashSet<>();
@Override
public boolean accept( LogEntry item )
{
if ( item instanceof LogEntry.Start )
assertTrue( startXids.add( ((LogEntry.Start) item).getXid() ) );
return true;
}
} | 0true
| community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestInjectMultipleStartEntries.java |
1,116 | @Deprecated
public interface LegacyCartService extends LegacyOrderService {
Order addAllItemsToCartFromNamedOrder(Order namedOrder) throws PricingException;
Order addAllItemsToCartFromNamedOrder(Order namedOrder, boolean priceOrder) throws PricingException;
OrderItem moveItemToCartFromNamedOrder(Order order, OrderItem orderItem) throws PricingException;
OrderItem moveItemToCartFromNamedOrder(Order order, OrderItem orderItem, boolean priceOrder) throws PricingException;
OrderItem moveItemToCartFromNamedOrder(Long customerId, String orderName, Long orderItemId, Integer quantity) throws PricingException;
OrderItem moveItemToCartFromNamedOrder(Long customerId, String orderName, Long orderItemId, Integer quantity, boolean priceOrder) throws PricingException;
Order moveAllItemsToCartFromNamedOrder(Order namedOrder) throws PricingException;
Order moveAllItemsToCartFromNamedOrder(Order namedOrder, boolean priceOrder) throws PricingException;
/**
* Merge the anonymous cart with the customer's cart taking into
* consideration sku activation
* @param customer the customer whose cart is to be merged
* @param anonymousCartId the anonymous cart id
* @return the response containing the cart, any items added to the cart,
* and any items removed from the cart
*/
public MergeCartResponse mergeCart(Customer customer, Order anonymousCart, boolean priceOrder) throws PricingException;
public MergeCartResponse mergeCart(Customer customer, Order anonymousCart) throws PricingException;
/**
* Reconstruct the cart using previous stored state taking into
* consideration sku activation
* @param customer the customer whose cart is to be reconstructed
* @return the response containing the cart and any items removed from the
* cart
*/
public ReconstructCartResponse reconstructCart(Customer customer, boolean priceOrder) throws PricingException;
public ReconstructCartResponse reconstructCart(Customer customer) throws PricingException;
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_service_legacy_LegacyCartService.java |
1,358 | public class ThirdPartyInteractionLatencySimulationActivity extends BaseActivity<ProcessContext> {
private long waitTime = 1000L;
@Override
public ProcessContext execute(ProcessContext context) throws Exception {
try {
Thread.sleep(waitTime);
} catch (Throwable e) {
//do nothing
}
return context;
}
public long getWaitTime() {
return waitTime;
}
public void setWaitTime(long waitTime) {
this.waitTime = waitTime;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_util_ThirdPartyInteractionLatencySimulationActivity.java |
1,451 | public class TimestampsRegionCache extends LocalRegionCache implements RegionCache {
public TimestampsRegionCache(final String name, final HazelcastInstance hazelcastInstance) {
super(name, hazelcastInstance, null);
}
@Override
public boolean put(Object key, Object value, Object currentVersion) {
return update(key, value, currentVersion, null, null);
}
@Override
protected MessageListener<Object> createMessageListener() {
return new MessageListener<Object>() {
public void onMessage(final Message<Object> message) {
final Timestamp ts = (Timestamp) message.getMessageObject();
final Object key = ts.getKey();
for (;;) {
final Value value = cache.get(key);
final Long current = value != null ? (Long) value.getValue() : null;
if (current != null) {
if (ts.getTimestamp() > current) {
if (cache.replace(key, value, new Value(value.getVersion(),
ts.getTimestamp(), Clock.currentTimeMillis()))) {
return;
}
} else {
return;
}
} else {
if (cache.putIfAbsent(key, new Value(null, ts.getTimestamp(),
Clock.currentTimeMillis())) == null) {
return;
}
}
}
}
};
}
@Override
protected Object createMessage(final Object key, final Object value, final Object currentVersion) {
return new Timestamp(key, (Long) value);
}
final void cleanup() {
}
} | 1no label
| hazelcast-hibernate_hazelcast-hibernate3_src_main_java_com_hazelcast_hibernate_local_TimestampsRegionCache.java |
2,990 | public class NoneFilterCache extends AbstractIndexComponent implements FilterCache {
@Inject
public NoneFilterCache(Index index, @IndexSettings Settings indexSettings) {
super(index, indexSettings);
logger.debug("Using no filter cache");
}
@Override
public void setIndexService(IndexService indexService) {
// nothing to do here...
}
@Override
public String type() {
return "none";
}
@Override
public void close() {
// nothing to do here
}
@Override
public Filter cache(Filter filterToCache) {
return filterToCache;
}
@Override
public void clear(String reason) {
// nothing to do here
}
@Override
public void clear(String reason, String[] keys) {
// nothing to do there
}
@Override
public void clear(Object reader) {
// nothing to do here
}
} | 0true
| src_main_java_org_elasticsearch_index_cache_filter_none_NoneFilterCache.java |
44 | @Entity
public class PersistentBufferObject {
@PrimaryKey
private PersistentBufferKey key;
private Map<String, String> data;
public PersistentBufferKey getKey() {
return key;
}
public void setKey(PersistentBufferKey key) {
this.key = key;
}
public Map<String, String> getData() {
return data;
}
public void setData(Map<String, String> data) {
this.data = data;
}
} | 0true
| timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_PersistentBufferObject.java |
208 | protected class DeletePreviousSubWordAction extends PreviousSubWordAction implements IUpdate {
/**
* Creates a new delete previous sub-word action.
*/
public DeletePreviousSubWordAction() {
super(ST.DELETE_WORD_PREVIOUS);
}
@Override
protected void setCaretPosition(int position) {
if (!validateEditorInputState())
return;
final int length;
final ISourceViewer viewer= getSourceViewer();
StyledText text= viewer.getTextWidget();
Point widgetSelection= text.getSelection();
if (isBlockSelectionModeEnabled() && widgetSelection.y != widgetSelection.x) {
final int caret= text.getCaretOffset();
final int offset= modelOffset2WidgetOffset(viewer, position);
if (caret == widgetSelection.x)
text.setSelectionRange(widgetSelection.y, offset - widgetSelection.y);
else
text.setSelectionRange(widgetSelection.x, offset - widgetSelection.x);
text.invokeAction(ST.DELETE_PREVIOUS);
} else {
Point selection= viewer.getSelectedRange();
if (selection.y != 0) {
position= selection.x;
length= selection.y;
} else {
length= widgetOffset2ModelOffset(viewer, text.getCaretOffset()) - position;
}
try {
viewer.getDocument().replace(position, length, ""); //$NON-NLS-1$
} catch (BadLocationException exception) {
// Should not happen
}
}
}
public void update() {
setEnabled(isEditorInputModifiable());
}
} | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_editor_CeylonEditor.java |
267 | public class FailingCallable implements Callable<String>, DataSerializable {
public String call() throws Exception {
throw new IllegalStateException();
}
public void writeData(ObjectDataOutput out) throws IOException {
}
public void readData(ObjectDataInput in) throws IOException {
}
} | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_executor_tasks_FailingCallable.java |
1,085 | public class NearCacheConfigReadOnly extends NearCacheConfig {
public NearCacheConfigReadOnly(NearCacheConfig config) {
super(config);
}
public void setName(String name) {
throw new UnsupportedOperationException("This config is read-only");
}
public NearCacheConfig setTimeToLiveSeconds(int timeToLiveSeconds) {
throw new UnsupportedOperationException("This config is read-only");
}
public NearCacheConfig setMaxSize(int maxSize) {
throw new UnsupportedOperationException("This config is read-only");
}
public NearCacheConfig setEvictionPolicy(String evictionPolicy) {
throw new UnsupportedOperationException("This config is read-only");
}
public NearCacheConfig setMaxIdleSeconds(int maxIdleSeconds) {
throw new UnsupportedOperationException("This config is read-only");
}
public NearCacheConfig setInvalidateOnChange(boolean invalidateOnChange) {
throw new UnsupportedOperationException("This config is read-only");
}
public NearCacheConfig setInMemoryFormat(InMemoryFormat inMemoryFormat) {
throw new UnsupportedOperationException("This config is read-only");
}
public NearCacheConfig setInMemoryFormat(String inMemoryFormat) {
throw new UnsupportedOperationException("This config is read-only");
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_config_NearCacheConfigReadOnly.java |
987 | private class TransportHandler extends BaseTransportRequestHandler<Request> {
@Override
public Request newInstance() {
return newRequestInstance();
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
@Override
public void messageReceived(final Request request, final TransportChannel channel) throws Exception {
// no need for a threaded listener, since we just send a response
request.listenerThreaded(false);
execute(request, new ActionListener<Response>() {
@Override
public void onResponse(Response result) {
try {
channel.sendResponse(result);
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(e);
} catch (Exception e1) {
logger.warn("Failed to send error response for action [" + transportAction + "] and request [" + request + "]", e1);
}
}
});
}
} | 0true
| src_main_java_org_elasticsearch_action_support_replication_TransportIndicesReplicationOperationAction.java |
1,602 | public class SimpleHashFunction implements HashFunction {
@Override
public int hash(String routing) {
return routing.hashCode();
}
@Override
public int hash(String type, String id) {
return type.hashCode() + 31 * id.hashCode();
}
} | 0true
| src_main_java_org_elasticsearch_cluster_routing_operation_hash_simple_SimpleHashFunction.java |
3,613 | public static class Defaults extends AbstractFieldMapper.Defaults {
public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
static {
FIELD_TYPE.freeze();
}
// NOTE, when adding defaults here, make sure you add them in the builder
public static final String NULL_VALUE = null;
public static final int POSITION_OFFSET_GAP = 0;
public static final int IGNORE_ABOVE = -1;
} | 0true
| src_main_java_org_elasticsearch_index_mapper_core_StringFieldMapper.java |