Dataset columns:
- Unnamed: 0 — int64, values 0 to 6.45k (the integer id that opens each record below)
- func — string, lengths 29 to 253k (Java source of a function or class, flattened to one line)
- target — class label, 2 classes: 0 (true) and 1 (no label)
- project — string, lengths 36 to 167 (underscore-delimited path of the source file)

Each record below lists, in order: the id, the func source, the target label, and the project path.
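For anyone consuming rows with this schema programmatically, here is a minimal sketch using the Hugging Face datasets library. The dataset id "user/java-func-labels" and the "train" split are placeholders, since this listing does not name the dataset; everything else uses only standard datasets API calls.

from datasets import load_dataset, ClassLabel

ds = load_dataset("user/java-func-labels", split="train")  # placeholder id and split

# "target" is a ClassLabel feature; int2str recovers the names rendered in this
# listing (0 -> "true", 1 -> "no label").
target = ds.features["target"]
assert isinstance(target, ClassLabel)

for row in ds.select(range(3)):
    print(row["Unnamed: 0"], target.int2str(row["target"]), row["project"])
    print(row["func"][:100], "...")  # func holds the full, often very long, Java source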
2,257
public class NettyStaticSetup { private static EsThreadNameDeterminer ES_THREAD_NAME_DETERMINER = new EsThreadNameDeterminer(); public static class EsThreadNameDeterminer implements ThreadNameDeterminer { @Override public String determineThreadName(String currentThreadName, String proposedThreadName) throws Exception { // we control the thread name with a context, so use both return currentThreadName + "{" + proposedThreadName + "}"; } } static { InternalLoggerFactory.setDefaultFactory(new NettyInternalESLoggerFactory() { @Override public InternalLogger newInstance(String name) { return super.newInstance(name.replace("org.jboss.netty.", "netty.").replace("org.jboss.netty.", "netty.")); } }); ThreadRenamingRunnable.setThreadNameDeterminer(ES_THREAD_NAME_DETERMINER); } public static void setup() { } }
0 (true)
src_main_java_org_elasticsearch_common_netty_NettyStaticSetup.java
3,468
public class ShardIndexingModule extends AbstractModule { @Override protected void configure() { bind(ShardIndexingService.class).asEagerSingleton(); bind(ShardSlowLogIndexingService.class).asEagerSingleton(); } }
0 (true)
src_main_java_org_elasticsearch_index_indexing_ShardIndexingModule.java
598
public abstract class AbstractBroadleafWebRequestProcessor implements BroadleafWebRequestProcessor { public void postProcess(WebRequest request) { // nada } }
0 (true)
common_src_main_java_org_broadleafcommerce_common_web_AbstractBroadleafWebRequestProcessor.java
2,394
private static class LongArrayWrapper extends AbstractArray implements LongArray { private final long[] array; LongArrayWrapper(long[] array, PageCacheRecycler recycler, boolean clearOnResize) { super(recycler, clearOnResize); this.array = array; } @Override public long size() { return array.length; } @Override public long get(long index) { assert indexIsInt(index); return array[(int) index]; } @Override public long set(long index, long value) { assert indexIsInt(index); final long ret = array[(int) index]; array[(int) index] = value; return ret; } @Override public long increment(long index, long inc) { assert indexIsInt(index); return array[(int) index] += inc; } @Override public void fill(long fromIndex, long toIndex, long value) { assert indexIsInt(fromIndex); assert indexIsInt(toIndex); Arrays.fill(array, (int) fromIndex, (int) toIndex, value); } }
0 (true)
src_main_java_org_elasticsearch_common_util_BigArrays.java
203
public interface ScanBuffer { public boolean hasRemaining(); public byte getByte(); public boolean getBoolean(); public short getShort(); public int getInt(); public long getLong(); public char getChar(); public float getFloat(); public double getDouble(); public byte[] getBytes(int length); public short[] getShorts(int length); public int[] getInts(int length); public long[] getLongs(int length); public char[] getChars(int length); public float[] getFloats(int length); public double[] getDoubles(int length); }
0 (true)
titan-core_src_main_java_com_thinkaurelius_titan_diskstorage_ScanBuffer.java
435
public class ClientTopicProxy<E> extends ClientProxy implements ITopic<E> { private final String name; private volatile Data key; public ClientTopicProxy(String instanceName, String serviceName, String objectId) { super(instanceName, serviceName, objectId); this.name = objectId; } @Override public void publish(E message) { SerializationService serializationService = getContext().getSerializationService(); final Data data = serializationService.toData(message); PublishRequest request = new PublishRequest(name, data); invoke(request); } @Override public String addMessageListener(final MessageListener<E> listener) { AddMessageListenerRequest request = new AddMessageListenerRequest(name); EventHandler<PortableMessage> handler = new EventHandler<PortableMessage>() { @Override public void handle(PortableMessage event) { SerializationService serializationService = getContext().getSerializationService(); ClientClusterService clusterService = getContext().getClusterService(); E messageObject = serializationService.toObject(event.getMessage()); Member member = clusterService.getMember(event.getUuid()); Message<E> message = new Message<E>(name, messageObject, event.getPublishTime(), member); listener.onMessage(message); } @Override public void onListenerRegister() { } }; return listen(request, getKey(), handler); } @Override public boolean removeMessageListener(String registrationId) { final RemoveMessageListenerRequest request = new RemoveMessageListenerRequest(name, registrationId); return stopListening(request, registrationId); } @Override public LocalTopicStats getLocalTopicStats() { throw new UnsupportedOperationException("Locality is ambiguous for client!!!"); } @Override protected void onDestroy() { } private Data getKey() { if (key == null) { key = getContext().getSerializationService().toData(name); } return key; } @Override protected <T> T invoke(ClientRequest req) { return super.invoke(req, getKey()); } @Override public String toString() { return "ITopic{" + "name='" + getName() + '\'' + '}'; } }
1 (no label)
hazelcast-client_src_main_java_com_hazelcast_client_proxy_ClientTopicProxy.java
810
public abstract class ReadRequest extends PartitionClientRequest implements Portable, SecureRequest { protected String name; public ReadRequest() { } public ReadRequest(String name) { this.name = name; } @Override protected int getPartition() { ClientEngine clientEngine = getClientEngine(); Data key = clientEngine.getSerializationService().toData(name); return clientEngine.getPartitionService().getPartitionId(key); } @Override public String getServiceName() { return AtomicLongService.SERVICE_NAME; } @Override public int getFactoryId() { return AtomicLongPortableHook.F_ID; } @Override public void write(PortableWriter writer) throws IOException { writer.writeUTF("n", name); } @Override public void read(PortableReader reader) throws IOException { name = reader.readUTF("n"); } @Override public Permission getRequiredPermission() { return new AtomicLongPermission(name, ActionConstants.ACTION_READ); } }
0 (true)
hazelcast_src_main_java_com_hazelcast_concurrent_atomiclong_client_ReadRequest.java
14
ScheduledFuture future = exe.scheduleWithFixedDelay(new Runnable() { AtomicInteger atomicInt = new AtomicInteger(0); @Override public void run() { try { for (int i=0;i<10;i++) { exe.submit(new Runnable() { private final int number = atomicInt.incrementAndGet(); @Override public void run() { try { Thread.sleep(150); } catch (InterruptedException e) { e.printStackTrace(); } System.out.println(number); } }); System.out.println("Submitted: "+i); // doSomethingExpensive(20); } } catch (Exception e) { e.printStackTrace(); } } },0,1, TimeUnit.SECONDS);
0 (true)
titan-test_src_main_java_com_thinkaurelius_titan_TestBed.java
1,345
threadPool.generic().execute(new Runnable() { @Override public void run() { innerNodeIndexStoreDeleted(index, nodeId); } });
0 (true)
src_main_java_org_elasticsearch_cluster_action_index_NodeIndexDeletedAction.java
1,149
public class OSQLMethodLength extends OAbstractSQLMethod { public static final String NAME = "length"; public OSQLMethodLength() { super(NAME); } @Override public Object execute(OIdentifiable iCurrentRecord, OCommandContext iContext, Object ioResult, Object[] iMethodParams) { ioResult = ioResult != null ? ioResult.toString().length() : 0; return ioResult; } }
0 (true)
core_src_main_java_com_orientechnologies_orient_core_sql_method_misc_OSQLMethodLength.java
129
@Repository("blStructuredContentDao") public class StructuredContentDaoImpl implements StructuredContentDao { private static SandBox DUMMY_SANDBOX = new SandBoxImpl(); { DUMMY_SANDBOX.setId(-1l); } @PersistenceContext(unitName = "blPU") protected EntityManager em; @Resource(name="blEntityConfiguration") protected EntityConfiguration entityConfiguration; @Override public StructuredContent findStructuredContentById(Long contentId) { return em.find(StructuredContentImpl.class, contentId); } @Override public StructuredContentType findStructuredContentTypeById(Long contentTypeId) { return em.find(StructuredContentTypeImpl.class, contentTypeId); } @Override public List<StructuredContentType> retrieveAllStructuredContentTypes() { Query query = em.createNamedQuery("BC_READ_ALL_STRUCTURED_CONTENT_TYPES"); return query.getResultList(); } @Override public List<StructuredContent> findAllContentItems() { CriteriaBuilder builder = em.getCriteriaBuilder(); CriteriaQuery<StructuredContent> criteria = builder.createQuery(StructuredContent.class); Root<StructuredContentImpl> sc = criteria.from(StructuredContentImpl.class); criteria.select(sc); try { return em.createQuery(criteria).getResultList(); } catch (NoResultException e) { return new ArrayList<StructuredContent>(); } } @Override public Map<String, StructuredContentField> readFieldsForStructuredContentItem(StructuredContent sc) { Query query = em.createNamedQuery("BC_READ_CONTENT_FIELDS_BY_CONTENT_ID"); query.setParameter("structuredContent", sc); query.setHint(QueryHints.HINT_CACHEABLE, true); List<StructuredContentField> fields = query.getResultList(); Map<String, StructuredContentField> fieldMap = new HashMap<String, StructuredContentField>(); for (StructuredContentField scField : fields) { fieldMap.put(scField.getFieldKey(), scField); } return fieldMap; } @Override public StructuredContent addOrUpdateContentItem(StructuredContent content) { return em.merge(content); } @Override public void delete(StructuredContent content) { if (! 
em.contains(content)) { content = findStructuredContentById(content.getId()); } em.remove(content); } @Override public StructuredContentType saveStructuredContentType(StructuredContentType type) { return em.merge(type); } @Override public List<StructuredContent> findActiveStructuredContentByType(SandBox sandBox, StructuredContentType type, Locale locale) { return findActiveStructuredContentByType(sandBox, type, locale, null); } @Override public List<StructuredContent> findActiveStructuredContentByType(SandBox sandBox, StructuredContentType type, Locale fullLocale, Locale languageOnlyLocale) { String queryName = null; if (languageOnlyLocale == null) { languageOnlyLocale = fullLocale; } if (sandBox == null) { queryName = "BC_ACTIVE_STRUCTURED_CONTENT_BY_TYPE"; } else if (SandBoxType.PRODUCTION.equals(sandBox)) { queryName = "BC_ACTIVE_STRUCTURED_CONTENT_BY_TYPE_AND_PRODUCTION_SANDBOX"; } else { queryName = "BC_ACTIVE_STRUCTURED_CONTENT_BY_TYPE_AND_USER_SANDBOX"; } Query query = em.createNamedQuery(queryName); query.setParameter("contentType", type); query.setParameter("fullLocale", fullLocale); query.setParameter("languageOnlyLocale", languageOnlyLocale); if (sandBox != null) { query.setParameter("sandboxId", sandBox.getId()); } query.setHint(QueryHints.HINT_CACHEABLE, true); return query.getResultList(); } @Override public List<StructuredContent> findActiveStructuredContentByNameAndType(SandBox sandBox, StructuredContentType type, String name, Locale locale) { return findActiveStructuredContentByNameAndType(sandBox, type, name, locale, null); } @Override public List<StructuredContent> findActiveStructuredContentByNameAndType(SandBox sandBox, StructuredContentType type, String name, Locale fullLocale, Locale languageOnlyLocale) { String queryName = null; if (languageOnlyLocale == null) { languageOnlyLocale = fullLocale; } final Query query; if (sandBox == null) { query = em.createNamedQuery("BC_ACTIVE_STRUCTURED_CONTENT_BY_TYPE_AND_NAME"); } else if (SandBoxType.PRODUCTION.equals(sandBox)) { query = em.createNamedQuery("BC_ACTIVE_STRUCTURED_CONTENT_BY_TYPE_AND_NAME_AND_PRODUCTION_SANDBOX"); query.setParameter("sandbox", sandBox); } else { query = em.createNamedQuery("BC_ACTIVE_STRUCTURED_CONTENT_BY_TYPE_AND_NAME_AND_USER_SANDBOX"); query.setParameter("sandboxId", sandBox.getId()); } query.setParameter("contentType", type); query.setParameter("contentName", name); query.setParameter("fullLocale", fullLocale); query.setParameter("languageOnlyLocale", languageOnlyLocale); query.setHint(QueryHints.HINT_CACHEABLE, true); return query.getResultList(); } @Override public List<StructuredContent> findActiveStructuredContentByName(SandBox sandBox, String name, Locale locale) { return findActiveStructuredContentByName(sandBox, name, locale, null); } @Override public List<StructuredContent> findActiveStructuredContentByName(SandBox sandBox, String name, Locale fullLocale, Locale languageOnlyLocale) { String queryName = null; if (languageOnlyLocale == null) { languageOnlyLocale = fullLocale; } if (sandBox == null) { queryName = "BC_ACTIVE_STRUCTURED_CONTENT_BY_NAME"; } else if (SandBoxType.PRODUCTION.equals(sandBox)) { queryName = "BC_ACTIVE_STRUCTURED_CONTENT_BY_NAME_AND_PRODUCTION_SANDBOX"; } else { queryName = "BC_ACTIVE_STRUCTURED_CONTENT_BY_NAME_AND_USER_SANDBOX"; } Query query = em.createNamedQuery(queryName); query.setParameter("contentName", name); query.setParameter("fullLocale", fullLocale); query.setParameter("languageOnlyLocale", languageOnlyLocale); if (sandBox != null) { 
query.setParameter("sandbox", sandBox); } query.setHint(QueryHints.HINT_CACHEABLE, true); return query.getResultList(); } @Override public StructuredContentType findStructuredContentTypeByName(String name) { Query query = em.createNamedQuery("BC_READ_STRUCTURED_CONTENT_TYPE_BY_NAME"); query.setParameter("name",name); query.setHint(QueryHints.HINT_CACHEABLE, true); List<StructuredContentType> results = query.getResultList(); if (results.size() > 0) { return results.get(0); } else { return null; } } @Override public void detach(StructuredContent sc) { em.detach(sc); } }
0 (true)
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_dao_StructuredContentDaoImpl.java
766
public class SetContainer extends CollectionContainer { private static final int INITIAL_CAPACITY = 1000; private Set<CollectionItem> itemSet; private SetConfig config; public SetContainer() { } public SetContainer(String name, NodeEngine nodeEngine) { super(name, nodeEngine); } @Override protected SetConfig getConfig() { if (config == null) { config = nodeEngine.getConfig().findSetConfig(name); } return config; } @Override protected Map<Long, Data> addAll(List<Data> valueList) { final int size = valueList.size(); final Map<Long, Data> map = new HashMap<Long, Data>(size); List<CollectionItem> list = new ArrayList<CollectionItem>(size); for (Data value : valueList) { final long itemId = nextId(); final CollectionItem item = new CollectionItem(itemId, value); if (!getCollection().contains(item)) { list.add(item); map.put(itemId, value); } } getCollection().addAll(list); return map; } @Override public Set<CollectionItem> getCollection() { if (itemSet == null) { if (itemMap != null && !itemMap.isEmpty()) { itemSet = new HashSet<CollectionItem>(itemMap.values()); itemMap.clear(); } else { itemSet = new HashSet<CollectionItem>(INITIAL_CAPACITY); } itemMap = null; } return itemSet; } @Override protected Map<Long, CollectionItem> getMap() { if (itemMap == null) { if (itemSet != null && !itemSet.isEmpty()) { itemMap = new HashMap<Long, CollectionItem>(itemSet.size()); for (CollectionItem item : itemSet) { itemMap.put(item.getItemId(), item); } itemSet.clear(); } else { itemMap = new HashMap<Long, CollectionItem>(INITIAL_CAPACITY); } itemSet = null; } return itemMap; } @Override protected void onDestroy() { if (itemSet != null) { itemSet.clear(); } } }
0 (true)
hazelcast_src_main_java_com_hazelcast_collection_set_SetContainer.java
1,349
Future future = es.submit("default", new Callable<String>() { @Override public String call() { try { return "success"; } finally { latch1.countDown(); } } });
0 (true)
hazelcast_src_test_java_com_hazelcast_executor_ExecutorServiceTest.java
2,120
public class JdkESLogger extends AbstractESLogger { private final Logger logger; private final String name; public JdkESLogger(String prefix, String name, Logger logger) { super(prefix); this.logger = logger; this.name = name; } @Override public void setLevel(String level) { if (level == null) { logger.setLevel(null); } else if ("error".equalsIgnoreCase(level)) { logger.setLevel(Level.SEVERE); } else if ("warn".equalsIgnoreCase(level)) { logger.setLevel(Level.WARNING); } else if ("info".equalsIgnoreCase(level)) { logger.setLevel(Level.INFO); } else if ("debug".equalsIgnoreCase(level)) { logger.setLevel(Level.FINE); } else if ("trace".equalsIgnoreCase(level)) { logger.setLevel(Level.FINE); } } @Override public String getLevel() { if (logger.getLevel() == null) { return null; } return logger.getLevel().toString(); } @Override public String getName() { return logger.getName(); } @Override public boolean isTraceEnabled() { return logger.isLoggable(Level.FINEST); } @Override public boolean isDebugEnabled() { return logger.isLoggable(Level.FINE); } @Override public boolean isInfoEnabled() { return logger.isLoggable(Level.INFO); } @Override public boolean isWarnEnabled() { return logger.isLoggable(Level.WARNING); } @Override public boolean isErrorEnabled() { return logger.isLoggable(Level.SEVERE); } @Override protected void internalTrace(String msg) { logger.logp(Level.FINEST, name, null, msg); } @Override protected void internalTrace(String msg, Throwable cause) { logger.logp(Level.FINEST, name, null, msg, cause); } @Override protected void internalDebug(String msg) { logger.logp(Level.FINE, name, null, msg); } @Override protected void internalDebug(String msg, Throwable cause) { logger.logp(Level.FINE, name, null, msg, cause); } @Override protected void internalInfo(String msg) { logger.logp(Level.INFO, name, null, msg); } @Override protected void internalInfo(String msg, Throwable cause) { logger.logp(Level.INFO, name, null, msg, cause); } @Override protected void internalWarn(String msg) { logger.logp(Level.WARNING, name, null, msg); } @Override protected void internalWarn(String msg, Throwable cause) { logger.logp(Level.WARNING, name, null, msg, cause); } @Override protected void internalError(String msg) { logger.logp(Level.SEVERE, name, null, msg); } @Override protected void internalError(String msg, Throwable cause) { logger.logp(Level.SEVERE, name, null, msg, cause); } }
0 (true)
src_main_java_org_elasticsearch_common_logging_jdk_JdkESLogger.java
1,953
public class InstanceBindingImpl<T> extends BindingImpl<T> implements InstanceBinding<T> { final T instance; final Provider<T> provider; final ImmutableSet<InjectionPoint> injectionPoints; public InstanceBindingImpl(Injector injector, Key<T> key, Object source, InternalFactory<? extends T> internalFactory, Set<InjectionPoint> injectionPoints, T instance) { super(injector, key, source, internalFactory, Scoping.UNSCOPED); this.injectionPoints = ImmutableSet.copyOf(injectionPoints); this.instance = instance; this.provider = Providers.of(instance); } public InstanceBindingImpl(Object source, Key<T> key, Scoping scoping, Set<InjectionPoint> injectionPoints, T instance) { super(source, key, scoping); this.injectionPoints = ImmutableSet.copyOf(injectionPoints); this.instance = instance; this.provider = Providers.of(instance); } @Override public Provider<T> getProvider() { return this.provider; } public <V> V acceptTargetVisitor(BindingTargetVisitor<? super T, V> visitor) { return visitor.visit(this); } public T getInstance() { return instance; } public Set<InjectionPoint> getInjectionPoints() { return injectionPoints; } public Set<Dependency<?>> getDependencies() { return instance instanceof HasDependencies ? ImmutableSet.copyOf(((HasDependencies) instance).getDependencies()) : Dependency.forInjectionPoints(injectionPoints); } public BindingImpl<T> withScoping(Scoping scoping) { return new InstanceBindingImpl<T>(getSource(), getKey(), scoping, injectionPoints, instance); } public BindingImpl<T> withKey(Key<T> key) { return new InstanceBindingImpl<T>(getSource(), key, getScoping(), injectionPoints, instance); } public void applyTo(Binder binder) { // instance bindings aren't scoped binder.withSource(getSource()).bind(getKey()).toInstance(instance); } @Override public String toString() { return new ToStringBuilder(InstanceBinding.class) .add("key", getKey()) .add("source", getSource()) .add("instance", instance) .toString(); } }
0 (true)
src_main_java_org_elasticsearch_common_inject_internal_InstanceBindingImpl.java
2,644
zenPingA.setNodesProvider(new DiscoveryNodesProvider() { @Override public DiscoveryNodes nodes() { return DiscoveryNodes.builder().put(nodeA).localNodeId("A").build(); } @Override public NodeService nodeService() { return null; } });
0 (true)
src_test_java_org_elasticsearch_discovery_zen_ping_multicast_MulticastZenPingTests.java
5,057
public class SearchContextMissingException extends ElasticsearchException { private final long id; public SearchContextMissingException(long id) { super("No search context found for id [" + id + "]"); this.id = id; } public long id() { return this.id; } }
1 (no label)
src_main_java_org_elasticsearch_search_SearchContextMissingException.java
1,728
private final class MapLoadAllTask implements Runnable { private Map<Data, Object> keys; private AtomicInteger checkIfMapLoaded; private MapLoadAllTask(Map<Data, Object> keys, AtomicInteger checkIfMapLoaded) { this.keys = keys; this.checkIfMapLoaded = checkIfMapLoaded; } public void run() { final NodeEngine nodeEngine = mapService.getNodeEngine(); try { Map values = mapContainer.getStore().loadAll(keys.values()); if (values == null || values.isEmpty()) { if (checkIfMapLoaded.decrementAndGet() == 0) { loaded.set(true); } return; } MapEntrySet entrySet = new MapEntrySet(); for (Data dataKey : keys.keySet()) { Object key = keys.get(dataKey); Object value = values.get(key); if (value != null) { Data dataValue = mapService.toData(value); entrySet.add(dataKey, dataValue); } } PutAllOperation operation = new PutAllOperation(name, entrySet, true); operation.setNodeEngine(nodeEngine); operation.setResponseHandler(new ResponseHandler() { @Override public void sendResponse(Object obj) { if (checkIfMapLoaded.decrementAndGet() == 0) { loaded.set(true); } } public boolean isLocal() { return true; } }); operation.setPartitionId(partitionId); OperationAccessor.setCallerAddress(operation, nodeEngine.getThisAddress()); operation.setCallerUuid(nodeEngine.getLocalMember().getUuid()); operation.setServiceName(MapService.SERVICE_NAME); nodeEngine.getOperationService().executeOperation(operation); } catch (Exception e) { logger.warning("Exception while load all task:" + e.toString()); } } }
1 (no label)
hazelcast_src_main_java_com_hazelcast_map_DefaultRecordStore.java
568
public final class ClusterDataSerializerHook implements DataSerializerHook { public static final int F_ID = Data.FACTORY_ID; public static final int DATA = Data.ID; public static final int ADDRESS = Address.ID; public static final int MEMBER = 2; public static final int HEARTBEAT = 3; public static final int CONFIG_CHECK = 4; // client public static final int MEMBERSHIP_EVENT = 8; @Override public int getFactoryId() { return F_ID; } @Override public DataSerializableFactory createFactory() { return new DataSerializableFactory() { @Override public IdentifiedDataSerializable create(int typeId) { switch (typeId) { case DATA: return new Data(); case ADDRESS: return new Address(); case MEMBER: return new MemberImpl(); case HEARTBEAT: return new HeartbeatOperation(); case CONFIG_CHECK: return new ConfigCheck(); case MEMBERSHIP_EVENT: return new ClientMembershipEvent(); default: return null; } } }; } }
0 (true)
hazelcast_src_main_java_com_hazelcast_cluster_ClusterDataSerializerHook.java
2,107
public class TryPutOperation extends BasePutOperation { private boolean successful; public TryPutOperation(String name, Data dataKey, Data value, long timeout) { super(name, dataKey, value); setWaitTimeout(timeout); } public TryPutOperation() { } public void run() { successful = recordStore.tryPut(dataKey, dataValue, ttl); } public void afterRun() { if (successful) super.afterRun(); } public boolean shouldBackup() { return successful; } public void onWaitExpire() { getResponseHandler().sendResponse(false); } public Object getResponse() { return successful; } @Override public String toString() { return "TryPutOperation{" + name + "}"; } }
1 (no label)
hazelcast_src_main_java_com_hazelcast_map_operation_TryPutOperation.java
1,572
public static class Single extends Decision { private final Type type; private final String explanation; private final Object[] explanationParams; /** * Creates a new {@link Single} decision of a given type * @param type {@link Type} of the decision */ public Single(Type type) { this(type, null, (Object[]) null); } /** * Creates a new {@link Single} decision of a given type * * @param type {@link Type} of the decision * @param explanation An explanation of this {@link Decision} * @param explanationParams A set of additional parameters */ public Single(Type type, String explanation, Object... explanationParams) { this.type = type; this.explanation = explanation; this.explanationParams = explanationParams; } @Override public Type type() { return this.type; } @Override public String toString() { if (explanation == null) { return type + "()"; } return type + "(" + String.format(Locale.ROOT, explanation, explanationParams) + ")"; } }
0 (true)
src_main_java_org_elasticsearch_cluster_routing_allocation_decider_Decision.java
2,668
public class AliasedIndexDocumentActionsTests extends DocumentActionsTests { protected void createIndex() { logger.info("Creating index [test1] with alias [test]"); try { client().admin().indices().prepareDelete("test1").execute().actionGet(); } catch (Exception e) { // ignore } logger.info("--> creating index test"); client().admin().indices().create(createIndexRequest("test1").settings(settingsBuilder().putArray("index.aliases", "test"))).actionGet(); } @Override protected String getConcreteIndexName() { return "test1"; } }
0 (true)
src_test_java_org_elasticsearch_document_AliasedIndexDocumentActionsTests.java
49
@Component("blStructuredContentTypeCustomPersistenceHandler") public class StructuredContentTypeCustomPersistenceHandler extends CustomPersistenceHandlerAdapter { private final Log LOG = LogFactory.getLog(StructuredContentTypeCustomPersistenceHandler.class); @Resource(name="blStructuredContentService") protected StructuredContentService structuredContentService; @Resource(name="blSandBoxService") protected SandBoxService sandBoxService; @Resource(name = "blDynamicFieldPersistenceHandlerHelper") protected DynamicFieldPersistenceHandlerHelper dynamicFieldUtil; @Override public Boolean canHandleFetch(PersistencePackage persistencePackage) { String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname(); return StructuredContentType.class.getName().equals(ceilingEntityFullyQualifiedClassname) && persistencePackage.getCustomCriteria() != null && persistencePackage.getCustomCriteria().length > 0 && persistencePackage.getCustomCriteria()[0].equals("constructForm"); } @Override public Boolean canHandleAdd(PersistencePackage persistencePackage) { return canHandleFetch(persistencePackage); } @Override public Boolean canHandleInspect(PersistencePackage persistencePackage) { return canHandleFetch(persistencePackage); } @Override public Boolean canHandleRemove(PersistencePackage persistencePackage) { return false; } @Override public Boolean canHandleUpdate(PersistencePackage persistencePackage) { return canHandleFetch(persistencePackage); } protected SandBox getSandBox() { return sandBoxService.retrieveSandboxById(SandBoxContext.getSandBoxContext().getSandBoxId()); } @Override public DynamicResultSet inspect(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, InspectHelper helper) throws ServiceException { String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname(); try { String structuredContentTypeId = persistencePackage.getCustomCriteria()[3]; StructuredContentType structuredContentType = structuredContentService.findStructuredContentTypeById(Long.valueOf(structuredContentTypeId)); ClassMetadata metadata = new ClassMetadata(); metadata.setCeilingType(StructuredContentType.class.getName()); ClassTree entities = new ClassTree(StructuredContentTypeImpl.class.getName()); metadata.setPolymorphicEntities(entities); Property[] properties = dynamicFieldUtil.buildDynamicPropertyList(structuredContentType.getStructuredContentFieldTemplate().getFieldGroups(), StructuredContentType.class); metadata.setProperties(properties); DynamicResultSet results = new DynamicResultSet(metadata); return results; } catch (Exception e) { throw new ServiceException("Unable to perform inspect for entity: "+ceilingEntityFullyQualifiedClassname, e); } } @Override public DynamicResultSet fetch(PersistencePackage persistencePackage, CriteriaTransferObject cto, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException { String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname(); try { String structuredContentId = persistencePackage.getCustomCriteria()[1]; Entity entity = fetchEntityBasedOnId(structuredContentId); DynamicResultSet results = new DynamicResultSet(new Entity[]{entity}, 1); return results; } catch (Exception e) { throw new ServiceException("Unable to perform fetch for entity: "+ceilingEntityFullyQualifiedClassname, e); } } protected Entity fetchEntityBasedOnId(String structuredContentId) throws Exception { StructuredContent structuredContent 
= structuredContentService.findStructuredContentById(Long.valueOf(structuredContentId)); Map<String, StructuredContentField> structuredContentFieldMap = structuredContent.getStructuredContentFields(); Entity entity = new Entity(); entity.setType(new String[]{StructuredContentType.class.getName()}); List<Property> propertiesList = new ArrayList<Property>(); for (FieldGroup fieldGroup : structuredContent.getStructuredContentType().getStructuredContentFieldTemplate().getFieldGroups()) { for (FieldDefinition definition : fieldGroup.getFieldDefinitions()) { Property property = new Property(); propertiesList.add(property); property.setName(definition.getName()); String value = null; if (!MapUtils.isEmpty(structuredContentFieldMap)) { StructuredContentField structuredContentField = structuredContentFieldMap.get(definition.getName()); if (structuredContentField != null) { value = structuredContentField.getValue(); } } property.setValue(value); } } Property property = new Property(); propertiesList.add(property); property.setName("id"); property.setValue(structuredContentId); entity.setProperties(propertiesList.toArray(new Property[]{})); return entity; } /** * Invoked when {@link StructuredContent} is saved in order to fill out the dynamic form for the structured content type */ @Override public Entity update(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException { return addOrUpdate(persistencePackage, dynamicEntityDao, helper); } /** * Invoked when {@link StructuredContent} is saved in order to fill out the dynamic form for the structured content type */ @Override public Entity add(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException { return addOrUpdate(persistencePackage, dynamicEntityDao, helper); } protected Entity addOrUpdate(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException { String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname(); try { String structuredContentId = persistencePackage.getCustomCriteria()[1]; StructuredContent structuredContent = structuredContentService.findStructuredContentById(Long.valueOf(structuredContentId)); Property[] properties = dynamicFieldUtil.buildDynamicPropertyList(structuredContent.getStructuredContentType().getStructuredContentFieldTemplate().getFieldGroups(), StructuredContentType.class); Map<String, FieldMetadata> md = new HashMap<String, FieldMetadata>(); for (Property property : properties) { md.put(property.getName(), property.getMetadata()); } boolean validated = helper.validate(persistencePackage.getEntity(), null, md); if (!validated) { throw new ValidationException(persistencePackage.getEntity(), "Structured Content dynamic fields failed validation"); } List<String> templateFieldNames = new ArrayList<String>(20); for (FieldGroup group : structuredContent.getStructuredContentType().getStructuredContentFieldTemplate().getFieldGroups()) { for (FieldDefinition definition: group.getFieldDefinitions()) { templateFieldNames.add(definition.getName()); } } Map<String, StructuredContentField> structuredContentFieldMap = structuredContent.getStructuredContentFields(); for (Property property : persistencePackage.getEntity().getProperties()) { if (templateFieldNames.contains(property.getName())) { StructuredContentField structuredContentField = structuredContentFieldMap.get(property.getName()); if 
(structuredContentField != null) { structuredContentField.setValue(property.getValue()); } else { structuredContentField = new StructuredContentFieldImpl(); structuredContentFieldMap.put(property.getName(), structuredContentField); structuredContentField.setFieldKey(property.getName()); structuredContentField.setStructuredContent(structuredContent); structuredContentField.setValue(property.getValue()); } } } List<String> removeItems = new ArrayList<String>(); for (String key : structuredContentFieldMap.keySet()) { if (persistencePackage.getEntity().findProperty(key)==null) { removeItems.add(key); } } if (removeItems.size() > 0) { for (String removeKey : removeItems) { StructuredContentField structuredContentField = structuredContentFieldMap.remove(removeKey); structuredContentField.setStructuredContent(null); } } structuredContentService.updateStructuredContent(structuredContent, getSandBox()); return fetchEntityBasedOnId(structuredContentId); } catch (ValidationException e) { throw e; } catch (Exception e) { throw new ServiceException("Unable to perform fetch for entity: "+ceilingEntityFullyQualifiedClassname, e); } } }
1 (no label)
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_admin_server_handler_StructuredContentTypeCustomPersistenceHandler.java
428
map.addChangeListener(new OMultiValueChangeListener<Object, String>() { public void onAfterRecordChanged(final OMultiValueChangeEvent<Object, String> event) { Assert.assertEquals(event.getChangeType(), OMultiValueChangeEvent.OChangeType.ADD); Assert.assertNull(event.getOldValue()); Assert.assertEquals(event.getKey(), "key1"); Assert.assertEquals(event.getValue(), "value1"); changed.value = true; } });
0 (true)
core_src_test_java_com_orientechnologies_orient_core_db_record_TrackedMapTest.java
1,800
@Target(ANNOTATION_TYPE) @Retention(RUNTIME) public @interface BindingAnnotation { }
0 (true)
src_main_java_org_elasticsearch_common_inject_BindingAnnotation.java
1,574
public class AdornedTargetCollectionMetadata extends CollectionMetadata { private boolean ignoreAdornedProperties; private String parentObjectClass; private String[] maintainedAdornedTargetFields = {}; private String[] gridVisibleFields = {}; public boolean isIgnoreAdornedProperties() { return ignoreAdornedProperties; } public void setIgnoreAdornedProperties(boolean ignoreAdornedProperties) { this.ignoreAdornedProperties = ignoreAdornedProperties; } public String getParentObjectClass() { return parentObjectClass; } public void setParentObjectClass(String parentObjectClass) { this.parentObjectClass = parentObjectClass; } public String[] getGridVisibleFields() { return gridVisibleFields; } public void setGridVisibleFields(String[] gridVisibleFields) { this.gridVisibleFields = gridVisibleFields; } public String[] getMaintainedAdornedTargetFields() { return maintainedAdornedTargetFields; } public void setMaintainedAdornedTargetFields(String[] maintainedAdornedTargetFields) { this.maintainedAdornedTargetFields = maintainedAdornedTargetFields; } @Override public void accept(MetadataVisitor visitor) { visitor.visit(this); } @Override protected FieldMetadata populate(FieldMetadata metadata) { ((AdornedTargetCollectionMetadata) metadata).ignoreAdornedProperties = ignoreAdornedProperties; ((AdornedTargetCollectionMetadata) metadata).parentObjectClass = parentObjectClass; ((AdornedTargetCollectionMetadata) metadata).maintainedAdornedTargetFields = maintainedAdornedTargetFields; ((AdornedTargetCollectionMetadata) metadata).gridVisibleFields = gridVisibleFields; return super.populate(metadata); } @Override public FieldMetadata cloneFieldMetadata() { AdornedTargetCollectionMetadata metadata = new AdornedTargetCollectionMetadata(); return populate(metadata); } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof AdornedTargetCollectionMetadata)) return false; if (!super.equals(o)) return false; AdornedTargetCollectionMetadata metadata = (AdornedTargetCollectionMetadata) o; if (ignoreAdornedProperties != metadata.ignoreAdornedProperties) return false; if (!Arrays.equals(gridVisibleFields, metadata.gridVisibleFields)) return false; if (!Arrays.equals(maintainedAdornedTargetFields, metadata.maintainedAdornedTargetFields)) return false; if (parentObjectClass != null ? !parentObjectClass.equals(metadata.parentObjectClass) : metadata.parentObjectClass != null) return false; return true; } @Override public int hashCode() { int result = super.hashCode(); result = 31 * result + (ignoreAdornedProperties ? 1 : 0); result = 31 * result + (parentObjectClass != null ? parentObjectClass.hashCode() : 0); result = 31 * result + (maintainedAdornedTargetFields != null ? Arrays.hashCode(maintainedAdornedTargetFields) : 0); result = 31 * result + (gridVisibleFields != null ? Arrays.hashCode(gridVisibleFields) : 0); return result; } }
1 (no label)
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_dto_AdornedTargetCollectionMetadata.java
1,502
public class OObjectMethodFilter implements MethodFilter { public boolean isHandled(final Method m) { final String methodName = m.getName(); final String fieldName = getFieldName(m); if (fieldName == null) return false; try { if (!OObjectEntitySerializer.isClassField(m.getDeclaringClass(), fieldName)) return false; return (isSetterMethod(methodName, m) || isGetterMethod(methodName, m)); } catch (NoSuchFieldException nsfe) { OLogManager.instance().warn(this, "Error handling the method %s in class %s", nsfe, m.getName(), m.getDeclaringClass().getName()); return false; } catch (SecurityException se) { OLogManager.instance().warn(this, "", se, m.getName(), m.getDeclaringClass().getName()); return false; } } public String getFieldName(final Method m) { final String methodName = m.getName(); final Class<?> clz = m.getDeclaringClass(); if (methodName.startsWith("get")) return getFieldName(methodName, "get"); else if (methodName.startsWith("set")) return getFieldName(methodName, "set"); else if (methodName.startsWith("is")) return getFieldName(methodName, "is"); else if (isScalaClass(clz)) { return getScalaFieldName(clz, methodName); } // NO FIELD return null; } protected String getFieldName(final String methodName, final String prefix) { final StringBuffer fieldName = new StringBuffer(); fieldName.append(Character.toLowerCase(methodName.charAt(prefix.length()))); fieldName.append(methodName.substring(prefix.length() + 1)); return fieldName.toString(); } public boolean isSetterMethod(final String methodName, final Method m) throws SecurityException, NoSuchFieldException { Class<?> clz = m.getDeclaringClass(); if (!methodName.startsWith("set") || !checkIfFirstCharAfterPrefixIsUpperCase(methodName, "set") || (isScalaClass(clz) && !methodName.endsWith("_$eq"))) return false; if (m.getParameterTypes() != null && m.getParameterTypes().length != 1) return false; if (OObjectEntitySerializer.isTransientField(m.getDeclaringClass(), getFieldName(m))) return false; Class<?>[] parameters = m.getParameterTypes(); Field f = OObjectEntitySerializer.getField(getFieldName(m), m.getDeclaringClass()); if (!f.getType().isAssignableFrom(parameters[0])) { OLogManager.instance().warn( this, "Setter method " + m.toString() + " for field " + f.getName() + " in class " + m.getDeclaringClass().toString() + " cannot be bound to proxied instance: parameter class don't match with field type " + f.getType().toString()); return false; } return true; } public boolean isGetterMethod(String fieldName, Method m) throws SecurityException, NoSuchFieldException { int prefixLength; Class<?> clz = m.getDeclaringClass(); if (fieldName.startsWith("get") && checkIfFirstCharAfterPrefixIsUpperCase(fieldName, "get")) prefixLength = "get".length(); else if (fieldName.startsWith("is") && checkIfFirstCharAfterPrefixIsUpperCase(fieldName, "is")) prefixLength = "is".length(); else if (isScalaClass(clz) && fieldName.equals(getFieldName(m))) prefixLength = 0; else return false; if (m.getParameterTypes() != null && m.getParameterTypes().length > 0) return false; if (fieldName.length() <= prefixLength) return false; return !OObjectEntitySerializer.isTransientField(m.getDeclaringClass(), getFieldName(m)); } private boolean checkIfFirstCharAfterPrefixIsUpperCase(String methodName, String prefix) { return methodName.length() > prefix.length() ? 
Character.isUpperCase(methodName.charAt(prefix.length())) : false; } protected boolean isScalaClass(Class<?> clz) { Annotation[] annotations = clz.getDeclaredAnnotations(); for (Annotation a : annotations) { if ("scala.reflect.ScalaSignature".contains(a.annotationType().getName()) || "scala.reflect.ScalaLongSignature".contains(a.getClass().getName())) { return true; } } return false; } protected String getScalaFieldName(Class<?> clz, String name) { Field[] fields = clz.getDeclaredFields(); for (Field field : fields) { if (name.equals(field.getName() + "_$eq")) { return field.getName(); } else if (name.equals(field.getName())) { return field.getName(); } } return null; } }
0 (true)
object_src_main_java_com_orientechnologies_orient_object_enhancement_OObjectMethodFilter.java
673
constructors[COLLECTION_PREPARE] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() { public IdentifiedDataSerializable createNew(Integer arg) { return new CollectionPrepareOperation(); } };
0 (true)
hazelcast_src_main_java_com_hazelcast_collection_CollectionDataSerializerHook.java
2,060
public final class TypeListenerBinding implements Element { private final Object source; private final Matcher<? super TypeLiteral<?>> typeMatcher; private final TypeListener listener; TypeListenerBinding(Object source, TypeListener listener, Matcher<? super TypeLiteral<?>> typeMatcher) { this.source = source; this.listener = listener; this.typeMatcher = typeMatcher; } /** * Returns the registered listener. */ public TypeListener getListener() { return listener; } /** * Returns the type matcher which chooses which types the listener should be notified of. */ public Matcher<? super TypeLiteral<?>> getTypeMatcher() { return typeMatcher; } public Object getSource() { return source; } public <T> T acceptVisitor(ElementVisitor<T> visitor) { return visitor.visit(this); } public void applyTo(Binder binder) { binder.withSource(getSource()).bindListener(typeMatcher, listener); } }
0 (true)
src_main_java_org_elasticsearch_common_inject_spi_TypeListenerBinding.java
1,209
public interface PostProcessingMapStore { }
0 (true)
hazelcast_src_main_java_com_hazelcast_core_PostProcessingMapStore.java
2,828
public interface CharFilterFactory { String name(); Reader create(Reader tokenStream); }
0 (true)
src_main_java_org_elasticsearch_index_analysis_CharFilterFactory.java
6,422
targetTransport.threadPool().generic().execute(new Runnable() { @Override public void run() { targetTransport.messageReceived(data, action, sourceTransport, version, null); } });
1 (no label)
src_main_java_org_elasticsearch_transport_local_LocalTransportChannel.java
1,692
public class MemoryCircuitBreaker { private final long memoryBytesLimit; private final double overheadConstant; private final AtomicLong used; private final ESLogger logger; /** * Create a circuit breaker that will break if the number of estimated * bytes grows above the limit. All estimations will be multiplied by * the given overheadConstant. This breaker starts with 0 bytes used. * @param limit circuit breaker limit * @param overheadConstant constant multiplier for byte estimations */ public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, ESLogger logger) { this.memoryBytesLimit = limit.bytes(); this.overheadConstant = overheadConstant; this.used = new AtomicLong(0); this.logger = logger; if (logger.isTraceEnabled()) { logger.trace("Creating MemoryCircuitBreaker with a limit of {} bytes ({}) and a overhead constant of {}", this.memoryBytesLimit, limit, this.overheadConstant); } } /** * Create a circuit breaker that will break if the number of estimated * bytes grows above the limit. All estimations will be multiplied by * the given overheadConstant. Uses the given oldBreaker to initialize * the starting offset. * @param limit circuit breaker limit * @param overheadConstant constant multiplier for byte estimations * @param oldBreaker the previous circuit breaker to inherit the used value from (starting offset) */ public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, MemoryCircuitBreaker oldBreaker, ESLogger logger) { this.memoryBytesLimit = limit.bytes(); this.overheadConstant = overheadConstant; if (oldBreaker == null) { this.used = new AtomicLong(0); } else { this.used = oldBreaker.used; } this.logger = logger; if (logger.isTraceEnabled()) { logger.trace("Creating MemoryCircuitBreaker with a limit of {} bytes ({}) and a overhead constant of {}", this.memoryBytesLimit, limit, this.overheadConstant); } } /** * Method used to trip the breaker * @throws CircuitBreakingException */ public void circuitBreak() throws CircuitBreakingException { throw new CircuitBreakingException("Data too large, data would be larger than limit of [" + memoryBytesLimit + "] bytes"); } /** * Add a number of bytes, tripping the circuit breaker if the aggregated * estimates are above the limit. Automatically trips the breaker if the * memory limit is set to 0. Will never trip the breaker if the limit is * set < 0, but can still be used to aggregate estimations. * @param bytes number of bytes to add to the breaker * @return number of "used" bytes so far * @throws CircuitBreakingException */ public double addEstimateBytesAndMaybeBreak(long bytes) throws CircuitBreakingException { // short-circuit on no data allowed, immediately throwing an exception if (memoryBytesLimit == 0) { circuitBreak(); } long newUsed; // If there is no limit (-1), we can optimize a bit by using // .addAndGet() instead of looping (because we don't have to check a // limit), which makes the RamAccountingTermsEnum case faster. if (this.memoryBytesLimit == -1) { newUsed = this.used.addAndGet(bytes); if (logger.isTraceEnabled()) { logger.trace("Adding [{}] to used bytes [new used: [{}], limit: [-1b]]", new ByteSizeValue(bytes), new ByteSizeValue(newUsed)); } return newUsed; } // Otherwise, check the addition and commit the addition, looping if // there are conflicts. May result in additional logging, but it's // trace logging and shouldn't be counted on for additions. 
long currentUsed; do { currentUsed = this.used.get(); newUsed = currentUsed + bytes; long newUsedWithOverhead = (long)(newUsed * overheadConstant); if (logger.isTraceEnabled()) { logger.trace("Adding [{}] to used bytes [new used: [{}], limit: {} [{}], estimate: {} [{}]]", new ByteSizeValue(bytes), new ByteSizeValue(newUsed), memoryBytesLimit, new ByteSizeValue(memoryBytesLimit), newUsedWithOverhead, new ByteSizeValue(newUsedWithOverhead)); } if (memoryBytesLimit > 0 && newUsedWithOverhead > memoryBytesLimit) { logger.error("New used memory {} [{}] would be larger than configured breaker: {} [{}], breaking", newUsedWithOverhead, new ByteSizeValue(newUsedWithOverhead), memoryBytesLimit, new ByteSizeValue(memoryBytesLimit)); circuitBreak(); } // Attempt to set the new used value, but make sure it hasn't changed // underneath us, if it has, keep trying until we are able to set it } while (!this.used.compareAndSet(currentUsed, newUsed)); return newUsed; } /** * Add an <b>exact</b> number of bytes, not checking for tripping the * circuit breaker. This bypasses the overheadConstant multiplication. * @param bytes number of bytes to add to the breaker * @return number of "used" bytes so far */ public long addWithoutBreaking(long bytes) { long u = used.addAndGet(bytes); if (logger.isTraceEnabled()) { logger.trace("Adjusted breaker by [{}] bytes, now [{}]", bytes, u); } assert u >= 0 : "Used bytes: [" + u + "] must be >= 0"; return u; } /** * @return the number of aggregated "used" bytes so far */ public long getUsed() { return this.used.get(); } /** * @return the maximum number of bytes before the circuit breaker will trip */ public long getMaximum() { return this.memoryBytesLimit; } /** * @return the constant multiplier the breaker uses for aggregations */ public double getOverhead() { return this.overheadConstant; } }
0 (true)
src_main_java_org_elasticsearch_common_breaker_MemoryCircuitBreaker.java
882
private class AsyncAction { private final SearchScrollRequest request; private final ActionListener<SearchResponse> listener; private final ParsedScrollId scrollId; private final DiscoveryNodes nodes; private volatile AtomicArray<ShardSearchFailure> shardFailures; private final AtomicArray<QueryFetchSearchResult> queryFetchResults; private final AtomicInteger successfulOps; private final AtomicInteger counter; private final long startTime = System.currentTimeMillis(); private AsyncAction(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) { this.request = request; this.listener = listener; this.scrollId = scrollId; this.nodes = clusterService.state().nodes(); this.successfulOps = new AtomicInteger(scrollId.getContext().length); this.counter = new AtomicInteger(scrollId.getContext().length); this.queryFetchResults = new AtomicArray<QueryFetchSearchResult>(scrollId.getContext().length); } protected final ShardSearchFailure[] buildShardFailures() { if (shardFailures == null) { return ShardSearchFailure.EMPTY_ARRAY; } List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList(); ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()]; for (int i = 0; i < failures.length; i++) { failures[i] = entries.get(i).value; } return failures; } // we do our best to return the shard failures, but its ok if its not fully concurrently safe // we simply try and return as much as possible protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) { if (shardFailures == null) { shardFailures = new AtomicArray<ShardSearchFailure>(scrollId.getContext().length); } shardFailures.set(shardIndex, failure); } public void start() { if (scrollId.getContext().length == 0) { listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", null)); return; } int localOperations = 0; Tuple<String, Long>[] context = scrollId.getContext(); for (int i = 0; i < context.length; i++) { Tuple<String, Long> target = context[i]; DiscoveryNode node = nodes.get(target.v1()); if (node != null) { if (nodes.localNodeId().equals(node.id())) { localOperations++; } else { executePhase(i, node, target.v2()); } } else { if (logger.isDebugEnabled()) { logger.debug("Node [" + target.v1() + "] not available for scroll request [" + scrollId.getSource() + "]"); } successfulOps.decrementAndGet(); if (counter.decrementAndGet() == 0) { finishHim(); } } } if (localOperations > 0) { if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) { threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() { @Override public void run() { Tuple<String, Long>[] context1 = scrollId.getContext(); for (int i = 0; i < context1.length; i++) { Tuple<String, Long> target = context1[i]; DiscoveryNode node = nodes.get(target.v1()); if (node != null && nodes.localNodeId().equals(node.id())) { executePhase(i, node, target.v2()); } } } }); } else { boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD; Tuple<String, Long>[] context1 = scrollId.getContext(); for (int i = 0; i < context1.length; i++) { final Tuple<String, Long> target = context1[i]; final int shardIndex = i; final DiscoveryNode node = nodes.get(target.v1()); if (node != null && nodes.localNodeId().equals(node.id())) { try { if (localAsync) { threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() { @Override public void run() { executePhase(shardIndex, node, target.v2()); } }); } else { 
executePhase(shardIndex, node, target.v2()); } } catch (Throwable t) { onPhaseFailure(t, target.v2(), shardIndex); } } } } } for (Tuple<String, Long> target : scrollId.getContext()) { DiscoveryNode node = nodes.get(target.v1()); if (node == null) { if (logger.isDebugEnabled()) { logger.debug("Node [" + target.v1() + "] not available for scroll request [" + scrollId.getSource() + "]"); } successfulOps.decrementAndGet(); if (counter.decrementAndGet() == 0) { finishHim(); } } else { } } } void executePhase(final int shardIndex, DiscoveryNode node, final long searchId) { searchService.sendExecuteFetch(node, internalScrollSearchRequest(searchId, request), new SearchServiceListener<QueryFetchSearchResult>() { @Override public void onResult(QueryFetchSearchResult result) { queryFetchResults.set(shardIndex, result); if (counter.decrementAndGet() == 0) { finishHim(); } } @Override public void onFailure(Throwable t) { onPhaseFailure(t, searchId, shardIndex); } }); } private void onPhaseFailure(Throwable t, long searchId, int shardIndex) { if (logger.isDebugEnabled()) { logger.debug("[{}] Failed to execute query phase", t, searchId); } addShardFailure(shardIndex, new ShardSearchFailure(t)); successfulOps.decrementAndGet(); if (counter.decrementAndGet() == 0) { finishHim(); } } private void finishHim() { try { innerFinishHim(); } catch (Throwable e) { listener.onFailure(new ReduceSearchPhaseException("fetch", "", e, buildShardFailures())); } } private void innerFinishHim() { ScoreDoc[] sortedShardList = searchPhaseController.sortDocs(queryFetchResults); final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults, queryFetchResults); String scrollId = null; if (request.scroll() != null) { scrollId = request.scrollId(); } listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(), System.currentTimeMillis() - startTime, buildShardFailures())); } }
1 (no label)
src_main_java_org_elasticsearch_action_search_type_TransportSearchScrollQueryAndFetchAction.java
303
public class NeoStoreTransaction extends XaTransaction { private final RecordChanges<Long, NodeRecord, Void> nodeRecords = new RecordChanges<>( new RecordChanges.Loader<Long, NodeRecord, Void>() { @Override public NodeRecord newUnused( Long key, Void additionalData ) { return new NodeRecord( key, Record.NO_NEXT_RELATIONSHIP.intValue(), Record.NO_NEXT_PROPERTY.intValue() ); } @Override public NodeRecord load( Long key, Void additionalData ) { return getNodeStore().getRecord( key ); } @Override public void ensureHeavy( NodeRecord record ) { getNodeStore().ensureHeavy( record ); } @Override public NodeRecord clone(NodeRecord nodeRecord) { return nodeRecord.clone(); } }, true ); private final RecordChanges<Long, PropertyRecord, PrimitiveRecord> propertyRecords = new RecordChanges<>( new RecordChanges.Loader<Long, PropertyRecord, PrimitiveRecord>() { @Override public PropertyRecord newUnused( Long key, PrimitiveRecord additionalData ) { PropertyRecord record = new PropertyRecord( key ); setOwner( record, additionalData ); return record; } private void setOwner( PropertyRecord record, PrimitiveRecord owner ) { if ( owner != null ) { owner.setIdTo( record ); } } @Override public PropertyRecord load( Long key, PrimitiveRecord additionalData ) { PropertyRecord record = getPropertyStore().getRecord( key.longValue() ); setOwner( record, additionalData ); return record; } @Override public void ensureHeavy( PropertyRecord record ) { for ( PropertyBlock block : record.getPropertyBlocks() ) { getPropertyStore().ensureHeavy( block ); } } @Override public PropertyRecord clone(PropertyRecord propertyRecord) { return propertyRecord.clone(); } }, true ); private final RecordChanges<Long, RelationshipRecord, Void> relRecords = new RecordChanges<>( new RecordChanges.Loader<Long, RelationshipRecord, Void>() { @Override public RelationshipRecord newUnused( Long key, Void additionalData ) { return new RelationshipRecord( key ); } @Override public RelationshipRecord load( Long key, Void additionalData ) { return getRelationshipStore().getRecord( key ); } @Override public void ensureHeavy( RelationshipRecord record ) { } @Override public RelationshipRecord clone(RelationshipRecord relationshipRecord) { // Not needed because we don't manage before state for relationship records. 
throw new UnsupportedOperationException("Unexpected call to clone on a relationshipRecord"); } }, false ); private final RecordChanges<Long, Collection<DynamicRecord>, SchemaRule> schemaRuleChanges = new RecordChanges<>(new RecordChanges.Loader<Long, Collection<DynamicRecord>, SchemaRule>() { @Override public Collection<DynamicRecord> newUnused(Long key, SchemaRule additionalData) { return getSchemaStore().allocateFrom(additionalData); } @Override public Collection<DynamicRecord> load(Long key, SchemaRule additionalData) { return getSchemaStore().getRecords( key ); } @Override public void ensureHeavy(Collection<DynamicRecord> dynamicRecords) { SchemaStore schemaStore = getSchemaStore(); for ( DynamicRecord record : dynamicRecords) { schemaStore.ensureHeavy(record); } } @Override public Collection<DynamicRecord> clone(Collection<DynamicRecord> dynamicRecords) { Collection<DynamicRecord> list = new ArrayList<>( dynamicRecords.size() ); for ( DynamicRecord record : dynamicRecords) { list.add( record.clone() ); } return list; } }, true); private Map<Integer, RelationshipTypeTokenRecord> relationshipTypeTokenRecords; private Map<Integer, LabelTokenRecord> labelTokenRecords; private Map<Integer, PropertyKeyTokenRecord> propertyKeyTokenRecords; private RecordChanges<Long, NeoStoreRecord, Void> neoStoreRecord; private final Map<Long, Command.NodeCommand> nodeCommands = new TreeMap<>(); private final ArrayList<Command.PropertyCommand> propCommands = new ArrayList<>(); private final ArrayList<Command.RelationshipCommand> relCommands = new ArrayList<>(); private final ArrayList<Command.SchemaRuleCommand> schemaRuleCommands = new ArrayList<>(); private ArrayList<Command.RelationshipTypeTokenCommand> relationshipTypeTokenCommands; private ArrayList<Command.LabelTokenCommand> labelTokenCommands; private ArrayList<Command.PropertyKeyTokenCommand> propertyKeyTokenCommands; private Command.NeoStoreCommand neoStoreCommand; private boolean committed = false; private boolean prepared = false; private final long lastCommittedTxWhenTransactionStarted; private final TransactionState state; private final CacheAccessBackDoor cacheAccess; private final IndexingService indexes; private final NeoStore neoStore; private final LabelScanStore labelScanStore; private final IntegrityValidator integrityValidator; private final KernelTransactionImplementation kernelTransaction; private final LockService locks; /** * @param lastCommittedTxWhenTransactionStarted is the highest committed transaction id when this transaction * begun. No operations in this transaction are allowed to have * taken place before that transaction id. This is used by * constraint validation - if a constraint was not online when this * transaction begun, it will be verified during prepare. If you are * writing code against this API and are unsure about what to set * this value to, 0 is a safe choice. That will ensure all * constraints are checked. * @param kernelTransaction is the vanilla sauce to the WriteTransaction apple pie. 
*/ NeoStoreTransaction( long lastCommittedTxWhenTransactionStarted, XaLogicalLog log, TransactionState state, NeoStore neoStore, CacheAccessBackDoor cacheAccess, IndexingService indexingService, LabelScanStore labelScanStore, IntegrityValidator integrityValidator, KernelTransactionImplementation kernelTransaction, LockService locks ) { super( log, state ); this.lastCommittedTxWhenTransactionStarted = lastCommittedTxWhenTransactionStarted; this.neoStore = neoStore; this.state = state; this.cacheAccess = cacheAccess; this.indexes = indexingService; this.labelScanStore = labelScanStore; this.integrityValidator = integrityValidator; this.kernelTransaction = kernelTransaction; this.locks = locks; } /** * This is a smell, a result of the kernel refactorings. Right now, both NeoStoreTransaction and KernelTransaction * are "publicly" consumable, and one owns the other. In the future, they should be merged such that * KernelTransaction rules supreme, and has internal components to manage the responsibilities currently handled by * WriteTransaction and ReadTransaction. */ public KernelTransactionImplementation kernelTransaction() { return kernelTransaction; } @Override public boolean isReadOnly() { if ( isRecovered() ) { return nodeCommands.size() == 0 && propCommands.size() == 0 && relCommands.size() == 0 && schemaRuleCommands.size() == 0 && relationshipTypeTokenCommands == null && labelTokenCommands == null && propertyKeyTokenCommands == null && kernelTransaction.isReadOnly(); } return nodeRecords.changeSize() == 0 && relRecords.changeSize() == 0 && schemaRuleChanges.changeSize() == 0 && propertyRecords.changeSize() == 0 && relationshipTypeTokenRecords == null && labelTokenRecords == null && propertyKeyTokenRecords == null && kernelTransaction.isReadOnly(); } // Make this accessible in this package @Override protected void setRecovered() { super.setRecovered(); } @Override public void doAddCommand( XaCommand command ) { // override } @Override protected void doPrepare() throws XAException { if ( committed ) { throw new XAException( "Cannot prepare committed transaction[" + getIdentifier() + "]" ); } if ( prepared ) { throw new XAException( "Cannot prepare prepared transaction[" + getIdentifier() + "]" ); } kernelTransaction.prepare(); prepared = true; int noOfCommands = nodeRecords.changeSize() + relRecords.changeSize() + propertyRecords.changeSize() + schemaRuleChanges.changeSize() + (propertyKeyTokenRecords != null ? propertyKeyTokenRecords.size() : 0) + (relationshipTypeTokenRecords != null ? relationshipTypeTokenRecords.size() : 0) + (labelTokenRecords != null ? 
labelTokenRecords.size() : 0); List<Command> commands = new ArrayList<>( noOfCommands ); if ( relationshipTypeTokenRecords != null ) { relationshipTypeTokenCommands = new ArrayList<>(); for ( RelationshipTypeTokenRecord record : relationshipTypeTokenRecords.values() ) { Command.RelationshipTypeTokenCommand command = new Command.RelationshipTypeTokenCommand( neoStore.getRelationshipTypeStore(), record ); relationshipTypeTokenCommands.add( command ); commands.add( command ); } } if ( labelTokenRecords != null ) { labelTokenCommands = new ArrayList<>(); for ( LabelTokenRecord record : labelTokenRecords.values() ) { Command.LabelTokenCommand command = new Command.LabelTokenCommand( neoStore.getLabelTokenStore(), record ); labelTokenCommands.add( command ); commands.add( command ); } } for ( RecordChange<Long, NodeRecord, Void> change : nodeRecords.changes() ) { NodeRecord record = change.forReadingLinkage(); integrityValidator.validateNodeRecord( record ); Command.NodeCommand command = new Command.NodeCommand( neoStore.getNodeStore(), change.getBefore(), record ); nodeCommands.put( record.getId(), command ); commands.add( command ); } for ( RecordChange<Long, RelationshipRecord, Void> record : relRecords.changes() ) { Command.RelationshipCommand command = new Command.RelationshipCommand( neoStore.getRelationshipStore(), record.forReadingLinkage() ); relCommands.add( command ); commands.add( command ); } if ( neoStoreRecord != null ) { for ( RecordChange<Long, NeoStoreRecord, Void> change : neoStoreRecord.changes() ) { neoStoreCommand = new Command.NeoStoreCommand( neoStore, change.forReadingData() ); addCommand( neoStoreCommand ); } } if ( propertyKeyTokenRecords != null ) { propertyKeyTokenCommands = new ArrayList<>(); for ( PropertyKeyTokenRecord record : propertyKeyTokenRecords.values() ) { Command.PropertyKeyTokenCommand command = new Command.PropertyKeyTokenCommand( neoStore.getPropertyStore().getPropertyKeyTokenStore(), record ); propertyKeyTokenCommands.add( command ); commands.add( command ); } } for ( RecordChange<Long, PropertyRecord, PrimitiveRecord> change : propertyRecords.changes() ) { Command.PropertyCommand command = new Command.PropertyCommand( neoStore.getPropertyStore(), change.getBefore(), change.forReadingLinkage() ); propCommands.add( command ); commands.add( command ); } for ( RecordChange<Long, Collection<DynamicRecord>, SchemaRule> change : schemaRuleChanges.changes() ) { integrityValidator.validateSchemaRule( change.getAdditionalData() ); Command.SchemaRuleCommand command = new Command.SchemaRuleCommand( neoStore, neoStore.getSchemaStore(), indexes, change.getBefore(), change.forChangingData(), change.getAdditionalData(), -1 ); schemaRuleCommands.add( command ); commands.add( command ); } assert commands.size() == noOfCommands : "Expected " + noOfCommands + " final commands, got " + commands.size() + " instead"; intercept( commands ); for ( Command command : commands ) { addCommand( command ); } integrityValidator.validateTransactionStartKnowledge( lastCommittedTxWhenTransactionStarted ); } protected void intercept( List<Command> commands ) { // default no op } @Override protected void injectCommand( XaCommand xaCommand ) { if ( xaCommand instanceof Command.NodeCommand ) { NodeCommand nodeCommand = (Command.NodeCommand) xaCommand; nodeCommands.put( nodeCommand.getKey(), nodeCommand ); } else if ( xaCommand instanceof Command.RelationshipCommand ) { relCommands.add( (Command.RelationshipCommand) xaCommand ); } else if ( xaCommand instanceof Command.PropertyCommand ) { 
propCommands.add( (Command.PropertyCommand) xaCommand ); } else if ( xaCommand instanceof Command.PropertyKeyTokenCommand ) { if ( propertyKeyTokenCommands == null ) { propertyKeyTokenCommands = new ArrayList<>(); } propertyKeyTokenCommands.add( (Command.PropertyKeyTokenCommand) xaCommand ); } else if ( xaCommand instanceof Command.RelationshipTypeTokenCommand ) { if ( relationshipTypeTokenCommands == null ) { relationshipTypeTokenCommands = new ArrayList<>(); } relationshipTypeTokenCommands.add( (Command.RelationshipTypeTokenCommand) xaCommand ); } else if ( xaCommand instanceof Command.LabelTokenCommand ) { if ( labelTokenCommands == null ) { labelTokenCommands = new ArrayList<>(); } labelTokenCommands.add( (Command.LabelTokenCommand) xaCommand ); } else if ( xaCommand instanceof Command.NeoStoreCommand ) { assert neoStoreCommand == null; neoStoreCommand = (Command.NeoStoreCommand) xaCommand; } else if ( xaCommand instanceof Command.SchemaRuleCommand ) { schemaRuleCommands.add( (Command.SchemaRuleCommand) xaCommand ); } else { throw new IllegalArgumentException( "Unknown command " + xaCommand ); } } @Override public void doRollback() throws XAException { if ( committed ) { throw new XAException( "Cannot rollback partially committed " + "transaction[" + getIdentifier() + "]. Recover and " + "commit" ); } try { boolean freeIds = neoStore.freeIdsDuringRollback(); if ( relationshipTypeTokenRecords != null ) { for ( RelationshipTypeTokenRecord record : relationshipTypeTokenRecords.values() ) { if ( record.isCreated() ) { if ( freeIds ) { getRelationshipTypeStore().freeId( record.getId() ); } for ( DynamicRecord dynamicRecord : record.getNameRecords() ) { if ( dynamicRecord.isCreated() ) { getRelationshipTypeStore().freeId( (int) dynamicRecord.getId() ); } } } removeRelationshipTypeFromCache( record.getId() ); } } for ( RecordChange<Long, NodeRecord, Void> change : nodeRecords.changes() ) { NodeRecord record = change.forReadingLinkage(); if ( freeIds && record.isCreated() ) { getNodeStore().freeId( record.getId() ); } removeNodeFromCache( record.getId() ); } for ( RecordChange<Long, RelationshipRecord, Void> change : relRecords.changes() ) { long id = change.getKey(); RelationshipRecord record = change.forReadingLinkage(); if ( freeIds && change.isCreated() ) { getRelationshipStore().freeId( id ); } removeRelationshipFromCache( id ); patchDeletedRelationshipNodes( id, record.getFirstNode(), record.getFirstNextRel(), record.getSecondNode(), record.getSecondNextRel() ); } if ( neoStoreRecord != null ) { removeGraphPropertiesFromCache(); } if ( propertyKeyTokenRecords != null ) { for ( PropertyKeyTokenRecord record : propertyKeyTokenRecords.values() ) { if ( record.isCreated() ) { if ( freeIds ) { getPropertyStore().getPropertyKeyTokenStore().freeId( record.getId() ); } for ( DynamicRecord dynamicRecord : record.getNameRecords() ) { if ( dynamicRecord.isCreated() ) { getPropertyStore().getPropertyKeyTokenStore().freeId( (int) dynamicRecord.getId() ); } } } } } for ( RecordChange<Long, PropertyRecord, PrimitiveRecord> change : propertyRecords.changes() ) { PropertyRecord record = change.forReadingLinkage(); if ( record.getNodeId() != -1 ) { removeNodeFromCache( record.getNodeId() ); } else if ( record.getRelId() != -1 ) { removeRelationshipFromCache( record.getRelId() ); } if ( record.isCreated() ) { if ( freeIds ) { getPropertyStore().freeId( record.getId() ); } for ( PropertyBlock block : record.getPropertyBlocks() ) { for ( DynamicRecord dynamicRecord : block.getValueRecords() ) { if (
dynamicRecord.isCreated() ) { if ( dynamicRecord.getType() == PropertyType.STRING.intValue() ) { getPropertyStore().freeStringBlockId( dynamicRecord.getId() ); } else if ( dynamicRecord.getType() == PropertyType.ARRAY.intValue() ) { getPropertyStore().freeArrayBlockId( dynamicRecord.getId() ); } else { throw new InvalidRecordException( "Unknown type on " + dynamicRecord ); } } } } } } for ( RecordChange<Long, Collection<DynamicRecord>, SchemaRule> records : schemaRuleChanges.changes() ) { long id = -1; for ( DynamicRecord record : records.forChangingData() ) { if ( id == -1 ) { id = record.getId(); } if ( freeIds && record.isCreated() ) { getSchemaStore().freeId( record.getId() ); } } } } finally { clear(); } } private void removeRelationshipTypeFromCache( int id ) { cacheAccess.removeRelationshipTypeFromCache( id ); } private void patchDeletedRelationshipNodes( long id, long firstNodeId, long firstNodeNextRelId, long secondNodeId, long secondNextRelId ) { cacheAccess.patchDeletedRelationshipNodes( id, firstNodeId, firstNodeNextRelId, secondNodeId, secondNextRelId ); } private void removeRelationshipFromCache( long id ) { cacheAccess.removeRelationshipFromCache( id ); } private void removeNodeFromCache( long id ) { cacheAccess.removeNodeFromCache( id ); } private void removeGraphPropertiesFromCache() { cacheAccess.removeGraphPropertiesFromCache(); } private void addRelationshipType( int id ) { setRecovered(); Token type = isRecovered() ? neoStore.getRelationshipTypeStore().getToken( id, true ) : neoStore.getRelationshipTypeStore().getToken( id ); cacheAccess.addRelationshipTypeToken( type ); } private void addLabel( int id ) { Token labelId = isRecovered() ? neoStore.getLabelTokenStore().getToken( id, true ) : neoStore.getLabelTokenStore().getToken( id ); cacheAccess.addLabelToken( labelId ); } private void addPropertyKey( int id ) { Token index = isRecovered() ? 
neoStore.getPropertyStore().getPropertyKeyTokenStore().getToken( id, true ) : neoStore.getPropertyStore().getPropertyKeyTokenStore().getToken( id ); cacheAccess.addPropertyKeyToken( index ); } @Override public void doCommit() throws XAException { if ( !isRecovered() && !prepared ) { throw new XAException( "Cannot commit non prepared transaction[" + getIdentifier() + "]" ); } if ( isRecovered() ) { boolean wasInRecovery = neoStore.isInRecoveryMode(); neoStore.setRecoveredStatus( true ); try { applyCommit( true ); return; } finally { neoStore.setRecoveredStatus( wasInRecovery ); } } if ( getCommitTxId() != neoStore.getLastCommittedTx() + 1 ) { throw new RuntimeException( "Tx id: " + getCommitTxId() + " not next transaction (" + neoStore.getLastCommittedTx() + ")" ); } applyCommit( false ); } private void applyCommit( boolean isRecovered ) { try ( LockGroup lockGroup = new LockGroup() ) { committed = true; CommandSorter sorter = new CommandSorter(); // reltypes if ( relationshipTypeTokenCommands != null ) { java.util.Collections.sort( relationshipTypeTokenCommands, sorter ); for ( Command.RelationshipTypeTokenCommand command : relationshipTypeTokenCommands ) { command.execute(); if ( isRecovered ) { addRelationshipType( (int) command.getKey() ); } } } // label keys if ( labelTokenCommands != null ) { java.util.Collections.sort( labelTokenCommands, sorter ); for ( Command.LabelTokenCommand command : labelTokenCommands ) { command.execute(); if ( isRecovered ) { addLabel( (int) command.getKey() ); } } } // property keys if ( propertyKeyTokenCommands != null ) { java.util.Collections.sort( propertyKeyTokenCommands, sorter ); for ( Command.PropertyKeyTokenCommand command : propertyKeyTokenCommands ) { command.execute(); if ( isRecovered ) { addPropertyKey( (int) command.getKey() ); } } } // primitives java.util.Collections.sort( relCommands, sorter ); java.util.Collections.sort( propCommands, sorter ); executeCreated( lockGroup, isRecovered, propCommands, relCommands, nodeCommands.values() ); executeModified( lockGroup, isRecovered, propCommands, relCommands, nodeCommands.values() ); executeDeleted( lockGroup, propCommands, relCommands, nodeCommands.values() ); // property change set for index updates Collection<NodeLabelUpdate> labelUpdates = gatherLabelUpdatesSortedByNodeId(); if ( !labelUpdates.isEmpty() ) { updateLabelScanStore( labelUpdates ); cacheAccess.applyLabelUpdates( labelUpdates ); } if ( !nodeCommands.isEmpty() || !propCommands.isEmpty() ) { indexes.updateIndexes( new LazyIndexUpdates( getNodeStore(), getPropertyStore(), groupedNodePropertyCommands( propCommands ), new HashMap<>( nodeCommands ) ) ); } // schema rules. Execute these after generating the property updates so. If executed // before and we've got a transaction that sets properties/labels as well as creating an index // we might end up with this corner-case: // 1) index rule created and index population job started // 2) index population job processes some nodes, but doesn't complete // 3) we gather up property updates and send those to the indexes. 
The newly created population // job might get those as updates // 4) the population job will apply those updates as added properties, and might end up with duplicate // entries for the same property for ( SchemaRuleCommand command : schemaRuleCommands ) { command.setTxId( getCommitTxId() ); command.execute(); switch ( command.getMode() ) { case DELETE: cacheAccess.removeSchemaRuleFromCache( command.getKey() ); break; default: cacheAccess.addSchemaRule( command.getSchemaRule() ); } } if ( neoStoreCommand != null ) { neoStoreCommand.execute(); if ( isRecovered ) { removeGraphPropertiesFromCache(); } } if ( !isRecovered ) { updateFirstRelationships(); // Update of the cached primitives will happen when calling commitChangesToCache, // which should be done after applyCommit and after the XaResourceManager monitor // has been released. } neoStore.setLastCommittedTx( getCommitTxId() ); if ( isRecovered ) { neoStore.updateIdGenerators(); } } finally { // clear() will be called in commitChangesToCache outside of the XaResourceManager monitor } } private Collection<List<PropertyCommand>> groupedNodePropertyCommands( Iterable<PropertyCommand> propCommands ) { // A bit too expensive data structure, but don't know off the top of my head how to make it better. Map<Long, List<PropertyCommand>> groups = new HashMap<>(); for ( PropertyCommand command : propCommands ) { PropertyRecord record = command.getAfter(); if ( !record.isNodeSet() ) { continue; } long nodeId = command.getAfter().getNodeId(); List<PropertyCommand> group = groups.get( nodeId ); if ( group == null ) { groups.put( nodeId, group = new ArrayList<>() ); } group.add( command ); } return groups.values(); } public void commitChangesToCache() { try { if ( !isRecovered() ) { state.commitCows(); // updates the cached primitives } } finally { clear(); } } private Collection<NodeLabelUpdate> gatherLabelUpdatesSortedByNodeId() { List<NodeLabelUpdate> labelUpdates = new ArrayList<>(); for ( NodeCommand nodeCommand : nodeCommands.values() ) { NodeLabels labelFieldBefore = parseLabelsField( nodeCommand.getBefore() ); NodeLabels labelFieldAfter = parseLabelsField( nodeCommand.getAfter() ); if ( labelFieldBefore.isInlined() && labelFieldAfter.isInlined() && nodeCommand.getBefore().getLabelField() == nodeCommand.getAfter().getLabelField() ) { continue; } long[] labelsBefore = labelFieldBefore.getIfLoaded(); long[] labelsAfter = labelFieldAfter.getIfLoaded(); if ( labelsBefore == null || labelsAfter == null ) { continue; } labelUpdates.add( NodeLabelUpdate.labelChanges( nodeCommand.getKey(), labelsBefore, labelsAfter ) ); } Collections.sort(labelUpdates, new NodeLabelUpdateNodeIdComparator()); return labelUpdates; } private void updateLabelScanStore( Iterable<NodeLabelUpdate> labelUpdates ) { try ( LabelScanWriter writer = labelScanStore.newWriter() ) { for ( NodeLabelUpdate update : labelUpdates ) { writer.write( update ); } } catch ( IOException e ) { throw new UnderlyingStorageException( e ); } } static class LabelChangeSummary { private static final long[] NO_LABELS = new long[0]; private final long[] addedLabels; private final long[] removedLabels; LabelChangeSummary( long[] labelsBefore, long[] labelsAfter ) { // Ids are sorted in the store long[] addedLabels = new long[labelsAfter.length]; long[] removedLabels = new long[labelsBefore.length]; int addedLabelsCursor = 0, removedLabelsCursor = 0; for ( long labelAfter : labelsAfter ) { if ( binarySearch( labelsBefore, labelAfter ) < 0 ) { addedLabels[addedLabelsCursor++] = labelAfter; } } for ( long 
labelBefore : labelsBefore ) { if ( binarySearch( labelsAfter, labelBefore ) < 0 ) { removedLabels[removedLabelsCursor++] = labelBefore; } } // For each property on the node, produce one update for added labels and one for removed labels. this.addedLabels = shrink( addedLabels, addedLabelsCursor ); this.removedLabels = shrink( removedLabels, removedLabelsCursor ); } private long[] shrink( long[] array, int toLength ) { if ( toLength == 0 ) { return NO_LABELS; } return array.length == toLength ? array : copyOf( array, toLength ); } public boolean hasAddedLabels() { return addedLabels.length > 0; } public boolean hasRemovedLabels() { return removedLabels.length > 0; } public long[] getAddedLabels() { return addedLabels; } public long[] getRemovedLabels() { return removedLabels; } } private void updateFirstRelationships() { for ( RecordChange<Long, NodeRecord, Void> change : nodeRecords.changes() ) { NodeRecord record = change.forReadingLinkage(); state.setFirstIds( record.getId(), record.getNextRel(), record.getNextProp() ); } } @SafeVarargs private final void executeCreated( LockGroup lockGroup, boolean removeFromCache, Collection<? extends Command>... commands ) { for ( Collection<? extends Command> c : commands ) { for ( Command command : c ) { if ( command.getMode() == CREATE ) { lockEntity( lockGroup, command ); command.execute(); if ( removeFromCache ) { command.removeFromCache( cacheAccess ); } } } } } @SafeVarargs private final void executeModified( LockGroup lockGroup, boolean removeFromCache, Collection<? extends Command>... commands ) { for ( Collection<? extends Command> c : commands ) { for ( Command command : c ) { if ( command.getMode() == UPDATE ) { lockEntity( lockGroup, command ); command.execute(); if ( removeFromCache ) { command.removeFromCache( cacheAccess ); } } } } } @SafeVarargs private final void executeDeleted( LockGroup lockGroup, Collection<? extends Command>... commands ) { for ( Collection<? extends Command> c : commands ) { for ( Command command : c ) { if ( command.getMode() == DELETE ) { /* * We always update the disk image and then always invalidate the cache. In the case of relationships * this is expected to also patch the relChainPosition in the start and end NodeImpls (if they actually * are in cache). 
*/ lockEntity( lockGroup, command ); command.execute(); command.removeFromCache( cacheAccess ); } } } } private void lockEntity( LockGroup lockGroup, Command command ) { if ( command instanceof NodeCommand ) { lockGroup.add( locks.acquireNodeLock( command.getKey(), LockService.LockType.WRITE_LOCK ) ); } if ( command instanceof Command.PropertyCommand ) { long nodeId = ((Command.PropertyCommand) command).getNodeId(); if ( nodeId != -1 ) { lockGroup.add( locks.acquireNodeLock( nodeId, LockService.LockType.WRITE_LOCK ) ); } } } private void clear() { nodeRecords.clear(); propertyRecords.clear(); relRecords.clear(); schemaRuleChanges.clear(); relationshipTypeTokenRecords = null; labelTokenRecords = null; propertyKeyTokenRecords = null; neoStoreRecord = null; nodeCommands.clear(); propCommands.clear(); propertyKeyTokenCommands = null; relCommands.clear(); schemaRuleCommands.clear(); relationshipTypeTokenCommands = null; labelTokenCommands = null; neoStoreCommand = null; } private RelationshipTypeTokenStore getRelationshipTypeStore() { return neoStore.getRelationshipTypeStore(); } private LabelTokenStore getLabelTokenStore() { return neoStore.getLabelTokenStore(); } private int getRelGrabSize() { return neoStore.getRelationshipGrabSize(); } private NodeStore getNodeStore() { return neoStore.getNodeStore(); } private SchemaStore getSchemaStore() { return neoStore.getSchemaStore(); } private RelationshipStore getRelationshipStore() { return neoStore.getRelationshipStore(); } private PropertyStore getPropertyStore() { return neoStore.getPropertyStore(); } /** * Tries to load the light node with the given id, returns the * record on success. * * @param nodeId The id of the node to load. * @return The light NodeRecord if it was found, null otherwise. */ public NodeRecord nodeLoadLight( long nodeId ) { try { return nodeRecords.getOrLoad( nodeId, null ).forReadingLinkage(); } catch ( InvalidRecordException e ) { return null; } } /** * Tries to load the light relationship with the given id, returns the * record on success. * * @param id The id of the relationship to load. * @return The light RelationshipRecord if it was found, null otherwise. */ public RelationshipRecord relLoadLight( long id ) { try { return relRecords.getOrLoad( id, null ).forReadingLinkage(); } catch ( InvalidRecordException e ) { return null; } } /** * Deletes a node by its id, returning its properties which are now removed. * * @param nodeId The id of the node to delete. * @return The properties of the node that were removed during the delete. */ public ArrayMap<Integer, DefinedProperty> nodeDelete( long nodeId ) { NodeRecord nodeRecord = nodeRecords.getOrLoad( nodeId, null ).forChangingData(); if ( !nodeRecord.inUse() ) { throw new IllegalStateException( "Unable to delete Node[" + nodeId + "] since it has already been deleted." ); } nodeRecord.setInUse( false ); nodeRecord.setLabelField( 0, Collections.<DynamicRecord>emptyList() ); return getAndDeletePropertyChain( nodeRecord ); } /** * Deletes a relationship by its id, returning its properties which are now * removed. It is assumed that the nodes it connects have already been * deleted in this * transaction. * * @param id The id of the relationship to delete. * @return The properties of the relationship that were removed during the * delete. */ public ArrayMap<Integer, DefinedProperty> relDelete( long id ) { RelationshipRecord record = relRecords.getOrLoad( id, null ).forChangingLinkage(); if ( !record.inUse() ) { throw new IllegalStateException( "Unable to delete relationship[" + id + "] since it is already deleted."
); } ArrayMap<Integer, DefinedProperty> propertyMap = getAndDeletePropertyChain( record ); disconnectRelationship( record ); updateNodes( record ); record.setInUse( false ); return propertyMap; } private ArrayMap<Integer, DefinedProperty> getAndDeletePropertyChain( PrimitiveRecord primitive ) { ArrayMap<Integer, DefinedProperty> result = new ArrayMap<>( (byte) 9, false, true ); long nextProp = primitive.getNextProp(); while ( nextProp != Record.NO_NEXT_PROPERTY.intValue() ) { RecordChange<Long, PropertyRecord, PrimitiveRecord> propertyChange = propertyRecords.getOrLoad( nextProp, primitive ); // TODO forChanging/forReading piggy-backing PropertyRecord propRecord = propertyChange.forChangingData(); PropertyRecord before = propertyChange.getBefore(); for ( PropertyBlock block : before.getPropertyBlocks() ) { result.put( block.getKeyIndexId(), block.newPropertyData( getPropertyStore() ) ); } for ( PropertyBlock block : propRecord.getPropertyBlocks() ) { for ( DynamicRecord valueRecord : block.getValueRecords() ) { assert valueRecord.inUse(); valueRecord.setInUse( false ); propRecord.addDeletedRecord( valueRecord ); } } nextProp = propRecord.getNextProp(); propRecord.setInUse( false ); propRecord.setChanged( primitive ); // We do not remove them individually, but all together here propRecord.getPropertyBlocks().clear(); } return result; } private void disconnectRelationship( RelationshipRecord rel ) { // update first node prev if ( rel.getFirstPrevRel() != Record.NO_NEXT_RELATIONSHIP.intValue() ) { Relationship lockableRel = new LockableRelationship( rel.getFirstPrevRel() ); getWriteLock( lockableRel ); RelationshipRecord prevRel = relRecords.getOrLoad( rel.getFirstPrevRel(), null ).forChangingLinkage(); boolean changed = false; if ( prevRel.getFirstNode() == rel.getFirstNode() ) { prevRel.setFirstNextRel( rel.getFirstNextRel() ); changed = true; } if ( prevRel.getSecondNode() == rel.getFirstNode() ) { prevRel.setSecondNextRel( rel.getFirstNextRel() ); changed = true; } if ( !changed ) { throw new InvalidRecordException( prevRel + " don't match " + rel ); } } // update first node next if ( rel.getFirstNextRel() != Record.NO_NEXT_RELATIONSHIP.intValue() ) { Relationship lockableRel = new LockableRelationship( rel.getFirstNextRel() ); getWriteLock( lockableRel ); RelationshipRecord nextRel = relRecords.getOrLoad( rel.getFirstNextRel(), null ).forChangingLinkage(); boolean changed = false; if ( nextRel.getFirstNode() == rel.getFirstNode() ) { nextRel.setFirstPrevRel( rel.getFirstPrevRel() ); changed = true; } if ( nextRel.getSecondNode() == rel.getFirstNode() ) { nextRel.setSecondPrevRel( rel.getFirstPrevRel() ); changed = true; } if ( !changed ) { throw new InvalidRecordException( nextRel + " don't match " + rel ); } } // update second node prev if ( rel.getSecondPrevRel() != Record.NO_NEXT_RELATIONSHIP.intValue() ) { Relationship lockableRel = new LockableRelationship( rel.getSecondPrevRel() ); getWriteLock( lockableRel ); RelationshipRecord prevRel = relRecords.getOrLoad( rel.getSecondPrevRel(), null ).forChangingLinkage(); boolean changed = false; if ( prevRel.getFirstNode() == rel.getSecondNode() ) { prevRel.setFirstNextRel( rel.getSecondNextRel() ); changed = true; } if ( prevRel.getSecondNode() == rel.getSecondNode() ) { prevRel.setSecondNextRel( rel.getSecondNextRel() ); changed = true; } if ( !changed ) { throw new InvalidRecordException( prevRel + " don't match " + rel ); } } // update second node next if ( rel.getSecondNextRel() != Record.NO_NEXT_RELATIONSHIP.intValue() ) { 
Relationship lockableRel = new LockableRelationship( rel.getSecondNextRel() ); getWriteLock( lockableRel ); RelationshipRecord nextRel = relRecords.getOrLoad( rel.getSecondNextRel(), null ).forChangingLinkage(); boolean changed = false; if ( nextRel.getFirstNode() == rel.getSecondNode() ) { nextRel.setFirstPrevRel( rel.getSecondPrevRel() ); changed = true; } if ( nextRel.getSecondNode() == rel.getSecondNode() ) { nextRel.setSecondPrevRel( rel.getSecondPrevRel() ); changed = true; } if ( !changed ) { throw new InvalidRecordException( nextRel + " don't match " + rel ); } } } private void getWriteLock( Relationship lockableRel ) { state.acquireWriteLock( lockableRel ); } public long getRelationshipChainPosition( long nodeId ) { return nodeRecords.getOrLoad( nodeId, null ).getBefore().getNextRel(); } /* * The Map in the returned Pair has up to three entries, keyed by direction: * OUTGOING relationships, INCOMING relationships and BOTH for loops. * * The Long is the relationship chain position as it stands after this * batch of relationships has been loaded. */ public Pair<Map<DirectionWrapper, Iterable<RelationshipRecord>>, Long> getMoreRelationships( long nodeId, long position ) { return getMoreRelationships( nodeId, position, getRelGrabSize(), getRelationshipStore() ); } private void updateNodes( RelationshipRecord rel ) { if ( rel.getFirstPrevRel() == Record.NO_PREV_RELATIONSHIP.intValue() ) { NodeRecord firstNode = nodeRecords.getOrLoad( rel.getFirstNode(), null ).forChangingLinkage(); firstNode.setNextRel( rel.getFirstNextRel() ); } if ( rel.getSecondPrevRel() == Record.NO_PREV_RELATIONSHIP.intValue() ) { NodeRecord secondNode = nodeRecords.getOrLoad( rel.getSecondNode(), null ).forChangingLinkage(); secondNode.setNextRel( rel.getSecondNextRel() ); } } /** * Removes the given property identified by its index from the relationship * with the given id. * * @param relId The id of the relationship that is to have the property * removed. * @param propertyKey The index key of the property. */ public void relRemoveProperty( long relId, int propertyKey ) { RecordChange<Long, RelationshipRecord, Void> rel = relRecords.getOrLoad( relId, null ); RelationshipRecord relRecord = rel.forReadingLinkage(); if ( !relRecord.inUse() ) { throw new IllegalStateException( "Property remove on relationship[" + relId + "] illegal since it has been deleted." ); } assert assertPropertyChain( relRecord ); removeProperty( relRecord, rel, propertyKey ); } /** * Loads the complete property chain for the given relationship and returns * it as a map from property index id to property data. * * @param relId The id of the relationship whose properties to load. * @param light If the properties should be loaded light or not. * @param receiver receiver of loaded properties. */ public void relLoadProperties( long relId, boolean light, PropertyReceiver receiver ) { RecordChange<Long, RelationshipRecord, Void> rel = relRecords.getIfLoaded( relId ); if ( rel != null ) { if ( rel.isCreated() ) { return; } if ( !rel.forReadingLinkage().inUse() && !light ) { throw new IllegalStateException( "Relationship[" + relId + "] has been deleted in this tx" ); } } RelationshipRecord relRecord = getRelationshipStore().getRecord( relId ); if ( !relRecord.inUse() ) { throw new InvalidRecordException( "Relationship[" + relId + "] not in use" ); } loadProperties( getPropertyStore(), relRecord.getNextProp(), receiver ); } /** * Loads the complete property chain for the given node and returns it as a * map from property index id to property data.
* * @param nodeId The id of the node whose properties to load. * @param light If the properties should be loaded light or not. * @param receiver receiver of loaded properties. */ public void nodeLoadProperties( long nodeId, boolean light, PropertyReceiver receiver ) { RecordChange<Long, NodeRecord, Void> node = nodeRecords.getIfLoaded( nodeId ); if ( node != null ) { if ( node.isCreated() ) { return; } if ( !node.forReadingLinkage().inUse() && !light ) { throw new IllegalStateException( "Node[" + nodeId + "] has been deleted in this tx" ); } } NodeRecord nodeRecord = getNodeStore().getRecord( nodeId ); if ( !nodeRecord.inUse() ) { throw new IllegalStateException( "Node[" + nodeId + "] has been deleted in this tx" ); } loadProperties( getPropertyStore(), nodeRecord.getNextProp(), receiver ); } /** * Removes the given property identified by indexKeyId of the node with the * given id. * * @param nodeId The id of the node that is to have the property removed. * @param propertyKey The index key of the property. */ public void nodeRemoveProperty( long nodeId, int propertyKey ) { RecordChange<Long, NodeRecord, Void> node = nodeRecords.getOrLoad( nodeId, null ); NodeRecord nodeRecord = node.forReadingLinkage(); if ( !nodeRecord.inUse() ) { throw new IllegalStateException( "Property remove on node[" + nodeId + "] illegal since it has been deleted." ); } assert assertPropertyChain( nodeRecord ); removeProperty( nodeRecord, node, propertyKey ); } private <P extends PrimitiveRecord> void removeProperty( P primitive, RecordChange<Long, P, Void> primitiveRecordChange, int propertyKey ) { long propertyId = findPropertyRecordContaining( primitive, propertyKey ); RecordChange<Long, PropertyRecord, PrimitiveRecord> recordChange = propertyRecords.getOrLoad( propertyId, primitiveRecordChange.forReadingLinkage() ); PropertyRecord propRecord = recordChange.forChangingData(); if ( !propRecord.inUse() ) { throw new IllegalStateException( "Unable to delete property[" + propertyId + "] since it is already deleted." ); } PropertyBlock block = propRecord.removePropertyBlock( propertyKey ); if ( block == null ) { throw new IllegalStateException( "Property with index[" + propertyKey + "] is not present in property[" + propertyId + "]" ); } for ( DynamicRecord valueRecord : block.getValueRecords() ) { assert valueRecord.inUse(); valueRecord.setInUse( false, block.getType().intValue() ); propRecord.addDeletedRecord( valueRecord ); } if ( propRecord.size() > 0 ) { /* * There are remaining blocks in the record. We do not unlink yet.
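* The record is merely flagged as changed below, so its remaining blocks are rewritten in place.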
*/ propRecord.setChanged( primitiveRecordChange.forReadingLinkage() ); assert assertPropertyChain( primitiveRecordChange.forReadingLinkage() ); } else { unlinkPropertyRecord( propRecord, primitiveRecordChange ); } } private <P extends PrimitiveRecord> void unlinkPropertyRecord( PropertyRecord propRecord, RecordChange<Long, P, Void> primitiveRecordChange ) { P primitive = primitiveRecordChange.forReadingLinkage(); assert assertPropertyChain( primitive ); assert propRecord.size() == 0; long prevProp = propRecord.getPrevProp(); long nextProp = propRecord.getNextProp(); if ( primitive.getNextProp() == propRecord.getId() ) { assert propRecord.getPrevProp() == Record.NO_PREVIOUS_PROPERTY.intValue() : propRecord + " for " + primitive; primitiveRecordChange.forChangingLinkage().setNextProp( nextProp ); } if ( prevProp != Record.NO_PREVIOUS_PROPERTY.intValue() ) { PropertyRecord prevPropRecord = propertyRecords.getOrLoad( prevProp, primitive ).forChangingLinkage(); assert prevPropRecord.inUse() : prevPropRecord + "->" + propRecord + " for " + primitive; prevPropRecord.setNextProp( nextProp ); prevPropRecord.setChanged( primitive ); } if ( nextProp != Record.NO_NEXT_PROPERTY.intValue() ) { PropertyRecord nextPropRecord = propertyRecords.getOrLoad( nextProp, primitive ).forChangingLinkage(); assert nextPropRecord.inUse() : propRecord + "->" + nextPropRecord + " for " + primitive; nextPropRecord.setPrevProp( prevProp ); nextPropRecord.setChanged( primitive ); } propRecord.setInUse( false ); /* * The following two are not needed - the above line does all the work (PropertyStore * does not write out the prev/next for !inUse records). It is nice to set this * however to check for consistency when assertPropertyChain(). */ propRecord.setPrevProp( Record.NO_PREVIOUS_PROPERTY.intValue() ); propRecord.setNextProp( Record.NO_NEXT_PROPERTY.intValue() ); propRecord.setChanged( primitive ); assert assertPropertyChain( primitive ); } /** * Changes an existing property's value of the given relationship, with the * given index to the passed value * * @param relId The id of the relationship which holds the property to * change. * @param propertyKey The index of the key of the property to change. * @param value The new value of the property. * @return The changed property, as a PropertyData object. */ public DefinedProperty relChangeProperty( long relId, int propertyKey, Object value ) { RecordChange<Long, RelationshipRecord, Void> rel = relRecords.getOrLoad( relId, null ); if ( !rel.forReadingLinkage().inUse() ) { throw new IllegalStateException( "Property change on relationship[" + relId + "] illegal since it has been deleted." ); } return primitiveChangeProperty( rel, propertyKey, value ); } /** * Changes an existing property of the given node, with the given index to * the passed value * * @param nodeId The id of the node which holds the property to change. * @param propertyKey The index of the key of the property to change. * @param value The new value of the property. * @return The changed property, as a PropertyData object. */ public DefinedProperty nodeChangeProperty( long nodeId, int propertyKey, Object value ) { RecordChange<Long, NodeRecord, Void> node = nodeRecords.getOrLoad( nodeId, null ); //getNodeRecord( nodeId ); if ( !node.forReadingLinkage().inUse() ) { throw new IllegalStateException( "Property change on node[" + nodeId + "] illegal since it has been deleted." 
); } return primitiveChangeProperty( node, propertyKey, value ); } /** * TODO MP: introduces performance regression * This method was introduced while moving handling of entity properties from NodeImpl/RelationshipImpl * to the {@link KernelAPI}. The reason was that the {@link Property} object at the time didn't have a notion * of property record id, and wasn't meant to have one. */ private long findPropertyRecordContaining( PrimitiveRecord primitive, int propertyKey ) { long propertyRecordId = primitive.getNextProp(); while ( !Record.NO_NEXT_PROPERTY.is( propertyRecordId ) ) { PropertyRecord propertyRecord = propertyRecords.getOrLoad( propertyRecordId, primitive ).forReadingLinkage(); if ( propertyRecord.getPropertyBlock( propertyKey ) != null ) { return propertyRecordId; } propertyRecordId = propertyRecord.getNextProp(); } throw new IllegalStateException( "No property record in property chain for " + primitive + " contained property with key " + propertyKey ); } private <P extends PrimitiveRecord> DefinedProperty primitiveChangeProperty( RecordChange<Long, P, Void> primitiveRecordChange, int propertyKey, Object value ) { P primitive = primitiveRecordChange.forReadingLinkage(); assert assertPropertyChain( primitive ); long propertyId = findPropertyRecordContaining( primitive, propertyKey ); PropertyRecord propertyRecord = propertyRecords.getOrLoad( propertyId, primitive ).forChangingData(); if ( !propertyRecord.inUse() ) { throw new IllegalStateException( "Unable to change property[" + propertyId + "] since it has been deleted." ); } PropertyBlock block = propertyRecord.getPropertyBlock( propertyKey ); if ( block == null ) { throw new IllegalStateException( "Property with index[" + propertyKey + "] is not present in property[" + propertyId + "]" ); } propertyRecord.setChanged( primitive ); for ( DynamicRecord record : block.getValueRecords() ) { assert record.inUse(); record.setInUse( false, block.getType().intValue() ); propertyRecord.addDeletedRecord( record ); } getPropertyStore().encodeValue( block, propertyKey, value ); if ( propertyRecord.size() > PropertyType.getPayloadSize() ) { propertyRecord.removePropertyBlock( propertyKey ); /* * The record should never, ever be above max size. Less obviously, it should * never remain empty. If removing a property because it won't fit when changing * it leaves the record empty it means that this block was the last one which * means that it doesn't fit in an empty record. Where I come from, we call this * weird. * assert propertyRecord.size() <= PropertyType.getPayloadSize() : propertyRecord; assert propertyRecord.size() > 0 : propertyRecord; */ addPropertyBlockToPrimitive( block, primitiveRecordChange ); } assert assertPropertyChain( primitive ); return Property.property( propertyKey, value ); } private <P extends PrimitiveRecord> DefinedProperty addPropertyToPrimitive( RecordChange<Long, P, Void> node, int propertyKey, Object value ) { P record = node.forReadingLinkage(); assert assertPropertyChain( record ); PropertyBlock block = new PropertyBlock(); getPropertyStore().encodeValue( block, propertyKey, value ); addPropertyBlockToPrimitive( block, node ); assert assertPropertyChain( record ); return Property.property( propertyKey, value ); } /** * Adds a property to the given relationship, with the given index and * value. * * @param relId The id of the relationship to which to add the property. * @param propertyKey The index of the key of the property to add. * @param value The value of the property.
* @return The added property, as a PropertyData object. */ public DefinedProperty relAddProperty( long relId, int propertyKey, Object value ) { RecordChange<Long, RelationshipRecord, Void> rel = relRecords.getOrLoad( relId, null ); RelationshipRecord relRecord = rel.forReadingLinkage(); if ( !relRecord.inUse() ) { throw new IllegalStateException( "Property add on relationship[" + relId + "] illegal since it has been deleted." ); } return addPropertyToPrimitive( rel, propertyKey, value ); } /** * Adds a property to the given node, with the given index and value. * * @param nodeId The id of the node to which to add the property. * @param propertyKey The index of the key of the property to add. * @param value The value of the property. * @return The added property, as a PropertyData object. */ public DefinedProperty nodeAddProperty( long nodeId, int propertyKey, Object value ) { RecordChange<Long, NodeRecord, Void> node = nodeRecords.getOrLoad( nodeId, null ); NodeRecord nodeRecord = node.forReadingLinkage(); if ( !nodeRecord.inUse() ) { throw new IllegalStateException( "Property add on node[" + nodeId + "] illegal since it has been deleted." ); } return addPropertyToPrimitive( node, propertyKey, value ); } private <P extends PrimitiveRecord> void addPropertyBlockToPrimitive( PropertyBlock block, RecordChange<Long, P, Void> primitiveRecordChange ) { P primitive = primitiveRecordChange.forReadingLinkage(); assert assertPropertyChain( primitive ); int newBlockSizeInBytes = block.getSize(); /* * Here we could either iterate over the whole chain or just go for the first record * which is the most likely to be the less full one. Currently we opt for the second * to perform better. */ PropertyRecord host = null; long firstProp = primitive.getNextProp(); if ( firstProp != Record.NO_NEXT_PROPERTY.intValue() ) { // We do not store in map - might not have enough space RecordChange<Long, PropertyRecord, PrimitiveRecord> change = propertyRecords .getOrLoad( firstProp, primitive ); PropertyRecord propRecord = change.forReadingLinkage(); assert propRecord.getPrevProp() == Record.NO_PREVIOUS_PROPERTY.intValue() : propRecord + " for " + primitive; assert propRecord.inUse() : propRecord; int propSize = propRecord.size(); assert propSize > 0 : propRecord; if ( propSize + newBlockSizeInBytes <= PropertyType.getPayloadSize() ) { propRecord = change.forChangingData(); host = propRecord; host.addPropertyBlock( block ); host.setChanged( primitive ); } } if ( host == null ) { // First record in chain didn't fit, make new one host = propertyRecords.create( getPropertyStore().nextId(), primitive ).forChangingData(); if ( primitive.getNextProp() != Record.NO_NEXT_PROPERTY.intValue() ) { PropertyRecord prevProp = propertyRecords.getOrLoad( primitive.getNextProp(), primitive ) .forChangingLinkage(); assert prevProp.getPrevProp() == Record.NO_PREVIOUS_PROPERTY.intValue(); prevProp.setPrevProp( host.getId() ); host.setNextProp( prevProp.getId() ); prevProp.setChanged( primitive ); } primitiveRecordChange.forChangingLinkage().setNextProp( host.getId() ); host.addPropertyBlock( block ); host.setInUse( true ); } // Ok, here host does for the job. Use it assert assertPropertyChain( primitive ); } /** * Creates a relationship with the given id, from the nodes identified by id * and of type typeId * * @param id The id of the relationship to create. * @param type The id of the relationship type this relationship will * have. * @param firstNodeId The id of the start node. * @param secondNodeId The id of the end node. 
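* The new relationship is spliced in at the head of both nodes' relationship chains.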
*/ public void relationshipCreate( long id, int type, long firstNodeId, long secondNodeId ) { NodeRecord firstNode = nodeRecords.getOrLoad( firstNodeId, null ).forChangingLinkage(); if ( !firstNode.inUse() ) { throw new IllegalStateException( "First node[" + firstNodeId + "] is deleted and cannot be used to create a relationship" ); } NodeRecord secondNode = nodeRecords.getOrLoad( secondNodeId, null ).forChangingLinkage(); if ( !secondNode.inUse() ) { throw new IllegalStateException( "Second node[" + secondNodeId + "] is deleted and cannot be used to create a relationship" ); } RelationshipRecord record = relRecords.create( id, null ).forChangingLinkage(); record.setLinks( firstNodeId, secondNodeId, type ); record.setInUse( true ); record.setCreated(); connectRelationship( firstNode, secondNode, record ); } private void connectRelationship( NodeRecord firstNode, NodeRecord secondNode, RelationshipRecord rel ) { assert firstNode.getNextRel() != rel.getId(); assert secondNode.getNextRel() != rel.getId(); rel.setFirstNextRel( firstNode.getNextRel() ); rel.setSecondNextRel( secondNode.getNextRel() ); connect( firstNode, rel ); connect( secondNode, rel ); firstNode.setNextRel( rel.getId() ); secondNode.setNextRel( rel.getId() ); } private void connect( NodeRecord node, RelationshipRecord rel ) { if ( node.getNextRel() != Record.NO_NEXT_RELATIONSHIP.intValue() ) { Relationship lockableRel = new LockableRelationship( node.getNextRel() ); getWriteLock( lockableRel ); RelationshipRecord nextRel = relRecords.getOrLoad( node.getNextRel(), null ).forChangingLinkage(); boolean changed = false; if ( nextRel.getFirstNode() == node.getId() ) { nextRel.setFirstPrevRel( rel.getId() ); changed = true; } if ( nextRel.getSecondNode() == node.getId() ) { nextRel.setSecondPrevRel( rel.getId() ); changed = true; } if ( !changed ) { throw new InvalidRecordException( node + " don't match " + nextRel ); } } } /** * Creates a node for the given id. * * @param nodeId The id of the node to create. */ public void nodeCreate( long nodeId ) { NodeRecord nodeRecord = nodeRecords.create( nodeId, null ).forChangingData(); nodeRecord.setInUse( true ); nodeRecord.setCreated(); } /** * Creates a property index entry out of the given id and string. * * @param key The key of the property index, as a string. * @param id The property index record id. */ public void createPropertyKeyToken( String key, int id ) { PropertyKeyTokenRecord record = new PropertyKeyTokenRecord( id ); record.setInUse( true ); record.setCreated(); PropertyKeyTokenStore propIndexStore = getPropertyStore().getPropertyKeyTokenStore(); Collection<DynamicRecord> nameRecords = propIndexStore.allocateNameRecords( encodeString( key ) ); record.setNameId( (int) first( nameRecords ).getId() ); record.addNameRecords( nameRecords ); addPropertyKeyTokenRecord( record ); } /** * Creates a label token entry out of the given id and string. * * @param name The name of the label, as a string. * @param id The label token record id. */ public void createLabelToken( String name, int id ) { LabelTokenRecord record = new LabelTokenRecord( id ); record.setInUse( true ); record.setCreated(); LabelTokenStore labelTokenStore = getLabelTokenStore(); Collection<DynamicRecord> nameRecords = labelTokenStore.allocateNameRecords( encodeString( name ) ); record.setNameId( (int) first( nameRecords ).getId() ); record.addNameRecords( nameRecords ); addLabelIdRecord( record ); } /** * Creates a new RelationshipType record with the given id that has the * given name.
* * @param id The id of the new relationship type record. * @param name The name of the relationship type. */ public void createRelationshipTypeToken( int id, String name ) { RelationshipTypeTokenRecord record = new RelationshipTypeTokenRecord( id ); record.setInUse( true ); record.setCreated(); Collection<DynamicRecord> typeNameRecords = getRelationshipTypeStore().allocateNameRecords( encodeString( name ) ); record.setNameId( (int) first( typeNameRecords ).getId() ); record.addNameRecords( typeNameRecords ); addRelationshipTypeRecord( record ); } static class CommandSorter implements Comparator<Command>, Serializable { @Override public int compare( Command o1, Command o2 ) { long id1 = o1.getKey(); long id2 = o2.getKey(); long diff = id1 - id2; if ( diff > Integer.MAX_VALUE ) { return Integer.MAX_VALUE; } else if ( diff < Integer.MIN_VALUE ) { return Integer.MIN_VALUE; } else { return (int) diff; } } @Override public boolean equals( Object o ) { return o instanceof CommandSorter; } @Override public int hashCode() { return 3217; } } void addRelationshipTypeRecord( RelationshipTypeTokenRecord record ) { if ( relationshipTypeTokenRecords == null ) { relationshipTypeTokenRecords = new HashMap<>(); } relationshipTypeTokenRecords.put( record.getId(), record ); } void addLabelIdRecord( LabelTokenRecord record ) { if ( labelTokenRecords == null ) { labelTokenRecords = new HashMap<>(); } labelTokenRecords.put( record.getId(), record ); } void addPropertyKeyTokenRecord( PropertyKeyTokenRecord record ) { if ( propertyKeyTokenRecords == null ) { propertyKeyTokenRecords = new HashMap<>(); } propertyKeyTokenRecords.put( record.getId(), record ); } private static class LockableRelationship implements Relationship { private final long id; LockableRelationship( long id ) { this.id = id; } @Override public void delete() { throw new UnsupportedOperationException( "Lockable rel" ); } @Override public Node getEndNode() { throw new UnsupportedOperationException( "Lockable rel" ); } @Override public long getId() { return this.id; } @Override public GraphDatabaseService getGraphDatabase() { throw new UnsupportedOperationException( "Lockable rel" ); } @Override public Node[] getNodes() { throw new UnsupportedOperationException( "Lockable rel" ); } @Override public Node getOtherNode( Node node ) { throw new UnsupportedOperationException( "Lockable rel" ); } @Override public Object getProperty( String key ) { throw new UnsupportedOperationException( "Lockable rel" ); } @Override public Object getProperty( String key, Object defaultValue ) { throw new UnsupportedOperationException( "Lockable rel" ); } @Override public Iterable<String> getPropertyKeys() { throw new UnsupportedOperationException( "Lockable rel" ); } @Override public Node getStartNode() { throw new UnsupportedOperationException( "Lockable rel" ); } @Override public RelationshipType getType() { throw new UnsupportedOperationException( "Lockable rel" ); } @Override public boolean isType( RelationshipType type ) { throw new UnsupportedOperationException( "Lockable rel" ); } @Override public boolean hasProperty( String key ) { throw new UnsupportedOperationException( "Lockable rel" ); } @Override public Object removeProperty( String key ) { throw new UnsupportedOperationException( "Lockable rel" ); } @Override public void setProperty( String key, Object value ) { throw new UnsupportedOperationException( "Lockable rel" ); } @Override public boolean equals( Object o ) { return o instanceof Relationship && this.getId() == ((Relationship) o).getId(); } 
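// The hash below folds the 64-bit relationship id into 32 bits by XOR-ing its high and low words, so ids that differ only in the upper half still hash differently.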
@Override public int hashCode() { return (int) ((id >>> 32) ^ id); } @Override public String toString() { return "Lockable relationship #" + this.getId(); } } private boolean assertPropertyChain( PrimitiveRecord primitive ) { List<PropertyRecord> toCheck = new LinkedList<>(); long nextIdToFetch = primitive.getNextProp(); while ( nextIdToFetch != Record.NO_NEXT_PROPERTY.intValue() ) { PropertyRecord propRecord = propertyRecords.getOrLoad( nextIdToFetch, primitive ).forReadingLinkage(); toCheck.add( propRecord ); assert propRecord.inUse() : primitive + "->" + Arrays.toString( toCheck.toArray() ); nextIdToFetch = propRecord.getNextProp(); } if ( toCheck.isEmpty() ) { assert primitive.getNextProp() == Record.NO_NEXT_PROPERTY.intValue() : primitive; return true; } PropertyRecord first = toCheck.get( 0 ); PropertyRecord last = toCheck.get( toCheck.size() - 1 ); assert first.getPrevProp() == Record.NO_PREVIOUS_PROPERTY.intValue() : primitive + "->" + Arrays.toString( toCheck.toArray() ); assert last.getNextProp() == Record.NO_NEXT_PROPERTY.intValue() : primitive + "->" + Arrays.toString( toCheck.toArray() ); PropertyRecord current, previous = first; for ( int i = 1; i < toCheck.size(); i++ ) { current = toCheck.get( i ); assert current.getPrevProp() == previous.getId() : primitive + "->" + Arrays.toString( toCheck.toArray() ); assert previous.getNextProp() == current.getId() : primitive + "->" + Arrays.toString( toCheck.toArray() ); previous = current; } return true; } private RecordChange<Long, NeoStoreRecord, Void> getOrLoadNeoStoreRecord() { if ( neoStoreRecord == null ) { neoStoreRecord = new RecordChanges<>( new RecordChanges.Loader<Long, NeoStoreRecord, Void>() { @Override public NeoStoreRecord newUnused( Long key, Void additionalData ) { throw new UnsupportedOperationException(); } @Override public NeoStoreRecord load( Long key, Void additionalData ) { return neoStore.asRecord(); } @Override public void ensureHeavy( NeoStoreRecord record ) { } @Override public NeoStoreRecord clone(NeoStoreRecord neoStoreRecord) { // We do not expect to manage the before state, so this operation will not be called. throw new UnsupportedOperationException("Clone on NeoStoreRecord"); } }, false ); } return neoStoreRecord.getOrLoad( 0L, null ); } /** * Adds a property to the graph, with the given index and value. * * @param propertyKey The index of the key of the property to add. * @param value The value of the property. * @return The added property, as a PropertyData object. */ public DefinedProperty graphAddProperty( int propertyKey, Object value ) { PropertyBlock block = new PropertyBlock(); /* * Encoding has to be set here before anything is changed, * since an exception could be thrown in encodeValue now and tx not marked * rollback only. */ getPropertyStore().encodeValue( block, propertyKey, value ); RecordChange<Long, NeoStoreRecord, Void> change = getOrLoadNeoStoreRecord(); addPropertyBlockToPrimitive( block, change ); assert assertPropertyChain( change.forReadingLinkage() ); return Property.property( propertyKey, value ); } /** * Changes an existing property of the graph, with the given index to * the passed value * * @param propertyKey The index of the key of the property to change. * @param value The new value of the property. * @return The changed property, as a PropertyData object. 
*/ public DefinedProperty graphChangeProperty( int propertyKey, Object value ) { return primitiveChangeProperty( getOrLoadNeoStoreRecord(), propertyKey, value ); } /** * Removes the property with the given property key index from the graph. * * @param propertyKey The index key of the property. */ public void graphRemoveProperty( int propertyKey ) { RecordChange<Long, NeoStoreRecord, Void> recordChange = getOrLoadNeoStoreRecord(); removeProperty( recordChange.forReadingLinkage(), recordChange, propertyKey ); } /** * Loads the complete property chain for the graph and hands each loaded * property to the given receiver. * * @param light If the properties should be loaded light or not. * @param records receiver of loaded properties. */ public void graphLoadProperties( boolean light, PropertyReceiver records ) { loadProperties( getPropertyStore(), neoStore.asRecord().getNextProp(), records ); } public void createSchemaRule( SchemaRule schemaRule ) { for(DynamicRecord change : schemaRuleChanges.create( schemaRule.getId(), schemaRule ).forChangingData()) { change.setInUse( true ); change.setCreated(); } } public void dropSchemaRule( SchemaRule rule ) { RecordChange<Long, Collection<DynamicRecord>, SchemaRule> change = schemaRuleChanges.getOrLoad(rule.getId(), rule); Collection<DynamicRecord> records = change.forChangingData(); for ( DynamicRecord record : records ) { record.setInUse( false ); } } public void addLabelToNode( int labelId, long nodeId ) { NodeRecord nodeRecord = nodeRecords.getOrLoad( nodeId, null ).forChangingData(); parseLabelsField( nodeRecord ).add( labelId, getNodeStore() ); } public void removeLabelFromNode( int labelId, long nodeId ) { NodeRecord nodeRecord = nodeRecords.getOrLoad( nodeId, null ).forChangingData(); parseLabelsField( nodeRecord ).remove( labelId, getNodeStore() ); } public PrimitiveLongIterator getLabelsForNode( long nodeId ) { // Don't consider changes in this transaction NodeRecord node = getNodeStore().getRecord( nodeId ); return asPrimitiveIterator( parseLabelsField( node ).get( getNodeStore() ) ); } public void setConstraintIndexOwner( IndexRule indexRule, long constraintId ) { RecordChange<Long, Collection<DynamicRecord>, SchemaRule> change = schemaRuleChanges.getOrLoad( indexRule.getId(), indexRule ); Collection<DynamicRecord> records = change.forChangingData(); indexRule = indexRule.withOwningConstraint( constraintId ); records.clear(); records.addAll( getSchemaStore().allocateFrom( indexRule ) ); } private Pair<Map<DirectionWrapper, Iterable<RelationshipRecord>>, Long> getMoreRelationships( long nodeId, long position, int grabSize, RelationshipStore relStore ) { // initialCapacity=grabSize saves the lists the trouble of resizing List<RelationshipRecord> out = new ArrayList<>( grabSize ); List<RelationshipRecord> in = new ArrayList<>( grabSize ); List<RelationshipRecord> loop = null; Map<DirectionWrapper, Iterable<RelationshipRecord>> result = new EnumMap<>( DirectionWrapper.class ); result.put( DirectionWrapper.OUTGOING, out ); result.put( DirectionWrapper.INCOMING, in ); for ( int i = 0; i < grabSize && position != Record.NO_NEXT_RELATIONSHIP.intValue(); i++ ) { RelationshipRecord relRecord = relStore.getChainRecord( position ); if ( relRecord == null ) { // return what we got so far return Pair.of( result, position ); } long firstNode = relRecord.getFirstNode(); long secondNode = relRecord.getSecondNode(); if ( relRecord.inUse() ) { if ( firstNode == secondNode ) { if ( loop == null ) { // This is done lazily because loops are probably quite
// rarely encountered loop = new ArrayList<>(); result.put( DirectionWrapper.BOTH, loop ); } loop.add( relRecord ); } else if ( firstNode == nodeId ) { out.add( relRecord ); } else if ( secondNode == nodeId ) { in.add( relRecord ); } } else { i--; } if ( firstNode == nodeId ) { position = relRecord.getFirstNextRel(); } else if ( secondNode == nodeId ) { position = relRecord.getSecondNextRel(); } else { throw new InvalidRecordException( "Node[" + nodeId + "] is neither firstNode[" + firstNode + "] nor secondNode[" + secondNode + "] for Relationship[" + relRecord.getId() + "]" ); } } return Pair.of( result, position ); } private static void loadPropertyChain( Collection<PropertyRecord> chain, PropertyStore propertyStore, PropertyReceiver receiver ) { if ( chain != null ) { for ( PropertyRecord propRecord : chain ) { for ( PropertyBlock propBlock : propRecord.getPropertyBlocks() ) { receiver.receive( propBlock.newPropertyData( propertyStore ), propRecord.getId() ); } } } } static void loadProperties( PropertyStore propertyStore, long nextProp, PropertyReceiver receiver ) { Collection<PropertyRecord> chain = propertyStore.getPropertyRecordChain( nextProp ); if ( chain != null ) { loadPropertyChain( chain, propertyStore, receiver ); } } public interface PropertyReceiver { void receive( DefinedProperty property, long propertyRecordId ); } }
0true
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreTransaction.java
95
public class ClientImpl implements Client { private final String uuid; private final InetSocketAddress socketAddress; public ClientImpl(String uuid, InetSocketAddress socketAddress) { this.uuid = uuid; this.socketAddress = socketAddress; } @Override public String getUuid() { return uuid; } @Override public InetSocketAddress getSocketAddress() { return socketAddress; } @Override public ClientType getClientType() { return ClientType.JAVA; } }
0true
hazelcast-client_src_main_java_com_hazelcast_client_ClientImpl.java
205
public class ExistsFieldQueryExtension implements FieldQueryExtension { public static final String NAME = "_exists_"; @Override public Query query(QueryParseContext parseContext, String queryText) { String fieldName = queryText; Filter filter = null; MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName); if (smartNameFieldMappers != null) { if (smartNameFieldMappers.hasMapper()) { filter = smartNameFieldMappers.mapper().rangeFilter(null, null, true, true, parseContext); } } if (filter == null) { filter = new TermRangeFilter(fieldName, null, null, true, true); } // we always cache this one, really does not change... filter = parseContext.cacheFilter(filter, null); filter = wrapSmartNameFilter(filter, smartNameFieldMappers, parseContext); return new XConstantScoreQuery(filter); } }
1no label
src_main_java_org_apache_lucene_queryparser_classic_ExistsFieldQueryExtension.java
3,731
public static class Defaults { public static final boolean ENABLED = true; public static final Nested NESTED = Nested.NO; public static final Dynamic DYNAMIC = null; // not set, inherited from root public static final ContentPath.Type PATH_TYPE = ContentPath.Type.FULL; }
0true
src_main_java_org_elasticsearch_index_mapper_object_ObjectMapper.java
3,181
public abstract class FilterDoubleValues extends DoubleValues { protected final DoubleValues delegate; protected FilterDoubleValues(DoubleValues delegate) { super(delegate.isMultiValued()); this.delegate = delegate; } @Override public int setDocument(int docId) { return delegate.setDocument(docId); } @Override public double nextValue() { return delegate.nextValue(); } @Override public AtomicFieldData.Order getOrder() { return delegate.getOrder(); } }
0true
src_main_java_org_elasticsearch_index_fielddata_FilterDoubleValues.java
450
public class ClusterStatsResponse extends NodesOperationResponse<ClusterStatsNodeResponse> implements ToXContent { ClusterStatsNodes nodesStats; ClusterStatsIndices indicesStats; String clusterUUID; ClusterHealthStatus status; long timestamp; ClusterStatsResponse() { } public ClusterStatsResponse(long timestamp, ClusterName clusterName, String clusterUUID, ClusterStatsNodeResponse[] nodes) { super(clusterName, null); this.timestamp = timestamp; this.clusterUUID = clusterUUID; nodesStats = new ClusterStatsNodes(nodes); indicesStats = new ClusterStatsIndices(nodes); for (ClusterStatsNodeResponse response : nodes) { // only the master node populates the status if (response.clusterStatus() != null) { status = response.clusterStatus(); break; } } } public long getTimestamp() { return this.timestamp; } public ClusterHealthStatus getStatus() { return this.status; } public ClusterStatsNodes getNodesStats() { return nodesStats; } public ClusterStatsIndices getIndicesStats() { return indicesStats; } @Override public ClusterStatsNodeResponse[] getNodes() { throw new UnsupportedOperationException(); } @Override public Map<String, ClusterStatsNodeResponse> getNodesMap() { throw new UnsupportedOperationException(); } @Override public ClusterStatsNodeResponse getAt(int position) { throw new UnsupportedOperationException(); } @Override public Iterator<ClusterStatsNodeResponse> iterator() { throw new UnsupportedOperationException(); } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); timestamp = in.readVLong(); status = null; if (in.readBoolean()) { // it may be that the master switched on us while doing the operation. In this case the status may be null. status = ClusterHealthStatus.fromValue(in.readByte()); } clusterUUID = in.readString(); nodesStats = ClusterStatsNodes.readNodeStats(in); indicesStats = ClusterStatsIndices.readIndicesStats(in); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeVLong(timestamp); if (status == null) { out.writeBoolean(false); } else { out.writeBoolean(true); out.writeByte(status.value()); } out.writeString(clusterUUID); nodesStats.writeTo(out); indicesStats.writeTo(out); } static final class Fields { static final XContentBuilderString NODES = new XContentBuilderString("nodes"); static final XContentBuilderString INDICES = new XContentBuilderString("indices"); static final XContentBuilderString UUID = new XContentBuilderString("uuid"); static final XContentBuilderString CLUSTER_NAME = new XContentBuilderString("cluster_name"); static final XContentBuilderString STATUS = new XContentBuilderString("status"); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field("timestamp", getTimestamp()); builder.field(Fields.CLUSTER_NAME, getClusterName().value()); if (params.paramAsBoolean("output_uuid", false)) { builder.field(Fields.UUID, clusterUUID); } if (status != null) { builder.field(Fields.STATUS, status.name().toLowerCase(Locale.ROOT)); } builder.startObject(Fields.INDICES); indicesStats.toXContent(builder, params); builder.endObject(); builder.startObject(Fields.NODES); nodesStats.toXContent(builder, params); builder.endObject(); return builder; } @Override public String toString() { try { XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); builder.startObject(); toXContent(builder, EMPTY_PARAMS); builder.endObject(); return builder.string(); } catch (IOException e) { return "{ \"error\" : \"" + 
e.getMessage() + "\"}"; } } }
0true
src_main_java_org_elasticsearch_action_admin_cluster_stats_ClusterStatsResponse.java
3,717
public final class EntryTaskSchedulerFactory { private EntryTaskSchedulerFactory() { } /** * Creates a new EntryTaskScheduler that will run all per-second operations in bulk. * Imagine a write-behind map where dirty entries will be stored in bulk. * Note that each key can be scheduled only once, meaning you cannot delay the execution. * Once an entry is marked as dirty, for example, it will run in write-delay-seconds, * even if the entry is updated again within write-delay-seconds. * So two things to * remember: * 1. a key cannot be re-scheduled (postponing its execution). * 2. all entries scheduled for a given second will be executed at once by your * SecondBulkExecutor implementation. * Once a key is executed, it can be re-scheduled for another execution. * <p/> * The EntryTaskScheduler implementation is thread-safe. * * @param scheduledExecutorService ScheduledExecutorService instance to execute the per-second bulk operations * @param entryProcessor bulk processor * @param scheduleType the scheduling semantics to apply * @return EntryTaskScheduler */ public static <K, V> EntryTaskScheduler<K, V> newScheduler(ScheduledExecutorService scheduledExecutorService, ScheduledEntryProcessor entryProcessor, ScheduleType scheduleType) { return new SecondsBasedEntryTaskScheduler<K, V>(scheduledExecutorService, entryProcessor, scheduleType); } }
1no label
hazelcast_src_main_java_com_hazelcast_util_scheduler_EntryTaskSchedulerFactory.java
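Editor's note: a minimal usage sketch of the factory entry above. It assumes the shapes of the surrounding com.hazelcast.util.scheduler API (ScheduledEntryProcessor.process receiving all same-second entries, EntryTaskScheduler.schedule(delayMillis, key, value)) and that a ScheduleType constant named POSTPONE exists; none of these names come from this dataset row.

import java.util.Collection;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import com.hazelcast.util.scheduler.*;

public class SchedulerSketch {
    public static void main(String[] args) {
        ScheduledExecutorService ses = Executors.newSingleThreadScheduledExecutor();
        // Bulk processor: invoked once with every entry that came due in the same second.
        ScheduledEntryProcessor<String, String> processor = new ScheduledEntryProcessor<String, String>() {
            @Override
            public void process(EntryTaskScheduler<String, String> scheduler,
                                Collection<ScheduledEntry<String, String>> entries) {
                for (ScheduledEntry<String, String> entry : entries) {
                    System.out.println(entry.getKey() + " -> " + entry.getValue());
                }
            }
        };
        EntryTaskScheduler<String, String> scheduler =
                EntryTaskSchedulerFactory.newScheduler(ses, processor, ScheduleType.POSTPONE); // enum value assumed
        scheduler.schedule(1000, "dirtyKey", "dirtyValue"); // runs ~1s later, bulked with same-second entries
    }
}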
2,446
executor.execute(new Runnable() { @Override public void run() { try { wait.await(); } catch (InterruptedException e) { throw new RuntimeException(e); } executed1.set(true); exec1Wait.countDown(); } });
0true
src_test_java_org_elasticsearch_common_util_concurrent_EsExecutorsTests.java
2,064
return new Module() { public void configure(Binder binder) { binder = binder.skipSources(getClass()); for (Module module : modulesSet) { binder.install(module); } } };
0true
src_main_java_org_elasticsearch_common_inject_util_Modules.java
255
final class FormatAction extends Action { private final CeylonEditor editor; private final boolean respectSelection; FormatAction(CeylonEditor editor) { this(editor, true); } FormatAction(CeylonEditor editor, boolean respectSelection) { super(null); this.editor = editor; this.respectSelection = respectSelection; } @Override public boolean isEnabled() { CeylonParseController cpc = editor.getParseController(); return isEnabled(cpc); } public static boolean isEnabled(CeylonParseController cpc) { return cpc!=null && cpc.getStage().ordinal()>=Stage.SYNTACTIC_ANALYSIS.ordinal() && cpc.getRootNode()!=null; } private static class FormattingUnit { public final Node node; public final CommonToken startToken; public final CommonToken endToken; public FormattingUnit(final Node node, final CommonToken startToken, final CommonToken endToken) { this.node = node; this.startToken = startToken; this.endToken= endToken; } } @Override public void run() { IDocument document = editor.getCeylonSourceViewer().getDocument(); final ITextSelection ts = getSelection(editor); final boolean selected = respectSelection && ts.getLength() > 0; final CeylonParseController pc = editor.getParseController(); format(pc, document, ts, selected, editor.getSelectionProvider()); } public static void format(final CeylonParseController pc, IDocument document, final ITextSelection ts, final boolean selected, ISelectionProvider selectionProvider) { if (!isEnabled(pc)) return; final List<CommonToken> tokenList = pc.getTokens(); final List<FormattingUnit> formattingUnits; boolean formatAll = !selected || document.getLength()==ts.getLength(); if (!formatAll) { // a node was selected, format only that Node selectedRootNode = Nodes.findNode(pc.getRootNode(), ts); if (selectedRootNode == null) return; if (selectedRootNode instanceof Body || selectedRootNode instanceof CompilationUnit) { // format only selected statements, not entire body / CU (from now on: body) Iterator<? extends Statement> it; if (selectedRootNode instanceof Body) { it = ((Body)selectedRootNode).getStatements().iterator(); } else { it = ((CompilationUnit)selectedRootNode).getDeclarations().iterator(); } Statement stat = null; formattingUnits = new ArrayList<FormattingUnit>(); int tokenIndex = -1; // find first selected statement while (it.hasNext()) { stat = it.next(); CommonToken start = (CommonToken)stat.getToken(); CommonToken end = (CommonToken)stat.getEndToken(); if (end.getStopIndex() >= ts.getOffset()) { formattingUnits.add(new FormattingUnit(stat, start, end)); tokenIndex = end.getTokenIndex() + 1; break; } } // find last selected statement while (it.hasNext()) { stat = it.next(); CommonToken start = (CommonToken)stat.getToken(); CommonToken end = (CommonToken)stat.getEndToken(); if (start.getStartIndex() >= ts.getOffset() + ts.getLength()) { break; } formattingUnits.add(new FormattingUnit(stat, tokenList.get(tokenIndex), end)); tokenIndex = end.getTokenIndex() + 1; } if (formattingUnits.isEmpty()) { // possible if the selection spanned the entire content of the body, // or if the body is empty, etc. 
formattingUnits.add(new FormattingUnit( selectedRootNode, (CommonToken)selectedRootNode.getToken(), (CommonToken)selectedRootNode.getEndToken())); } } else { formattingUnits = Collections.singletonList(new FormattingUnit( selectedRootNode, (CommonToken)selectedRootNode.getToken(), (CommonToken)selectedRootNode.getEndToken())); } } else { // format everything formattingUnits = Collections.singletonList(new FormattingUnit( pc.getRootNode(), tokenList.get(0), tokenList.get(tokenList.size() - 1))); } final StringBuilder builder = new StringBuilder(document.getLength()); final SparseFormattingOptions wsOptions = CeylonStyle.getEclipseWsOptions(document); try { for (FormattingUnit unit : formattingUnits) { final int startTokenIndex = unit.startToken.getTokenIndex(); final int endTokenIndex = unit.endToken.getTokenIndex(); // final int startIndex = unit.startToken.getStartIndex(); // final int stopIndex = unit.endToken.getStopIndex(); final TokenSource tokens = new TokenSource() { int i = startTokenIndex; @Override public Token nextToken() { if (i <= endTokenIndex) return tokenList.get(i++); else if (i == endTokenIndex + 1) return tokenList.get(tokenList.size() - 1); // EOF token else return null; } @Override public String getSourceName() { throw new IllegalStateException("No one should need this"); } }; final int indentLevel = Indents.getIndent(unit.node, document).length() / Indents.getIndentSpaces(); if (unit != formattingUnits.get(0)) { // add indentation builder.append(wsOptions.getIndentMode().indent(indentLevel)); } format_.format( unit.node, combinedOptions_.combinedOptions( loadProfile_.loadProfile( CeylonStyle.getFormatterProfile(pc.getProject()), /* inherit = */ false, /* baseDir = */ pc.getProject().getLocation().toOSString()), new Singleton<SparseFormattingOptions> (SparseFormattingOptions.$TypeDescriptor$, wsOptions)), new StringBuilderWriter(builder), new BufferedTokenStream(tokens), indentLevel ); if (unit == formattingUnits.get(0)) { // trim leading indentation (from formatter's indentBefore) int firstNonWsIndex = 0; while (Character.isWhitespace(builder.charAt(firstNonWsIndex))) firstNonWsIndex++; if (firstNonWsIndex != 0) builder.delete(0, firstNonWsIndex); } } } catch (Exception e) { return; } catch (AssertionError e) { return; } final String text; if (selected) { // remove the trailing line break text = builder.substring(0, builder.length() - wsOptions.getLineBreak().getText().length()); } else { text = builder.toString(); } try { final int startIndex = formattingUnits.get(0).startToken.getStartIndex(); final int stopIndex = formattingUnits.get(formattingUnits.size() - 1).endToken.getStopIndex(); final int from = formatAll ? 0 : startIndex; final int length = formatAll ? document.getLength() : stopIndex - startIndex + 1; if (!document.get(from, length).equals(text)) { DocumentChange change = new DocumentChange("Format", document); change.setEdit(new ReplaceEdit(from, length, text)); change.perform(new NullProgressMonitor()); if (selected) { selectionProvider.setSelection(new TextSelection(startIndex, text.length())); } } } catch (Exception e) { e.printStackTrace(); } } }
0true
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_editor_FormatAction.java
2,601
private class SymmetricCipherPacketWriter implements PacketWriter { final Cipher cipher; ByteBuffer packetBuffer = ByteBuffer.allocate(ioService.getSocketSendBufferSize() * IOService.KILO_BYTE); boolean packetWritten; SymmetricCipherPacketWriter() { cipher = init(); } private Cipher init() { Cipher c; try { c = CipherHelper.createSymmetricWriterCipher(ioService.getSymmetricEncryptionConfig()); } catch (Exception e) { logger.severe("Symmetric Cipher for WriteHandler cannot be initialized.", e); CipherHelper.handleCipherException(e, connection); throw ExceptionUtil.rethrow(e); } return c; } public boolean writePacket(Packet packet, ByteBuffer socketBuffer) throws Exception { if (!packetWritten) { if (socketBuffer.remaining() < CONST_BUFFER_NO) { return false; } int size = cipher.getOutputSize(packet.size()); socketBuffer.putInt(size); if (packetBuffer.capacity() < packet.size()) { packetBuffer = ByteBuffer.allocate(packet.size()); } if (!packet.writeTo(packetBuffer)) { throw new HazelcastException("Packet didn't fit into the buffer!"); } packetBuffer.flip(); packetWritten = true; } if (socketBuffer.hasRemaining()) { int outputSize = cipher.getOutputSize(packetBuffer.remaining()); if (outputSize <= socketBuffer.remaining()) { cipher.update(packetBuffer, socketBuffer); } else { int min = Math.min(packetBuffer.remaining(), socketBuffer.remaining()); int len = min / 2; if (len > 0) { int limitOld = packetBuffer.limit(); packetBuffer.limit(packetBuffer.position() + len); cipher.update(packetBuffer, socketBuffer); packetBuffer.limit(limitOld); } } if (!packetBuffer.hasRemaining()) { if (socketBuffer.remaining() >= cipher.getOutputSize(0)) { socketBuffer.put(cipher.doFinal()); packetWritten = false; packetBuffer.clear(); return true; } } } return false; } }
1no label
hazelcast_src_main_java_com_hazelcast_nio_SocketPacketWriter.java
420
trackedList.addChangeListener(new OMultiValueChangeListener<Integer, String>() { public void onAfterRecordChanged(final OMultiValueChangeEvent<Integer, String> event) { changed.value = true; } });
0true
core_src_test_java_com_orientechnologies_orient_core_db_record_TrackedListTest.java
283
public class ThymeleafMessageCreator extends MessageCreator { private TemplateEngine templateEngine; public ThymeleafMessageCreator(TemplateEngine templateEngine, JavaMailSender mailSender) { super(mailSender); this.templateEngine = templateEngine; } @Override public String buildMessageBody(EmailInfo info, HashMap<String,Object> props) { BroadleafRequestContext blcContext = BroadleafRequestContext.getBroadleafRequestContext(); final Context thymeleafContext = new Context(); if (blcContext != null && blcContext.getJavaLocale() != null) { thymeleafContext.setLocale(blcContext.getJavaLocale()); } if (props != null) { Iterator<String> propsIterator = props.keySet().iterator(); while(propsIterator.hasNext()) { String key = propsIterator.next(); thymeleafContext.setVariable(key, props.get(key)); } } return this.templateEngine.process( info.getEmailTemplate(), thymeleafContext); } }
0true
common_src_main_java_org_broadleafcommerce_common_email_service_message_ThymeleafMessageCreator.java
337
public class NodesRestartResponse extends NodesOperationResponse<NodesRestartResponse.NodeRestartResponse> { NodesRestartResponse() { } public NodesRestartResponse(ClusterName clusterName, NodeRestartResponse[] nodes) { super(clusterName, nodes); } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); nodes = new NodeRestartResponse[in.readVInt()]; for (int i = 0; i < nodes.length; i++) { nodes[i] = NodeRestartResponse.readNodeRestartResponse(in); } } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeVInt(nodes.length); for (NodeRestartResponse node : nodes) { node.writeTo(out); } } public static class NodeRestartResponse extends NodeOperationResponse { NodeRestartResponse() { } public NodeRestartResponse(DiscoveryNode node) { super(node); } public static NodeRestartResponse readNodeRestartResponse(StreamInput in) throws IOException { NodeRestartResponse res = new NodeRestartResponse(); res.readFrom(in); return res; } } }
0true
src_main_java_org_elasticsearch_action_admin_cluster_node_restart_NodesRestartResponse.java
1,202
@SuppressWarnings("serial") public abstract class OSQLQuery<T> extends OQueryAbstract<T> implements OCommandRequestText { protected String text; public OSQLQuery() { } public OSQLQuery(final String iText) { text = iText.trim(); } /** * Delegates to the OQueryExecutor the query execution. */ @SuppressWarnings("unchecked") public List<T> run(final Object... iArgs) { final ODatabaseRecord database = ODatabaseRecordThreadLocal.INSTANCE.get(); if (database == null) throw new OQueryParsingException("No database configured"); setParameters(iArgs); return (List<T>) database.getStorage().command(this); } /** * Returns only the first record if any. */ public T runFirst(final Object... iArgs) { setLimit(1); final List<T> result = execute(iArgs); return result != null && !result.isEmpty() ? result.get(0) : null; } public String getText() { return text; } public OCommandRequestText setText(final String iText) { text = iText; return this; } @Override public String toString() { return "sql." + text; } public OSerializableStream fromStream(final byte[] iStream) throws OSerializationException { final OMemoryStream buffer = new OMemoryStream(iStream); queryFromStream(buffer); return this; } public byte[] toStream() throws OSerializationException { return queryToStream().toByteArray(); } protected OMemoryStream queryToStream() { final OMemoryStream buffer = new OMemoryStream(); buffer.set(text); // TEXT AS STRING buffer.set(limit); // LIMIT AS INTEGER buffer.set(fetchPlan != null ? fetchPlan : ""); // FETCH PLAN IN FORM OF STRING (to know more goto: // http://code.google.com/p/orient/wiki/FetchingStrategies) buffer.set(serializeQueryParameters(parameters)); return buffer; } @SuppressWarnings("unchecked") private Map<Object, Object> convertToRIDsIfPossible(final Map<Object, Object> params) { final Map<Object, Object> newParams = new HashMap<Object, Object>(params.size()); for (Entry<Object, Object> entry : params.entrySet()) { final Object value = entry.getValue(); if (value instanceof Set<?> && ((Set<?>) value).iterator().next() instanceof ORecord<?>) { // CONVERT RECORDS AS RIDS final Set<ORID> newSet = new HashSet<ORID>(); for (ORecord<?> rec : (Set<ORecord<?>>) value) { newSet.add(rec.getIdentity()); } newParams.put(entry.getKey(), newSet); } else if (value instanceof List<?> && ((List<?>) value).get(0) instanceof ORecord<?>) { // CONVERT RECORDS AS RIDS final List<ORID> newList = new ArrayList<ORID>(); for (ORecord<?> rec : (List<ORecord<?>>) value) { newList.add(rec.getIdentity()); } newParams.put(entry.getKey(), newList); } else if (value instanceof Map<?, ?> && ((Map<?, ?>) value).values().iterator().next() instanceof ORecord<?>) { // CONVERT RECORDS AS RIDS final Map<Object, ORID> newMap = new HashMap<Object, ORID>(); for (Entry<?, ORecord<?>> mapEntry : ((Map<?, ORecord<?>>) value).entrySet()) { newMap.put(mapEntry.getKey(), mapEntry.getValue().getIdentity()); } newParams.put(entry.getKey(), newMap); } else newParams.put(entry.getKey(), entry.getValue()); } return newParams; } protected void queryFromStream(final OMemoryStream buffer) { text = buffer.getAsString(); limit = buffer.getAsInteger(); setFetchPlan(buffer.getAsString()); final byte[] paramBuffer = buffer.getAsByteArray(); parameters = deserializeQueryParameters(paramBuffer); } protected Map<Object, Object> deserializeQueryParameters(final byte[] paramBuffer) { if (paramBuffer == null || paramBuffer.length == 0) return Collections.emptyMap(); final ODocument param = new ODocument(); param.fromStream(paramBuffer); 
param.setFieldType("params", OType.EMBEDDEDMAP); final Map<String, Object> params = param.rawField("params"); final Map<Object, Object> result = new HashMap<Object, Object>(); for (Entry<String, Object> p : params.entrySet()) { if (Character.isDigit(p.getKey().charAt(0))) result.put(Integer.parseInt(p.getKey()), p.getValue()); else result.put(p.getKey(), p.getValue()); } return result; } protected byte[] serializeQueryParameters(final Map<Object, Object> params) { if (parameters == null || parameters.size() == 0) // NO PARAMETER, JUST SEND 0 return new byte[0]; final ODocument param = new ODocument(); param.field("params", convertToRIDsIfPossible(params)); return param.toStream(); } }
0true
core_src_main_java_com_orientechnologies_orient_core_sql_query_OSQLQuery.java
55
public class OExclusiveLock extends OAbstractLock { private final ReadWriteLock lock; public OExclusiveLock(final ReadWriteLock iLock) { lock = iLock; } public void lock() { lock.writeLock().lock(); } public void unlock() { lock.writeLock().unlock(); } }
0true
commons_src_main_java_com_orientechnologies_common_concur_lock_OExclusiveLock.java
218
protected class SelectPreviousSubWordAction extends PreviousSubWordAction { /** * Creates a new select previous sub-word action. */ public SelectPreviousSubWordAction() { super(ST.SELECT_WORD_PREVIOUS); } @Override protected void setCaretPosition(final int position) { final ISourceViewer viewer= getSourceViewer(); final StyledText text= viewer.getTextWidget(); if (text != null && !text.isDisposed()) { final Point selection= text.getSelection(); final int caret= text.getCaretOffset(); final int offset= modelOffset2WidgetOffset(viewer, position); if (caret == selection.x) text.setSelectionRange(selection.y, offset - selection.y); else text.setSelectionRange(selection.x, offset - selection.x); } } }
0true
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_editor_CeylonEditor.java
596
public class ContainerSizeType implements Serializable, BroadleafEnumerationType { private static final long serialVersionUID = 1L; private static final Map<String, ContainerSizeType> TYPES = new LinkedHashMap<String, ContainerSizeType>(); public static ContainerSizeType getInstance(final String type) { return TYPES.get(type); } private String type; private String friendlyType; public ContainerSizeType() { //do nothing } public ContainerSizeType(final String type, final String friendlyType) { this.friendlyType = friendlyType; setType(type); } public String getType() { return type; } public String getFriendlyType() { return friendlyType; } private void setType(final String type) { this.type = type; if (!TYPES.containsKey(type)) { TYPES.put(type, this); } else { throw new RuntimeException("Cannot add the type: (" + type + "). It already exists as a type via " + getInstance(type).getClass().getName()); } } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((type == null) ? 0 : type.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; ContainerSizeType other = (ContainerSizeType) obj; if (type == null) { if (other.type != null) return false; } else if (!type.equals(other.type)) return false; return true; } }
1no label
common_src_main_java_org_broadleafcommerce_common_vendor_service_type_ContainerSizeType.java
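Editor's note: the entry above follows Broadleaf's extensible-enum pattern, where each constructed instance registers itself in the static TYPES map and is later resolved through getInstance. A small sketch of how such constants are typically declared; the constant names and wrapper class are hypothetical, not taken from the Broadleaf codebase.

import org.broadleafcommerce.common.vendor.service.type.ContainerSizeType;

public class IllustrativeContainerSizes {
    // Hypothetical constants; constructing each one registers it in TYPES.
    public static final ContainerSizeType SMALL = new ContainerSizeType("SMALL", "Small Container");
    public static final ContainerSizeType LARGE = new ContainerSizeType("LARGE", "Large Container");

    public static void main(String[] args) {
        // A stored type string resolves back to the shared registered instance.
        ContainerSizeType resolved = ContainerSizeType.getInstance("SMALL");
        System.out.println(resolved.getFriendlyType()); // prints "Small Container"
    }
}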
1,012
private class TransportHandler extends BaseTransportRequestHandler<Request> { @Override public Request newInstance() { return newRequest(); } @Override public void messageReceived(Request request, final TransportChannel channel) throws Exception { // no need to have a threaded listener since we just send back a response request.listenerThreaded(false); // if we have a local operation, execute it on a thread since we don't spawn request.operationThreaded(true); execute(request, new ActionListener<Response>() { @Override public void onResponse(Response result) { try { channel.sendResponse(result); } catch (Throwable e) { onFailure(e); } } @Override public void onFailure(Throwable e) { try { channel.sendResponse(e); } catch (Exception e1) { logger.warn("Failed to send response for get", e1); } } }); } @Override public String executor() { return ThreadPool.Names.SAME; } }
0true
src_main_java_org_elasticsearch_action_support_single_custom_TransportSingleCustomOperationAction.java
3,739
public class SimpleObjectMappingTests extends ElasticsearchTestCase { @Test public void testDifferentInnerObjectTokenFailure() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .endObject().endObject().string(); DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping); try { defaultMapper.parse("type", "1", new BytesArray(" {\n" + " \"object\": {\n" + " \"array\":[\n" + " {\n" + " \"object\": { \"value\": \"value\" }\n" + " },\n" + " {\n" + " \"object\":\"value\"\n" + " }\n" + " ]\n" + " },\n" + " \"value\":\"value\"\n" + " }")); fail(); } catch (MapperParsingException e) { // all is well } } }
0true
src_test_java_org_elasticsearch_index_mapper_object_SimpleObjectMappingTests.java
1,475
public interface HazelcastRegion<Cache extends RegionCache> extends Region { HazelcastInstance getInstance(); Cache getCache(); ILogger getLogger(); }
0true
hazelcast-hibernate_hazelcast-hibernate4_src_main_java_com_hazelcast_hibernate_region_HazelcastRegion.java
1,330
public abstract class AbstractSolrSearchServiceExtensionHandler extends AbstractExtensionHandler implements SolrSearchServiceExtensionHandler { @Override public ExtensionResultStatusType buildPrefixListForSearchableFacet(Field field, List<String> prefixList) { return ExtensionResultStatusType.NOT_HANDLED; } @Override public ExtensionResultStatusType buildPrefixListForSearchableField(Field field, FieldType searchableFieldType, List<String> prefixList) { return ExtensionResultStatusType.NOT_HANDLED; } @Override public ExtensionResultStatusType filterSearchFacetRanges(SearchFacetDTO dto, List<SearchFacetRange> ranges) { return ExtensionResultStatusType.NOT_HANDLED; } @Override public ExtensionResultStatusType addPropertyValues(Product product, Field field, FieldType fieldType, Map<String, Object> values, String propertyName, List<Locale> locales) throws IllegalAccessException, InvocationTargetException, NoSuchMethodException { return ExtensionResultStatusType.NOT_HANDLED; } @Override public ExtensionResultStatusType modifySolrQuery(SolrQuery query, String qualifiedSolrQuery, List<SearchFacetDTO> facets, ProductSearchCriteria searchCriteria, String defaultSort) { return ExtensionResultStatusType.NOT_HANDLED; } @Override public ExtensionResultStatusType attachAdditionalBasicFields(Product product, SolrInputDocument document, SolrHelperService shs) { return ExtensionResultStatusType.NOT_HANDLED; } }
0true
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_search_service_solr_AbstractSolrSearchServiceExtensionHandler.java
616
public class IndexStats implements Iterable<IndexShardStats> { private final String index; private final ShardStats shards[]; public IndexStats(String index, ShardStats[] shards) { this.index = index; this.shards = shards; } public String getIndex() { return this.index; } public ShardStats[] getShards() { return this.shards; } private Map<Integer, IndexShardStats> indexShards; public Map<Integer, IndexShardStats> getIndexShards() { if (indexShards != null) { return indexShards; } Map<Integer, List<ShardStats>> tmpIndexShards = Maps.newHashMap(); for (ShardStats shard : shards) { List<ShardStats> lst = tmpIndexShards.get(shard.getShardRouting().id()); if (lst == null) { lst = Lists.newArrayList(); tmpIndexShards.put(shard.getShardRouting().id(), lst); } lst.add(shard); } indexShards = Maps.newHashMap(); for (Map.Entry<Integer, List<ShardStats>> entry : tmpIndexShards.entrySet()) { indexShards.put(entry.getKey(), new IndexShardStats(entry.getValue().get(0).getShardRouting().shardId(), entry.getValue().toArray(new ShardStats[entry.getValue().size()]))); } return indexShards; } @Override public Iterator<IndexShardStats> iterator() { return getIndexShards().values().iterator(); } private CommonStats total = null; public CommonStats getTotal() { if (total != null) { return total; } CommonStats stats = new CommonStats(); for (ShardStats shard : shards) { stats.add(shard.getStats()); } total = stats; return stats; } private CommonStats primary = null; public CommonStats getPrimaries() { if (primary != null) { return primary; } CommonStats stats = new CommonStats(); for (ShardStats shard : shards) { if (shard.getShardRouting().primary()) { stats.add(shard.getStats()); } } primary = stats; return stats; } }
0true
src_main_java_org_elasticsearch_action_admin_indices_stats_IndexStats.java
2,483
public interface ToXContent { public static interface Params { String param(String key); String param(String key, String defaultValue); boolean paramAsBoolean(String key, boolean defaultValue); Boolean paramAsBoolean(String key, Boolean defaultValue); /** * @deprecated since 1.0.0 * use {@link ToXContent.Params#paramAsBoolean(String, Boolean)} instead */ @Deprecated Boolean paramAsBooleanOptional(String key, Boolean defaultValue); } public static final Params EMPTY_PARAMS = new Params() { @Override public String param(String key) { return null; } @Override public String param(String key, String defaultValue) { return defaultValue; } @Override public boolean paramAsBoolean(String key, boolean defaultValue) { return defaultValue; } @Override public Boolean paramAsBoolean(String key, Boolean defaultValue) { return defaultValue; } @Override @Deprecated public Boolean paramAsBooleanOptional(String key, Boolean defaultValue) { return paramAsBoolean(key, defaultValue); } }; public static class MapParams implements Params { private final Map<String, String> params; public MapParams(Map<String, String> params) { this.params = params; } @Override public String param(String key) { return params.get(key); } @Override public String param(String key, String defaultValue) { String value = params.get(key); if (value == null) { return defaultValue; } return value; } @Override public boolean paramAsBoolean(String key, boolean defaultValue) { return Booleans.parseBoolean(param(key), defaultValue); } @Override public Boolean paramAsBoolean(String key, Boolean defaultValue) { return Booleans.parseBoolean(param(key), defaultValue); } @Override @Deprecated public Boolean paramAsBooleanOptional(String key, Boolean defaultValue) { return paramAsBoolean(key, defaultValue); } } public static class DelegatingMapParams extends MapParams { private final Params delegate; public DelegatingMapParams(Map<String, String> params, Params delegate) { super(params); this.delegate = delegate; } @Override public String param(String key) { return super.param(key, delegate.param(key)); } @Override public String param(String key, String defaultValue) { return super.param(key, delegate.param(key, defaultValue)); } @Override public boolean paramAsBoolean(String key, boolean defaultValue) { return super.paramAsBoolean(key, delegate.paramAsBoolean(key, defaultValue)); } @Override public Boolean paramAsBoolean(String key, Boolean defaultValue) { return super.paramAsBoolean(key, delegate.paramAsBoolean(key, defaultValue)); } @Override @Deprecated public Boolean paramAsBooleanOptional(String key, Boolean defaultValue) { return super.paramAsBooleanOptional(key, delegate.paramAsBooleanOptional(key, defaultValue)); } } XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException; }
0true
src_main_java_org_elasticsearch_common_xcontent_ToXContent.java
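Editor's note: to make the Params plumbing in the entry above concrete, here is a hedged sketch of a ToXContent implementation that reads an optional request parameter; the class name, field names, and parameter key are illustrative, only the ToXContent/Params contract comes from the entry itself.

import java.io.IOException;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

public class ShardSummary implements ToXContent {

    private final String index = "my_index"; // illustrative data

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.field("index", index);
        // paramAsBoolean falls back to the default when the caller did not supply "verbose"
        if (params.paramAsBoolean("verbose", false)) {
            builder.field("details", "illustrative extra output");
        }
        return builder;
    }
}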
76
public class OSharedResourceExternal extends OSharedResourceAbstract implements OSharedResource { @Override public void acquireExclusiveLock() { super.acquireExclusiveLock(); } @Override public void acquireSharedLock() { super.acquireSharedLock(); } @Override public void releaseExclusiveLock() { super.releaseExclusiveLock(); } @Override public void releaseSharedLock() { super.releaseSharedLock(); } }
0true
commons_src_main_java_com_orientechnologies_common_concur_resource_OSharedResourceExternal.java
1,981
assertTrueEventually(new AssertTask() { @Override public void run() throws Exception { assertEquals(TestEventBasedMapStore.STORE_EVENTS.DELETE, testMapStore.getEvents().poll()); } });
0true
hazelcast_src_test_java_com_hazelcast_map_mapstore_MapStoreTest.java
2,698
final class PortableSerializer implements StreamSerializer<Portable> { private final SerializationContext context; private final Map<Integer, PortableFactory> factories = new HashMap<Integer, PortableFactory>(); PortableSerializer(SerializationContext context, Map<Integer, ? extends PortableFactory> portableFactories) { this.context = context; factories.putAll(portableFactories); } public int getTypeId() { return SerializationConstants.CONSTANT_TYPE_PORTABLE; } public void write(ObjectDataOutput out, Portable p) throws IOException { if (p.getClassId() == 0) { throw new IllegalArgumentException("Portable class id cannot be zero!"); } if (!(out instanceof BufferObjectDataOutput)) { throw new IllegalArgumentException("ObjectDataOutput must be instance of BufferObjectDataOutput!"); } ClassDefinition cd = context.lookupOrRegisterClassDefinition(p); BufferObjectDataOutput bufferedOut = (BufferObjectDataOutput) out; DefaultPortableWriter writer = new DefaultPortableWriter(this, bufferedOut, cd); p.writePortable(writer); writer.end(); } public Portable read(ObjectDataInput in) throws IOException { if (!(in instanceof BufferObjectDataInput)) { throw new IllegalArgumentException("ObjectDataInput must be instance of BufferObjectDataInput!"); } if (!(in instanceof PortableContextAwareInputStream)) { throw new IllegalArgumentException("ObjectDataInput must be instance of PortableContextAwareInputStream!"); } final PortableContextAwareInputStream ctxIn = (PortableContextAwareInputStream) in; final int factoryId = ctxIn.getFactoryId(); final int dataClassId = ctxIn.getClassId(); final int dataVersion = ctxIn.getVersion(); final PortableFactory portableFactory = factories.get(factoryId); if (portableFactory == null) { throw new HazelcastSerializationException("Could not find PortableFactory for factory-id: " + factoryId); } final Portable portable = portableFactory.create(dataClassId); if (portable == null) { throw new HazelcastSerializationException("Could not create Portable for class-id: " + dataClassId); } final DefaultPortableReader reader; final ClassDefinition cd; final BufferObjectDataInput bufferedIn = (BufferObjectDataInput) in; if (context.getVersion() == dataVersion) { cd = context.lookup(factoryId, dataClassId); // using context.version if (cd == null) { throw new HazelcastSerializationException("Could not find class-definition for " + "factory-id: " + factoryId + ", class-id: " + dataClassId + ", version: " + dataVersion); } reader = new DefaultPortableReader(this, bufferedIn, cd); } else { cd = context.lookup(factoryId, dataClassId, dataVersion); // registered during read if (cd == null) { throw new HazelcastSerializationException("Could not find class-definition for " + "factory-id: " + factoryId + ", class-id: " + dataClassId + ", version: " + dataVersion); } reader = new MorphingPortableReader(this, bufferedIn, cd); } portable.readPortable(reader); reader.end(); return portable; } Portable readAndInitialize(BufferObjectDataInput in) throws IOException { Portable p = read(in); final ManagedContext managedContext = context.getManagedContext(); return managedContext != null ? (Portable) managedContext.initialize(p) : p; } public void destroy() { factories.clear(); } }
1no label
hazelcast_src_main_java_com_hazelcast_nio_serialization_PortableSerializer.java
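Editor's note: for reference, a minimal sketch of the Portable contract the serializer above drives: write() resolves a ClassDefinition from the instance, and read() looks up the PortableFactory by the factory id carried in the stream, then asks it to create an instance by class id. The id values and field name below are illustrative assumptions.

import java.io.IOException;
import com.hazelcast.nio.serialization.Portable;
import com.hazelcast.nio.serialization.PortableReader;
import com.hazelcast.nio.serialization.PortableWriter;

class Person implements Portable {

    static final int FACTORY_ID = 1; // key into the serializer's factories map (illustrative value)
    static final int CLASS_ID = 2;   // must be non-zero; write() above rejects zero

    private String name;

    @Override
    public int getFactoryId() { return FACTORY_ID; }

    @Override
    public int getClassId() { return CLASS_ID; }

    @Override
    public void writePortable(PortableWriter writer) throws IOException {
        writer.writeUTF("name", name);
    }

    @Override
    public void readPortable(PortableReader reader) throws IOException {
        name = reader.readUTF("name");
    }
}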
2,560
public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implements Discovery { private static final LocalDiscovery[] NO_MEMBERS = new LocalDiscovery[0]; private final TransportService transportService; private final ClusterService clusterService; private final DiscoveryNodeService discoveryNodeService; private AllocationService allocationService; private final ClusterName clusterName; private final Version version; private final TimeValue publishTimeout; private DiscoveryNode localNode; private volatile boolean master = false; private final AtomicBoolean initialStateSent = new AtomicBoolean(); private final CopyOnWriteArrayList<InitialStateDiscoveryListener> initialStateListeners = new CopyOnWriteArrayList<InitialStateDiscoveryListener>(); private static final ConcurrentMap<ClusterName, ClusterGroup> clusterGroups = ConcurrentCollections.newConcurrentMap(); @Inject public LocalDiscovery(Settings settings, ClusterName clusterName, TransportService transportService, ClusterService clusterService, DiscoveryNodeService discoveryNodeService, Version version) { super(settings); this.clusterName = clusterName; this.clusterService = clusterService; this.transportService = transportService; this.discoveryNodeService = discoveryNodeService; this.version = version; this.publishTimeout = settings.getAsTime("discovery.zen.publish_timeout", DEFAULT_PUBLISH_TIMEOUT); } @Override public void setNodeService(@Nullable NodeService nodeService) { // nothing to do here } @Override public void setAllocationService(AllocationService allocationService) { this.allocationService = allocationService; } @Override protected void doStart() throws ElasticsearchException { synchronized (clusterGroups) { ClusterGroup clusterGroup = clusterGroups.get(clusterName); if (clusterGroup == null) { clusterGroup = new ClusterGroup(); clusterGroups.put(clusterName, clusterGroup); } logger.debug("Connected to cluster [{}]", clusterName); this.localNode = new DiscoveryNode(settings.get("name"), DiscoveryService.generateNodeId(settings), transportService.boundAddress().publishAddress(), discoveryNodeService.buildAttributes(), version); clusterGroup.members().add(this); LocalDiscovery firstMaster = null; for (LocalDiscovery localDiscovery : clusterGroup.members()) { if (localDiscovery.localNode().masterNode()) { firstMaster = localDiscovery; break; } } if (firstMaster != null && firstMaster.equals(this)) { // we are the first master (and the master) master = true; final LocalDiscovery master = firstMaster; clusterService.submitStateUpdateTask("local-disco-initial_connect(master)", new ProcessedClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) { nodesBuilder.put(discovery.localNode); } nodesBuilder.localNodeId(master.localNode().id()).masterNodeId(master.localNode().id()); // remove the NO_MASTER block in this case ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(Discovery.NO_MASTER_BLOCK); return ClusterState.builder(currentState).nodes(nodesBuilder).blocks(blocks).build(); } @Override public void onFailure(String source, Throwable t) { logger.error("unexpected failure during [{}]", t, source); } @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { sendInitialStateEventIfNeeded(); } }); } else if (firstMaster != null) { // update as 
fast as we can the local node state with the new metadata (so we create indices for example) final ClusterState masterState = firstMaster.clusterService.state(); clusterService.submitStateUpdateTask("local-disco(detected_master)", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { // make sure we have the local node id set, we might need it as a result of the new metadata DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentState.nodes()).put(localNode).localNodeId(localNode.id()); return ClusterState.builder(currentState).metaData(masterState.metaData()).nodes(nodesBuilder).build(); } @Override public void onFailure(String source, Throwable t) { logger.error("unexpected failure during [{}]", t, source); } }); // tell the master to send the fact that we are here final LocalDiscovery master = firstMaster; firstMaster.clusterService.submitStateUpdateTask("local-disco-receive(from node[" + localNode + "])", new ProcessedClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) { nodesBuilder.put(discovery.localNode); } nodesBuilder.localNodeId(master.localNode().id()).masterNodeId(master.localNode().id()); return ClusterState.builder(currentState).nodes(nodesBuilder).build(); } @Override public void onFailure(String source, Throwable t) { logger.error("unexpected failure during [{}]", t, source); } @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { sendInitialStateEventIfNeeded(); } }); } } // else, no master node, the next node that will start will fill things in... 
} @Override protected void doStop() throws ElasticsearchException { synchronized (clusterGroups) { ClusterGroup clusterGroup = clusterGroups.get(clusterName); if (clusterGroup == null) { logger.warn("Illegal state, should not have an empty cluster group when stopping, I should be there at the very least..."); return; } clusterGroup.members().remove(this); if (clusterGroup.members().isEmpty()) { // no more members, remove and return clusterGroups.remove(clusterName); return; } LocalDiscovery firstMaster = null; for (LocalDiscovery localDiscovery : clusterGroup.members()) { if (localDiscovery.localNode().masterNode()) { firstMaster = localDiscovery; break; } } if ( firstMaster != null ) { // if the removed node is the master, make the next one the master if (master) { firstMaster.master = true; } final Set<String> newMembers = newHashSet(); for (LocalDiscovery discovery : clusterGroup.members()) { newMembers.add(discovery.localNode.id()); } final LocalDiscovery master = firstMaster; master.clusterService.submitStateUpdateTask("local-disco-update", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { DiscoveryNodes newNodes = currentState.nodes().removeDeadMembers(newMembers, master.localNode.id()); DiscoveryNodes.Delta delta = newNodes.delta(currentState.nodes()); if (delta.added()) { logger.warn("No new nodes should be created when a new discovery view is accepted"); } // reroute here, so we eagerly remove dead nodes from the routing ClusterState updatedState = ClusterState.builder(currentState).nodes(newNodes).build(); RoutingAllocation.Result routingResult = master.allocationService.reroute(ClusterState.builder(updatedState).build()); return ClusterState.builder(updatedState).routingResult(routingResult).build(); } @Override public void onFailure(String source, Throwable t) { logger.error("unexpected failure during [{}]", t, source); } }); } } } @Override protected void doClose() throws ElasticsearchException { } @Override public DiscoveryNode localNode() { return localNode; } @Override public void addListener(InitialStateDiscoveryListener listener) { this.initialStateListeners.add(listener); } @Override public void removeListener(InitialStateDiscoveryListener listener) { this.initialStateListeners.remove(listener); } @Override public String nodeDescription() { return clusterName.value() + "/" + localNode.id(); } public void publish(ClusterState clusterState, final Discovery.AckListener ackListener) { if (!master) { throw new ElasticsearchIllegalStateException("Shouldn't publish state when not master"); } LocalDiscovery[] members = members(); if (members.length > 0) { publish(members, clusterState, new AckClusterStatePublishResponseHandler(members.length - 1, ackListener)); } } private LocalDiscovery[] members() { ClusterGroup clusterGroup = clusterGroups.get(clusterName); if (clusterGroup == null) { return NO_MEMBERS; } Queue<LocalDiscovery> members = clusterGroup.members(); return members.toArray(new LocalDiscovery[members.size()]); } private void publish(LocalDiscovery[] members, ClusterState clusterState, final ClusterStatePublishResponseHandler publishResponseHandler) { try { // we do the marshaling intentionally, to check it works well...
final byte[] clusterStateBytes = Builder.toBytes(clusterState); for (final LocalDiscovery discovery : members) { if (discovery.master) { continue; } final ClusterState nodeSpecificClusterState = ClusterState.Builder.fromBytes(clusterStateBytes, discovery.localNode); // ignore cluster state messages that do not include "me", not in the game yet... if (nodeSpecificClusterState.nodes().localNode() != null) { discovery.clusterService.submitStateUpdateTask("local-disco-receive(from master)", new ProcessedClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { ClusterState.Builder builder = ClusterState.builder(nodeSpecificClusterState); // if the routing table did not change, use the original one if (nodeSpecificClusterState.routingTable().version() == currentState.routingTable().version()) { builder.routingTable(currentState.routingTable()); } if (nodeSpecificClusterState.metaData().version() == currentState.metaData().version()) { builder.metaData(currentState.metaData()); } return builder.build(); } @Override public void onFailure(String source, Throwable t) { logger.error("unexpected failure during [{}]", t, source); publishResponseHandler.onFailure(discovery.localNode, t); } @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { sendInitialStateEventIfNeeded(); publishResponseHandler.onResponse(discovery.localNode); } }); } else { publishResponseHandler.onResponse(discovery.localNode); } } if (publishTimeout.millis() > 0) { try { boolean awaited = publishResponseHandler.awaitAllNodes(publishTimeout); if (!awaited) { logger.debug("awaiting all nodes to process published state {} timed out, timeout {}", clusterState.version(), publishTimeout); } } catch (InterruptedException e) { // ignore & restore interrupt Thread.currentThread().interrupt(); } } } catch (Exception e) { // failure to marshal or un-marshal throw new ElasticsearchIllegalStateException("Cluster state failed to serialize", e); } } private void sendInitialStateEventIfNeeded() { if (initialStateSent.compareAndSet(false, true)) { for (InitialStateDiscoveryListener listener : initialStateListeners) { listener.initialStateProcessed(); } } } private class ClusterGroup { private Queue<LocalDiscovery> members = ConcurrentCollections.newQueue(); Queue<LocalDiscovery> members() { return members; } } }
1no label
src_main_java_org_elasticsearch_discovery_local_LocalDiscovery.java
1,102
public class LongFieldDataBenchmark { private static final Random RANDOM = new Random(); private static final int SECONDS_PER_YEAR = 60 * 60 * 24 * 365; public static enum Data { SINGLE_VALUES_DENSE_ENUM { public int numValues() { return 1; } @Override public long nextValue() { return RANDOM.nextInt(16); } }, SINGLE_VALUED_DENSE_DATE { public int numValues() { return 1; } @Override public long nextValue() { // somewhere in-between 2010 and 2012 return 1000L * (40L * SECONDS_PER_YEAR + RANDOM.nextInt(2 * SECONDS_PER_YEAR)); } }, MULTI_VALUED_DATE { public int numValues() { return RANDOM.nextInt(3); } @Override public long nextValue() { // somewhere in-between 2010 and 2012 return 1000L * (40L * SECONDS_PER_YEAR + RANDOM.nextInt(2 * SECONDS_PER_YEAR)); } }, MULTI_VALUED_ENUM { public int numValues() { return RANDOM.nextInt(3); } @Override public long nextValue() { return 3 + RANDOM.nextInt(8); } }, SINGLE_VALUED_SPARSE_RANDOM { public int numValues() { return RANDOM.nextFloat() < 0.1f ? 1 : 0; } @Override public long nextValue() { return RANDOM.nextLong(); } }, MULTI_VALUED_SPARSE_RANDOM { public int numValues() { return RANDOM.nextFloat() < 0.1f ? 1 + RANDOM.nextInt(5) : 0; } @Override public long nextValue() { return RANDOM.nextLong(); } }, MULTI_VALUED_DENSE_RANDOM { public int numValues() { return 1 + RANDOM.nextInt(3); } @Override public long nextValue() { return RANDOM.nextLong(); } }; public abstract int numValues(); public abstract long nextValue(); } public static void main(String[] args) throws Exception { final IndexWriterConfig iwc = new IndexWriterConfig(Lucene.VERSION, new KeywordAnalyzer()); final String fieldName = "f"; final int numDocs = 1000000; System.out.println("Data\tLoading time\tImplementation\tActual size\tExpected size"); for (Data data : Data.values()) { final RAMDirectory dir = new RAMDirectory(); final IndexWriter indexWriter = new IndexWriter(dir, iwc); for (int i = 0; i < numDocs; ++i) { final Document doc = new Document(); final int numFields = data.numValues(); for (int j = 0; j < numFields; ++j) { doc.add(new LongField(fieldName, data.nextValue(), Store.NO)); } indexWriter.addDocument(doc); } indexWriter.forceMerge(1); indexWriter.close(); final DirectoryReader dr = DirectoryReader.open(dir); final IndexFieldDataService fds = new IndexFieldDataService(new Index("dummy"), new DummyCircuitBreakerService()); final LongFieldMapper mapper = new LongFieldMapper.Builder(fieldName).build(new BuilderContext(null, new ContentPath(1))); final IndexNumericFieldData<AtomicNumericFieldData> fd = fds.getForField(mapper); final long start = System.nanoTime(); final AtomicNumericFieldData afd = fd.loadDirect(SlowCompositeReaderWrapper.wrap(dr).getContext()); final long loadingTimeMs = (System.nanoTime() - start) / 1000 / 1000; System.out.println(data + "\t" + loadingTimeMs + "\t" + afd.getClass().getSimpleName() + "\t" + RamUsageEstimator.humanSizeOf(afd.getLongValues()) + "\t" + RamUsageEstimator.humanReadableUnits(afd.getMemorySizeInBytes())); dr.close(); } } }
0true
src_test_java_org_elasticsearch_benchmark_fielddata_LongFieldDataBenchmark.java
1,325
assertThat(awaitBusy(new Predicate<Object>() { public boolean apply(Object o) { ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); return state.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK); } }), equalTo(true));
0true
src_test_java_org_elasticsearch_cluster_NoMasterNodeTests.java
1
public final class OAlwaysGreaterKey implements Comparable<Comparable<?>>{ public int compareTo(Comparable<?> o) { return 1; } }
0true
commons_src_main_java_com_orientechnologies_common_collection_OAlwaysGreaterKey.java
1,317
private final class InnerExecutionCallback<V> implements ExecutionCallback<V> { private final Member member; private InnerExecutionCallback(Member member) { this.member = member; } @Override public void onResponse(V response) { ExecutionCallbackAdapterFactory.this.onResponse(member, response); } @Override public void onFailure(Throwable t) { ExecutionCallbackAdapterFactory.this.onResponse(member, t); } }
0true
hazelcast_src_main_java_com_hazelcast_executor_ExecutionCallbackAdapterFactory.java
393
public interface ORecordLazyListener { public void onLazyLoad(); }
0true
core_src_main_java_com_orientechnologies_orient_core_db_record_ORecordLazyListener.java
2,231
public static enum ScoreMode { First, Avg, Max, Sum, Min, Multiply }
0true
src_main_java_org_elasticsearch_common_lucene_search_function_FiltersFunctionScoreQuery.java
2,692
public class FsGateway extends BlobStoreGateway { private final ExecutorService concurrentStreamPool; @Inject public FsGateway(Settings settings, ThreadPool threadPool, ClusterService clusterService, Environment environment, ClusterName clusterName) throws IOException { super(settings, threadPool, clusterService); File gatewayFile; String location = componentSettings.get("location"); if (location == null) { logger.warn("using local fs location for gateway, should be changed to be a shared location across nodes"); gatewayFile = new File(environment.dataFiles()[0], "gateway"); } else { gatewayFile = new File(location); } int concurrentStreams = componentSettings.getAsInt("concurrent_streams", 5); this.concurrentStreamPool = EsExecutors.newScaling(1, concurrentStreams, 60, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[fs_stream]")); initialize(new FsBlobStore(componentSettings, concurrentStreamPool, gatewayFile), clusterName, null); } @Override public String type() { return "fs"; } @Override public Class<? extends Module> suggestIndexGateway() { return FsIndexGatewayModule.class; } @Override protected void doClose() throws ElasticsearchException { super.doClose(); concurrentStreamPool.shutdown(); } }
0true
src_main_java_org_elasticsearch_gateway_fs_FsGateway.java
415
trackedList.addChangeListener(new OMultiValueChangeListener<Integer, String>() { public void onAfterRecordChanged(final OMultiValueChangeEvent<Integer, String> event) { changed.value = true; } });
0true
core_src_test_java_com_orientechnologies_orient_core_db_record_TrackedListTest.java
129
public interface OBinaryConverter { void putInt(byte[] buffer, int index, int value, ByteOrder byteOrder); int getInt(byte[] buffer, int index, ByteOrder byteOrder); void putShort(byte[] buffer, int index, short value, ByteOrder byteOrder); short getShort(byte[] buffer, int index, ByteOrder byteOrder); void putLong(byte[] buffer, int index, long value, ByteOrder byteOrder); long getLong(byte[] buffer, int index, ByteOrder byteOrder); void putChar(byte[] buffer, int index, char character, ByteOrder byteOrder); char getChar(byte[] buffer, int index, ByteOrder byteOrder); boolean nativeAccelerationUsed(); }
0true
commons_src_main_java_com_orientechnologies_common_serialization_OBinaryConverter.java
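The OBinaryConverter record above defines a byte-order-aware primitive (de)serialization contract over raw byte arrays. As a minimal sketch of what a conforming implementation could look like (the class name OByteBufferConverter is hypothetical; OrientDB's actual implementations are not shown here), every call can simply wrap the array in a java.nio.ByteBuffer:

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Hypothetical pure-Java implementation of the OBinaryConverter contract:
// each call wraps the target array and applies the requested byte order.
public class OByteBufferConverter implements OBinaryConverter {
  public void putInt(byte[] buffer, int index, int value, ByteOrder byteOrder) {
    ByteBuffer.wrap(buffer).order(byteOrder).putInt(index, value);
  }

  public int getInt(byte[] buffer, int index, ByteOrder byteOrder) {
    return ByteBuffer.wrap(buffer).order(byteOrder).getInt(index);
  }

  public void putShort(byte[] buffer, int index, short value, ByteOrder byteOrder) {
    ByteBuffer.wrap(buffer).order(byteOrder).putShort(index, value);
  }

  public short getShort(byte[] buffer, int index, ByteOrder byteOrder) {
    return ByteBuffer.wrap(buffer).order(byteOrder).getShort(index);
  }

  public void putLong(byte[] buffer, int index, long value, ByteOrder byteOrder) {
    ByteBuffer.wrap(buffer).order(byteOrder).putLong(index, value);
  }

  public long getLong(byte[] buffer, int index, ByteOrder byteOrder) {
    return ByteBuffer.wrap(buffer).order(byteOrder).getLong(index);
  }

  public void putChar(byte[] buffer, int index, char character, ByteOrder byteOrder) {
    ByteBuffer.wrap(buffer).order(byteOrder).putChar(index, character);
  }

  public char getChar(byte[] buffer, int index, ByteOrder byteOrder) {
    return ByteBuffer.wrap(buffer).order(byteOrder).getChar(index);
  }

  public boolean nativeAccelerationUsed() {
    return false; // plain ByteBuffer access, no Unsafe-style native acceleration
  }
}
```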
410
public interface Status { public void setArchived(Character archived); public Character getArchived(); public boolean isActive(); }
0true
common_src_main_java_org_broadleafcommerce_common_persistence_Status.java
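A minimal sketch of a class implementing the Status contract above. The 'Y'/'N' character convention and the exact isActive() semantics are assumptions for illustration, not taken from Broadleaf's actual entities:

```java
// Hypothetical implementation: an entity is active unless explicitly archived with 'Y'.
public class SimpleStatus implements Status {
    private Character archived = 'N';

    public void setArchived(Character archived) {
        this.archived = archived;
    }

    public Character getArchived() {
        return archived;
    }

    public boolean isActive() {
        // null is treated as "never archived"
        return archived == null || archived != 'Y';
    }
}
```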
1,151
public final class Hazelcast { private Hazelcast() { } /** * Shuts down all running Hazelcast Instances on this JVM. * It doesn't shut down all members of the * cluster, just the ones running on this JVM. * * @see #newHazelcastInstance(Config) */ public static void shutdownAll() { HazelcastInstanceFactory.shutdownAll(); } /** * Creates a new HazelcastInstance (a new node in a cluster). * This method allows you to create and run multiple instances * of Hazelcast cluster members on the same JVM. * <p/> * To shut down all running HazelcastInstances (all members on this JVM) * call {@link #shutdownAll()}. * * @param config Configuration for the new HazelcastInstance (member) * @return new HazelcastInstance * @see #shutdownAll() * @see #getHazelcastInstanceByName(String) */ public static HazelcastInstance newHazelcastInstance(Config config) { return HazelcastInstanceFactory.newHazelcastInstance(config); } /** * Creates a new HazelcastInstance (a new node in a cluster). * This method allows you to create and run multiple instances * of Hazelcast cluster members on the same JVM. * <p/> * To shut down all running HazelcastInstances (all members on this JVM) * call {@link #shutdownAll()}. * * Hazelcast will look into two places for the configuration file: * <ol> * <li> * System property: Hazelcast will first check if "hazelcast.config" system property is set to a file path. * Example: -Dhazelcast.config=C:/myhazelcast.xml. * </li> * <li> * Classpath: If config file is not set as a system property, Hazelcast will check classpath for hazelcast.xml file. * </li> * </ol> * If Hazelcast doesn't find any config file, it will happily start with default configuration (hazelcast-default.xml) * located in hazelcast.jar. * * @return new HazelcastInstance * @see #shutdownAll() * @see #getHazelcastInstanceByName(String) */ public static HazelcastInstance newHazelcastInstance() { return HazelcastInstanceFactory.newHazelcastInstance(null); } /** * Returns an existing HazelcastInstance with instanceName. * <p/> * To shut down all running HazelcastInstances (all members on this JVM) * call {@link #shutdownAll()}. * * @param instanceName Name of the HazelcastInstance (member) * @return HazelcastInstance * @see #newHazelcastInstance(Config) * @see #shutdownAll() */ public static HazelcastInstance getHazelcastInstanceByName(String instanceName) { return HazelcastInstanceFactory.getHazelcastInstance(instanceName); } /** * Gets or creates the HazelcastInstance with a certain name. * * If a Hazelcast instance with the same name as the configuration exists, it is returned; otherwise it is created. * * @param config the Config. * @return the HazelcastInstance * @throws NullPointerException if config is null. * @throws IllegalArgumentException if the instance name of the config is null or empty. */ public static HazelcastInstance getOrCreateHazelcastInstance(Config config) { return HazelcastInstanceFactory.getOrCreateHazelcastInstance(config); } /** * Returns all active/running HazelcastInstances on this JVM. * <p/> * To shut down all running HazelcastInstances (all members on this JVM) * call {@link #shutdownAll()}. * * @return all HazelcastInstances * @see #newHazelcastInstance(Config) * @see #getHazelcastInstanceByName(String) * @see #shutdownAll() */ public static Set<HazelcastInstance> getAllHazelcastInstances() { return HazelcastInstanceFactory.getAllHazelcastInstances(); } /** * Sets <tt>OutOfMemoryHandler</tt> to be used when an <tt>OutOfMemoryError</tt> * is caught by Hazelcast threads. 
* * <p> * <b>Warning:</b> the <tt>OutOfMemoryHandler</tt> may not be called even though the JVM throws an * <tt>OutOfMemoryError</tt>, because the error may be thrown from an external (user) thread * and Hazelcast may not be informed about it. * </p> * * @param outOfMemoryHandler * * @see OutOfMemoryError * @see OutOfMemoryHandler */ public static void setOutOfMemoryHandler(OutOfMemoryHandler outOfMemoryHandler) { OutOfMemoryErrorDispatcher.setHandler(outOfMemoryHandler); } }
0true
hazelcast_src_main_java_com_hazelcast_core_Hazelcast.java
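A short usage sketch for the static factory API documented in the Hazelcast record above; the instance name "demo" is purely illustrative:

```java
import com.hazelcast.config.Config;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;

public class HazelcastBootstrap {
    public static void main(String[] args) {
        Config config = new Config();
        config.setInstanceName("demo"); // illustrative name

        // Creates the member on first call, returns the existing one afterwards.
        HazelcastInstance hz = Hazelcast.getOrCreateHazelcastInstance(config);

        // Looks up a running member by its instance name.
        HazelcastInstance same = Hazelcast.getHazelcastInstanceByName("demo");
        System.out.println(hz == same); // true

        // Stops every member running in this JVM, not the whole cluster.
        Hazelcast.shutdownAll();
    }
}
```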
947
Thread t = new Thread(new Runnable() { public void run() { final ILock lock = instance2.getLock(key); lock.lock(); latch.countDown(); } });
0true
hazelcast_src_test_java_com_hazelcast_concurrent_lock_LockTest.java
121
class FillInArgumentNameProposal extends CorrectionProposal { public FillInArgumentNameProposal(String name, Change change) { super("Fill in argument name '" + name + "'", change, null); } static void addFillInArgumentNameProposal(Collection<ICompletionProposal> proposals, IDocument doc, IFile file, Tree.SpecifiedArgument sa) { Tree.Identifier id = sa.getIdentifier(); if (id.getToken()==null) { TextChange change = new TextFileChange("Convert to Block", file); change.setEdit(new MultiTextEdit()); Tree.Expression e = sa.getSpecifierExpression().getExpression(); if (e!=null) { final String name = id.getText(); if (e.getTerm() instanceof Tree.FunctionArgument) { //convert anon functions to typed named argument //i.e. (Param param) => result; //becomes function fun(Param param) => result; //and (Param param) { return result; }; //becomes function fun(Param param) { return result; } //and void (Param param) {}; //becomes void fun(Param param) {} Tree.FunctionArgument fa = (Tree.FunctionArgument) e.getTerm(); if (!fa.getParameterLists().isEmpty()) { int startIndex = fa.getParameterLists().get(0).getStartIndex(); if (fa.getType().getToken()==null) { //only really necessary if the anon //function has a block instead of => change.addEdit(new InsertEdit(startIndex, "function ")); } change.addEdit(new InsertEdit(startIndex, name)); try { //if it is an anon function with a body, //we must remove the trailing ; which is //required by the named arg list syntax if (fa.getBlock()!=null && doc.getChar(sa.getStopIndex())==';') { change.addEdit(new DeleteEdit(sa.getStopIndex(), 1)); } } catch (Exception ex) {} } } else { //convert other args to specified named args //i.e. arg; //becomes name = arg; change.addEdit(new InsertEdit(sa.getStartIndex(), name + " = ")); } if (change.getEdit().hasChildren()) { proposals.add(new FillInArgumentNameProposal(name, change)); } } } } }
0true
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_FillInArgumentNameProposal.java
3,597
private static ThreadLocal<NumericTokenStream> tokenStream4 = new ThreadLocal<NumericTokenStream>() { @Override protected NumericTokenStream initialValue() { return new NumericTokenStream(4); } };
0true
src_main_java_org_elasticsearch_index_mapper_core_NumberFieldMapper.java
800
public class PercolateSourceBuilder implements ToXContent { private DocBuilder docBuilder; private QueryBuilder queryBuilder; private FilterBuilder filterBuilder; private Integer size; private Boolean sort; private List<SortBuilder> sorts; private Boolean trackScores; private HighlightBuilder highlightBuilder; private List<FacetBuilder> facets; private List<AggregationBuilder> aggregations; public DocBuilder percolateDocument() { if (docBuilder == null) { docBuilder = new DocBuilder(); } return docBuilder; } public DocBuilder getDoc() { return docBuilder; } /** * Sets the document to run the percolate queries against. */ public PercolateSourceBuilder setDoc(DocBuilder docBuilder) { this.docBuilder = docBuilder; return this; } public QueryBuilder getQueryBuilder() { return queryBuilder; } /** * Sets a query to reduce the number of percolate queries to be evaluated and score the queries that match based * on this query. */ public PercolateSourceBuilder setQueryBuilder(QueryBuilder queryBuilder) { this.queryBuilder = queryBuilder; return this; } public FilterBuilder getFilterBuilder() { return filterBuilder; } /** * Sets a filter to reduce the number of percolate queries to be evaluated. */ public PercolateSourceBuilder setFilterBuilder(FilterBuilder filterBuilder) { this.filterBuilder = filterBuilder; return this; } /** * Limits the maximum number of percolate query matches to be returned. */ public PercolateSourceBuilder setSize(int size) { this.size = size; return this; } /** * Similar to {@link #setTrackScores(boolean)}, but whether to sort by the score descending. */ public PercolateSourceBuilder setSort(boolean sort) { if (sort) { addSort(new ScoreSortBuilder()); } else { this.sorts = null; } return this; } /** * Adds a sort builder. Only sorting by score desc is supported. */ public PercolateSourceBuilder addSort(SortBuilder sort) { if (sorts == null) { sorts = Lists.newArrayList(); } sorts.add(sort); return this; } /** * Whether to compute a score for each match and include it in the response. The score is based on * {@link #setQueryBuilder(QueryBuilder)}. */ public PercolateSourceBuilder setTrackScores(boolean trackScores) { this.trackScores = trackScores; return this; } /** * Enables highlighting for the percolate document. For each matched percolate query, the percolate document is highlighted. */ public PercolateSourceBuilder setHighlightBuilder(HighlightBuilder highlightBuilder) { this.highlightBuilder = highlightBuilder; return this; } /** * Add a facet definition. */ public PercolateSourceBuilder addFacet(FacetBuilder facetBuilder) { if (facets == null) { facets = Lists.newArrayList(); } facets.add(facetBuilder); return this; } /** * Add an aggregation definition. 
*/ public PercolateSourceBuilder addAggregation(AggregationBuilder aggregationBuilder) { if (aggregations == null) { aggregations = Lists.newArrayList(); } aggregations.add(aggregationBuilder); return this; } public BytesReference buildAsBytes(XContentType contentType) throws SearchSourceBuilderException { try { XContentBuilder builder = XContentFactory.contentBuilder(contentType); toXContent(builder, ToXContent.EMPTY_PARAMS); return builder.bytes(); } catch (Exception e) { throw new SearchSourceBuilderException("Failed to build search source", e); } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); if (docBuilder != null) { docBuilder.toXContent(builder, params); } if (queryBuilder != null) { builder.field("query"); queryBuilder.toXContent(builder, params); } if (filterBuilder != null) { builder.field("filter"); filterBuilder.toXContent(builder, params); } if (size != null) { builder.field("size", size); } if (sorts != null) { builder.startArray("sort"); for (SortBuilder sort : sorts) { builder.startObject(); sort.toXContent(builder, params); builder.endObject(); } builder.endArray(); } if (trackScores != null) { builder.field("track_scores", trackScores); } if (highlightBuilder != null) { highlightBuilder.toXContent(builder, params); } if (facets != null) { builder.field("facets"); builder.startObject(); for (FacetBuilder facet : facets) { facet.toXContent(builder, params); } builder.endObject(); } if (aggregations != null) { builder.field("aggregations"); builder.startObject(); for (AbstractAggregationBuilder aggregation : aggregations) { aggregation.toXContent(builder, params); } builder.endObject(); } builder.endObject(); return builder; } public static DocBuilder docBuilder() { return new DocBuilder(); } public static class DocBuilder implements ToXContent { private BytesReference doc; public DocBuilder setDoc(BytesReference doc) { this.doc = doc; return this; } public DocBuilder setDoc(String field, Object value) { Map<String, Object> values = new HashMap<String, Object>(2); values.put(field, value); setDoc(values); return this; } public DocBuilder setDoc(String doc) { this.doc = new BytesArray(doc); return this; } public DocBuilder setDoc(XContentBuilder doc) { this.doc = doc.bytes(); return this; } public DocBuilder setDoc(Map doc) { return setDoc(doc, PercolateRequest.contentType); } public DocBuilder setDoc(Map doc, XContentType contentType) { try { return setDoc(XContentFactory.contentBuilder(contentType).map(doc)); } catch (IOException e) { throw new ElasticsearchGenerationException("Failed to generate [" + doc + "]", e); } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { XContentType contentType = XContentFactory.xContentType(doc); if (contentType == builder.contentType()) { builder.rawField("doc", doc); } else { XContentParser parser = XContentFactory.xContent(contentType).createParser(doc); try { parser.nextToken(); builder.field("doc"); builder.copyCurrentStructure(parser); } finally { parser.close(); } } return builder; } } }
0true
src_main_java_org_elasticsearch_action_percolate_PercolateSourceBuilder.java
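A brief sketch of how the PercolateSourceBuilder above is meant to be composed; the field names and values are illustrative, and JSON is assumed as the content type:

```java
import org.elasticsearch.action.percolate.PercolateSourceBuilder;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilders;

public class PercolateSourceExample {
    public static void main(String[] args) {
        PercolateSourceBuilder source = new PercolateSourceBuilder()
                // the document the registered percolate queries are run against
                .setDoc(PercolateSourceBuilder.docBuilder().setDoc("message", "a red car"))
                // narrow and score the candidate queries
                .setQueryBuilder(QueryBuilders.termQuery("color", "red"))
                .setSize(10)            // cap the number of matches returned
                .setTrackScores(true);  // include a score per match

        // Serialize to the wire format the percolate API expects.
        BytesReference bytes = source.buildAsBytes(XContentType.JSON);
        System.out.println(bytes.toUtf8());
    }
}
```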
3,505
public class MapperException extends ElasticsearchException { public MapperException(String message) { super(message); } public MapperException(String message, Throwable cause) { super(message, cause); } }
0true
src_main_java_org_elasticsearch_index_mapper_MapperException.java
814
return getDatabase().getStorage().callInLock(new Callable<Integer>() { @Override public Integer call() throws Exception { return classes.size(); } }, false);
0true
core_src_main_java_com_orientechnologies_orient_core_metadata_schema_OSchemaShared.java
1,171
BaseTransportResponseHandler<BenchmarkMessageResponse> handler = new BaseTransportResponseHandler<BenchmarkMessageResponse>() { @Override public BenchmarkMessageResponse newInstance() { return new BenchmarkMessageResponse(); } @Override public String executor() { return executor; } @Override public void handleResponse(BenchmarkMessageResponse response) { if (response.id() != id) { System.out.println("NO ID MATCH [" + response.id() + "] and [" + id + "]"); } latch.countDown(); } @Override public void handleException(TransportException exp) { exp.printStackTrace(); latch.countDown(); } };
0true
src_test_java_org_elasticsearch_benchmark_transport_TransportBenchmark.java
3,478
private CloseableThreadLocal<ParseContext> cache = new CloseableThreadLocal<ParseContext>() { @Override protected ParseContext initialValue() { return new ParseContext(index, indexSettings, docMapperParser, DocumentMapper.this, new ContentPath(0)); } };
0true
src_main_java_org_elasticsearch_index_mapper_DocumentMapper.java
1,467
public final class HazelcastCollectionRegion<Cache extends RegionCache> extends AbstractTransactionalDataRegion<Cache> implements CollectionRegion { public HazelcastCollectionRegion(final HazelcastInstance instance, final String regionName, final Properties props, final CacheDataDescription metadata, final Cache cache) { super(instance, regionName, props, metadata, cache); } public CollectionRegionAccessStrategy buildAccessStrategy(final AccessType accessType) throws CacheException { if (null == accessType) { throw new CacheException( "Got null AccessType while attempting to build CollectionRegionAccessStrategy. This can't happen!"); } if (AccessType.READ_ONLY.equals(accessType)) { return new CollectionRegionAccessStrategyAdapter( new ReadOnlyAccessDelegate<HazelcastCollectionRegion>(this, props)); } if (AccessType.NONSTRICT_READ_WRITE.equals(accessType)) { return new CollectionRegionAccessStrategyAdapter( new NonStrictReadWriteAccessDelegate<HazelcastCollectionRegion>(this, props)); } if (AccessType.READ_WRITE.equals(accessType)) { return new CollectionRegionAccessStrategyAdapter( new ReadWriteAccessDelegate<HazelcastCollectionRegion>(this, props)); } if (AccessType.TRANSACTIONAL.equals(accessType)) { throw new CacheException("Transactional access is not currently supported by Hazelcast."); } throw new CacheException("Got unknown AccessType " + accessType + " while attempting to build CollectionRegionAccessStrategy."); } }
0true
hazelcast-hibernate_hazelcast-hibernate3_src_main_java_com_hazelcast_hibernate_region_HazelcastCollectionRegion.java
1,090
public class PartitioningStrategyConfigReadOnly extends PartitioningStrategyConfig { public PartitioningStrategyConfigReadOnly(PartitioningStrategyConfig config) { super(config); } public PartitioningStrategyConfig setPartitioningStrategyClass(String partitionStrategyClass) { throw new UnsupportedOperationException("This config is read-only"); } public PartitioningStrategyConfig setPartitionStrategy(PartitioningStrategy partitionStrategy) { throw new UnsupportedOperationException("This config is read-only"); } }
0true
hazelcast_src_main_java_com_hazelcast_config_PartitioningStrategyConfigReadOnly.java
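A small sketch of the read-only wrapper's behavior above; the strategy class name passed to the setter is hypothetical:

```java
import com.hazelcast.config.PartitioningStrategyConfig;
import com.hazelcast.config.PartitioningStrategyConfigReadOnly;

public class ReadOnlyConfigDemo {
    public static void main(String[] args) {
        PartitioningStrategyConfig writable = new PartitioningStrategyConfig();
        PartitioningStrategyConfig readOnly = new PartitioningStrategyConfigReadOnly(writable);
        try {
            // Any mutator on the read-only view is rejected.
            readOnly.setPartitioningStrategyClass("com.example.MyStrategy"); // hypothetical class
        } catch (UnsupportedOperationException expected) {
            System.out.println(expected.getMessage()); // "This config is read-only"
        }
    }
}
```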
1,028
public class ClasspathXmlConfig extends Config { private final static ILogger logger = Logger.getLogger(ClasspathXmlConfig.class); /** * Creates a config which is loaded from a classpath resource using the * Thread.currentThread contextClassLoader. The System.properties are used to resolve variables * in the XML. * * @param resource the xml resource. * @throws IllegalArgumentException if the resource could not be found. * @throws com.hazelcast.core.HazelcastException if the XML content is invalid */ public ClasspathXmlConfig(String resource) { this(resource, System.getProperties()); } /** * Creates a config which is loaded from a classpath resource using the * Thread.currentThread contextClassLoader. * * @param resource the xml resource. * @param properties the Properties to resolve variables in the XML. * @throws IllegalArgumentException if the resource could not be found or if properties is null. * @throws com.hazelcast.core.HazelcastException if the XML content is invalid */ public ClasspathXmlConfig(String resource, Properties properties) { this(Thread.currentThread().getContextClassLoader(), resource, properties); } /** * Creates a config which is loaded from a classpath resource. The System.properties are used to * resolve variables in the XML. * * @param classLoader the ClassLoader used to load the resource. * @param resource the classpath resource * @throws IllegalArgumentException if classLoader or resource is null, or if the resource is not found. * @throws com.hazelcast.core.HazelcastException if the XML content is invalid */ public ClasspathXmlConfig(ClassLoader classLoader, String resource) { this(classLoader, resource, System.getProperties()); } /** * Creates a config which is loaded from a classpath resource. * * @param classLoader the ClassLoader used to load the resource. * @param resource the classpath resource * @param properties the Properties used to resolve variables in the XML. * @throws IllegalArgumentException if classLoader or resource is null, or if the resource is not found. * @throws com.hazelcast.core.HazelcastException if the XML content is invalid */ public ClasspathXmlConfig(ClassLoader classLoader, String resource, Properties properties) { if(classLoader == null){ throw new IllegalArgumentException("classLoader can't be null"); } if(resource == null){ throw new IllegalArgumentException("resource can't be null"); } if(properties == null){ throw new IllegalArgumentException("properties can't be null"); } logger.info("Configuring Hazelcast from '" + resource + "'."); InputStream in = classLoader.getResourceAsStream(resource); if (in == null) { throw new IllegalArgumentException("Specified resource '" + resource + "' could not be found!"); } new XmlConfigBuilder(in).setProperties(properties).build(this); } }
0true
hazelcast_src_main_java_com_hazelcast_config_ClasspathXmlConfig.java
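Typical use of the ClasspathXmlConfig constructors above, assuming a hazelcast.xml sits on the application classpath:

```java
import com.hazelcast.config.ClasspathXmlConfig;
import com.hazelcast.config.Config;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;

public class ClasspathConfigBootstrap {
    public static void main(String[] args) {
        // Resolves ${...} variables in the XML against System.getProperties().
        Config config = new ClasspathXmlConfig("hazelcast.xml");
        HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
        System.out.println(hz.getName());
        hz.shutdown();
    }
}
```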
1,824
constructors[ENTRY_SET] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() { public IdentifiedDataSerializable createNew(Integer arg) { return new MapEntrySet(); } };
0true
hazelcast_src_main_java_com_hazelcast_map_MapDataSerializerHook.java
84
public interface ClientEngine { int getClientEndpointCount(); InternalPartitionService getPartitionService(); ClusterService getClusterService(); SerializationService getSerializationService(); EventService getEventService(); TransactionManagerService getTransactionManagerService(); ProxyService getProxyService(); Config getConfig(); ILogger getLogger(Class clazz); ILogger getLogger(String className); Object toObject(Data data); Data toData(Object obj); Address getMasterAddress(); Address getThisAddress(); MemberImpl getLocalMember(); SecurityContext getSecurityContext(); }
0true
hazelcast_src_main_java_com_hazelcast_client_ClientEngine.java
2,007
public static class SimpleMapStore<K, V> extends MapStoreAdapter<K, V> { public final Map<K, V> store; private boolean loadAllKeys = true; public SimpleMapStore() { store = new ConcurrentHashMap<K, V>(); } public SimpleMapStore(final Map<K, V> store) { this.store = store; } @Override public void delete(final K key) { store.remove(key); } @Override public V load(final K key) { return store.get(key); } @Override public void store(final K key, final V value) { store.put(key, value); } public Set<K> loadAllKeys() { if (loadAllKeys) { return store.keySet(); } return null; } public void setLoadAllKeys(boolean loadAllKeys) { this.loadAllKeys = loadAllKeys; } @Override public void storeAll(final Map<K, V> kvMap) { store.putAll(kvMap); } }
0true
hazelcast_src_test_java_com_hazelcast_map_mapstore_MapStoreTest.java
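A sketch wiring the SimpleMapStore test helper above into a map configuration, assuming the standard MapStoreConfig API; the map name "default" matches the helper's usual usage in these tests:

```java
import com.hazelcast.config.Config;
import com.hazelcast.config.MapStoreConfig;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;
import com.hazelcast.map.mapstore.MapStoreTest;

public class MapStoreWiring {
    public static void main(String[] args) {
        // SimpleMapStore is the public static nested helper shown above.
        MapStoreConfig storeConfig = new MapStoreConfig()
                .setEnabled(true)
                .setImplementation(new MapStoreTest.SimpleMapStore<String, String>());

        Config config = new Config();
        config.getMapConfig("default").setMapStoreConfig(storeConfig);

        HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
        IMap<String, String> map = hz.getMap("default");
        map.put("k", "v"); // store(k, v) is invoked on the backing SimpleMapStore
        hz.shutdown();
    }
}
```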
1,536
@SuppressWarnings({ "unchecked" }) public class OObjectCustomSerializerIterator<TYPE> implements Iterator<TYPE>, Serializable { private static final long serialVersionUID = -4012483076050044405L; private final ORecord<?> sourceRecord; private final Iterator<? extends Object> underlying; private final Class<?> deserializeClass; public OObjectCustomSerializerIterator(final Class<?> iDeserializeClass, final ORecord<?> iSourceRecord, final Iterator<? extends Object> iIterator) { this.sourceRecord = iSourceRecord; this.underlying = iIterator; this.deserializeClass = iDeserializeClass; } public TYPE next() { final Object value = underlying.next(); return (TYPE) OObjectEntitySerializer.deserializeFieldValue(deserializeClass, value); } public boolean hasNext() { return underlying.hasNext(); } public void remove() { underlying.remove(); if (sourceRecord != null) sourceRecord.setDirty(); } }
0true
object_src_main_java_com_orientechnologies_orient_object_serialization_OObjectCustomSerializerIterator.java
1,086
public abstract class OSQLFilterItemFieldMultiAbstract extends OSQLFilterItemAbstract { private List<String> names; public OSQLFilterItemFieldMultiAbstract(final OSQLPredicate iQueryCompiled, final String iName, final List<String> iNames) { super(iQueryCompiled, iName); names = iNames; } public Object getValue(final OIdentifiable iRecord, OCommandContext iContext) { final ODocument doc = ((ODocument) iRecord); if (names.size() == 1) return transformValue(iRecord, iContext, ODocumentHelper.getIdentifiableValue(iRecord, names.get(0))); final String[] fieldNames = doc.fieldNames(); final Object[] values = new Object[fieldNames.length]; final List<OCollate> collates = new ArrayList<OCollate>(); for (int i = 0; i < fieldNames.length; ++i) { values[i] = doc.field(fieldNames[i]); collates.add(getCollateForField(doc, fieldNames[i])); } if (hasChainOperators()) { // TRANSFORM ALL THE VALUES for (int i = 0; i < values.length; ++i) values[i] = transformValue(iRecord, iContext, values[i]); } return new OQueryRuntimeValueMulti(this, values, collates); } }
0true
core_src_main_java_com_orientechnologies_orient_core_sql_filter_OSQLFilterItemFieldMultiAbstract.java
153
public interface ArchivedStructuredContentPublisher { void processStructuredContentArchive(StructuredContent sc, String baseTypeKey, String baseNameKey); }
0true
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_message_ArchivedStructuredContentPublisher.java
374
public class ExplicitNameFactoryBean implements FactoryBean { private final String name; private final String suffix; public ExplicitNameFactoryBean(String name, String suffix) { this.name = name; this.suffix = suffix; } public Object getObject() throws Exception { return name + "-" + suffix; } @SuppressWarnings("unchecked") public Class getObjectType() { return String.class; } public boolean isSingleton() { return false; } }
0true
common_src_main_java_org_broadleafcommerce_common_jmx_ExplicitNameFactoryBean.java
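The factory above simply joins name and suffix with a dash; for illustration, it can be exercised directly outside of Spring (the argument values are made up):

```java
public class ExplicitNameDemo {
    public static void main(String[] args) throws Exception {
        ExplicitNameFactoryBean factory = new ExplicitNameFactoryBean("cachemanager", "app1");
        System.out.println(factory.getObject());    // prints "cachemanager-app1"
        System.out.println(factory.isSingleton());  // false: a fresh value per request
    }
}
```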
1,271
new OProfilerHookValue() { public Object getValue() { return getHoles(); } }, "db.*.data.holes");
0true
core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_OStorageLocal.java
1,653
@RunWith(HazelcastSerialClassRunner.class) @Category(SlowTest.class) public class BackupTest extends HazelcastTestSupport { private static final String MAP_NAME = "default"; @BeforeClass @AfterClass public static void killAllHazelcastInstances() throws IOException { Hazelcast.shutdownAll(); } @Before public void gc() { Runtime.getRuntime().gc(); } @Test public void testGracefulShutdown() throws Exception { int size = 250000; TestHazelcastInstanceFactory nodeFactory = createHazelcastInstanceFactory(4); final Config config = new Config(); config.setProperty(GroupProperties.PROP_PARTITION_COUNT, "1111"); HazelcastInstance h1 = nodeFactory.newHazelcastInstance(config); IMap m1 = h1.getMap(MAP_NAME); for (int i = 0; i < size; i++) { m1.put(i, i); } HazelcastInstance h2 = nodeFactory.newHazelcastInstance(config); IMap m2 = h2.getMap(MAP_NAME); h1.shutdown(); assertEquals(size, m2.size()); HazelcastInstance h3 = nodeFactory.newHazelcastInstance(config); IMap m3 = h3.getMap(MAP_NAME); h2.shutdown(); assertEquals(size, m3.size()); HazelcastInstance h4 = nodeFactory.newHazelcastInstance(config); IMap m4 = h4.getMap(MAP_NAME); h3.shutdown(); assertEquals(size, m4.size()); } @Test public void testGracefulShutdown2() throws Exception { Config config = new Config(); config.getMapConfig(MAP_NAME).setBackupCount(2); config.setProperty(GroupProperties.PROP_PARTITION_COUNT, "1111"); TestHazelcastInstanceFactory f = createHazelcastInstanceFactory(6); final HazelcastInstance hz = f.newHazelcastInstance(config); final IMap<Object, Object> map = hz.getMap(MAP_NAME); final int size = 200000; for (int i = 0; i < size; i++) { map.put(i, i); } final HazelcastInstance hz2 = f.newHazelcastInstance(config); final IMap<Object, Object> map2 = hz2.getMap(MAP_NAME); assertEquals(size, map2.size()); final HazelcastInstance hz3 = f.newHazelcastInstance(config); final IMap<Object, Object> map3 = hz3.getMap(MAP_NAME); final HazelcastInstance hz4 = f.newHazelcastInstance(config); final IMap<Object, Object> map4 = hz4.getMap(MAP_NAME); assertEquals(size, map3.size()); assertEquals(size, map4.size()); final HazelcastInstance hz5 = f.newHazelcastInstance(config); final IMap<Object, Object> map5 = hz5.getMap(MAP_NAME); final HazelcastInstance hz6 = f.newHazelcastInstance(config); final IMap<Object, Object> map6 = hz6.getMap(MAP_NAME); assertEquals(size, map5.size()); assertEquals(size, map6.size()); hz.shutdown(); hz2.shutdown(); assertEquals(size, map3.size()); assertEquals(size, map4.size()); assertEquals(size, map5.size()); assertEquals(size, map6.size()); hz3.shutdown(); hz4.shutdown(); assertEquals(size, map5.size()); assertEquals(size, map6.size()); } @Test public void testGracefulShutdown3() throws Exception { Config config = new Config(); config.getMapConfig(MAP_NAME).setBackupCount(1); config.setProperty(GroupProperties.PROP_PARTITION_COUNT, "1111"); TestHazelcastInstanceFactory f = createHazelcastInstanceFactory(6); final HazelcastInstance hz = f.newHazelcastInstance(config); final IMap<Object, Object> map = hz.getMap(MAP_NAME); final int size = 200000; for (int i = 0; i < size; i++) { map.put(i, i); } final HazelcastInstance hz2 = f.newHazelcastInstance(config); final IMap<Object, Object> map2 = hz2.getMap(MAP_NAME); final HazelcastInstance hz3 = f.newHazelcastInstance(config); final IMap<Object, Object> map3 = hz3.getMap(MAP_NAME); final HazelcastInstance hz4 = f.newHazelcastInstance(config); final IMap<Object, Object> map4 = hz4.getMap(MAP_NAME); final HazelcastInstance hz5 = f.newHazelcastInstance(config); final 
IMap<Object, Object> map5 = hz5.getMap(MAP_NAME); final HazelcastInstance hz6 = f.newHazelcastInstance(config); final IMap<Object, Object> map6 = hz6.getMap(MAP_NAME); assertEquals(size, map2.size()); assertEquals(size, map3.size()); assertEquals(size, map4.size()); assertEquals(size, map5.size()); assertEquals(size, map6.size()); hz6.shutdown(); assertEquals(size, map.size()); assertEquals(size, map2.size()); assertEquals(size, map3.size()); assertEquals(size, map4.size()); assertEquals(size, map5.size()); hz2.shutdown(); assertEquals(size, map.size()); assertEquals(size, map3.size()); assertEquals(size, map4.size()); assertEquals(size, map5.size()); hz5.shutdown(); assertEquals(size, map.size()); assertEquals(size, map3.size()); assertEquals(size, map4.size()); hz3.shutdown(); assertEquals(size, map.size()); assertEquals(size, map4.size()); hz4.shutdown(); assertEquals(size, map.size()); } /** * Test for the issue https://code.google.com/p/hazelcast/issues/detail?id=275. */ @Test public void testBackupMigrationAndRecovery() throws Exception { testBackupMigrationAndRecovery(4, 1, 50000); } /** * Test for the issue https://code.google.com/p/hazelcast/issues/detail?id=395. */ @Test public void testBackupMigrationAndRecovery2() throws Exception { testBackupMigrationAndRecovery(6, 2, 50000); } private void testBackupMigrationAndRecovery(int nodeCount, int backupCount, int mapSize) throws Exception { TestHazelcastInstanceFactory nodeFactory = createHazelcastInstanceFactory(nodeCount); final String name = MAP_NAME; final Config config = new Config(); config.setProperty(GroupProperties.PROP_PARTITION_COUNT, "1111"); config.setProperty(GroupProperties.PROP_PARTITION_BACKUP_SYNC_INTERVAL, "3"); config.getMapConfig(name).setBackupCount(backupCount).setStatisticsEnabled(true); final HazelcastInstance[] instances = new HazelcastInstance[nodeCount]; HazelcastInstance hz = nodeFactory.newHazelcastInstance(config); instances[0] = hz; IMap map1 = hz.getMap(name); for (int i = 0; i < mapSize; i++) { map1.put(i, "value" + i); } checkMapSizes(mapSize, backupCount, instances); for (int i = 1; i < nodeCount; i++) { instances[i] = nodeFactory.newHazelcastInstance(config); checkMapSizes(mapSize, backupCount, instances); } final Random rand = new Random(); for (int i = 1; i < nodeCount; i++) { int ix; do { ix = rand.nextInt(nodeCount); } while (instances[ix] == null); final CountDownLatch latch = new CountDownLatch(1); // add listener instances[ix].getLifecycleService().addLifecycleListener(new LifecycleListener() { @Override public void stateChanged(LifecycleEvent event) { if (event.getState().equals(LifecycleEvent.LifecycleState.SHUTDOWN)) { latch.countDown(); } } }); latch.await(5, TimeUnit.SECONDS); // shutdown. TestUtil.terminateInstance(instances[ix]); instances[ix] = null; checkMapSizes(mapSize, backupCount, instances); } } private static void checkMapSizes(final int expectedSize, int backupCount, HazelcastInstance... 
instances) throws InterruptedException { int nodeCount = 0; final IMap[] maps = new IMap[instances.length]; for (int i = 0; i < 20; i++) { for (int j = 0; j < instances.length; j++) { final HazelcastInstance hz = instances[j]; if (hz != null) { if (i == 0) { maps[j] = hz.getMap(MAP_NAME); nodeCount++; } assertEquals(expectedSize, maps[j].size()); } } Thread.sleep(10); } final int expectedBackupSize = Math.min(nodeCount - 1, backupCount) * expectedSize; for (int i = 0; i < 1200; i++) { long ownedSize = getTotalOwnedEntryCount(maps); long backupSize = getTotalBackupEntryCount(maps); if (ownedSize == expectedSize && backupSize == expectedBackupSize) { int votes = 0; for (HazelcastInstance hz : instances) { if (hz != null) { votes += TestUtil.getNode(hz).getPartitionService().hasOnGoingMigration() ? 0 : 1; } } if (votes == nodeCount) { break; } } Thread.sleep(500); } long actualBackupSize = getTotalBackupEntryCount(maps); if (expectedBackupSize > actualBackupSize) { fail("Missing backups, node-count: " + nodeCount + ", expected:<" + expectedBackupSize + "> but was:<" + actualBackupSize + ">"); } } private static long getTotalOwnedEntryCount(IMap... maps) { long total = 0; for (IMap map : maps) { if (map != null) { total += map.getLocalMapStats().getOwnedEntryCount(); } } return total; } private static long getTotalBackupEntryCount(IMap... maps) { long total = 0; for (IMap map : maps) { if (map != null) { total += map.getLocalMapStats().getBackupEntryCount(); } } return total; } @Test public void testIssue177BackupCount() throws InterruptedException { final TestHazelcastInstanceFactory nodeFactory = createHazelcastInstanceFactory(10); final Config config = new Config(); final String name = MAP_NAME; config.getMapConfig(name).setBackupCount(1).setStatisticsEnabled(true); final Random rand = new Random(); final AtomicReferenceArray<HazelcastInstance> instances = new AtomicReferenceArray<HazelcastInstance>(10); final int count = 10000; final int totalCount = count * (instances.length() - 1); final CountDownLatch latch = new CountDownLatch(instances.length()); for (int i = 0; i < instances.length(); i++) { final int finalI = i; Thread thread = new Thread() { public void run() { try { Thread.sleep(3000 * finalI); HazelcastInstance instance = nodeFactory.newHazelcastInstance(config); instances.set(finalI, instance); Thread.sleep(rand.nextInt(100)); if (finalI != 0) { // do not run on master node, // let partition assignment be made during put ops. for (int j = 0; j < 10000; j++) { instance.getMap(name).put(getName() + "-" + j, "value"); } } } catch (InterruptedException e) { e.printStackTrace(); } finally { latch.countDown(); } } }; thread.start(); } assertTrue(latch.await(5, TimeUnit.MINUTES)); final int trials = 50; for (int i = 0; i < trials; i++) { long totalOwned = 0L; long totalBackup = 0L; for (int j = 0; j < instances.length(); j++) { HazelcastInstance hz = instances.get(j); LocalMapStats stats = hz.getMap(name).getLocalMapStats(); totalOwned += stats.getOwnedEntryCount(); totalBackup += stats.getBackupEntryCount(); } assertEquals("Owned entry count is wrong! ", totalCount, totalOwned); if (i < trials - 1) { if (totalBackup == totalCount) { break; } // check again after some time Thread.sleep(1000); } else { assertEquals("Backup entry count is wrong! ", totalCount, totalBackup); } } } /** * Test for issue #259. 
*/ @Test public void testBackupPutWhenOwnerNodeDead() throws InterruptedException { final TestHazelcastInstanceFactory nodeFactory = createHazelcastInstanceFactory(2); final String name = MAP_NAME; final HazelcastInstance hz = nodeFactory.newHazelcastInstance(); final HazelcastInstance hz2 = nodeFactory.newHazelcastInstance(); final IMap<Object, Object> map = hz2.getMap(name); final int size = 100000; final byte[] data = new byte[250]; final int threads = 100; final int l = size / threads; final CountDownLatch latch = new CountDownLatch(threads); ExecutorService ex = Executors.newFixedThreadPool(threads); new Thread() { public void run() { while (hz.getMap(name).size() < size / 2) { try { sleep(5); } catch (InterruptedException ignored) { return; } } TestUtil.terminateInstance(hz); } }.start(); for (int i = 0; i < threads; i++) { final int n = i; ex.execute(new Runnable() { public void run() { for (int j = (n * l); j < (n + 1) * l; j++) { map.put(j, data); try { Thread.sleep(1); } catch (InterruptedException ignored) { return; } } latch.countDown(); } }); } try { assertTrue(latch.await(5, TimeUnit.MINUTES)); assertEquals("Data lost!", size, map.size()); } finally { ex.shutdownNow(); } } /** * Test for issue #259. */ @Test public void testBackupRemoveWhenOwnerNodeDead() throws InterruptedException { final TestHazelcastInstanceFactory nodeFactory = createHazelcastInstanceFactory(2); final String name = MAP_NAME; final HazelcastInstance hz = nodeFactory.newHazelcastInstance(); final HazelcastInstance hz2 = nodeFactory.newHazelcastInstance(); final IMap<Object, Object> map = hz2.getMap(name); final int size = 100000; final int threads = 100; ExecutorService ex = Executors.newFixedThreadPool(threads); final int loadCount = 10; final CountDownLatch loadLatch = new CountDownLatch(loadCount); // initial load for (int i = 0; i < loadCount; i++) { final int n = i; ex.execute(new Runnable() { public void run() { int chunk = size / loadCount; for (int j = (n * chunk); j < (n + 1) * chunk; j++) { map.put(j, j); } loadLatch.countDown(); } }); } loadLatch.await(); new Thread() { public void run() { while (hz.getMap(name).size() > size / 2) { try { sleep(5); } catch (InterruptedException ignored) { return; } } TestUtil.terminateInstance(hz); } }.start(); final int chunk = size / threads; final CountDownLatch latch = new CountDownLatch(threads); for (int i = 0; i < threads; i++) { final int n = i; ex.execute(new Runnable() { public void run() { for (int j = (n * chunk); j < (n + 1) * chunk; j++) { map.remove(j); try { Thread.sleep(1); } catch (InterruptedException ignored) { } } latch.countDown(); } }); } try { assertTrue(latch.await(5, TimeUnit.MINUTES)); assertEquals("Remove failed!", 0, map.size()); } finally { ex.shutdown(); } } }
0true
hazelcast_src_test_java_com_hazelcast_map_BackupTest.java
819
public abstract class AtomicLongBaseOperation extends Operation implements PartitionAwareOperation, IdentifiedDataSerializable { protected String name; public AtomicLongBaseOperation() { } public AtomicLongBaseOperation(String name) { this.name = name; } public LongWrapper getNumber() { AtomicLongService service = getService(); return service.getNumber(name); } @Override public int getFactoryId() { return AtomicLongDataSerializerHook.F_ID; } @Override protected void writeInternal(ObjectDataOutput out) throws IOException { out.writeUTF(name); } @Override protected void readInternal(ObjectDataInput in) throws IOException { name = in.readUTF(); } @Override public void afterRun() throws Exception { } @Override public void beforeRun() throws Exception { } @Override public Object getResponse() { return null; } @Override public boolean returnsResponse() { return true; } }
0true
hazelcast_src_main_java_com_hazelcast_concurrent_atomiclong_operations_AtomicLongBaseOperation.java
3,266
private static class InnerSource extends IndexFieldData.XFieldComparatorSource { private final SearchScript script; private InnerSource(SearchScript script) { this.script = script; } @Override public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException { return new StringScriptDataComparator(numHits, script); } @Override public SortField.Type reducedType() { return SortField.Type.STRING; } }
0true
src_main_java_org_elasticsearch_index_fielddata_fieldcomparator_StringScriptDataComparator.java
1,266
public class OMultiFileSegment extends OSegment { protected OStorageSegmentConfiguration config; protected OFile[] files = new OFile[0]; private final String fileExtension; private final String type; private final long maxSize; @SuppressWarnings("unused") private final String defrag; private int fileStartSize; final private int fileMaxSize; private final int fileIncrementSize; private boolean wasSoftlyClosedAtPreviousTime = true; private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); public OMultiFileSegment(final OStorageLocalAbstract storage, final OStorageSegmentConfiguration config, final String fileExtension, final int roundMaxSize) throws IOException { super(storage, config.name); readWriteLock.writeLock().lock(); try { this.config = config; this.fileExtension = fileExtension; type = config.fileType; defrag = config.defrag; maxSize = OFileUtils.getSizeAsNumber(config.maxSize); fileStartSize = (int) OFileUtils.getSizeAsNumber(config.fileStartSize); final int tmpFileMaxSize = (int) OFileUtils.getSizeAsNumber(config.fileMaxSize); fileIncrementSize = (int) OFileUtils.getSizeAsNumber(config.fileIncrementSize); if (roundMaxSize > 0) // ROUND THE FILE SIZE TO AVOID ERRORS ON ROUNDING BY DIVIDING FOR FIXED RECORD SIZE fileMaxSize = (tmpFileMaxSize / roundMaxSize) * roundMaxSize; else fileMaxSize = tmpFileMaxSize; // INSTANTIATE ALL THE FILES int perFileMaxSize; if (config.infoFiles.length == 0) { // EMPTY FILE: CREATE THE FIRST FILE BY DEFAULT files = new OFile[1]; files[0] = OFileFactory.instance().create(type, storage.getVariableParser().resolveVariables(this.config.getLocation() + "/" + name + "." + 0 + this.fileExtension), storage.getMode()); perFileMaxSize = fileMaxSize; files[0].setMaxSize(perFileMaxSize); files[0].setIncrementSize(fileIncrementSize); } else { files = new OFile[config.infoFiles.length]; for (int i = 0; i < files.length; ++i) { files[i] = OFileFactory.instance().create(type, storage.getVariableParser().resolveVariables(config.infoFiles[i].path), storage.getMode()); perFileMaxSize = fileMaxSize; files[i].setMaxSize(perFileMaxSize); files[i].setIncrementSize(fileIncrementSize); } } } finally { readWriteLock.writeLock().unlock(); } } public void open() throws IOException { readWriteLock.writeLock().lock(); try { // @TODO: LAZY OPEN FILES for (OFile file : files) if (!file.open()) { // LAST TIME THE FILE WAS NOT CLOSED IN SOFT WAY OLogManager.instance().warn(this, "segment file '%s' was not closed correctly last time", OFileUtils.getPath(file.getName())); // TODO VERIFY DATA? 
wasSoftlyClosedAtPreviousTime = false; } } finally { readWriteLock.writeLock().unlock(); } } /** * Create the first file for current segment * * @param iStartSize * @throws IOException */ public void create(final int iStartSize) throws IOException { readWriteLock.writeLock().lock(); try { files = new OFile[1]; fileStartSize = iStartSize; createNewFile(); } finally { readWriteLock.writeLock().unlock(); } } public void close() throws IOException { readWriteLock.writeLock().lock(); try { for (OFile file : files) { if (file != null) file.close(); } } finally { readWriteLock.writeLock().unlock(); } } public void delete() throws IOException { readWriteLock.writeLock().lock(); try { for (OFile file : files) { if (file != null) file.delete(); } } finally { readWriteLock.writeLock().unlock(); } } public boolean exists() { readWriteLock.readLock().lock(); try { return files[0].exists(); } finally { readWriteLock.readLock().unlock(); } } public void truncate() throws IOException { readWriteLock.writeLock().lock(); try { // SHRINK TO 0 files[0].shrink(0); if (files.length > 1) { // LEAVE JUST ONE FILE for (int i = 1; i < files.length; ++i) { if (files[i] != null) files[i].delete(); } // UPDATE FILE STRUCTURE final OFile f = files[0]; files = new OFile[1]; files[0] = f; // UPDATE CONFIGURATION final OStorageFileConfiguration fileConfig = config.infoFiles[0]; config.infoFiles = new OStorageFileConfiguration[1]; config.infoFiles[0] = fileConfig; config.root.update(); } } finally { readWriteLock.writeLock().unlock(); } } public void synch() throws IOException { readWriteLock.readLock().lock(); try { for (OFile file : files) { if (file != null && file.isOpen()) file.synch(); } } finally { readWriteLock.readLock().unlock(); } } public void setSoftlyClosed(boolean softlyClosed) throws IOException { readWriteLock.writeLock().lock(); try { for (OFile file : files) if (file != null && file.isOpen()) file.setSoftlyClosed(softlyClosed); } finally { readWriteLock.writeLock().unlock(); } } public OStorageSegmentConfiguration getConfig() { readWriteLock.readLock().lock(); try { return config; } finally { readWriteLock.readLock().unlock(); } } public long getFilledUpTo() { readWriteLock.readLock().lock(); try { long filled = 0; for (OFile file : files) filled += file.getFilledUpTo(); return filled; } finally { readWriteLock.readLock().unlock(); } } public long getSize() { readWriteLock.readLock().lock(); try { long size = 0; for (OFile file : files) size += file.getFileSize(); return size; } finally { readWriteLock.readLock().unlock(); } } /** * Find free space for iRecordSize bytes. 
* * @param iRecordSize * @return a pair file-id/file-pos * @throws IOException */ public long[] allocateSpace(final int iRecordSize) throws IOException { readWriteLock.writeLock().lock(); try { // IT'S PREFERABLE TO FIND SPACE WITHOUT ENLARGE ANY FILES: FIND THE FIRST FILE WITH FREE SPACE TO USE OFile file; for (int i = 0; i < files.length; ++i) { file = files[i]; if (file.getFreeSpace() >= iRecordSize) // FOUND: RETURN THIS OFFSET return new long[] { i, file.allocateSpace(iRecordSize) }; } // NOT FOUND: CHECK IF CAN OVERSIZE SOME FILES for (int i = 0; i < files.length; ++i) { file = files[i]; if (file.canOversize(iRecordSize)) { // FOUND SPACE: ENLARGE IT return new long[] { i, file.allocateSpace(iRecordSize) }; } } // TRY TO CREATE A NEW FILE if (maxSize > 0 && getSize() >= maxSize) // OUT OF MAX SIZE throw new OStorageException("Unable to allocate the requested space of " + iRecordSize + " bytes because the segment is full: max-Size=" + maxSize + ", currentSize=" + getFilledUpTo()); // COPY THE OLD ARRAY TO THE NEW ONE OFile[] newFiles = new OFile[files.length + 1]; System.arraycopy(files, 0, newFiles, 0, files.length); files = newFiles; // CREATE THE NEW FILE AND PUT IT AS LAST OF THE ARRAY file = createNewFile(); file.allocateSpace(iRecordSize); config.root.update(); return new long[] { files.length - 1, 0 }; } finally { readWriteLock.writeLock().unlock(); } } /** * Return the absolute position receiving the pair file-id/file-pos. * * @param iFilePosition * as pair file-id/file-pos * @return */ public long getAbsolutePosition(final long[] iFilePosition) { readWriteLock.readLock().lock(); try { long position = 0; for (int i = 0; i < iFilePosition[0]; ++i) { position += fileMaxSize; } return position + iFilePosition[1]; } finally { readWriteLock.readLock().unlock(); } } public long[] getRelativePosition(final long iPosition) { readWriteLock.readLock().lock(); try { if (iPosition < fileMaxSize) return new long[] { 0l, iPosition }; final int fileNum = (int) (iPosition / fileMaxSize); if (fileNum >= files.length || fileNum < 0) throw new ODatabaseException("Record position #" + iPosition + " was bound to file #" + fileNum + " that is out of limit (files range 0-" + (files.length - 1) + ")"); final int fileRec = (int) (iPosition % fileMaxSize); if (fileNum >= files.length) throw new ODatabaseException("Record position #" + iPosition + " was bound to file #" + fileNum + " but configured files are only " + files.length); if (fileRec >= files[fileNum].getFilledUpTo() || fileRec < 0) throw new ODatabaseException("Record position #" + iPosition + " was bound to file #" + fileNum + " but the position #" + fileRec + " is out of file size " + files[fileNum].getFilledUpTo()); return new long[] { fileNum, fileRec }; } finally { readWriteLock.readLock().unlock(); } } private OFile createNewFile() throws IOException { final int num = files.length - 1; final OFile file = OFileFactory.instance().create(type, config.getLocation() + "/" + name + "." 
+ num + fileExtension, storage.getMode()); file.setMaxSize(fileMaxSize); file.create(fileStartSize); files[num] = file; addInfoFileConfigEntry(file); return file; } private void addInfoFileConfigEntry(final OFile file) throws IOException { OStorageFileConfiguration[] newConfigFiles = new OStorageFileConfiguration[config.infoFiles.length + 1]; for (int i = 0; i < config.infoFiles.length; ++i) newConfigFiles[i] = config.infoFiles[i]; config.infoFiles = newConfigFiles; // CREATE A NEW ENTRY FOR THE NEW FILE String fileNameToStore = storage.getVariableParser().convertPathToRelative(OFileUtils.getPath(file.getPath())); final OStorageSegmentConfiguration template = config.root.fileTemplate; config.infoFiles[config.infoFiles.length - 1] = new OStorageFileConfiguration(config, fileNameToStore, template.fileType, template.fileMaxSize, template.fileIncrementSize); } public long allocateSpaceContinuously(final int iSize) throws IOException { readWriteLock.writeLock().lock(); try { // IT'S PREFERABLE TO FIND SPACE WITHOUT ENLARGE ANY FILES: FIND THE FIRST FILE WITH FREE SPACE TO USE OFile file; int remainingSize = iSize; // IF SOME FILES ALREADY CREATED long offset = -1; int fileNumber = -1; if (files.length > 0) { // CHECK IF THERE IS FREE SPACE IN LAST FILE IN CHAIN file = files[files.length - 1]; if (file.getFreeSpace() > 0) { fileNumber = files.length - 1; if (remainingSize > file.getFreeSpace()) { remainingSize -= file.getFreeSpace(); offset = file.allocateSpace(file.getFreeSpace()); } else { return (long) (files.length - 1) * fileMaxSize + file.allocateSpace(remainingSize); } } // NOT FOUND FREE SPACE: CHECK IF CAN OVERSIZE LAST FILE final long oversize = fileMaxSize - file.getFileSize(); if (oversize > 0 && remainingSize > 0) { fileNumber = files.length - 1; if (remainingSize > oversize) { remainingSize -= oversize; long newOffset = file.allocateSpace(oversize); // SAVE OFFSET IF IT WASN'T SAVED EARLIER if (offset == -1) offset = newOffset; } else { long newOffset = file.allocateSpace(remainingSize); if (offset == -1) offset = newOffset; if (fileNumber == -1) { fileNumber = files.length - 1; } return (long) fileNumber * fileMaxSize + offset; } } } // CREATE NEW FILE BECAUSE THERE IS NO FILES OR WE CANNOT ENLARGE EXISTING ENOUGH if (remainingSize > 0) { if (maxSize > 0 && getSize() >= maxSize) // OUT OF MAX SIZE throw new OStorageException("Unable to allocate the requested space of " + iSize + " bytes because the segment is full: max-Size=" + maxSize + ", currentSize=" + getFilledUpTo()); // COPY THE OLD ARRAY TO THE NEW ONE OFile[] newFiles = new OFile[files.length + 1]; for (int i = 0; i < files.length; ++i) newFiles[i] = files[i]; files = newFiles; // CREATE THE NEW FILE AND PUT IT AS LAST OF THE ARRAY file = createNewFile(); file.allocateSpace(iSize); config.root.update(); if (fileNumber == -1) { fileNumber = files.length - 1; } if (offset == -1) offset = 0; } return (long) fileNumber * fileMaxSize + offset; } finally { readWriteLock.writeLock().unlock(); } } public void writeContinuously(long iPosition, byte[] iData) throws IOException { readWriteLock.writeLock().lock(); try { long[] pos = getRelativePosition(iPosition); // IT'S PREFERABLE TO FIND SPACE WITHOUT ENLARGE ANY FILES: FIND THE FIRST FILE WITH FREE SPACE TO USE OFile file; int remainingSize = iData.length; long offset = pos[1]; for (int i = (int) pos[0]; remainingSize > 0; ++i) { file = files[i]; if (remainingSize > file.getFilledUpTo() - offset) { if (file.getFilledUpTo() < offset) { throw new ODatabaseException("range check! 
" + file.getFilledUpTo() + " " + offset); } file.write(offset, iData, (int) (file.getFilledUpTo() - offset), iData.length - remainingSize); remainingSize -= (file.getFilledUpTo() - offset); } else { file.write(offset, iData, remainingSize, iData.length - remainingSize); remainingSize = 0; } offset = 0; } } finally { readWriteLock.writeLock().unlock(); } } public void writeContinuously(long iPosition, byte[] iData, int arrayOffset, int length) throws IOException { readWriteLock.writeLock().lock(); try { long[] pos = getRelativePosition(iPosition); // IT'S PREFERABLE TO FIND SPACE WITHOUT ENLARGE ANY FILES: FIND THE FIRST FILE WITH FREE SPACE TO USE OFile file; int remainingSize = length; long offset = pos[1]; for (int i = (int) pos[0]; remainingSize > 0; ++i) { file = files[i]; if (remainingSize > file.getFilledUpTo() - offset) { if (file.getFilledUpTo() < offset) { throw new ODatabaseException("range check! " + file.getFilledUpTo() + " " + offset); } file.write(offset, iData, (int) (file.getFilledUpTo() - offset), arrayOffset + iData.length - remainingSize); remainingSize -= (file.getFilledUpTo() - offset); } else { file.write(offset, iData, remainingSize, arrayOffset + iData.length - remainingSize); remainingSize = 0; } offset = 0; } } finally { readWriteLock.writeLock().unlock(); } } public void readContinuously(final long iPosition, byte[] iBuffer, final int iSize) throws IOException { readWriteLock.readLock().lock(); try { long[] pos = getRelativePosition(iPosition); // IT'S PREFERABLE TO FIND SPACE WITHOUT ENLARGE ANY FILES: FIND THE FIRST FILE WITH FREE SPACE TO USE OFile file; int remainingSize = iSize; long offset = pos[1]; assert offset < Integer.MAX_VALUE; assert offset > -1; for (int i = (int) pos[0]; remainingSize > 0; ++i) { file = files[i]; if (remainingSize > file.getFilledUpTo() - offset) { if (file.getFilledUpTo() < offset) { throw new ODatabaseException("range check! " + file.getFilledUpTo() + " " + offset); } int toRead = (int) (file.getFilledUpTo() - offset); file.read(offset, iBuffer, toRead, iSize - remainingSize); remainingSize -= toRead; } else { file.read(offset, iBuffer, remainingSize, iSize - remainingSize); remainingSize = 0; } offset = 0; } } finally { readWriteLock.readLock().unlock(); } } public void rename(String iOldName, String iNewName) { readWriteLock.writeLock().lock(); try { for (OFile file : files) { final String osFileName = file.getName(); if (osFileName.startsWith(name)) { final File newFile = new File(storage.getStoragePath() + "/" + iNewName + osFileName.substring(osFileName.lastIndexOf(name) + name.length())); for (OStorageFileConfiguration conf : config.infoFiles) { if (conf.parent.name.equals(name)) conf.parent.name = iNewName; if (conf.path.endsWith(osFileName)) conf.path = new String(conf.path.replace(osFileName, newFile.getName())); } boolean renamed = file.renameTo(newFile); while (!renamed) { OMemoryWatchDog.freeMemoryForResourceCleanup(100); renamed = file.renameTo(newFile); } } } } finally { readWriteLock.writeLock().unlock(); } } public boolean wasSoftlyClosedAtPreviousTime() { readWriteLock.readLock().lock(); try { return wasSoftlyClosedAtPreviousTime; } finally { readWriteLock.readLock().unlock(); } } }
1no label
core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_OMultiFileSegment.java
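A minimal usage sketch for the continuous-space API in the OMultiFileSegment sample above; the initialized segment reference and the payload are assumptions for illustration, not part of the sample:

// Hypothetical caller: reserve a contiguous logical span, then write it and read it back.
static long storeRecord(OMultiFileSegment segment, byte[] record) throws IOException {
  // The returned position encodes fileNumber * fileMaxSize + offsetInFile.
  long position = segment.allocateSpaceContinuously(record.length);
  // The write may spill across several chained files.
  segment.writeContinuously(position, record);
  return position;
}

static byte[] loadRecord(OMultiFileSegment segment, long position, int size) throws IOException {
  byte[] buffer = new byte[size];
  segment.readContinuously(position, buffer, size);
  return buffer;
}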
1,389
public class MappingMetaDataParserTests extends ElasticsearchTestCase {

  @Test
  public void testParseIdAlone() throws Exception {
    MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
        new MappingMetaData.Id("id"),
        new MappingMetaData.Routing(true, "routing"),
        new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime"), false);
    byte[] bytes = jsonBuilder().startObject()
        .field("field1", "value1").field("field2", "value2")
        .field("id", "id").field("routing", "routing_value").field("timestamp", "1")
        .endObject().bytes().toBytes();
    MappingMetaData.ParseContext parseContext = md.createParseContext(null, "routing_value", "1");
    md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
    assertThat(parseContext.id(), equalTo("id"));
    assertThat(parseContext.idResolved(), equalTo(true));
    assertThat(parseContext.routing(), nullValue());
    assertThat(parseContext.routingResolved(), equalTo(false));
    assertThat(parseContext.timestamp(), nullValue());
    assertThat(parseContext.timestampResolved(), equalTo(false));
  }

  @Test
  public void testFailIfIdIsNoValue() throws Exception {
    MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
        new MappingMetaData.Id("id"),
        new MappingMetaData.Routing(true, "routing"),
        new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime"), false);
    byte[] bytes = jsonBuilder().startObject()
        .field("field1", "value1").field("field2", "value2")
        .startArray("id").value("id").endArray()
        .field("routing", "routing_value").field("timestamp", "1")
        .endObject().bytes().toBytes();
    MappingMetaData.ParseContext parseContext = md.createParseContext(null, "routing_value", "1");
    try {
      md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
      fail();
    } catch (MapperParsingException ex) {
      // expected: the id value is an array, not a scalar
    }

    bytes = jsonBuilder().startObject()
        .field("field1", "value1").field("field2", "value2")
        .startObject("id").field("x", "id").endObject()
        .field("routing", "routing_value").field("timestamp", "1")
        .endObject().bytes().toBytes();
    parseContext = md.createParseContext(null, "routing_value", "1");
    try {
      md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
      fail();
    } catch (MapperParsingException ex) {
      // expected: the id value is an object, not a scalar
    }
  }

  @Test
  public void testParseRoutingAlone() throws Exception {
    MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
        new MappingMetaData.Id("id"),
        new MappingMetaData.Routing(true, "routing"),
        new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime"), false);
    byte[] bytes = jsonBuilder().startObject()
        .field("field1", "value1").field("field2", "value2")
        .field("id", "id").field("routing", "routing_value").field("timestamp", "1")
        .endObject().bytes().toBytes();
    MappingMetaData.ParseContext parseContext = md.createParseContext("id", null, "1");
    md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
    assertThat(parseContext.id(), nullValue());
    assertThat(parseContext.idResolved(), equalTo(false));
    assertThat(parseContext.routing(), equalTo("routing_value"));
    assertThat(parseContext.routingResolved(), equalTo(true));
    assertThat(parseContext.timestamp(), nullValue());
    assertThat(parseContext.timestampResolved(), equalTo(false));
  }

  @Test
  public void testParseTimestampAlone() throws Exception {
    MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
        new MappingMetaData.Id("id"),
        new MappingMetaData.Routing(true, "routing"),
        new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime"), false);
    byte[] bytes = jsonBuilder().startObject()
        .field("field1", "value1").field("field2", "value2")
        .field("id", "id").field("routing", "routing_value").field("timestamp", "1")
        .endObject().bytes().toBytes();
    MappingMetaData.ParseContext parseContext = md.createParseContext("id", "routing_value1", null);
    md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
    assertThat(parseContext.id(), nullValue());
    assertThat(parseContext.idResolved(), equalTo(false));
    assertThat(parseContext.routing(), nullValue());
    assertThat(parseContext.routingResolved(), equalTo(false));
    assertThat(parseContext.timestamp(), equalTo("1"));
    assertThat(parseContext.timestampResolved(), equalTo(true));
  }

  @Test
  public void testParseIdAndRoutingAndTimestamp() throws Exception {
    MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
        new MappingMetaData.Id("id"),
        new MappingMetaData.Routing(true, "routing"),
        new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime"), false);
    byte[] bytes = jsonBuilder().startObject()
        .field("field1", "value1").field("field2", "value2")
        .field("id", "id").field("routing", "routing_value").field("timestamp", "1")
        .endObject().bytes().toBytes();
    MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
    md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
    assertThat(parseContext.id(), equalTo("id"));
    assertThat(parseContext.routing(), equalTo("routing_value"));
    assertThat(parseContext.timestamp(), equalTo("1"));
  }

  @Test
  public void testParseIdAndRoutingAndTimestampWithPath() throws Exception {
    MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
        new MappingMetaData.Id("obj1.id"),
        new MappingMetaData.Routing(true, "obj1.routing"),
        new MappingMetaData.Timestamp(true, "obj2.timestamp", "dateOptionalTime"), false);
    byte[] bytes = jsonBuilder().startObject()
        .field("field1", "value1").field("field2", "value2")
        .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
        .startObject("obj1").field("id", "id").field("routing", "routing_value").endObject()
        .startObject("obj2").field("timestamp", "1").endObject()
        .endObject().bytes().toBytes();
    MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
    md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
    assertThat(parseContext.id(), equalTo("id"));
    assertThat(parseContext.routing(), equalTo("routing_value"));
    assertThat(parseContext.timestamp(), equalTo("1"));
  }

  @Test
  public void testParseIdWithPath() throws Exception {
    MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
        new MappingMetaData.Id("obj1.id"),
        new MappingMetaData.Routing(true, "obj1.routing"),
        new MappingMetaData.Timestamp(true, "obj2.timestamp", "dateOptionalTime"), false);
    byte[] bytes = jsonBuilder().startObject()
        .field("field1", "value1").field("field2", "value2")
        .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
        .startObject("obj1").field("id", "id").field("routing", "routing_value").endObject()
        .startObject("obj2").field("timestamp", "1").endObject()
        .endObject().bytes().toBytes();
    MappingMetaData.ParseContext parseContext = md.createParseContext(null, "routing_value", "2");
    md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
    assertThat(parseContext.id(), equalTo("id"));
    assertThat(parseContext.idResolved(), equalTo(true));
    assertThat(parseContext.routing(), nullValue());
    assertThat(parseContext.routingResolved(), equalTo(false));
    assertThat(parseContext.timestamp(), nullValue());
    assertThat(parseContext.timestampResolved(), equalTo(false));
  }

  @Test
  public void testParseRoutingWithPath() throws Exception {
    MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
        new MappingMetaData.Id("obj1.id"),
        new MappingMetaData.Routing(true, "obj1.routing"),
        new MappingMetaData.Timestamp(true, "obj2.timestamp", "dateOptionalTime"), false);
    byte[] bytes = jsonBuilder().startObject()
        .field("field1", "value1").field("field2", "value2")
        .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
        .startObject("obj1").field("id", "id").field("routing", "routing_value").endObject()
        .startObject("obj2").field("timestamp", "1").endObject()
        .endObject().bytes().toBytes();
    MappingMetaData.ParseContext parseContext = md.createParseContext("id", null, "2");
    md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
    assertThat(parseContext.id(), nullValue());
    assertThat(parseContext.idResolved(), equalTo(false));
    assertThat(parseContext.routing(), equalTo("routing_value"));
    assertThat(parseContext.routingResolved(), equalTo(true));
    assertThat(parseContext.timestamp(), nullValue());
    assertThat(parseContext.timestampResolved(), equalTo(false));
  }

  @Test
  public void testParseTimestampWithPath() throws Exception {
    MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
        new MappingMetaData.Id("obj1.id"),
        new MappingMetaData.Routing(true, "obj1.routing"),
        new MappingMetaData.Timestamp(true, "obj2.timestamp", "dateOptionalTime"), false);
    byte[] bytes = jsonBuilder().startObject()
        .field("field1", "value1").field("field2", "value2")
        .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
        .startObject("obj1").field("routing", "routing_value").endObject()
        .startObject("obj2").field("timestamp", "1").endObject()
        .endObject().bytes().toBytes();
    MappingMetaData.ParseContext parseContext = md.createParseContext(null, "routing_value1", null);
    md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
    assertThat(parseContext.id(), nullValue());
    assertThat(parseContext.idResolved(), equalTo(false));
    assertThat(parseContext.routing(), nullValue());
    assertThat(parseContext.routingResolved(), equalTo(false));
    assertThat(parseContext.timestamp(), equalTo("1"));
    assertThat(parseContext.timestampResolved(), equalTo(true));
  }

  @Test
  public void testParseIdAndRoutingAndTimestampWithinSamePath() throws Exception {
    MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
        new MappingMetaData.Id("obj1.id"),
        new MappingMetaData.Routing(true, "obj1.routing"),
        new MappingMetaData.Timestamp(true, "obj1.timestamp", "dateOptionalTime"), false);
    byte[] bytes = jsonBuilder().startObject()
        .field("field1", "value1").field("field2", "value2")
        .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
        .startObject("obj1").field("id", "id").field("routing", "routing_value").field("timestamp", "1").endObject()
        .startObject("obj2").field("field1", "value1").endObject()
        .endObject().bytes().toBytes();
    MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
    md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
    assertThat(parseContext.id(), equalTo("id"));
    assertThat(parseContext.routing(), equalTo("routing_value"));
    assertThat(parseContext.timestamp(), equalTo("1"));
  }

  @Test
  public void testParseIdAndRoutingAndTimestampWithinSamePathAndMoreLevels() throws Exception {
    MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
        new MappingMetaData.Id("obj1.obj0.id"),
        new MappingMetaData.Routing(true, "obj1.obj2.routing"),
        new MappingMetaData.Timestamp(true, "obj1.obj3.timestamp", "dateOptionalTime"), false);
    byte[] bytes = jsonBuilder().startObject()
        .field("field1", "value1").field("field2", "value2")
        .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
        .startObject("obj1")
            .startObject("obj0")
                .field("id", "id")
            .endObject()
            .startObject("obj2")
                .field("routing", "routing_value")
            .endObject()
            .startObject("obj3")
                .field("timestamp", "1")
            .endObject()
        .endObject()
        .startObject("obj2").field("field1", "value1").endObject()
        .endObject().bytes().toBytes();
    MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
    md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
    assertThat(parseContext.id(), equalTo("id"));
    assertThat(parseContext.routing(), equalTo("routing_value"));
    assertThat(parseContext.timestamp(), equalTo("1"));
  }

  @Test
  public void testParseIdAndRoutingAndTimestampWithSameRepeatedObject() throws Exception {
    MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
        new MappingMetaData.Id("obj1.id"),
        new MappingMetaData.Routing(true, "obj1.routing"),
        new MappingMetaData.Timestamp(true, "obj1.timestamp", "dateOptionalTime"), false);
    byte[] bytes = jsonBuilder().startObject()
        .field("field1", "value1").field("field2", "value2")
        .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
        .startObject("obj1").field("id", "id").endObject()
        .startObject("obj1").field("routing", "routing_value").endObject()
        .startObject("obj1").field("timestamp", "1").endObject()
        .endObject().bytes().toBytes();
    MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
    md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
    assertThat(parseContext.id(), equalTo("id"));
    assertThat(parseContext.routing(), equalTo("routing_value"));
    assertThat(parseContext.timestamp(), equalTo("1"));
  }

  // @Test
  public void testParseIdRoutingTimestampWithRepeatedField() throws Exception {
    MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
        new MappingMetaData.Id("field1"),
        new MappingMetaData.Routing(true, "field1.field1"),
        new MappingMetaData.Timestamp(true, "field1", "dateOptionalTime"), false);
    byte[] bytes = jsonBuilder().startObject()
        .field("aaa", "wr")
        .array("arr1", "1", "2", "3")
        .field("field1", "foo")
        .field("field1", "bar")
        .field("test", "value")
        .field("zzz", "wr")
        .endObject().bytes().toBytes();
    MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
    md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
    assertThat(parseContext.id(), equalTo("foo"));
    assertThat(parseContext.routing(), nullValue());
    assertThat(parseContext.timestamp(), equalTo("foo"));
  }

  @Test
  public void testParseNoIdRoutingWithRepeatedFieldAndObject() throws Exception {
    MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
        new MappingMetaData.Id("id"),
        new MappingMetaData.Routing(true, "field1.field1.field2"),
        new MappingMetaData.Timestamp(true, "field1", "dateOptionalTime"), false);
    byte[] bytes = jsonBuilder().startObject()
        .field("aaa", "wr")
        .array("arr1", "1", "2", "3")
        .field("field1", "foo")
        .startObject("field1").field("field2", "bar").endObject()
        .field("test", "value")
        .field("zzz", "wr")
        .endObject().bytes().toBytes();
    MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
    md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
    assertThat(parseContext.id(), nullValue());
    assertThat(parseContext.routing(), nullValue());
    assertThat(parseContext.timestamp(), equalTo("foo"));
  }

  @Test
  public void testParseRoutingWithRepeatedFieldAndValidRouting() throws Exception {
    MappingMetaData md = new MappingMetaData("type1", new CompressedString(""),
        new MappingMetaData.Id(null),
        new MappingMetaData.Routing(true, "field1.field2"),
        new MappingMetaData.Timestamp(true, "field1", "dateOptionalTime"), false);
    byte[] bytes = jsonBuilder().startObject()
        .field("aaa", "wr")
        .array("arr1", "1", "2", "3")
        .field("field1", "foo")
        .startObject("field1").field("field2", "bar").endObject()
        .field("test", "value")
        .field("zzz", "wr")
        .endObject().bytes().toBytes();
    MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
    md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
    assertThat(parseContext.id(), nullValue());
    assertThat(parseContext.routing(), equalTo("bar"));
    assertThat(parseContext.timestamp(), equalTo("foo"));
  }
}
0true
src_test_java_org_elasticsearch_cluster_metadata_MappingMetaDataParserTests.java
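The *WithPath tests above hinge on matching a dotted path such as "obj1.routing" against the current object nesting while streaming tokens. A toy, self-contained model of that matching rule, not the MappingMetaData implementation:

// objectStack holds the names of the currently open objects, outermost first.
static boolean pathMatches(java.util.List<String> objectStack, String fieldName, String dottedPath) {
  String[] parts = dottedPath.split("\\.");
  if (parts.length != objectStack.size() + 1)
    return false;
  for (int i = 0; i < objectStack.size(); i++)
    if (!parts[i].equals(objectStack.get(i)))
      return false;
  return parts[parts.length - 1].equals(fieldName);
}

// e.g. pathMatches(Arrays.asList("obj1"), "routing", "obj1.routing") returns true,
// mirroring how routing_value is extracted in the tests above.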
570
public class OpenIndexRequest extends AcknowledgedRequest<OpenIndexRequest> {

  private String[] indices;
  private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, false, true);

  OpenIndexRequest() {
  }

  /**
   * Constructs a new open index request for the specified index.
   */
  public OpenIndexRequest(String... indices) {
    this.indices = indices;
  }

  @Override
  public ActionRequestValidationException validate() {
    ActionRequestValidationException validationException = null;
    if (indices == null || indices.length == 0) {
      validationException = addValidationError("index is missing", validationException);
    }
    return validationException;
  }

  /**
   * The indices to be opened
   *
   * @return the indices to be opened
   */
  String[] indices() {
    return indices;
  }

  /**
   * Sets the indices to be opened
   *
   * @param indices the indices to be opened
   * @return the request itself
   */
  public OpenIndexRequest indices(String... indices) {
    this.indices = indices;
    return this;
  }

  /**
   * Specifies what type of requested indices to ignore and how to deal with wildcard expressions.
   * For example indices that don't exist.
   *
   * @return the current behaviour when it comes to index names and wildcard indices expressions
   */
  public IndicesOptions indicesOptions() {
    return indicesOptions;
  }

  /**
   * Specifies what type of requested indices to ignore and how to deal with wildcard expressions.
   * For example indices that don't exist.
   *
   * @param indicesOptions the desired behaviour regarding indices to ignore and wildcard indices expressions
   * @return the request itself
   */
  public OpenIndexRequest indicesOptions(IndicesOptions indicesOptions) {
    this.indicesOptions = indicesOptions;
    return this;
  }

  @Override
  public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    indices = in.readStringArray();
    readTimeout(in);
    indicesOptions = IndicesOptions.readIndicesOptions(in);
  }

  @Override
  public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    out.writeStringArray(indices);
    writeTimeout(out);
    indicesOptions.writeIndicesOptions(out);
  }
}
0true
src_main_java_org_elasticsearch_action_admin_indices_open_OpenIndexRequest.java
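A brief sketch of driving the OpenIndexRequest above: build it, tune wildcard handling with the same IndicesOptions factory the class itself uses, and validate. The index names are illustrative values:

OpenIndexRequest request = new OpenIndexRequest("logs-2014-01", "logs-2014-02");
// ignoreUnavailable=true, allowNoIndices=true, expand to open=true, expand to closed=true
request.indicesOptions(IndicesOptions.fromOptions(true, true, true, true));
ActionRequestValidationException error = request.validate();
if (error != null)
  throw error; // would report "index is missing" had no index been set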
1,288
public class ReviewStatusType {

  private static final long serialVersionUID = 1L;

  private static final Map<String, ReviewStatusType> TYPES = new HashMap<String, ReviewStatusType>();

  public static final ReviewStatusType PENDING = new ReviewStatusType("PENDING");
  public static final ReviewStatusType APPROVED = new ReviewStatusType("APPROVED");
  public static final ReviewStatusType REJECTED = new ReviewStatusType("REJECTED");

  public static ReviewStatusType getInstance(final String type) {
    return TYPES.get(type);
  }

  private String type;

  public ReviewStatusType() {
  }

  public ReviewStatusType(final String type) {
    setType(type);
  }

  public String getType() {
    return type;
  }

  private void setType(String type) {
    this.type = type;
    if (!TYPES.containsKey(type)) {
      TYPES.put(type, this);
    }
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + ((type == null) ? 0 : type.hashCode());
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj)
      return true;
    if (obj == null)
      return false;
    if (getClass() != obj.getClass())
      return false;
    ReviewStatusType other = (ReviewStatusType) obj;
    if (type == null) {
      if (other.type != null)
        return false;
    } else if (!type.equals(other.type))
      return false;
    return true;
  }
}
1no label
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_rating_service_type_ReviewStatusType.java
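The ReviewStatusType sample above follows Broadleaf's extensible type-safe-enum pattern: constructing an instance self-registers it in TYPES, so other modules can add values that getInstance later resolves from persisted string codes. A usage sketch, where "FLAGGED" is a hypothetical extension rather than a Broadleaf value:

ReviewStatusType pending = ReviewStatusType.getInstance("PENDING"); // resolves to the PENDING constant
ReviewStatusType flagged = new ReviewStatusType("FLAGGED");         // self-registers on construction
assert flagged.equals(ReviewStatusType.getInstance("FLAGGED"));     // now resolvable from its string code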
581
executionService.scheduleWithFixedDelay(executorName, new Runnable() {
  public void run() {
    sendMasterConfirmation();
  }
}, masterConfirmationInterval, masterConfirmationInterval, TimeUnit.SECONDS);
1no label
hazelcast_src_main_java_com_hazelcast_cluster_ClusterServiceImpl.java
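The Hazelcast snippet above schedules a periodic master-confirmation heartbeat on a named internal executor. For comparison, the same fixed-delay pattern with plain java.util.concurrent; the 30-second interval is an illustrative assumption, not Hazelcast's default:

// assumes java.util.concurrent.{Executors, ScheduledExecutorService, TimeUnit} are imported
ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
final long intervalSeconds = 30;
scheduler.scheduleWithFixedDelay(new Runnable() {
  public void run() {
    sendMasterConfirmation(); // the heartbeat method from the sample above
  }
}, intervalSeconds, intervalSeconds, TimeUnit.SECONDS);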