Dataset schema:

Column        Type           Values
Unnamed: 0    int64          0 to 6.45k (row index)
func          string         lengths 37 to 161k chars
target        class label    2 classes: 0 (true), 1 (no label)
project       string         lengths 33 to 167 chars

Each sample row below is shown as: the row index ("Unnamed: 0"), the func source, the target label, and the project file path.
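For illustration only, a hypothetical Java carrier for one row of this schema (the type and field names are made up; only the column semantics come from the header above):

public class DatasetRow {
    public long index;     // "Unnamed: 0", 0 to 6.45k
    public String func;    // Java source snippet, 37 to 161k chars
    public int target;     // class label: 0 = true, 1 = no label
    public String project; // underscore-flattened file path
}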

Row 3,696
func:
public abstract class AbstractExecutorThreadFactory implements ThreadFactory {

    protected final ClassLoader classLoader;
    protected final ThreadGroup threadGroup;

    public AbstractExecutorThreadFactory(ThreadGroup threadGroup, ClassLoader classLoader) {
        this.threadGroup = threadGroup;
        this.classLoader = classLoader;
    }

    @Override
    public final Thread newThread(Runnable r) {
        final Thread t = createThread(r);
        ClassLoader cl = classLoader != null ? classLoader : Thread.currentThread().getContextClassLoader();
        t.setContextClassLoader(cl);
        if (t.isDaemon()) {
            t.setDaemon(false);
        }
        if (t.getPriority() != Thread.NORM_PRIORITY) {
            t.setPriority(Thread.NORM_PRIORITY);
        }
        return t;
    }

    protected abstract Thread createThread(Runnable r);
}
target: 1 (no label)
project: hazelcast_src_main_java_com_hazelcast_util_executor_AbstractExecutorThreadFactory.java
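In the factory above, newThread is final: it normalizes the context class loader, daemon flag, and priority, and defers construction to createThread. A minimal subclass might look like this (the class name and prefix are illustrative, not from Hazelcast):

import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

public class NamedExecutorThreadFactory extends AbstractExecutorThreadFactory {
    private final AtomicInteger threadId = new AtomicInteger();
    private final String namePrefix;

    public NamedExecutorThreadFactory(ThreadGroup threadGroup, ClassLoader classLoader, String namePrefix) {
        super(threadGroup, classLoader);
        this.namePrefix = namePrefix;
    }

    @Override
    protected Thread createThread(Runnable r) {
        // Only construction is customized; the base class fixes daemon status and priority.
        return new Thread(threadGroup, r, namePrefix + threadId.incrementAndGet());
    }
}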

Row 4,443
func:
public class IndicesFieldDataCache extends AbstractComponent implements RemovalListener<IndicesFieldDataCache.Key, AtomicFieldData> { Cache<Key, AtomicFieldData> cache; private volatile String size; private volatile long sizeInBytes; private volatile TimeValue expire; @Inject public IndicesFieldDataCache(Settings settings) { super(settings); this.size = componentSettings.get("size", "-1"); this.sizeInBytes = componentSettings.getAsMemory("size", "-1").bytes(); this.expire = componentSettings.getAsTime("expire", null); buildCache(); } private void buildCache() { CacheBuilder<Key, AtomicFieldData> cacheBuilder = CacheBuilder.newBuilder() .removalListener(this); if (sizeInBytes > 0) { cacheBuilder.maximumWeight(sizeInBytes).weigher(new FieldDataWeigher()); } // defaults to 4, but this is a busy map for all indices, increase it a bit cacheBuilder.concurrencyLevel(16); if (expire != null && expire.millis() > 0) { cacheBuilder.expireAfterAccess(expire.millis(), TimeUnit.MILLISECONDS); } logger.debug("using size [{}] [{}], expire [{}]", size, new ByteSizeValue(sizeInBytes), expire); cache = cacheBuilder.build(); } public void close() { cache.invalidateAll(); } public IndexFieldDataCache buildIndexFieldDataCache(@Nullable IndexService indexService, Index index, FieldMapper.Names fieldNames, FieldDataType fieldDataType) { return new IndexFieldCache(indexService, index, fieldNames, fieldDataType); } @Override public void onRemoval(RemovalNotification<Key, AtomicFieldData> notification) { Key key = notification.getKey(); if (key == null || key.listener == null) { return; // nothing to do here really... } IndexFieldCache indexCache = key.indexCache; long sizeInBytes = key.sizeInBytes; AtomicFieldData value = notification.getValue(); if (sizeInBytes == -1 && value != null) { sizeInBytes = value.getMemorySizeInBytes(); } key.listener.onUnload(indexCache.fieldNames, indexCache.fieldDataType, notification.wasEvicted(), sizeInBytes, value); } public static class FieldDataWeigher implements Weigher<Key, AtomicFieldData> { @Override public int weigh(Key key, AtomicFieldData fieldData) { int weight = (int) Math.min(fieldData.getMemorySizeInBytes(), Integer.MAX_VALUE); return weight == 0 ? 1 : weight; } } /** * A specific cache instance for the relevant parameters of it (index, fieldNames, fieldType). 
*/ class IndexFieldCache implements IndexFieldDataCache, SegmentReader.CoreClosedListener { @Nullable private final IndexService indexService; final Index index; final FieldMapper.Names fieldNames; final FieldDataType fieldDataType; IndexFieldCache(@Nullable IndexService indexService, Index index, FieldMapper.Names fieldNames, FieldDataType fieldDataType) { this.indexService = indexService; this.index = index; this.fieldNames = fieldNames; this.fieldDataType = fieldDataType; } @Override public <FD extends AtomicFieldData, IFD extends IndexFieldData<FD>> FD load(final AtomicReaderContext context, final IFD indexFieldData) throws Exception { final Key key = new Key(this, context.reader().getCoreCacheKey()); //noinspection unchecked return (FD) cache.get(key, new Callable<AtomicFieldData>() { @Override public AtomicFieldData call() throws Exception { SegmentReaderUtils.registerCoreListener(context.reader(), IndexFieldCache.this); AtomicFieldData fieldData = indexFieldData.loadDirect(context); if (indexService != null) { ShardId shardId = ShardUtils.extractShardId(context.reader()); if (shardId != null) { IndexShard shard = indexService.shard(shardId.id()); if (shard != null) { key.listener = shard.fieldData(); } } } if (key.listener != null) { key.listener.onLoad(fieldNames, fieldDataType, fieldData); } return fieldData; } }); } @Override public void onClose(Object coreKey) { cache.invalidate(new Key(this, coreKey)); } @Override public void clear() { for (Key key : cache.asMap().keySet()) { if (key.indexCache.index.equals(index)) { cache.invalidate(key); } } } @Override public void clear(String fieldName) { for (Key key : cache.asMap().keySet()) { if (key.indexCache.index.equals(index)) { if (key.indexCache.fieldNames.fullName().equals(fieldName)) { cache.invalidate(key); } } } } @Override public void clear(Object coreCacheKey) { cache.invalidate(new Key(this, coreCacheKey)); } } public static class Key { public final IndexFieldCache indexCache; public final Object readerKey; @Nullable public IndexFieldDataCache.Listener listener; // optional stats listener long sizeInBytes = -1; // optional size in bytes (we keep it here in case the values are soft references) Key(IndexFieldCache indexCache, Object readerKey) { this.indexCache = indexCache; this.readerKey = readerKey; } @Override public boolean equals(Object o) { if (this == o) return true; Key key = (Key) o; if (!indexCache.equals(key.indexCache)) return false; if (!readerKey.equals(key.readerKey)) return false; return true; } @Override public int hashCode() { int result = indexCache.hashCode(); result = 31 * result + readerKey.hashCode(); return result; } } }
target: 1 (no label)
project: src_main_java_org_elasticsearch_indices_fielddata_cache_IndicesFieldDataCache.java
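IndicesFieldDataCache's buildCache wires a size-weighted, optionally expiring Guava cache with the component itself as removal listener. A self-contained sketch of the same CacheBuilder configuration (key and value types here are illustrative):

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import com.google.common.cache.Weigher;
import java.util.concurrent.TimeUnit;

public class WeightedCacheSketch {
    public static void main(String[] args) {
        Cache<String, byte[]> cache = CacheBuilder.newBuilder()
                .maximumWeight(1024)          // total weight budget, here "bytes"
                .weigher(new Weigher<String, byte[]>() {
                    @Override
                    public int weigh(String key, byte[] value) {
                        // Never report 0, mirroring FieldDataWeigher above.
                        return value.length == 0 ? 1 : value.length;
                    }
                })
                .expireAfterAccess(10, TimeUnit.MINUTES)
                .removalListener(new RemovalListener<String, byte[]>() {
                    @Override
                    public void onRemoval(RemovalNotification<String, byte[]> notification) {
                        System.out.println("removed " + notification.getKey() + ": " + notification.getCause());
                    }
                })
                .build();
        cache.put("field", new byte[128]);
        System.out.println(cache.getIfPresent("field").length); // 128
    }
}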

Row 1,457
func:
public static interface CustomAttributesProvider {
    Map<String, String> buildAttributes();
}
target: 0 (true)
project: src_main_java_org_elasticsearch_cluster_node_DiscoveryNodeService.java
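The interface above is a one-method callback for contributing node attributes; assuming it is nested in DiscoveryNodeService as the project path suggests, a hypothetical provider could be:

import java.util.Collections;
import java.util.Map;

public class RackAttributesProvider implements DiscoveryNodeService.CustomAttributesProvider {
    @Override
    public Map<String, String> buildAttributes() {
        // One static attribute; a real provider might read the environment instead.
        return Collections.singletonMap("rack", "rack-1");
    }
}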

Row 1,390
func:
public class MetaData implements Iterable<IndexMetaData> { public interface Custom { interface Factory<T extends Custom> { String type(); T readFrom(StreamInput in) throws IOException; void writeTo(T customIndexMetaData, StreamOutput out) throws IOException; T fromXContent(XContentParser parser) throws IOException; void toXContent(T customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException; /** * Returns true if this custom metadata should be persisted as part of global cluster state */ boolean isPersistent(); } } public static Map<String, Custom.Factory> customFactories = new HashMap<String, Custom.Factory>(); static { // register non plugin custom metadata registerFactory(RepositoriesMetaData.TYPE, RepositoriesMetaData.FACTORY); registerFactory(SnapshotMetaData.TYPE, SnapshotMetaData.FACTORY); registerFactory(RestoreMetaData.TYPE, RestoreMetaData.FACTORY); } /** * Register a custom index meta data factory. Make sure to call it from a static block. */ public static void registerFactory(String type, Custom.Factory factory) { customFactories.put(type, factory); } @Nullable public static <T extends Custom> Custom.Factory<T> lookupFactory(String type) { return customFactories.get(type); } public static <T extends Custom> Custom.Factory<T> lookupFactorySafe(String type) throws ElasticsearchIllegalArgumentException { Custom.Factory<T> factory = customFactories.get(type); if (factory == null) { throw new ElasticsearchIllegalArgumentException("No custom index metadata factory registered for type [" + type + "]"); } return factory; } public static final String SETTING_READ_ONLY = "cluster.blocks.read_only"; public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, RestStatus.FORBIDDEN, ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA); public static final MetaData EMPTY_META_DATA = builder().build(); public static final String GLOBAL_PERSISTENT_ONLY_PARAM = "global_persistent_only"; private final String uuid; private final long version; private final Settings transientSettings; private final Settings persistentSettings; private final Settings settings; private final ImmutableOpenMap<String, IndexMetaData> indices; private final ImmutableOpenMap<String, IndexTemplateMetaData> templates; private final ImmutableOpenMap<String, Custom> customs; private final transient int totalNumberOfShards; // Transient ? not serializable anyway? 
private final int numberOfShards; private final String[] allIndices; private final String[] allOpenIndices; private final String[] allClosedIndices; private final ImmutableOpenMap<String, ImmutableOpenMap<String, AliasMetaData>> aliases; private final ImmutableOpenMap<String, String[]> aliasAndIndexToIndexMap; @SuppressWarnings("unchecked") MetaData(String uuid, long version, Settings transientSettings, Settings persistentSettings, ImmutableOpenMap<String, IndexMetaData> indices, ImmutableOpenMap<String, IndexTemplateMetaData> templates, ImmutableOpenMap<String, Custom> customs) { this.uuid = uuid; this.version = version; this.transientSettings = transientSettings; this.persistentSettings = persistentSettings; this.settings = ImmutableSettings.settingsBuilder().put(persistentSettings).put(transientSettings).build(); this.indices = indices; this.customs = customs; this.templates = templates; int totalNumberOfShards = 0; int numberOfShards = 0; int numAliases = 0; for (ObjectCursor<IndexMetaData> cursor : indices.values()) { totalNumberOfShards += cursor.value.totalNumberOfShards(); numberOfShards += cursor.value.numberOfShards(); numAliases += cursor.value.aliases().size(); } this.totalNumberOfShards = totalNumberOfShards; this.numberOfShards = numberOfShards; // build all indices map List<String> allIndicesLst = Lists.newArrayList(); for (ObjectCursor<IndexMetaData> cursor : indices.values()) { allIndicesLst.add(cursor.value.index()); } allIndices = allIndicesLst.toArray(new String[allIndicesLst.size()]); int numIndices = allIndicesLst.size(); List<String> allOpenIndices = Lists.newArrayList(); List<String> allClosedIndices = Lists.newArrayList(); for (ObjectCursor<IndexMetaData> cursor : indices.values()) { IndexMetaData indexMetaData = cursor.value; if (indexMetaData.state() == IndexMetaData.State.OPEN) { allOpenIndices.add(indexMetaData.index()); } else if (indexMetaData.state() == IndexMetaData.State.CLOSE) { allClosedIndices.add(indexMetaData.index()); } } this.allOpenIndices = allOpenIndices.toArray(new String[allOpenIndices.size()]); this.allClosedIndices = allClosedIndices.toArray(new String[allClosedIndices.size()]); // build aliases map ImmutableOpenMap.Builder<String, Object> tmpAliases = ImmutableOpenMap.builder(numAliases); for (ObjectCursor<IndexMetaData> cursor : indices.values()) { IndexMetaData indexMetaData = cursor.value; String index = indexMetaData.index(); for (ObjectCursor<AliasMetaData> aliasCursor : indexMetaData.aliases().values()) { AliasMetaData aliasMd = aliasCursor.value; ImmutableOpenMap.Builder<String, AliasMetaData> indexAliasMap = (ImmutableOpenMap.Builder<String, AliasMetaData>) tmpAliases.get(aliasMd.alias()); if (indexAliasMap == null) { indexAliasMap = ImmutableOpenMap.builder(indices.size()); tmpAliases.put(aliasMd.alias(), indexAliasMap); } indexAliasMap.put(index, aliasMd); } } for (ObjectCursor<String> cursor : tmpAliases.keys()) { String alias = cursor.value; // if there is access to the raw values buffer of the map that the immutable maps wraps, then we don't need to use put, and just set array slots ImmutableOpenMap<String, AliasMetaData> map = ((ImmutableOpenMap.Builder) tmpAliases.get(alias)).cast().build(); tmpAliases.put(alias, map); } this.aliases = tmpAliases.<String, ImmutableOpenMap<String, AliasMetaData>>cast().build(); ImmutableOpenMap.Builder<String, Object> aliasAndIndexToIndexMap = ImmutableOpenMap.builder(numAliases + numIndices); for (ObjectCursor<IndexMetaData> cursor : indices.values()) { IndexMetaData indexMetaData = 
cursor.value; ObjectArrayList<String> indicesLst = (ObjectArrayList<String>) aliasAndIndexToIndexMap.get(indexMetaData.index()); if (indicesLst == null) { indicesLst = new ObjectArrayList<String>(); aliasAndIndexToIndexMap.put(indexMetaData.index(), indicesLst); } indicesLst.add(indexMetaData.index()); for (ObjectCursor<String> cursor1 : indexMetaData.aliases().keys()) { String alias = cursor1.value; indicesLst = (ObjectArrayList<String>) aliasAndIndexToIndexMap.get(alias); if (indicesLst == null) { indicesLst = new ObjectArrayList<String>(); aliasAndIndexToIndexMap.put(alias, indicesLst); } indicesLst.add(indexMetaData.index()); } } for (ObjectObjectCursor<String, Object> cursor : aliasAndIndexToIndexMap) { String[] indicesLst = ((ObjectArrayList<String>) cursor.value).toArray(String.class); aliasAndIndexToIndexMap.put(cursor.key, indicesLst); } this.aliasAndIndexToIndexMap = aliasAndIndexToIndexMap.<String, String[]>cast().build(); } public long version() { return this.version; } public String uuid() { return this.uuid; } /** * Returns the merges transient and persistent settings. */ public Settings settings() { return this.settings; } public Settings transientSettings() { return this.transientSettings; } public Settings persistentSettings() { return this.persistentSettings; } public ImmutableOpenMap<String, ImmutableOpenMap<String, AliasMetaData>> aliases() { return this.aliases; } public ImmutableOpenMap<String, ImmutableOpenMap<String, AliasMetaData>> getAliases() { return aliases(); } /** * Finds the specific index aliases that match with the specified aliases directly or partially via wildcards and * that point to the specified concrete indices or match partially with the indices via wildcards. * * @param aliases The names of the index aliases to find * @param concreteIndices The concrete indexes the index aliases must point to order to be returned. * @return the found index aliases grouped by index */ public ImmutableOpenMap<String, ImmutableList<AliasMetaData>> findAliases(final String[] aliases, String[] concreteIndices) { assert aliases != null; assert concreteIndices != null; if (concreteIndices.length == 0) { return ImmutableOpenMap.of(); } boolean matchAllAliases = matchAllAliases(aliases); ImmutableOpenMap.Builder<String, ImmutableList<AliasMetaData>> mapBuilder = ImmutableOpenMap.builder(); Iterable<String> intersection = HppcMaps.intersection(ObjectOpenHashSet.from(concreteIndices), indices.keys()); for (String index : intersection) { IndexMetaData indexMetaData = indices.get(index); List<AliasMetaData> filteredValues = Lists.newArrayList(); for (ObjectCursor<AliasMetaData> cursor : indexMetaData.getAliases().values()) { AliasMetaData value = cursor.value; if (matchAllAliases || Regex.simpleMatch(aliases, value.alias())) { filteredValues.add(value); } } if (!filteredValues.isEmpty()) { mapBuilder.put(index, ImmutableList.copyOf(filteredValues)); } } return mapBuilder.build(); } private boolean matchAllAliases(final String[] aliases) { for (String alias : aliases) { if (alias.equals("_all")) { return true; } } return aliases.length == 0; } /** * Checks if at least one of the specified aliases exists in the specified concrete indices. Wildcards are supported in the * alias names for partial matches. * * @param aliases The names of the index aliases to find * @param concreteIndices The concrete indexes the index aliases must point to order to be returned. * @return whether at least one of the specified aliases exists in one of the specified concrete indices. 
*/ public boolean hasAliases(final String[] aliases, String[] concreteIndices) { assert aliases != null; assert concreteIndices != null; if (concreteIndices.length == 0) { return false; } Iterable<String> intersection = HppcMaps.intersection(ObjectOpenHashSet.from(concreteIndices), indices.keys()); for (String index : intersection) { IndexMetaData indexMetaData = indices.get(index); List<AliasMetaData> filteredValues = Lists.newArrayList(); for (ObjectCursor<AliasMetaData> cursor : indexMetaData.getAliases().values()) { AliasMetaData value = cursor.value; if (Regex.simpleMatch(aliases, value.alias())) { filteredValues.add(value); } } if (!filteredValues.isEmpty()) { return true; } } return false; } /* * Finds all mappings for types and concrete indices. Types are expanded to * include all types that match the glob patterns in the types array. Empty * types array, null or {"_all"} will be expanded to all types available for * the given indices. */ public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> findMappings(String[] concreteIndices, final String[] types) { assert types != null; assert concreteIndices != null; if (concreteIndices.length == 0) { return ImmutableOpenMap.of(); } ImmutableOpenMap.Builder<String, ImmutableOpenMap<String, MappingMetaData>> indexMapBuilder = ImmutableOpenMap.builder(); Iterable<String> intersection = HppcMaps.intersection(ObjectOpenHashSet.from(concreteIndices), indices.keys()); for (String index : intersection) { IndexMetaData indexMetaData = indices.get(index); ImmutableOpenMap.Builder<String, MappingMetaData> filteredMappings; if (isAllTypes(types)) { indexMapBuilder.put(index, indexMetaData.getMappings()); // No types specified means get it all } else { filteredMappings = ImmutableOpenMap.builder(); for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.mappings()) { if (Regex.simpleMatch(types, cursor.key)) { filteredMappings.put(cursor.key, cursor.value); } } if (!filteredMappings.isEmpty()) { indexMapBuilder.put(index, filteredMappings.build()); } } } return indexMapBuilder.build(); } public ImmutableOpenMap<String, ImmutableList<IndexWarmersMetaData.Entry>> findWarmers(String[] concreteIndices, final String[] types, final String[] uncheckedWarmers) { assert uncheckedWarmers != null; assert concreteIndices != null; if (concreteIndices.length == 0) { return ImmutableOpenMap.of(); } // special _all check to behave the same like not specifying anything for the warmers (not for the indices) final String[] warmers = Strings.isAllOrWildcard(uncheckedWarmers) ? 
Strings.EMPTY_ARRAY : uncheckedWarmers; ImmutableOpenMap.Builder<String, ImmutableList<IndexWarmersMetaData.Entry>> mapBuilder = ImmutableOpenMap.builder(); Iterable<String> intersection = HppcMaps.intersection(ObjectOpenHashSet.from(concreteIndices), indices.keys()); for (String index : intersection) { IndexMetaData indexMetaData = indices.get(index); IndexWarmersMetaData indexWarmersMetaData = indexMetaData.custom(IndexWarmersMetaData.TYPE); if (indexWarmersMetaData == null || indexWarmersMetaData.entries().isEmpty()) { continue; } Collection<IndexWarmersMetaData.Entry> filteredWarmers = Collections2.filter(indexWarmersMetaData.entries(), new Predicate<IndexWarmersMetaData.Entry>() { @Override public boolean apply(IndexWarmersMetaData.Entry warmer) { if (warmers.length != 0 && types.length != 0) { return Regex.simpleMatch(warmers, warmer.name()) && Regex.simpleMatch(types, warmer.types()); } else if (warmers.length != 0) { return Regex.simpleMatch(warmers, warmer.name()); } else if (types.length != 0) { return Regex.simpleMatch(types, warmer.types()); } else { return true; } } }); if (!filteredWarmers.isEmpty()) { mapBuilder.put(index, ImmutableList.copyOf(filteredWarmers)); } } return mapBuilder.build(); } /** * Returns all the concrete indices. */ public String[] concreteAllIndices() { return allIndices; } public String[] getConcreteAllIndices() { return concreteAllIndices(); } public String[] concreteAllOpenIndices() { return allOpenIndices; } public String[] getConcreteAllOpenIndices() { return allOpenIndices; } public String[] concreteAllClosedIndices() { return allClosedIndices; } public String[] getConcreteAllClosedIndices() { return allClosedIndices; } /** * Returns indexing routing for the given index. */ public String resolveIndexRouting(@Nullable String routing, String aliasOrIndex) { // Check if index is specified by an alias ImmutableOpenMap<String, AliasMetaData> indexAliases = aliases.get(aliasOrIndex); if (indexAliases == null || indexAliases.isEmpty()) { return routing; } if (indexAliases.size() > 1) { throw new ElasticsearchIllegalArgumentException("Alias [" + aliasOrIndex + "] has more than one index associated with it [" + Arrays.toString(indexAliases.keys().toArray(String.class)) + "], can't execute a single index op"); } AliasMetaData aliasMd = indexAliases.values().iterator().next().value; if (aliasMd.indexRouting() != null) { if (routing != null) { if (!routing.equals(aliasMd.indexRouting())) { throw new ElasticsearchIllegalArgumentException("Alias [" + aliasOrIndex + "] has index routing associated with it [" + aliasMd.indexRouting() + "], and was provided with routing value [" + routing + "], rejecting operation"); } } routing = aliasMd.indexRouting(); } if (routing != null) { if (routing.indexOf(',') != -1) { throw new ElasticsearchIllegalArgumentException("index/alias [" + aliasOrIndex + "] provided with routing value [" + routing + "] that resolved to several routing values, rejecting operation"); } } return routing; } public Map<String, Set<String>> resolveSearchRouting(@Nullable String routing, String aliasOrIndex) { return resolveSearchRouting(routing, convertFromWildcards(new String[]{aliasOrIndex}, IndicesOptions.lenient())); } public Map<String, Set<String>> resolveSearchRouting(@Nullable String routing, String[] aliasesOrIndices) { if (isAllIndices(aliasesOrIndices)) { return resolveSearchRoutingAllIndices(routing); } aliasesOrIndices = convertFromWildcards(aliasesOrIndices, IndicesOptions.lenient()); if (aliasesOrIndices.length == 1) { return 
resolveSearchRoutingSingleValue(routing, aliasesOrIndices[0]); } Map<String, Set<String>> routings = null; Set<String> paramRouting = null; // List of indices that don't require any routing Set<String> norouting = new HashSet<String>(); if (routing != null) { paramRouting = Strings.splitStringByCommaToSet(routing); } for (String aliasOrIndex : aliasesOrIndices) { ImmutableOpenMap<String, AliasMetaData> indexToRoutingMap = aliases.get(aliasOrIndex); if (indexToRoutingMap != null && !indexToRoutingMap.isEmpty()) { for (ObjectObjectCursor<String, AliasMetaData> indexRouting : indexToRoutingMap) { if (!norouting.contains(indexRouting.key)) { if (!indexRouting.value.searchRoutingValues().isEmpty()) { // Routing alias if (routings == null) { routings = newHashMap(); } Set<String> r = routings.get(indexRouting.key); if (r == null) { r = new HashSet<String>(); routings.put(indexRouting.key, r); } r.addAll(indexRouting.value.searchRoutingValues()); if (paramRouting != null) { r.retainAll(paramRouting); } if (r.isEmpty()) { routings.remove(indexRouting.key); } } else { // Non-routing alias if (!norouting.contains(indexRouting.key)) { norouting.add(indexRouting.key); if (paramRouting != null) { Set<String> r = new HashSet<String>(paramRouting); if (routings == null) { routings = newHashMap(); } routings.put(indexRouting.key, r); } else { if (routings != null) { routings.remove(indexRouting.key); } } } } } } } else { // Index if (!norouting.contains(aliasOrIndex)) { norouting.add(aliasOrIndex); if (paramRouting != null) { Set<String> r = new HashSet<String>(paramRouting); if (routings == null) { routings = newHashMap(); } routings.put(aliasOrIndex, r); } else { if (routings != null) { routings.remove(aliasOrIndex); } } } } } if (routings == null || routings.isEmpty()) { return null; } return routings; } private Map<String, Set<String>> resolveSearchRoutingSingleValue(@Nullable String routing, String aliasOrIndex) { Map<String, Set<String>> routings = null; Set<String> paramRouting = null; if (routing != null) { paramRouting = Strings.splitStringByCommaToSet(routing); } ImmutableOpenMap<String, AliasMetaData> indexToRoutingMap = aliases.get(aliasOrIndex); if (indexToRoutingMap != null && !indexToRoutingMap.isEmpty()) { // It's an alias for (ObjectObjectCursor<String, AliasMetaData> indexRouting : indexToRoutingMap) { if (!indexRouting.value.searchRoutingValues().isEmpty()) { // Routing alias Set<String> r = new HashSet<String>(indexRouting.value.searchRoutingValues()); if (paramRouting != null) { r.retainAll(paramRouting); } if (!r.isEmpty()) { if (routings == null) { routings = newHashMap(); } routings.put(indexRouting.key, r); } } else { // Non-routing alias if (paramRouting != null) { Set<String> r = new HashSet<String>(paramRouting); if (routings == null) { routings = newHashMap(); } routings.put(indexRouting.key, r); } } } } else { // It's an index if (paramRouting != null) { routings = ImmutableMap.of(aliasOrIndex, paramRouting); } } return routings; } /** * Sets the same routing for all indices */ private Map<String, Set<String>> resolveSearchRoutingAllIndices(String routing) { if (routing != null) { Set<String> r = Strings.splitStringByCommaToSet(routing); Map<String, Set<String>> routings = newHashMap(); String[] concreteIndices = concreteAllIndices(); for (String index : concreteIndices) { routings.put(index, r); } return routings; } return null; } /** * Translates the provided indices (possibly aliased) into actual indices. 
*/ public String[] concreteIndices(String[] indices) throws IndexMissingException { return concreteIndices(indices, IndicesOptions.fromOptions(false, true, true, true)); } /** * Translates the provided indices (possibly aliased) into actual indices. */ public String[] concreteIndicesIgnoreMissing(String[] indices) { return concreteIndices(indices, IndicesOptions.fromOptions(true, true, true, false)); } /** * Translates the provided indices (possibly aliased) into actual indices. */ public String[] concreteIndices(String[] aliasesOrIndices, IndicesOptions indicesOptions) throws IndexMissingException { if (isAllIndices(aliasesOrIndices)) { String[] concreteIndices; if (indicesOptions.expandWildcardsOpen() && indicesOptions.expandWildcardsClosed()) { concreteIndices = concreteAllIndices(); } else if (indicesOptions.expandWildcardsOpen()) { concreteIndices = concreteAllOpenIndices(); } else if (indicesOptions.expandWildcardsClosed()) { concreteIndices = concreteAllClosedIndices(); } else { assert false : "Shouldn't end up here"; concreteIndices = Strings.EMPTY_ARRAY; } if (!indicesOptions.allowNoIndices() && concreteIndices.length == 0) { throw new IndexMissingException(new Index("_all")); } return concreteIndices; } aliasesOrIndices = convertFromWildcards(aliasesOrIndices, indicesOptions); // optimize for single element index (common case) if (aliasesOrIndices.length == 1) { String aliasOrIndex = aliasesOrIndices[0]; // if a direct index name, just return the array provided if (this.indices.containsKey(aliasOrIndex)) { return aliasesOrIndices; } String[] actualLst = aliasAndIndexToIndexMap.getOrDefault(aliasOrIndex, Strings.EMPTY_ARRAY); if (!indicesOptions.allowNoIndices() && actualLst == null) { throw new IndexMissingException(new Index(aliasOrIndex)); } else { return actualLst; } } // check if its a possible aliased index, if not, just return the // passed array boolean possiblyAliased = false; for (String index : aliasesOrIndices) { if (!this.indices.containsKey(index)) { possiblyAliased = true; break; } } if (!possiblyAliased) { return aliasesOrIndices; } Set<String> actualIndices = new HashSet<String>(); for (String index : aliasesOrIndices) { String[] actualLst = aliasAndIndexToIndexMap.get(index); if (actualLst == null) { if (!indicesOptions.ignoreUnavailable()) { throw new IndexMissingException(new Index(index)); } } else { for (String x : actualLst) { actualIndices.add(x); } } } if (!indicesOptions.allowNoIndices() && actualIndices.isEmpty()) { throw new IndexMissingException(new Index(Arrays.toString(aliasesOrIndices))); } return actualIndices.toArray(new String[actualIndices.size()]); } public String concreteIndex(String index) throws IndexMissingException, ElasticsearchIllegalArgumentException { // a quick check, if this is an actual index, if so, return it if (indices.containsKey(index)) { return index; } // not an actual index, fetch from an alias String[] lst = aliasAndIndexToIndexMap.get(index); if (lst == null) { throw new IndexMissingException(new Index(index)); } if (lst.length > 1) { throw new ElasticsearchIllegalArgumentException("Alias [" + index + "] has more than one indices associated with it [" + Arrays.toString(lst) + "], can't execute a single index op"); } return lst[0]; } /** * Converts a list of indices or aliases wildcards, and special +/- signs, into their respective full matches. It * won't convert only to indices, but also to aliases. For example, alias_* will expand to alias_1 and alias_2, not * to the respective indices those aliases point to. 
*/ public String[] convertFromWildcards(String[] aliasesOrIndices, IndicesOptions indicesOptions) { if (aliasesOrIndices == null) { return null; } Set<String> result = null; for (int i = 0; i < aliasesOrIndices.length; i++) { String aliasOrIndex = aliasesOrIndices[i]; if (aliasAndIndexToIndexMap.containsKey(aliasOrIndex)) { if (result != null) { result.add(aliasOrIndex); } continue; } boolean add = true; if (aliasOrIndex.charAt(0) == '+') { // if its the first, add empty result set if (i == 0) { result = new HashSet<String>(); } add = true; aliasOrIndex = aliasOrIndex.substring(1); } else if (aliasOrIndex.charAt(0) == '-') { // if its the first, fill it with all the indices... if (i == 0) { String[] concreteIndices; if (indicesOptions.expandWildcardsOpen() && indicesOptions.expandWildcardsClosed()) { concreteIndices = concreteAllIndices(); } else if (indicesOptions.expandWildcardsOpen()) { concreteIndices = concreteAllOpenIndices(); } else if (indicesOptions.expandWildcardsClosed()) { concreteIndices = concreteAllClosedIndices(); } else { assert false : "Shouldn't end up here"; concreteIndices = Strings.EMPTY_ARRAY; } result = new HashSet<String>(Arrays.asList(concreteIndices)); } add = false; aliasOrIndex = aliasOrIndex.substring(1); } if (!Regex.isSimpleMatchPattern(aliasOrIndex)) { if (!indicesOptions.ignoreUnavailable() && !aliasAndIndexToIndexMap.containsKey(aliasOrIndex)) { throw new IndexMissingException(new Index(aliasOrIndex)); } if (result != null) { if (add) { result.add(aliasOrIndex); } else { result.remove(aliasOrIndex); } } continue; } if (result == null) { // add all the previous ones... result = new HashSet<String>(); result.addAll(Arrays.asList(aliasesOrIndices).subList(0, i)); } String[] indices; if (indicesOptions.expandWildcardsOpen() && indicesOptions.expandWildcardsClosed()) { indices = concreteAllIndices(); } else if (indicesOptions.expandWildcardsOpen()) { indices = concreteAllOpenIndices(); } else if (indicesOptions.expandWildcardsClosed()) { indices = concreteAllClosedIndices(); } else { assert false : "Shouldn't end up here"; indices = Strings.EMPTY_ARRAY; } boolean found = false; // iterating over all concrete indices and see if there is a wildcard match for (String index : indices) { if (Regex.simpleMatch(aliasOrIndex, index)) { found = true; if (add) { result.add(index); } else { result.remove(index); } } } // iterating over all aliases and see if there is a wildcard match for (ObjectCursor<String> cursor : aliases.keys()) { String alias = cursor.value; if (Regex.simpleMatch(aliasOrIndex, alias)) { found = true; if (add) { result.add(alias); } else { result.remove(alias); } } } if (!found && !indicesOptions.allowNoIndices()) { throw new IndexMissingException(new Index(aliasOrIndex)); } } if (result == null) { return aliasesOrIndices; } if (result.isEmpty() && !indicesOptions.allowNoIndices()) { throw new IndexMissingException(new Index(Arrays.toString(aliasesOrIndices))); } return result.toArray(new String[result.size()]); } public boolean hasIndex(String index) { return indices.containsKey(index); } public boolean hasConcreteIndex(String index) { return aliasAndIndexToIndexMap.containsKey(index); } public IndexMetaData index(String index) { return indices.get(index); } public ImmutableOpenMap<String, IndexMetaData> indices() { return this.indices; } public ImmutableOpenMap<String, IndexMetaData> getIndices() { return indices(); } public ImmutableOpenMap<String, IndexTemplateMetaData> templates() { return this.templates; } public ImmutableOpenMap<String, 
IndexTemplateMetaData> getTemplates() { return this.templates; } public ImmutableOpenMap<String, Custom> customs() { return this.customs; } public ImmutableOpenMap<String, Custom> getCustoms() { return this.customs; } public <T extends Custom> T custom(String type) { return (T) customs.get(type); } public int totalNumberOfShards() { return this.totalNumberOfShards; } public int getTotalNumberOfShards() { return totalNumberOfShards(); } public int numberOfShards() { return this.numberOfShards; } public int getNumberOfShards() { return numberOfShards(); } /** * Iterates through the list of indices and selects the effective list of filtering aliases for the * given index. * <p/> * <p>Only aliases with filters are returned. If the indices list contains a non-filtering reference to * the index itself - null is returned. Returns <tt>null</tt> if no filtering is required.</p> */ public String[] filteringAliases(String index, String... indicesOrAliases) { // expand the aliases wildcard indicesOrAliases = convertFromWildcards(indicesOrAliases, IndicesOptions.lenient()); if (isAllIndices(indicesOrAliases)) { return null; } // optimize for the most common single index/alias scenario if (indicesOrAliases.length == 1) { String alias = indicesOrAliases[0]; IndexMetaData indexMetaData = this.indices.get(index); if (indexMetaData == null) { // Shouldn't happen throw new IndexMissingException(new Index(index)); } AliasMetaData aliasMetaData = indexMetaData.aliases().get(alias); boolean filteringRequired = aliasMetaData != null && aliasMetaData.filteringRequired(); if (!filteringRequired) { return null; } return new String[]{alias}; } List<String> filteringAliases = null; for (String alias : indicesOrAliases) { if (alias.equals(index)) { return null; } IndexMetaData indexMetaData = this.indices.get(index); if (indexMetaData == null) { // Shouldn't happen throw new IndexMissingException(new Index(index)); } AliasMetaData aliasMetaData = indexMetaData.aliases().get(alias); // Check that this is an alias for the current index // Otherwise - skip it if (aliasMetaData != null) { boolean filteringRequired = aliasMetaData.filteringRequired(); if (filteringRequired) { // If filtering required - add it to the list of filters if (filteringAliases == null) { filteringAliases = newArrayList(); } filteringAliases.add(alias); } else { // If not, we have a non filtering alias for this index - no filtering needed return null; } } } if (filteringAliases == null) { return null; } return filteringAliases.toArray(new String[filteringAliases.size()]); } /** * Identifies whether the array containing index names given as argument refers to all indices * The empty or null array identifies all indices * * @param aliasesOrIndices the array containing index names * @return true if the provided array maps to all indices, false otherwise */ public boolean isAllIndices(String[] aliasesOrIndices) { return aliasesOrIndices == null || aliasesOrIndices.length == 0 || isExplicitAllPattern(aliasesOrIndices); } /** * Identifies whether the array containing type names given as argument refers to all types * The empty or null array identifies all types * * @param types the array containing index names * @return true if the provided array maps to all indices, false otherwise */ public boolean isAllTypes(String[] types) { return types == null || types.length == 0 || isExplicitAllPattern(types); } /** * Identifies whether the array containing index names given as argument explicitly refers to all indices * The empty or null array doesn't explicitly 
map to all indices * * @param aliasesOrIndices the array containing index names * @return true if the provided array explicitly maps to all indices, false otherwise */ public boolean isExplicitAllPattern(String[] aliasesOrIndices) { return aliasesOrIndices != null && aliasesOrIndices.length == 1 && "_all".equals(aliasesOrIndices[0]); } /** * Identifies whether the first argument (an array containing index names) is a pattern that matches all indices * * @param indicesOrAliases the array containing index names * @param concreteIndices array containing the concrete indices that the first argument refers to * @return true if the first argument is a pattern that maps to all available indices, false otherwise */ public boolean isPatternMatchingAllIndices(String[] indicesOrAliases, String[] concreteIndices) { // if we end up matching on all indices, check, if its a wildcard parameter, or a "-something" structure if (concreteIndices.length == concreteAllIndices().length && indicesOrAliases.length > 0) { //we might have something like /-test1,+test1 that would identify all indices //or something like /-test1 with test1 index missing and IndicesOptions.lenient() if (indicesOrAliases[0].charAt(0) == '-') { return true; } //otherwise we check if there's any simple regex for (String indexOrAlias : indicesOrAliases) { if (Regex.isSimpleMatchPattern(indexOrAlias)) { return true; } } } return false; } /** * @param concreteIndex The concrete index to check if routing is required * @param type The type to check if routing is required * @return Whether routing is required according to the mapping for the specified index and type */ public boolean routingRequired(String concreteIndex, String type) { IndexMetaData indexMetaData = indices.get(concreteIndex); if (indexMetaData != null) { MappingMetaData mappingMetaData = indexMetaData.getMappings().get(type); if (mappingMetaData != null) { return mappingMetaData.routing().required(); } } return false; } @Override public UnmodifiableIterator<IndexMetaData> iterator() { return indices.valuesIt(); } public static boolean isGlobalStateEquals(MetaData metaData1, MetaData metaData2) { if (!metaData1.persistentSettings.equals(metaData2.persistentSettings)) { return false; } if (!metaData1.templates.equals(metaData2.templates())) { return false; } // Check if any persistent metadata needs to be saved int customCount1 = 0; for (ObjectObjectCursor<String, Custom> cursor : metaData1.customs) { if (customFactories.get(cursor.key).isPersistent()) { if (!cursor.equals(metaData2.custom(cursor.key))) return false; customCount1++; } } int customCount2 = 0; for (ObjectObjectCursor<String, Custom> cursor : metaData2.customs) { if (customFactories.get(cursor.key).isPersistent()) { customCount2++; } } if (customCount1 != customCount2) return false; return true; } public static Builder builder() { return new Builder(); } public static Builder builder(MetaData metaData) { return new Builder(metaData); } public static class Builder { private String uuid; private long version; private Settings transientSettings = ImmutableSettings.Builder.EMPTY_SETTINGS; private Settings persistentSettings = ImmutableSettings.Builder.EMPTY_SETTINGS; private final ImmutableOpenMap.Builder<String, IndexMetaData> indices; private final ImmutableOpenMap.Builder<String, IndexTemplateMetaData> templates; private final ImmutableOpenMap.Builder<String, Custom> customs; public Builder() { uuid = "_na_"; indices = ImmutableOpenMap.builder(); templates = ImmutableOpenMap.builder(); customs = 
ImmutableOpenMap.builder(); } public Builder(MetaData metaData) { this.uuid = metaData.uuid; this.transientSettings = metaData.transientSettings; this.persistentSettings = metaData.persistentSettings; this.version = metaData.version; this.indices = ImmutableOpenMap.builder(metaData.indices); this.templates = ImmutableOpenMap.builder(metaData.templates); this.customs = ImmutableOpenMap.builder(metaData.customs); } public Builder put(IndexMetaData.Builder indexMetaDataBuilder) { // we know its a new one, increment the version and store indexMetaDataBuilder.version(indexMetaDataBuilder.version() + 1); IndexMetaData indexMetaData = indexMetaDataBuilder.build(); indices.put(indexMetaData.index(), indexMetaData); return this; } public Builder put(IndexMetaData indexMetaData, boolean incrementVersion) { if (indices.get(indexMetaData.index()) == indexMetaData) { return this; } // if we put a new index metadata, increment its version if (incrementVersion) { indexMetaData = IndexMetaData.builder(indexMetaData).version(indexMetaData.version() + 1).build(); } indices.put(indexMetaData.index(), indexMetaData); return this; } public IndexMetaData get(String index) { return indices.get(index); } public Builder remove(String index) { indices.remove(index); return this; } public Builder removeAllIndices() { indices.clear(); return this; } public Builder put(IndexTemplateMetaData.Builder template) { return put(template.build()); } public Builder put(IndexTemplateMetaData template) { templates.put(template.name(), template); return this; } public Builder removeTemplate(String templateName) { templates.remove(templateName); return this; } public Custom getCustom(String type) { return customs.get(type); } public Builder putCustom(String type, Custom custom) { customs.put(type, custom); return this; } public Builder removeCustom(String type) { customs.remove(type); return this; } public Builder updateSettings(Settings settings, String... indices) { if (indices == null || indices.length == 0) { indices = this.indices.keys().toArray(String.class); } for (String index : indices) { IndexMetaData indexMetaData = this.indices.get(index); if (indexMetaData == null) { throw new IndexMissingException(new Index(index)); } put(IndexMetaData.builder(indexMetaData) .settings(settingsBuilder().put(indexMetaData.settings()).put(settings))); } return this; } public Builder updateNumberOfReplicas(int numberOfReplicas, String... 
indices) { if (indices == null || indices.length == 0) { indices = this.indices.keys().toArray(String.class); } for (String index : indices) { IndexMetaData indexMetaData = this.indices.get(index); if (indexMetaData == null) { throw new IndexMissingException(new Index(index)); } put(IndexMetaData.builder(indexMetaData).numberOfReplicas(numberOfReplicas)); } return this; } public Settings transientSettings() { return this.transientSettings; } public Builder transientSettings(Settings settings) { this.transientSettings = settings; return this; } public Settings persistentSettings() { return this.persistentSettings; } public Builder persistentSettings(Settings settings) { this.persistentSettings = settings; return this; } public Builder version(long version) { this.version = version; return this; } public Builder generateUuidIfNeeded() { if (uuid.equals("_na_")) { uuid = Strings.randomBase64UUID(); } return this; } public MetaData build() { return new MetaData(uuid, version, transientSettings, persistentSettings, indices.build(), templates.build(), customs.build()); } public static String toXContent(MetaData metaData) throws IOException { XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); builder.startObject(); toXContent(metaData, builder, ToXContent.EMPTY_PARAMS); builder.endObject(); return builder.string(); } public static void toXContent(MetaData metaData, XContentBuilder builder, ToXContent.Params params) throws IOException { boolean globalPersistentOnly = params.paramAsBoolean(GLOBAL_PERSISTENT_ONLY_PARAM, false); builder.startObject("meta-data"); builder.field("version", metaData.version()); builder.field("uuid", metaData.uuid); if (!metaData.persistentSettings().getAsMap().isEmpty()) { builder.startObject("settings"); for (Map.Entry<String, String> entry : metaData.persistentSettings().getAsMap().entrySet()) { builder.field(entry.getKey(), entry.getValue()); } builder.endObject(); } if (!globalPersistentOnly && !metaData.transientSettings().getAsMap().isEmpty()) { builder.startObject("transient_settings"); for (Map.Entry<String, String> entry : metaData.transientSettings().getAsMap().entrySet()) { builder.field(entry.getKey(), entry.getValue()); } builder.endObject(); } builder.startObject("templates"); for (ObjectCursor<IndexTemplateMetaData> cursor : metaData.templates().values()) { IndexTemplateMetaData.Builder.toXContent(cursor.value, builder, params); } builder.endObject(); if (!globalPersistentOnly && !metaData.indices().isEmpty()) { builder.startObject("indices"); for (IndexMetaData indexMetaData : metaData) { IndexMetaData.Builder.toXContent(indexMetaData, builder, params); } builder.endObject(); } for (ObjectObjectCursor<String, Custom> cursor : metaData.customs()) { Custom.Factory factory = lookupFactorySafe(cursor.key); if (!globalPersistentOnly || factory.isPersistent()) { builder.startObject(cursor.key); factory.toXContent(cursor.value, builder, params); builder.endObject(); } } builder.endObject(); } public static MetaData fromXContent(XContentParser parser) throws IOException { Builder builder = new Builder(); // we might get here after the meta-data element, or on a fresh parser XContentParser.Token token = parser.currentToken(); String currentFieldName = parser.currentName(); if (!"meta-data".equals(currentFieldName)) { token = parser.nextToken(); if (token == XContentParser.Token.START_OBJECT) { // move to the field name (meta-data) token = parser.nextToken(); // move to the next object token = parser.nextToken(); } currentFieldName = 
parser.currentName(); if (token == null) { // no data... return builder.build(); } } while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { if ("settings".equals(currentFieldName)) { builder.persistentSettings(ImmutableSettings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).build()); } else if ("indices".equals(currentFieldName)) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { builder.put(IndexMetaData.Builder.fromXContent(parser), false); } } else if ("templates".equals(currentFieldName)) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { builder.put(IndexTemplateMetaData.Builder.fromXContent(parser)); } } else { // check if its a custom index metadata Custom.Factory<Custom> factory = lookupFactory(currentFieldName); if (factory == null) { //TODO warn parser.skipChildren(); } else { builder.putCustom(factory.type(), factory.fromXContent(parser)); } } } else if (token.isValue()) { if ("version".equals(currentFieldName)) { builder.version = parser.longValue(); } else if ("uuid".equals(currentFieldName)) { builder.uuid = parser.text(); } } } return builder.build(); } public static MetaData readFrom(StreamInput in) throws IOException { Builder builder = new Builder(); builder.version = in.readLong(); builder.uuid = in.readString(); builder.transientSettings(readSettingsFromStream(in)); builder.persistentSettings(readSettingsFromStream(in)); int size = in.readVInt(); for (int i = 0; i < size; i++) { builder.put(IndexMetaData.Builder.readFrom(in), false); } size = in.readVInt(); for (int i = 0; i < size; i++) { builder.put(IndexTemplateMetaData.Builder.readFrom(in)); } int customSize = in.readVInt(); for (int i = 0; i < customSize; i++) { String type = in.readString(); Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in); builder.putCustom(type, customIndexMetaData); } return builder.build(); } public static void writeTo(MetaData metaData, StreamOutput out) throws IOException { out.writeLong(metaData.version); out.writeString(metaData.uuid); writeSettingsToStream(metaData.transientSettings(), out); writeSettingsToStream(metaData.persistentSettings(), out); out.writeVInt(metaData.indices.size()); for (IndexMetaData indexMetaData : metaData) { IndexMetaData.Builder.writeTo(indexMetaData, out); } out.writeVInt(metaData.templates.size()); for (ObjectCursor<IndexTemplateMetaData> cursor : metaData.templates.values()) { IndexTemplateMetaData.Builder.writeTo(cursor.value, out); } out.writeVInt(metaData.customs().size()); for (ObjectObjectCursor<String, Custom> cursor : metaData.customs()) { out.writeString(cursor.key); lookupFactorySafe(cursor.key).writeTo(cursor.value, out); } } } }
target: 1 (no label)
project: src_main_java_org_elasticsearch_cluster_metadata_MetaData.java
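MetaData above asks custom metadata types to register a Factory from a static block, as RepositoriesMetaData, SnapshotMetaData, and RestoreMetaData do. A skeletal hypothetical custom type is sketched below; the Factory methods copy the Custom.Factory signatures shown in the row, and the import paths are assumed from the 1.x package layout:

import java.io.IOException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

public class NotesMetaData implements MetaData.Custom {
    public static final String TYPE = "notes";
    public static final Factory FACTORY = new Factory();

    static {
        // Registered from a static block, as MetaData.registerFactory's javadoc requires.
        MetaData.registerFactory(TYPE, FACTORY);
    }

    private final String note;

    public NotesMetaData(String note) {
        this.note = note;
    }

    public static class Factory implements MetaData.Custom.Factory<NotesMetaData> {
        @Override
        public String type() {
            return TYPE;
        }

        @Override
        public NotesMetaData readFrom(StreamInput in) throws IOException {
            return new NotesMetaData(in.readString());
        }

        @Override
        public void writeTo(NotesMetaData notes, StreamOutput out) throws IOException {
            out.writeString(notes.note);
        }

        @Override
        public NotesMetaData fromXContent(XContentParser parser) throws IOException {
            String note = null;
            XContentParser.Token token;
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token == XContentParser.Token.FIELD_NAME && "note".equals(parser.currentName())) {
                    parser.nextToken();
                    note = parser.text();
                }
            }
            return new NotesMetaData(note);
        }

        @Override
        public void toXContent(NotesMetaData notes, XContentBuilder builder, ToXContent.Params params) throws IOException {
            // The caller wraps this in an object named after the type, as MetaData.Builder.toXContent shows.
            builder.field("note", notes.note);
        }

        @Override
        public boolean isPersistent() {
            return true; // persisted with global cluster state
        }
    }
}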

Row 549
func:
public abstract class AbstractClientTxnCollectionProxy<E> extends ClientTxnProxy {

    protected AbstractClientTxnCollectionProxy(String name, TransactionContextProxy proxy) {
        super(name, proxy);
    }

    void onDestroy() {
    }

    public String getName() {
        return (String) getId();
    }

    protected void throwExceptionIfNull(Object o) {
        if (o == null) {
            throw new NullPointerException("Object is null");
        }
    }
}
target: 0 (true)
project: hazelcast-client_src_main_java_com_hazelcast_client_txn_proxy_AbstractClientTxnCollectionProxy.java

Row 788
func:
public class MultiPercolateResponse extends ActionResponse implements Iterable<MultiPercolateResponse.Item>, ToXContent { private Item[] items; public MultiPercolateResponse(Item[] items) { this.items = items; } public MultiPercolateResponse() { this.items = new Item[0]; } @Override public Iterator<Item> iterator() { return Iterators.forArray(items); } public Item[] items() { return items; } public Item[] getItems() { return items; } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.startArray(Fields.RESPONSES); for (MultiPercolateResponse.Item item : items) { if (item.isFailure()) { builder.startObject(); builder.field(Fields.ERROR, item.getErrorMessage()); builder.endObject(); } else { item.getResponse().toXContent(builder, params); } } builder.endArray(); builder.endObject(); return builder; } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeVInt(items.length); for (Item item : items) { item.writeTo(out); } } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); int size = in.readVInt(); items = new Item[size]; for (int i = 0; i < items.length; i++) { items[i] = new Item(); items[i].readFrom(in); } } public static class Item implements Streamable { private PercolateResponse response; private String errorMessage; public Item(PercolateResponse response) { this.response = response; } public Item(String errorMessage) { this.errorMessage = errorMessage; } public Item() { } public PercolateResponse response() { return response; } public String errorMessage() { return errorMessage; } public PercolateResponse getResponse() { return response; } public String getErrorMessage() { return errorMessage; } public boolean isFailure() { return errorMessage != null; } @Override public void readFrom(StreamInput in) throws IOException { if (in.readBoolean()) { response = new PercolateResponse(); response.readFrom(in); } else { errorMessage = in.readString(); } } @Override public void writeTo(StreamOutput out) throws IOException { if (response != null) { out.writeBoolean(true); response.writeTo(out); } else { out.writeBoolean(false); out.writeString(errorMessage); } } } static final class Fields { static final XContentBuilderString RESPONSES = new XContentBuilderString("responses"); static final XContentBuilderString ERROR = new XContentBuilderString("error"); } }
target: 0 (true)
project: src_main_java_org_elasticsearch_action_percolate_MultiPercolateResponse.java
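Item.writeTo/readFrom above encode an either/or result behind a boolean presence flag. The same pattern with plain java.io streams, as a sketch rather than the Elasticsearch wire format:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public final class EitherOrWire {
    // Exactly one of payload/error is expected to be non-null, as in Item.
    public static void write(DataOutputStream out, String payload, String error) throws IOException {
        if (payload != null) {
            out.writeBoolean(true);   // flag=true: a successful payload follows
            out.writeUTF(payload);
        } else {
            out.writeBoolean(false);  // flag=false: an error message follows
            out.writeUTF(error);
        }
    }

    public static String read(DataInputStream in) throws IOException {
        boolean success = in.readBoolean();
        String value = in.readUTF();
        return (success ? "ok: " : "error: ") + value;
    }
}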

Row 1,437
func:
public class RepositoriesMetaData implements MetaData.Custom { public static final String TYPE = "repositories"; public static final Factory FACTORY = new Factory(); private final ImmutableList<RepositoryMetaData> repositories; /** * Constructs new repository metadata * * @param repositories list of repositories */ public RepositoriesMetaData(RepositoryMetaData... repositories) { this.repositories = ImmutableList.copyOf(repositories); } /** * Returns list of currently registered repositories * * @return list of repositories */ public ImmutableList<RepositoryMetaData> repositories() { return this.repositories; } /** * Returns a repository with a given name or null if such repository doesn't exist * * @param name name of repository * @return repository metadata */ public RepositoryMetaData repository(String name) { for (RepositoryMetaData repository : repositories) { if (name.equals(repository.name())) { return repository; } } return null; } /** * Repository metadata factory */ public static class Factory implements MetaData.Custom.Factory<RepositoriesMetaData> { /** * {@inheritDoc} */ @Override public String type() { return TYPE; } /** * {@inheritDoc} */ @Override public RepositoriesMetaData readFrom(StreamInput in) throws IOException { RepositoryMetaData[] repository = new RepositoryMetaData[in.readVInt()]; for (int i = 0; i < repository.length; i++) { repository[i] = RepositoryMetaData.readFrom(in); } return new RepositoriesMetaData(repository); } /** * {@inheritDoc} */ @Override public void writeTo(RepositoriesMetaData repositories, StreamOutput out) throws IOException { out.writeVInt(repositories.repositories().size()); for (RepositoryMetaData repository : repositories.repositories()) { repository.writeTo(out); } } /** * {@inheritDoc} */ @Override public RepositoriesMetaData fromXContent(XContentParser parser) throws IOException { XContentParser.Token token; List<RepositoryMetaData> repository = new ArrayList<RepositoryMetaData>(); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { String name = parser.currentName(); if (parser.nextToken() != XContentParser.Token.START_OBJECT) { throw new ElasticsearchParseException("failed to parse repository [" + name + "], expected object"); } String type = null; Settings settings = ImmutableSettings.EMPTY; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { String currentFieldName = parser.currentName(); if ("type".equals(currentFieldName)) { if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown type"); } type = parser.text(); } else if ("settings".equals(currentFieldName)) { if (parser.nextToken() != XContentParser.Token.START_OBJECT) { throw new ElasticsearchParseException("failed to parse repository [" + name + "], incompatible params"); } settings = ImmutableSettings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).build(); } else { throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown field [" + currentFieldName + "]"); } } else { throw new ElasticsearchParseException("failed to parse repository [" + name + "]"); } } if (type == null) { throw new ElasticsearchParseException("failed to parse repository [" + name + "], missing repository type"); } repository.add(new RepositoryMetaData(name, type, settings)); } else { throw new 
ElasticsearchParseException("failed to parse repositories"); } } return new RepositoriesMetaData(repository.toArray(new RepositoryMetaData[repository.size()])); } /** * {@inheritDoc} */ @Override public void toXContent(RepositoriesMetaData customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { for (RepositoryMetaData repository : customIndexMetaData.repositories()) { toXContent(repository, builder, params); } } /** * Serializes information about a single repository * * @param repository repository metadata * @param builder XContent builder * @param params serialization parameters * @throws IOException */ public void toXContent(RepositoryMetaData repository, XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(repository.name(), XContentBuilder.FieldCaseConversion.NONE); builder.field("type", repository.type()); builder.startObject("settings"); for (Map.Entry<String, String> settingEntry : repository.settings().getAsMap().entrySet()) { builder.field(settingEntry.getKey(), settingEntry.getValue()); } builder.endObject(); builder.endObject(); } /** * {@inheritDoc} */ @Override public boolean isPersistent() { return true; } } }
target: 0 (true)
project: src_main_java_org_elasticsearch_cluster_metadata_RepositoriesMetaData.java

Row 511
func:
@RunWith(HazelcastSerialClassRunner.class) @Category(NightlyTest.class) public class MapUpdateStressTest extends StressTestSupport { public static final int CLIENT_THREAD_COUNT = 5; public static final int MAP_SIZE = 100 * 1000; private HazelcastInstance client; private IMap<Integer, Integer> map; private StressThread[] stressThreads; @Before public void setUp() { super.setUp(); ClientConfig clientConfig = new ClientConfig(); clientConfig.setRedoOperation(true); client = HazelcastClient.newHazelcastClient(clientConfig); map = client.getMap("map"); stressThreads = new StressThread[CLIENT_THREAD_COUNT]; for (int k = 0; k < stressThreads.length; k++) { stressThreads[k] = new StressThread(); stressThreads[k].start(); } } @After public void tearDown() { super.tearDown(); if (client != null) { client.shutdown(); } } //@Test public void testChangingCluster() { test(true); } @Test public void testFixedCluster() { test(false); } public void test(boolean clusterChangeEnabled) { setClusterChangeEnabled(clusterChangeEnabled); fillMap(); startAndWaitForTestCompletion(); joinAll(stressThreads); assertNoUpdateFailures(); } private void assertNoUpdateFailures() { int[] increments = new int[MAP_SIZE]; for (StressThread t : stressThreads) { t.addIncrements(increments); } Set<Integer> failedKeys = new HashSet<Integer>(); for (int k = 0; k < MAP_SIZE; k++) { int expectedValue = increments[k]; int foundValue = map.get(k); if (expectedValue != foundValue) { failedKeys.add(k); } } if (failedKeys.isEmpty()) { return; } int index = 1; for (Integer key : failedKeys) { System.err.println("Failed write: " + index + " found:" + map.get(key) + " expected:" + increments[key]); index++; } fail("There are failed writes, number of failures:" + failedKeys.size()); } private void fillMap() { System.out.println("=================================================================="); System.out.println("Inserting data in map"); System.out.println("=================================================================="); for (int k = 0; k < MAP_SIZE; k++) { map.put(k, 0); if (k % 10000 == 0) { System.out.println("Inserted data: " + k); } } System.out.println("=================================================================="); System.out.println("Completed with inserting data in map"); System.out.println("=================================================================="); } public class StressThread extends TestThread { private final int[] increments = new int[MAP_SIZE]; @Override public void doRun() throws Exception { while (!isStopped()) { int key = random.nextInt(MAP_SIZE); int increment = random.nextInt(10); increments[key] += increment; for (; ; ) { int oldValue = map.get(key); if (map.replace(key, oldValue, oldValue + increment)) { break; } } } } public void addIncrements(int[] increments) { for (int k = 0; k < increments.length; k++) { increments[k] += this.increments[k]; } } } }
0true
hazelcast-client_src_test_java_com_hazelcast_client_stress_MapUpdateStressTest.java
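The stress thread above relies on IMap.replace(key, oldValue, newValue) as an optimistic compare-and-swap, retrying until no concurrent writer interferes. The idiom in isolation (map, key, and increment are the names from the test):

// Optimistic increment: retry replace() until the CAS succeeds.
for (;;) {
    int oldValue = map.get(key);
    if (map.replace(key, oldValue, oldValue + increment)) {
        break; // no concurrent writer changed the value in between
    }
}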
4,295
public class IndicesModule extends AbstractModule implements SpawnModules { private final Settings settings; public IndicesModule(Settings settings) { this.settings = settings; } @Override public Iterable<? extends Module> spawnModules() { return ImmutableList.of(new IndicesQueriesModule(), new IndicesAnalysisModule()); } @Override protected void configure() { bind(IndicesLifecycle.class).to(InternalIndicesLifecycle.class).asEagerSingleton(); bind(IndicesService.class).to(InternalIndicesService.class).asEagerSingleton(); bind(RecoverySettings.class).asEagerSingleton(); bind(RecoveryTarget.class).asEagerSingleton(); bind(RecoverySource.class).asEagerSingleton(); bind(IndicesStore.class).asEagerSingleton(); bind(IndicesClusterStateService.class).asEagerSingleton(); bind(IndexingMemoryController.class).asEagerSingleton(); bind(IndicesFilterCache.class).asEagerSingleton(); bind(IndicesFieldDataCache.class).asEagerSingleton(); bind(IndicesTermsFilterCache.class).asEagerSingleton(); bind(TransportNodesListShardStoreMetaData.class).asEagerSingleton(); bind(IndicesTTLService.class).asEagerSingleton(); bind(IndicesWarmer.class).to(InternalIndicesWarmer.class).asEagerSingleton(); bind(UpdateHelper.class).asEagerSingleton(); bind(CircuitBreakerService.class).to(InternalCircuitBreakerService.class).asEagerSingleton(); } }
1no label
src_main_java_org_elasticsearch_indices_IndicesModule.java
1,154
class Searcher extends Thread { final int id; long counter = 0; long max = searcherIterations; Searcher(int id) { super("Searcher" + id); this.id = id; } @Override public void run() { try { barrier1.await(); barrier2.await(); for (; counter < max; counter++) { Client client = client(counter); QueryBuilder query = termQuery("num", counter % fieldNumLimit); query = constantScoreQuery(queryFilter(query)); SearchResponse search = client.search(searchRequest() .source(searchSource().query(query))) .actionGet(); // System.out.println("Got search response, hits [" + search.hits().totalHits() + "]"); } } catch (Exception e) { System.err.println("Failed to search:"); e.printStackTrace(); } finally { latch.countDown(); } } }
0true
src_test_java_org_elasticsearch_benchmark_stress_NodesStressTest.java
179
public class OByteBufferUtils { public static final int SIZE_OF_SHORT = 2; public static final int SIZE_OF_INT = 4; public static final int SIZE_OF_LONG = 8; private static final int SIZE_OF_BYTE_IN_BITS = 8; private static final int MASK = 0x000000FF; /** * Merges a short value from two byte buffers. The first byte of the short is read from the first buffer and the second byte from the * second one. * * @param buffer * to read first part of value * @param buffer1 * to read second part of value * @return merged value */ public static short mergeShortFromBuffers(final ByteBuffer buffer, final ByteBuffer buffer1) { short result = 0; result = (short) (result | (buffer.get() & MASK)); result = (short) (result << SIZE_OF_BYTE_IN_BITS); result = (short) (result | (buffer1.get() & MASK)); return result; } /** * Merges an int value from two byte buffers. The first bytes of the int are read from the first buffer and the rest from the second one. * How many bytes are read from the first buffer is determined by <code>buffer.remaining()</code>. * * @param buffer * to read first part of value * @param buffer1 * to read second part of value * @return merged value */ public static int mergeIntFromBuffers(final ByteBuffer buffer, final ByteBuffer buffer1) { int result = 0; final int remaining = buffer.remaining(); for (int i = 0; i < remaining; ++i) { result = result | (buffer.get() & MASK); result = result << SIZE_OF_BYTE_IN_BITS; } for (int i = 0; i < SIZE_OF_INT - remaining - 1; ++i) { result = result | (buffer1.get() & MASK); result = result << SIZE_OF_BYTE_IN_BITS; } result = result | (buffer1.get() & MASK); return result; } /** * Merges a long value from two byte buffers. The first bytes of the long are read from the first buffer and the rest from the second one. * How many bytes are read from the first buffer is determined by <code>buffer.remaining()</code>. * * @param buffer * to read first part of value * @param buffer1 * to read second part of value * @return merged value */ public static long mergeLongFromBuffers(final ByteBuffer buffer, final ByteBuffer buffer1) { long result = 0; final int remaining = buffer.remaining(); for (int i = 0; i < remaining; ++i) { result = result | (MASK & buffer.get()); result = result << SIZE_OF_BYTE_IN_BITS; } for (int i = 0; i < SIZE_OF_LONG - remaining - 1; ++i) { result = result | (MASK & buffer1.get()); result = result << SIZE_OF_BYTE_IN_BITS; } result = result | (MASK & buffer1.get()); return result; } /** * Splits a short value into two byte buffers. The first byte of the short is written to the first buffer and the second byte to the second one. * * @param buffer * to write first part of value * @param buffer1 * to write second part of value */ public static void splitShortToBuffers(final ByteBuffer buffer, final ByteBuffer buffer1, final short iValue) { buffer.put((byte) (MASK & (iValue >>> SIZE_OF_BYTE_IN_BITS))); buffer1.put((byte) (MASK & iValue)); } /** * Splits an int value into two byte buffers. The first bytes of the int are written to the first buffer and the rest to the second one.
How many * bytes are written to the first buffer is determined by <code>buffer.remaining()</code>. * * @param buffer * to write first part of value * @param buffer1 * to write second part of value */ public static void splitIntToBuffers(final ByteBuffer buffer, final ByteBuffer buffer1, final int iValue) { final int remaining = buffer.remaining(); int i; for (i = 0; i < remaining; ++i) { buffer.put((byte) (MASK & (iValue >>> SIZE_OF_BYTE_IN_BITS * (SIZE_OF_INT - i - 1)))); } for (int j = 0; j < SIZE_OF_INT - remaining; ++j) { buffer1.put((byte) (MASK & (iValue >>> SIZE_OF_BYTE_IN_BITS * (SIZE_OF_INT - i - j - 1)))); } } /** * Splits a long value into two byte buffers. The first bytes of the long are written to the first buffer and the rest to the second one. How * many bytes are written to the first buffer is determined by <code>buffer.remaining()</code>. * * @param buffer * to write first part of value * @param buffer1 * to write second part of value */ public static void splitLongToBuffers(final ByteBuffer buffer, final ByteBuffer buffer1, final long iValue) { final int remaining = buffer.remaining(); int i; for (i = 0; i < remaining; ++i) { buffer.put((byte) (iValue >> SIZE_OF_BYTE_IN_BITS * (SIZE_OF_LONG - i - 1))); } for (int j = 0; j < SIZE_OF_LONG - remaining; ++j) { buffer1.put((byte) (iValue >> SIZE_OF_BYTE_IN_BITS * (SIZE_OF_LONG - i - j - 1))); } } }
0true
commons_src_main_java_com_orientechnologies_common_util_OByteBufferUtils.java
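The merge/split pairs above are symmetric, so a value split across the tail of one buffer and the head of another can be read back with the matching merge call. A small round-trip sketch (buffer sizes chosen for illustration):

// Split a short across two one-byte buffers, then merge it back.
ByteBuffer first = ByteBuffer.allocate(1);
ByteBuffer second = ByteBuffer.allocate(1);
OByteBufferUtils.splitShortToBuffers(first, second, (short) 0x1234);
first.rewind();
second.rewind();
short restored = OByteBufferUtils.mergeShortFromBuffers(first, second); // 0x1234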
519
public abstract class BaseTransactionRequest extends CallableClientRequest { protected String txnId; protected long clientThreadId; public BaseTransactionRequest() { } protected abstract Object innerCall() throws Exception; @Override public final Object call() throws Exception { ThreadUtil.setThreadId(clientThreadId); try { return innerCall(); } finally { ThreadUtil.removeThreadId(); } } public void setTxnId(String txnId) { this.txnId = txnId; } public void setClientThreadId(long clientThreadId) { this.clientThreadId = clientThreadId; } @Override public void write(PortableWriter writer) throws IOException { writer.writeUTF("tId", txnId); writer.writeLong("cti", clientThreadId); } @Override public void read(PortableReader reader) throws IOException { txnId = reader.readUTF("tId"); clientThreadId = reader.readLong("cti"); } }
0true
hazelcast_src_main_java_com_hazelcast_client_txn_BaseTransactionRequest.java
2,528
public class XContentMapValues { /** * Extracts raw values (string, int, and so on) based on the path provided returning all of them * as a single list. */ public static List<Object> extractRawValues(String path, Map<String, Object> map) { List<Object> values = Lists.newArrayList(); String[] pathElements = Strings.splitStringToArray(path, '.'); if (pathElements.length == 0) { return values; } extractRawValues(values, map, pathElements, 0); return values; } @SuppressWarnings({"unchecked"}) private static void extractRawValues(List values, Map<String, Object> part, String[] pathElements, int index) { if (index == pathElements.length) { return; } String key = pathElements[index]; Object currentValue = part.get(key); int nextIndex = index + 1; while (currentValue == null && nextIndex != pathElements.length) { key += "." + pathElements[nextIndex]; currentValue = part.get(key); nextIndex++; } if (currentValue == null) { return; } if (currentValue instanceof Map) { extractRawValues(values, (Map<String, Object>) currentValue, pathElements, nextIndex); } else if (currentValue instanceof List) { extractRawValues(values, (List) currentValue, pathElements, nextIndex); } else { values.add(currentValue); } } @SuppressWarnings({"unchecked"}) private static void extractRawValues(List values, List<Object> part, String[] pathElements, int index) { for (Object value : part) { if (value == null) { continue; } if (value instanceof Map) { extractRawValues(values, (Map<String, Object>) value, pathElements, index); } else if (value instanceof List) { extractRawValues(values, (List) value, pathElements, index); } else { values.add(value); } } } public static Object extractValue(String path, Map<String, Object> map) { String[] pathElements = Strings.splitStringToArray(path, '.'); if (pathElements.length == 0) { return null; } return extractValue(pathElements, 0, map); } @SuppressWarnings({"unchecked"}) private static Object extractValue(String[] pathElements, int index, Object currentValue) { if (index == pathElements.length) { return currentValue; } if (currentValue == null) { return null; } if (currentValue instanceof Map) { Map map = (Map) currentValue; String key = pathElements[index]; Object mapValue = map.get(key); int nextIndex = index + 1; while (mapValue == null && nextIndex != pathElements.length) { key += "." + pathElements[nextIndex]; mapValue = map.get(key); nextIndex++; } return extractValue(pathElements, nextIndex, mapValue); } if (currentValue instanceof List) { List valueList = (List) currentValue; List newList = new ArrayList(valueList.size()); for (Object o : valueList) { Object listValue = extractValue(pathElements, index, o); if (listValue != null) { newList.add(listValue); } } return newList; } return null; } public static Map<String, Object> filter(Map<String, Object> map, String[] includes, String[] excludes) { Map<String, Object> result = Maps.newHashMap(); filter(map, result, includes == null ? Strings.EMPTY_ARRAY : includes, excludes == null ? 
Strings.EMPTY_ARRAY : excludes, new StringBuilder()); return result; } private static void filter(Map<String, Object> map, Map<String, Object> into, String[] includes, String[] excludes, StringBuilder sb) { if (includes.length == 0 && excludes.length == 0) { into.putAll(map); return; } for (Map.Entry<String, Object> entry : map.entrySet()) { String key = entry.getKey(); int mark = sb.length(); if (sb.length() > 0) { sb.append('.'); } sb.append(key); String path = sb.toString(); if (Regex.simpleMatch(excludes, path)) { sb.setLength(mark); continue; } boolean exactIncludeMatch = false; // true if the current position was specifically mentioned boolean pathIsPrefixOfAnInclude = false; // true if potentially a sub scope can be included if (includes.length == 0) { // implied match anything exactIncludeMatch = true; } else { for (String include : includes) { // check for prefix matches as well to see if we need to zero in, something like: obj1.arr1.* or *.field // note, this does not work well with middle matches, like obj1.*.obj3 if (include.charAt(0) == '*') { if (Regex.simpleMatch(include, path)) { exactIncludeMatch = true; break; } pathIsPrefixOfAnInclude = true; break; } if (include.startsWith(path)) { if (include.length() == path.length()) { exactIncludeMatch = true; break; } else if (include.length() > path.length() && include.charAt(path.length()) == '.') { // include might match deeper paths. Dive deeper. pathIsPrefixOfAnInclude = true; break; } } if (Regex.simpleMatch(include, path)) { exactIncludeMatch = true; break; } } } if (!(pathIsPrefixOfAnInclude || exactIncludeMatch)) { // skip subkeys, not interesting. sb.setLength(mark); continue; } if (entry.getValue() instanceof Map) { Map<String, Object> innerInto = Maps.newHashMap(); // if we had an exact match, we want to give deeper excludes their chance filter((Map<String, Object>) entry.getValue(), innerInto, exactIncludeMatch ? Strings.EMPTY_ARRAY : includes, excludes, sb); if (exactIncludeMatch || !innerInto.isEmpty()) { into.put(entry.getKey(), innerInto); } } else if (entry.getValue() instanceof List) { List<Object> list = (List<Object>) entry.getValue(); List<Object> innerInto = new ArrayList<Object>(list.size()); // if we had an exact match, we want to give deeper excludes their chance filter(list, innerInto, exactIncludeMatch ?
Strings.EMPTY_ARRAY : includes, excludes, sb); into.put(entry.getKey(), innerInto); } else if (exactIncludeMatch) { into.put(entry.getKey(), entry.getValue()); } sb.setLength(mark); } } private static void filter(List<Object> from, List<Object> to, String[] includes, String[] excludes, StringBuilder sb) { if (includes.length == 0 && excludes.length == 0) { to.addAll(from); return; } for (Object o : from) { if (o instanceof Map) { Map<String, Object> innerInto = Maps.newHashMap(); filter((Map<String, Object>) o, innerInto, includes, excludes, sb); if (!innerInto.isEmpty()) { to.add(innerInto); } } else if (o instanceof List) { List<Object> innerInto = new ArrayList<Object>(); filter((List<Object>) o, innerInto, includes, excludes, sb); if (!innerInto.isEmpty()) { to.add(innerInto); } } else { to.add(o); } } } public static boolean isObject(Object node) { return node instanceof Map; } public static boolean isArray(Object node) { return node instanceof List; } public static String nodeStringValue(Object node, String defaultValue) { if (node == null) { return defaultValue; } return node.toString(); } public static float nodeFloatValue(Object node, float defaultValue) { if (node == null) { return defaultValue; } return nodeFloatValue(node); } public static float nodeFloatValue(Object node) { if (node instanceof Number) { return ((Number) node).floatValue(); } return Float.parseFloat(node.toString()); } public static double nodeDoubleValue(Object node, double defaultValue) { if (node == null) { return defaultValue; } return nodeDoubleValue(node); } public static double nodeDoubleValue(Object node) { if (node instanceof Number) { return ((Number) node).doubleValue(); } return Double.parseDouble(node.toString()); } public static int nodeIntegerValue(Object node) { if (node instanceof Number) { return ((Number) node).intValue(); } return Integer.parseInt(node.toString()); } public static int nodeIntegerValue(Object node, int defaultValue) { if (node == null) { return defaultValue; } if (node instanceof Number) { return ((Number) node).intValue(); } return Integer.parseInt(node.toString()); } public static short nodeShortValue(Object node, short defaultValue) { if (node == null) { return defaultValue; } return nodeShortValue(node); } public static short nodeShortValue(Object node) { if (node instanceof Number) { return ((Number) node).shortValue(); } return Short.parseShort(node.toString()); } public static byte nodeByteValue(Object node, byte defaultValue) { if (node == null) { return defaultValue; } return nodeByteValue(node); } public static byte nodeByteValue(Object node) { if (node instanceof Number) { return ((Number) node).byteValue(); } return Byte.parseByte(node.toString()); } public static long nodeLongValue(Object node, long defaultValue) { if (node == null) { return defaultValue; } return nodeLongValue(node); } public static long nodeLongValue(Object node) { if (node instanceof Number) { return ((Number) node).longValue(); } return Long.parseLong(node.toString()); } public static boolean nodeBooleanValue(Object node, boolean defaultValue) { if (node == null) { return defaultValue; } return nodeBooleanValue(node); } public static boolean nodeBooleanValue(Object node) { if (node instanceof Boolean) { return (Boolean) node; } if (node instanceof Number) { return ((Number) node).intValue() != 0; } String value = node.toString(); return !(value.equals("false") || value.equals("0") || value.equals("off")); } public static TimeValue nodeTimeValue(Object node, TimeValue defaultValue) { if (node 
== null) { return defaultValue; } return nodeTimeValue(node); } public static TimeValue nodeTimeValue(Object node) { if (node instanceof Number) { return TimeValue.timeValueMillis(((Number) node).longValue()); } return TimeValue.parseTimeValue(node.toString(), null); } public static Map<String, Object> nodeMapValue(Object node, String desc) { if (node instanceof Map) { return (Map<String, Object>) node; } else { throw new ElasticsearchParseException(desc + " should be a hash but was of type: " + node.getClass()); } } }
1no label
src_main_java_org_elasticsearch_common_xcontent_support_XContentMapValues.java
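extractValue walks dotted paths through nested maps and lists, greedily consuming extra path elements when a literal dotted key exists at the current level. A small sketch of the behavior (the map contents are illustrative):

// Resolve "name.last" through a nested source map.
Map<String, Object> inner = new HashMap<String, Object>();
inner.put("last", "smith");
Map<String, Object> source = new HashMap<String, Object>();
source.put("name", inner);
Object value = XContentMapValues.extractValue("name.last", source);        // "smith"
List<Object> raw = XContentMapValues.extractRawValues("name.last", source); // ["smith"]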
1,252
public class NoNodeAvailableException extends ElasticsearchException { public NoNodeAvailableException() { super("No node available"); } @Override public RestStatus status() { return RestStatus.SERVICE_UNAVAILABLE; } }
0true
src_main_java_org_elasticsearch_client_transport_NoNodeAvailableException.java
146
public interface OBinarySerializer<T> { /** * Obtains the size of the serialized object. The size is the number of bytes required to store the object (for example, storing an * integer requires 4 bytes). * * @param object is the object to measure its size * @param hints List of parameters which may be used to choose appropriate serialization approach. * @return size of the serialized object */ int getObjectSize(T object, Object... hints); /** * Returns the size of the serialized presentation of the given object. * * @param stream Serialized content. * @param startPosition Position from which the serialized presentation of the given object is stored. * @return Size of the serialized presentation of the given object in bytes. */ int getObjectSize(byte[] stream, int startPosition); /** * Writes the object to the stream starting from the startPosition * * @param object is the object to serialize * @param stream is the stream where the object will be written * @param startPosition * @param hints List of parameters which may be used to choose appropriate serialization approach. */ void serialize(T object, byte[] stream, int startPosition, Object... hints); /** * Reads the object from the stream starting from the startPosition * * @param stream is the stream from which the object will be read * @param startPosition is the position to start reading from * @return instance of the deserialized object */ T deserialize(byte[] stream, int startPosition); /** * @return Identifier of given serializer. */ byte getId(); /** * @return <code>true</code> if the binary presentation of the object always has the same length. */ boolean isFixedLength(); /** * @return Length of serialized data if {@link #isFixedLength()} method returns <code>true</code>. If {@link #isFixedLength()} * method returns <code>false</code> the returned value is undefined. */ int getFixedLength(); /** * Writes the object to the stream starting from the startPosition using native acceleration. The serialized object presentation is * platform dependent. * * @param object is the object to serialize * @param stream is the stream where the object will be written * @param startPosition * @param hints List of parameters which may be used to choose appropriate serialization approach. */ void serializeNative(T object, byte[] stream, int startPosition, Object... hints); /** * Reads the object from the stream starting from the startPosition, in case it was serialized using * {@link #serializeNative(T, byte[], int, Object...)} method. * * @param stream is the stream from which the object will be read * @param startPosition is the position to start reading from * @return instance of the deserialized object */ T deserializeNative(byte[] stream, int startPosition); /** * Returns the size of the serialized presentation of the given object, if it was serialized using * {@link #serializeNative(T, byte[], int, Object...)} method. * * @param stream Serialized content. * @param startPosition Position from which the serialized presentation of the given object is stored. * @return Size of the serialized presentation of the given object in bytes. */ int getObjectSizeNative(byte[] stream, int startPosition); void serializeInDirectMemory(T object, ODirectMemoryPointer pointer, long offset, Object... hints); T deserializeFromDirectMemory(ODirectMemoryPointer pointer, long offset); int getObjectSizeInDirectMemory(ODirectMemoryPointer pointer, long offset); T preprocess(T value, Object... hints); }
0true
commons_src_main_java_com_orientechnologies_common_serialization_types_OBinarySerializer.java
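The contract is size-first: callers ask getObjectSize, allocate a stream, then serialize and deserialize at explicit offsets. A usage sketch against an arbitrary OBinarySerializer<Long> instance (the serializer variable is assumed to exist):

// Size-first round trip at an explicit offset.
long value = 42L;
int size = serializer.getObjectSize(value);
byte[] stream = new byte[size + 10];      // offsets are explicit, so extra room is fine
serializer.serialize(value, stream, 10);  // write starting at offset 10
long restored = serializer.deserialize(stream, 10);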
73
public abstract class TransactionManagerProvider extends Service { public TransactionManagerProvider( String name ) { super( name ); } public abstract AbstractTransactionManager loadTransactionManager( String txLogDir, XaDataSourceManager xaDataSourceManager, KernelPanicEventGenerator kpe, RemoteTxHook rollbackHook, StringLogger msgLog, FileSystemAbstraction fileSystem, TransactionStateFactory stateFactory ); }
0true
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_TransactionManagerProvider.java
659
public interface ProductDao { /** * Retrieve a {@code Product} instance by its primary key * * @param productId the primary key of the product * @return the product instance at the specified primary key */ @Nonnull public Product readProductById(@Nonnull Long productId); /** * Retrieves a list of Product instances by their primary keys * * @param productIds the list of primary keys for products * @return the list of products specified by the primary keys */ public List<Product> readProductsByIds(@Nonnull List<Long> productIds); /** * Persist a {@code Product} instance to the datastore * * @param product the product instance * @return the updated state of the product instance after being persisted */ @Nonnull public Product save(@Nonnull Product product); /** * Find all {@code Product} instances whose name starts with * or is equal to the passed in search parameter * * @param searchName the partial or whole name to match * @return the list of product instances that were search hits */ @Nonnull public List<Product> readProductsByName(@Nonnull String searchName); /** * Find a subset of {@code Product} instances whose name starts with * or is equal to the passed in search parameter. * @param searchName * @param limit the maximum number of results * @param offset the starting point in the record set * @return the list of product instances that fit the search criteria */ @Nonnull public List<Product> readProductsByName(@Nonnull String searchName, @Nonnull int limit, @Nonnull int offset); /** * Find all products in the passed in category. * * @param categoryId the primary key of the category to whom the resulting product list should be related * @return the list of products qualified for the category */ public List<Product> readActiveProductsByCategory(@Nonnull Long categoryId); /** * Read a page of products for a category. * * @param categoryId * @param limit * @param offset * @return */ public List<Product> readActiveProductsByCategory(@Nonnull Long categoryId, @Nonnull int limit, @Nonnull int offset); /** * Find all active products that are related to the given category, match the given search criteria, and * are not marked as archived. * * @param categoryId * @param searchCriteria * @return the matching products */ @Nonnull public List<Product> readFilteredActiveProductsByCategory(Long categoryId, ProductSearchCriteria searchCriteria); /** * Find all products whose start and end dates are before and after the passed in * date, who match the search string, match the given search criteria, and are not * marked as archived. * * @param query * @param searchCriteria * @return the matching products */ @Nonnull public List<Product> readFilteredActiveProductsByQuery(String query, ProductSearchCriteria searchCriteria); /** * @deprecated Use {@link #readFilteredActiveProductsByCategory(Long, ProductSearchCriteria)} * * Find all products whose start and end dates are before and after the passed in * date, who are related to the given category, match the given search criteria, and * are not marked as archived.
* * @param categoryId * @param currentDate * @param searchCriteria * @return the matching products */ @Nonnull public List<Product> readFilteredActiveProductsByCategory(Long categoryId, Date currentDate, ProductSearchCriteria searchCriteria); /** * @deprecated Use {@link #readActiveProductsByCategory(Long)} * * Find all products whose start and end dates are before and after the passed in * date and who are related to the given category * * @param categoryId the primary key of the category to whom the resulting product list should be related * @param currentDate the date for which the products should be checked against to determine their active state * @return the list of products qualified for the category and date */ @Nonnull public List<Product> readActiveProductsByCategory(@Nonnull Long categoryId, @Nonnull Date currentDate); /** * @deprecated Use {@link #readFilteredActiveProductsByQuery(String, ProductSearchCriteria)} * Find all products whose start and end dates are before and after the passed in * date, who match the search string, match the given search criteria, and are not * marked as archived. * * @param query * @param currentDate * @param searchCriteria * @return the matching products */ @Nonnull public List<Product> readFilteredActiveProductsByQuery(String query, Date currentDate, ProductSearchCriteria searchCriteria); /** * @deprecated Use {@link #readActiveProductsByCategory(Long)} */ @Nonnull public List<Product> readActiveProductsByCategory(@Nonnull Long categoryId, @Nonnull Date currentDate, @Nonnull int limit, @Nonnull int offset); /** * Find all products related to the passed in category * * @param categoryId the primary key of the category to whom the resulting product list should be related * @return the list of products qualified for the category */ @Nonnull public List<Product> readProductsByCategory(@Nonnull Long categoryId); /** * Find all products related to the passed in category * * @param categoryId the primary key of the category to whom the resulting product list should be related * @param limit the maximum number of results to return * @param offset the starting point in the record set * @return the list of products qualified for the category */ @Nonnull public List<Product> readProductsByCategory(@Nonnull Long categoryId, @Nonnull int limit, @Nonnull int offset); /** * Remove the passed in product instance from the datastore * * @param product the product instance to remove */ public void delete(@Nonnull Product product); /** * Create a new {@code Product} instance. The system will use the configuration in * {@code /BroadleafCommerce/core/BroadleafCommerceFramework/src/main/resources/bl-framework-applicationContext-entity.xml} * to determine which polymorphic version of {@code Product} to instantiate. To make Broadleaf instantiate your * extension of {@code Product} by default, include an entity configuration bean in your application context xml similar to: * <p> * {@code * <bean id="blEntityConfiguration" class="org.broadleafcommerce.common.persistence.EntityConfiguration"> * <property name="entityContexts"> * <list> * <value>classpath:myCompany-applicationContext-entity.xml</value> * </list> * </property> * </bean> * } * </p> * Declare the same key for your desired entity in your entity xml that is used in the Broadleaf entity xml, but change the value to the fully * qualified classname of your entity extension. * * @param productType the type of product you would like to create (presumably a Product or ProductSku instance). 
The getType method of {@code ProductType} provides the key for the entity configuration. * @return a {@code Product} instance based on the Broadleaf entity configuration. */ public Product create(ProductType productType); /** * Returns all active ProductBundles whose automatic property is true. * * @return */ public List<ProductBundle> readAutomaticProductBundles(); /** * Look up a product that matches the given URI * * @param uri - the relative URL to look up the Product by * @return List of products that match the passed in URI. * */ public List<Product> findProductByURI(String key); /** * * Reads all products from the database that are currently active. * * @return a list of all active products */ public List<Product> readAllActiveProducts(); /** * @deprecated use {@link #readAllActiveProducts()} * * @param currentDate * @return a list of all active products */ public List<Product> readAllActiveProducts(@Nonnull Date currentDate); /** * Reads all products from the database that are currently active. This method differs from * {@link #readAllActiveProducts()} in that this one will utilize database paging. * * It will fetch results in pages. For example, if page = 3 and pageSize = 25, this method would * return rows 75-99 from the database. * * @param page - the number of the page to get (0 indexed) * @param pageSize - the number of results per page * @return a list of active products for the given page */ public List<Product> readAllActiveProducts(int page, int pageSize); /** * @deprecated Use {@link #readAllActiveProducts(page, pageSize)} * * @param page - the number of the page to get (0 indexed) * @param pageSize - the number of results per page * @param currentDate * @return a list of active products for the given page */ public List<Product> readAllActiveProducts(int page, int pageSize, Date currentDate); /** * Returns the number of products that are currently active. * * @return the number of currently active products */ public Long readCountAllActiveProducts(); /** * @deprecated {@link #readActiveProductCount()} * * @param currentDate * @return the number of currently active products */ public Long readCountAllActiveProducts(Date currentDate); /** * Returns the number of milliseconds that the current date/time will be cached for queries before refreshing. * This aids in query caching, otherwise every query that utilized current date would be different and caching * would be ineffective. * * @return the milliseconds to cache the current date/time */ public Long getCurrentDateResolution(); /** * Sets the number of milliseconds that the current date/time will be cached for queries before refreshing. * This aids in query caching, otherwise every query that utilized current date would be different and caching * would be ineffective. * * @param currentDateResolution the milliseconds to cache the current date/time */ public void setCurrentDateResolution(Long currentDateResolution); }
0true
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_catalog_dao_ProductDao.java
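The paging contract for readAllActiveProducts(page, pageSize) is zero-indexed, so the database offset works out to page * pageSize. A quick sketch of the arithmetic from the javadoc example (productDao is assumed to be an injected instance):

// page = 3, pageSize = 25 => rows 75-99, matching the javadoc.
int page = 3, pageSize = 25;
int offset = page * pageSize;             // 75
int lastRow = offset + pageSize - 1;      // 99
List<Product> batch = productDao.readAllActiveProducts(page, pageSize);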
3,549
public class BinaryFieldMapper extends AbstractFieldMapper<BytesReference> { public static final String CONTENT_TYPE = "binary"; public static class Defaults extends AbstractFieldMapper.Defaults { public static final long COMPRESS_THRESHOLD = -1; public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE); static { FIELD_TYPE.setIndexed(false); FIELD_TYPE.freeze(); } } public static class Builder extends AbstractFieldMapper.Builder<Builder, BinaryFieldMapper> { private Boolean compress = null; private long compressThreshold = Defaults.COMPRESS_THRESHOLD; public Builder(String name) { super(name, new FieldType(Defaults.FIELD_TYPE)); builder = this; } public Builder compress(boolean compress) { this.compress = compress; return this; } public Builder compressThreshold(long compressThreshold) { this.compressThreshold = compressThreshold; return this; } @Override public BinaryFieldMapper build(BuilderContext context) { return new BinaryFieldMapper(buildNames(context), fieldType, compress, compressThreshold, postingsProvider, docValuesProvider, multiFieldsBuilder.build(this, context), copyTo); } } public static class TypeParser implements Mapper.TypeParser { @Override public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException { BinaryFieldMapper.Builder builder = binaryField(name); parseField(builder, name, node, parserContext); for (Map.Entry<String, Object> entry : node.entrySet()) { String fieldName = Strings.toUnderscoreCase(entry.getKey()); Object fieldNode = entry.getValue(); if (fieldName.equals("compress") && fieldNode != null) { builder.compress(nodeBooleanValue(fieldNode)); } else if (fieldName.equals("compress_threshold") && fieldNode != null) { if (fieldNode instanceof Number) { builder.compressThreshold(((Number) fieldNode).longValue()); builder.compress(true); } else { builder.compressThreshold(ByteSizeValue.parseBytesSizeValue(fieldNode.toString()).bytes()); builder.compress(true); } } } return builder; } } private Boolean compress; private long compressThreshold; protected BinaryFieldMapper(Names names, FieldType fieldType, Boolean compress, long compressThreshold, PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider, MultiFields multiFields, CopyTo copyTo) { super(names, 1.0f, fieldType, null, null, null, postingsProvider, docValuesProvider, null, null, null, null, multiFields, copyTo); this.compress = compress; this.compressThreshold = compressThreshold; } @Override public FieldType defaultFieldType() { return Defaults.FIELD_TYPE; } @Override public FieldDataType defaultFieldDataType() { return null; } @Override public Object valueForSearch(Object value) { return value(value); } @Override public BytesReference value(Object value) { if (value == null) { return null; } BytesReference bytes; if (value instanceof BytesRef) { bytes = new BytesArray((BytesRef) value); } else if (value instanceof BytesReference) { bytes = (BytesReference) value; } else if (value instanceof byte[]) { bytes = new BytesArray((byte[]) value); } else { try { bytes = new BytesArray(Base64.decode(value.toString())); } catch (IOException e) { throw new ElasticsearchParseException("failed to convert bytes", e); } } try { return CompressorFactory.uncompressIfNeeded(bytes); } catch (IOException e) { throw new ElasticsearchParseException("failed to decompress source", e); } } @Override protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException { 
if (!fieldType().stored()) { return; } byte[] value; if (context.parser().currentToken() == XContentParser.Token.VALUE_NULL) { return; } else { value = context.parser().binaryValue(); if (compress != null && compress && !CompressorFactory.isCompressed(value, 0, value.length)) { if (compressThreshold == -1 || value.length > compressThreshold) { BytesStreamOutput bStream = new BytesStreamOutput(); StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream); stream.writeBytes(value, 0, value.length); stream.close(); value = bStream.bytes().toBytes(); } } } if (value == null) { return; } fields.add(new Field(names.indexName(), value, fieldType)); } @Override protected String contentType() { return CONTENT_TYPE; } @Override protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { builder.field("type", contentType()); if (includeDefaults || !names.name().equals(names.indexNameClean())) { builder.field("index_name", names.indexNameClean()); } if (compress != null) { builder.field("compress", compress); } else if (includeDefaults) { builder.field("compress", false); } if (compressThreshold != -1) { builder.field("compress_threshold", new ByteSizeValue(compressThreshold).toString()); } else if (includeDefaults) { builder.field("compress_threshold", -1); } if (includeDefaults || fieldType.stored() != defaultFieldType().stored()) { builder.field("store", fieldType.stored()); } } @Override public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { BinaryFieldMapper sourceMergeWith = (BinaryFieldMapper) mergeWith; if (!mergeContext.mergeFlags().simulate()) { if (sourceMergeWith.compress != null) { this.compress = sourceMergeWith.compress; } if (sourceMergeWith.compressThreshold != -1) { this.compressThreshold = sourceMergeWith.compressThreshold; } } } @Override public boolean hasDocValues() { return false; } }
1no label
src_main_java_org_elasticsearch_index_mapper_core_BinaryFieldMapper.java
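As the TypeParser shows, setting compress_threshold implicitly turns compression on. A hedged sketch of building the same configuration programmatically (binaryField(...) is the MapperBuilders factory the parser references; the field name is illustrative):

// Stored binary field that compresses values larger than 1kb.
BinaryFieldMapper.Builder builder = binaryField("attachment")
        .compress(true)
        .compressThreshold(ByteSizeValue.parseBytesSizeValue("1kb").bytes());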
409
trackedList.addChangeListener(new OMultiValueChangeListener<Integer, String>() { public void onAfterRecordChanged(final OMultiValueChangeEvent<Integer, String> event) { Assert.assertEquals(event.getChangeType(), OMultiValueChangeEvent.OChangeType.REMOVE); Assert.assertEquals(event.getOldValue(), "value2"); Assert.assertEquals(event.getKey().intValue(), 1); Assert.assertNull(event.getValue()); changed.value = true; } });
0true
core_src_test_java_com_orientechnologies_orient_core_db_record_TrackedListTest.java
1,196
@SuppressWarnings("unchecked") public class CacheRecycler extends AbstractComponent { public final Recycler<ObjectObjectOpenHashMap> hashMap; public final Recycler<ObjectOpenHashSet> hashSet; public final Recycler<DoubleObjectOpenHashMap> doubleObjectMap; public final Recycler<LongObjectOpenHashMap> longObjectMap; public final Recycler<LongLongOpenHashMap> longLongMap; public final Recycler<IntIntOpenHashMap> intIntMap; public final Recycler<FloatIntOpenHashMap> floatIntMap; public final Recycler<DoubleIntOpenHashMap> doubleIntMap; public final Recycler<LongIntOpenHashMap> longIntMap; public final Recycler<ObjectIntOpenHashMap> objectIntMap; public final Recycler<IntObjectOpenHashMap> intObjectMap; public final Recycler<ObjectFloatOpenHashMap> objectFloatMap; public void close() { hashMap.close(); hashSet.close(); doubleObjectMap.close(); longObjectMap.close(); longLongMap.close(); intIntMap.close(); floatIntMap.close(); doubleIntMap.close(); longIntMap.close(); objectIntMap.close(); intObjectMap.close(); objectFloatMap.close(); } @Inject public CacheRecycler(Settings settings) { super(settings); final Type type = Type.parse(settings.get("type")); int limit = settings.getAsInt("limit", 10); int smartSize = settings.getAsInt("smart_size", 1024); final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings); hashMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<ObjectObjectOpenHashMap>() { @Override public ObjectObjectOpenHashMap newInstance(int sizing) { return new ObjectObjectOpenHashMap(size(sizing)); } @Override public void clear(ObjectObjectOpenHashMap value) { value.clear(); } }); hashSet = build(type, limit, smartSize, availableProcessors, new Recycler.C<ObjectOpenHashSet>() { @Override public ObjectOpenHashSet newInstance(int sizing) { return new ObjectOpenHashSet(size(sizing), 0.5f); } @Override public void clear(ObjectOpenHashSet value) { value.clear(); } }); doubleObjectMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<DoubleObjectOpenHashMap>() { @Override public DoubleObjectOpenHashMap newInstance(int sizing) { return new DoubleObjectOpenHashMap(size(sizing)); } @Override public void clear(DoubleObjectOpenHashMap value) { value.clear(); } }); longObjectMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<LongObjectOpenHashMap>() { @Override public LongObjectOpenHashMap newInstance(int sizing) { return new LongObjectOpenHashMap(size(sizing)); } @Override public void clear(LongObjectOpenHashMap value) { value.clear(); } }); longLongMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<LongLongOpenHashMap>() { @Override public LongLongOpenHashMap newInstance(int sizing) { return new LongLongOpenHashMap(size(sizing)); } @Override public void clear(LongLongOpenHashMap value) { value.clear(); } }); intIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<IntIntOpenHashMap>() { @Override public IntIntOpenHashMap newInstance(int sizing) { return new IntIntOpenHashMap(size(sizing)); } @Override public void clear(IntIntOpenHashMap value) { value.clear(); } }); floatIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<FloatIntOpenHashMap>() { @Override public FloatIntOpenHashMap newInstance(int sizing) { return new FloatIntOpenHashMap(size(sizing)); } @Override public void clear(FloatIntOpenHashMap value) { value.clear(); } }); doubleIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<DoubleIntOpenHashMap>() { @Override public 
DoubleIntOpenHashMap newInstance(int sizing) { return new DoubleIntOpenHashMap(size(sizing)); } @Override public void clear(DoubleIntOpenHashMap value) { value.clear(); } }); longIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<LongIntOpenHashMap>() { @Override public LongIntOpenHashMap newInstance(int sizing) { return new LongIntOpenHashMap(size(sizing)); } @Override public void clear(LongIntOpenHashMap value) { value.clear(); } }); objectIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<ObjectIntOpenHashMap>() { @Override public ObjectIntOpenHashMap newInstance(int sizing) { return new ObjectIntOpenHashMap(size(sizing)); } @Override public void clear(ObjectIntOpenHashMap value) { value.clear(); } }); intObjectMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<IntObjectOpenHashMap>() { @Override public IntObjectOpenHashMap newInstance(int sizing) { return new IntObjectOpenHashMap(size(sizing)); } @Override public void clear(IntObjectOpenHashMap value) { value.clear(); } }); objectFloatMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<ObjectFloatOpenHashMap>() { @Override public ObjectFloatOpenHashMap newInstance(int sizing) { return new ObjectFloatOpenHashMap(size(sizing)); } @Override public void clear(ObjectFloatOpenHashMap value) { value.clear(); } }); } public <K, V> Recycler.V<ObjectObjectOpenHashMap<K, V>> hashMap(int sizing) { return (Recycler.V) hashMap.obtain(sizing); } public <T> Recycler.V<ObjectOpenHashSet<T>> hashSet(int sizing) { return (Recycler.V) hashSet.obtain(sizing); } public <T> Recycler.V<DoubleObjectOpenHashMap<T>> doubleObjectMap(int sizing) { return (Recycler.V) doubleObjectMap.obtain(sizing); } public <T> Recycler.V<LongObjectOpenHashMap<T>> longObjectMap(int sizing) { return (Recycler.V) longObjectMap.obtain(sizing); } public Recycler.V<LongLongOpenHashMap> longLongMap(int sizing) { return longLongMap.obtain(sizing); } public Recycler.V<IntIntOpenHashMap> intIntMap(int sizing) { return intIntMap.obtain(sizing); } public Recycler.V<FloatIntOpenHashMap> floatIntMap(int sizing) { return floatIntMap.obtain(sizing); } public Recycler.V<DoubleIntOpenHashMap> doubleIntMap(int sizing) { return doubleIntMap.obtain(sizing); } public Recycler.V<LongIntOpenHashMap> longIntMap(int sizing) { return longIntMap.obtain(sizing); } public <T> Recycler.V<ObjectIntOpenHashMap<T>> objectIntMap(int sizing) { return (Recycler.V) objectIntMap.obtain(sizing); } public <T> Recycler.V<IntObjectOpenHashMap<T>> intObjectMap(int sizing) { return (Recycler.V) intObjectMap.obtain(sizing); } public <T> Recycler.V<ObjectFloatOpenHashMap<T>> objectFloatMap(int sizing) { return (Recycler.V) objectFloatMap.obtain(sizing); } static int size(int sizing) { return sizing > 0 ? 
sizing : 256; } private <T> Recycler<T> build(Type type, int limit, int smartSize, int availableProcessors, Recycler.C<T> c) { Recycler<T> recycler; try { recycler = type.build(c, limit, availableProcessors); if (smartSize > 0) { recycler = sizing(recycler, none(c), smartSize); } } catch (IllegalArgumentException ex) { throw new ElasticsearchIllegalArgumentException("no type support [" + type + "] for recycler"); } return recycler; } public static enum Type { SOFT_THREAD_LOCAL { @Override <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) { return threadLocal(softFactory(dequeFactory(c, limit))); } }, THREAD_LOCAL { @Override <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) { return threadLocal(dequeFactory(c, limit)); } }, QUEUE { @Override <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) { return concurrentDeque(c, limit); } }, SOFT_CONCURRENT { @Override <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) { return concurrent(softFactory(dequeFactory(c, limit)), availableProcessors); } }, CONCURRENT { @Override <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) { return concurrent(dequeFactory(c, limit), availableProcessors); } }, NONE { @Override <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) { return none(c); } }; public static Type parse(String type) { if (Strings.isNullOrEmpty(type)) { return SOFT_CONCURRENT; } try { return Type.valueOf(type.toUpperCase(Locale.ROOT)); } catch (IllegalArgumentException e) { throw new ElasticsearchIllegalArgumentException("no type support [" + type + "]"); } } abstract <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors); } }
0true
src_main_java_org_elasticsearch_cache_recycler_CacheRecycler.java
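Callers obtain a sized, pre-cleared structure from the recycler and must hand it back when done. A usage sketch, assuming Recycler.V exposes the pooled value via v() and is returned with release() (method names inferred from this codebase's Recycler API, so treat them as assumptions):

// Borrow an int->int map, use it, and return it to the pool.
Recycler.V<IntIntOpenHashMap> counts = cacheRecycler.intIntMap(-1); // -1: default sizing
try {
    counts.v().addTo(docId, 1);
} finally {
    counts.release(); // clears the map and returns it to the recycler
}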
62
public interface TitanGraph extends TitanGraphTransaction, ThreadedTransactionalGraph { /* --------------------------------------------------------------- * Transactions and general admin * --------------------------------------------------------------- */ /** * Opens a new thread-independent {@link TitanTransaction}. * <p/> * The transaction is open when it is returned but MUST be explicitly closed by calling {@link com.thinkaurelius.titan.core.TitanTransaction#commit()} * or {@link com.thinkaurelius.titan.core.TitanTransaction#rollback()} when it is no longer needed. * <p/> * Note, that this returns a thread independent transaction object. It is not necessary to call this method * to use Blueprint's standard transaction framework which will automatically start a transaction with the first * operation on the graph. * * @return Transaction object representing a transactional context. */ public TitanTransaction newTransaction(); /** * Returns a {@link TransactionBuilder} to construct a new thread-independent {@link TitanTransaction}. * * @return a new TransactionBuilder * @see TransactionBuilder * @see #newTransaction() */ public TransactionBuilder buildTransaction(); /** * Returns the management system for this graph instance. The management system provides functionality * to change global configuration options, install indexes and inspect the graph schema. * <p /> * The management system operates in its own transactional context which must be explicitly closed. * * @return */ public TitanManagement getManagementSystem(); /** * Checks whether the graph is open. * * @return true, if the graph is open, else false. * @see #shutdown() */ public boolean isOpen(); /** * Checks whether the graph is closed. * * @return true, if the graph has been closed, else false */ public boolean isClosed(); /** * Closes the graph database. * <p/> * Closing the graph database causes a disconnect and possible closing of the underlying storage backend * and a release of all occupied resources by this graph database. * Closing a graph database requires that all open thread-independent transactions have been closed - * otherwise they will be left abandoned. * * @throws TitanException if closing the graph database caused errors in the storage backend */ public void shutdown() throws TitanException; }
0true
titan-core_src_main_java_com_thinkaurelius_titan_core_TitanGraph.java
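Because newTransaction() returns a thread-independent transaction, it must be closed explicitly rather than relying on the Blueprints per-thread transaction. A minimal sketch of the commit/rollback discipline the javadoc asks for (addVertex() stands in for any mutation):

TitanTransaction tx = graph.newTransaction();
try {
    tx.addVertex();   // mutations happen in this transactional context
    tx.commit();
} catch (Exception e) {
    tx.rollback();    // never leave a thread-independent tx abandoned
}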
43
public class StatsCommandProcessor extends MemcacheCommandProcessor<StatsCommand> { public StatsCommandProcessor(TextCommandService textCommandService) { super(textCommandService); } public void handle(StatsCommand command) { Stats stats = textCommandService.getStats(); command.setResponse(stats); textCommandService.sendResponse(command); } public void handleRejection(StatsCommand command) { handle(command); } }
0true
hazelcast_src_main_java_com_hazelcast_ascii_memcache_StatsCommandProcessor.java
383
public class TransportClusterRerouteAction extends TransportMasterNodeOperationAction<ClusterRerouteRequest, ClusterRerouteResponse> { private final AllocationService allocationService; @Inject public TransportClusterRerouteAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, AllocationService allocationService) { super(settings, transportService, clusterService, threadPool); this.allocationService = allocationService; } @Override protected String executor() { // we go async right away return ThreadPool.Names.SAME; } @Override protected String transportAction() { return ClusterRerouteAction.NAME; } @Override protected ClusterRerouteRequest newRequest() { return new ClusterRerouteRequest(); } @Override protected ClusterRerouteResponse newResponse() { return new ClusterRerouteResponse(); } @Override protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, final ActionListener<ClusterRerouteResponse> listener) throws ElasticsearchException { clusterService.submitStateUpdateTask("cluster_reroute (api)", Priority.URGENT, new AckedClusterStateUpdateTask() { private volatile ClusterState clusterStateToSend; @Override public boolean mustAck(DiscoveryNode discoveryNode) { return true; } @Override public void onAllNodesAcked(@Nullable Throwable t) { listener.onResponse(new ClusterRerouteResponse(true, clusterStateToSend)); } @Override public void onAckTimeout() { listener.onResponse(new ClusterRerouteResponse(false, clusterStateToSend)); } @Override public TimeValue ackTimeout() { return request.timeout(); } @Override public TimeValue timeout() { return request.masterNodeTimeout(); } @Override public void onFailure(String source, Throwable t) { logger.debug("failed to perform [{}]", t, source); listener.onFailure(t); } @Override public ClusterState execute(ClusterState currentState) { RoutingAllocation.Result routingResult = allocationService.reroute(currentState, request.commands, true); ClusterState newState = ClusterState.builder(currentState).routingResult(routingResult).build(); clusterStateToSend = newState; if (request.dryRun) { return currentState; } return newState; } @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { } }); } }
1no label
src_main_java_org_elasticsearch_action_admin_cluster_reroute_TransportClusterRerouteAction.java
612
new OIndexEngine.ValuesResultListener() { @Override public boolean addResult(OIdentifiable identifiable) { return resultListener.addResult(identifiable); } });
1no label
core_src_main_java_com_orientechnologies_orient_core_index_OIndexMultiValues.java
334
new Thread() { public void run() { map.tryPut("key1", "value2", 5, TimeUnit.SECONDS); latch.countDown(); } }.start();
0true
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTest.java
127
public class ForkJoinWorkerThread extends Thread { /* * ForkJoinWorkerThreads are managed by ForkJoinPools and perform * ForkJoinTasks. For explanation, see the internal documentation * of class ForkJoinPool. * * This class just maintains links to its pool and WorkQueue. The * pool field is set immediately upon construction, but the * workQueue field is not set until a call to registerWorker * completes. This leads to a visibility race, that is tolerated * by requiring that the workQueue field is only accessed by the * owning thread. */ final ForkJoinPool pool; // the pool this thread works in final ForkJoinPool.WorkQueue workQueue; // work-stealing mechanics /** * Creates a ForkJoinWorkerThread operating in the given pool. * * @param pool the pool this thread works in * @throws NullPointerException if pool is null */ protected ForkJoinWorkerThread(ForkJoinPool pool) { // Use a placeholder until a useful name can be set in registerWorker super("aForkJoinWorkerThread"); this.pool = pool; this.workQueue = pool.registerWorker(this); } /** * Returns the pool hosting this thread. * * @return the pool */ public ForkJoinPool getPool() { return pool; } /** * Returns the unique index number of this thread in its pool. * The returned value ranges from zero to the maximum number of * threads (minus one) that may exist in the pool, and does not * change during the lifetime of the thread. This method may be * useful for applications that track status or collect results * per-worker-thread rather than per-task. * * @return the index number */ public int getPoolIndex() { return workQueue.poolIndex >>> 1; // ignore odd/even tag bit } /** * Initializes internal state after construction but before * processing any tasks. If you override this method, you must * invoke {@code super.onStart()} at the beginning of the method. * Initialization requires care: Most fields must have legal * default values, to ensure that attempted accesses from other * threads work correctly even before this thread starts * processing tasks. */ protected void onStart() { } /** * Performs cleanup associated with termination of this worker * thread. If you override this method, you must invoke * {@code super.onTermination} at the end of the overridden method. * * @param exception the exception causing this thread to abort due * to an unrecoverable error, or {@code null} if completed normally */ protected void onTermination(Throwable exception) { } /** * This method is required to be public, but should never be * called explicitly. It performs the main run loop to execute * {@link ForkJoinTask}s. */ public void run() { Throwable exception = null; try { onStart(); pool.runWorker(workQueue); } catch (Throwable ex) { exception = ex; } finally { try { onTermination(exception); } catch (Throwable ex) { if (exception == null) exception = ex; } finally { pool.deregisterWorker(this, exception); } } } }
0true
src_main_java_jsr166e_ForkJoinWorkerThread.java
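The lifecycle hooks above are designed for subclassing, and overrides must delegate to super at the documented points: at the beginning of onStart and at the end of onTermination. A conforming subclass sketch (the class and thread names are illustrative; wiring it in requires a ForkJoinWorkerThreadFactory passed to the pool):

class InstrumentedWorkerThread extends ForkJoinWorkerThread {
    InstrumentedWorkerThread(ForkJoinPool pool) {
        super(pool);
    }

    @Override
    protected void onStart() {
        super.onStart();                        // required first, per the contract
        setName("instrumented-worker-" + getPoolIndex());
    }

    @Override
    protected void onTermination(Throwable exception) {
        if (exception != null) {
            System.err.println(getName() + " died: " + exception);
        }
        super.onTermination(exception);         // required last, per the contract
    }
}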
161
@Test public class ShortSerializerTest { private static final int FIELD_SIZE = 2; private static final Short OBJECT = 1; private OShortSerializer shortSerializer; byte[] stream = new byte[FIELD_SIZE]; @BeforeClass public void beforeClass() { shortSerializer = new OShortSerializer(); } public void testFieldSize() { Assert.assertEquals(shortSerializer.getObjectSize(null), FIELD_SIZE); } public void testSerialize() { shortSerializer.serialize(OBJECT, stream, 0); Assert.assertEquals(shortSerializer.deserialize(stream, 0), OBJECT); } public void testSerializeNative() { shortSerializer.serializeNative(OBJECT, stream, 0); Assert.assertEquals(shortSerializer.deserializeNative(stream, 0), OBJECT); } public void testNativeDirectMemoryCompatibility() { shortSerializer.serializeNative(OBJECT, stream, 0); ODirectMemoryPointer pointer = new ODirectMemoryPointer(stream); try { Assert.assertEquals(shortSerializer.deserializeFromDirectMemory(pointer, 0), OBJECT); } finally { pointer.free(); } } }
0true
commons_src_test_java_com_orientechnologies_common_serialization_types_ShortSerializerTest.java
7
private static class HBasePidfileParseException extends Exception { private static final long serialVersionUID = 1L; public HBasePidfileParseException(String message) { super(message); } }
0true
titan-hbase-parent_titan-hbase-core_src_test_java_com_thinkaurelius_titan_HBaseStatus.java
1,339
public class SolrSearchServiceImpl implements SearchService, DisposableBean { private static final Log LOG = LogFactory.getLog(SolrSearchServiceImpl.class); @Resource(name = "blProductDao") protected ProductDao productDao; @Resource(name = "blFieldDao") protected FieldDao fieldDao; @Resource(name = "blSearchFacetDao") protected SearchFacetDao searchFacetDao; @Resource(name = "blSolrHelperService") protected SolrHelperService shs; @Resource(name = "blSolrIndexService") protected SolrIndexService solrIndexService; @Resource(name = "blSolrSearchServiceExtensionManager") protected SolrSearchServiceExtensionManager extensionManager; public SolrSearchServiceImpl(String solrServer) throws IOException, ParserConfigurationException, SAXException { if ("solrhome".equals(solrServer)) { final String baseTempPath = System.getProperty("java.io.tmpdir"); File tempDir = new File(baseTempPath + File.separator + System.getProperty("user.name") + File.separator + "solrhome"); if (System.getProperty("tmpdir.solrhome") != null) { //allow for an override of tmpdir tempDir = new File(System.getProperty("tmpdir.solrhome")); } if (!tempDir.exists()) { tempDir.mkdirs(); } solrServer = tempDir.getAbsolutePath(); } File solrXml = new File(new File(solrServer), "solr.xml"); if (!solrXml.exists()) { copyConfigToSolrHome(this.getClass().getResourceAsStream("/solr-default.xml"), solrXml); } LOG.debug(String.format("Using [%s] as solrhome", solrServer)); LOG.debug(String.format("Using [%s] as solr.xml", solrXml.getAbsoluteFile())); if (LOG.isTraceEnabled()) { LOG.trace("Contents of solr.xml:"); BufferedReader br = null; try { br = new BufferedReader(new FileReader(solrXml)); String line; while ((line = br.readLine()) != null) { LOG.trace(line); } } finally { if (br != null) { try { br.close(); } catch (Throwable e) { //do nothing } } } LOG.trace("Done printing solr.xml"); } CoreContainer coreContainer = CoreContainer.createAndLoad(solrServer, solrXml); EmbeddedSolrServer primaryServer = new EmbeddedSolrServer(coreContainer, SolrContext.PRIMARY); EmbeddedSolrServer reindexServer = new EmbeddedSolrServer(coreContainer, SolrContext.REINDEX); SolrContext.setPrimaryServer(primaryServer); SolrContext.setReindexServer(reindexServer); } public void copyConfigToSolrHome(InputStream configIs, File destFile) throws IOException { BufferedInputStream bis = null; BufferedOutputStream bos = null; try { bis = new BufferedInputStream(configIs); bos = new BufferedOutputStream(new FileOutputStream(destFile, false)); boolean eof = false; while (!eof) { int temp = bis.read(); if (temp == -1) { eof = true; } else { bos.write(temp); } } bos.flush(); } finally { if (bis != null) { try { bis.close(); } catch (Throwable e) { //do nothing } } if (bos != null) { try { bos.close(); } catch (Throwable e) { //do nothing } } } } public SolrSearchServiceImpl(SolrServer solrServer) { SolrContext.setPrimaryServer(solrServer); } /** * This constructor serves to mimic the one below this, which takes in two {@link SolrServer} arguments. * By having this and then simply disregarding the second parameter, we can more easily support 2-core * Solr configurations that use embedded/standalone per environment. 
* * @param solrServer * @param reindexServer * @throws SAXException * @throws ParserConfigurationException * @throws IOException */ public SolrSearchServiceImpl(String solrServer, String reindexServer) throws IOException, ParserConfigurationException, SAXException { this(solrServer); } public SolrSearchServiceImpl(SolrServer solrServer, SolrServer reindexServer) { SolrContext.setPrimaryServer(solrServer); SolrContext.setReindexServer(reindexServer); } @Override public void rebuildIndex() throws ServiceException, IOException { solrIndexService.rebuildIndex(); } @Override public void destroy() throws Exception { if (SolrContext.getServer() instanceof EmbeddedSolrServer) { ((EmbeddedSolrServer) SolrContext.getServer()).shutdown(); } } @Override public ProductSearchResult findExplicitProductsByCategory(Category category, ProductSearchCriteria searchCriteria) throws ServiceException { List<SearchFacetDTO> facets = getCategoryFacets(category); String query = shs.getExplicitCategoryFieldName() + ":" + category.getId(); return findProducts("*:*", facets, searchCriteria, shs.getCategorySortFieldName(category) + " asc", query); } @Override public ProductSearchResult findProductsByCategory(Category category, ProductSearchCriteria searchCriteria) throws ServiceException { List<SearchFacetDTO> facets = getCategoryFacets(category); String query = shs.getCategoryFieldName() + ":" + category.getId(); return findProducts("*:*", facets, searchCriteria, shs.getCategorySortFieldName(category) + " asc", query); } @Override public ProductSearchResult findProductsByQuery(String query, ProductSearchCriteria searchCriteria) throws ServiceException { List<SearchFacetDTO> facets = getSearchFacets(); query = "(" + sanitizeQuery(query) + ")"; return findProducts(query, facets, searchCriteria, null); } @Override public ProductSearchResult findProductsByCategoryAndQuery(Category category, String query, ProductSearchCriteria searchCriteria) throws ServiceException { List<SearchFacetDTO> facets = getSearchFacets(); String catFq = shs.getCategoryFieldName() + ":" + category.getId(); query = "(" + sanitizeQuery(query) + ")"; return findProducts(query, facets, searchCriteria, null, catFq); } public String getLocalePrefix() { if (BroadleafRequestContext.getBroadleafRequestContext() != null) { Locale locale = BroadleafRequestContext.getBroadleafRequestContext().getLocale(); if (locale != null) { return locale.getLocaleCode() + "_"; } } return ""; } protected String buildQueryFieldsString() { StringBuilder queryBuilder = new StringBuilder(); List<Field> fields = fieldDao.readAllProductFields(); for (Field currentField : fields) { if (currentField.getSearchable()) { appendFieldToQuery(queryBuilder, currentField); } } return queryBuilder.toString(); } protected void appendFieldToQuery(StringBuilder queryBuilder, Field currentField) { List<FieldType> searchableFieldTypes = shs.getSearchableFieldTypes(currentField); for (FieldType currentType : searchableFieldTypes) { queryBuilder.append(shs.getPropertyNameForFieldSearchable(currentField, currentType)).append(" "); } } /** * @deprecated in favor of the other findProducts() method */ protected ProductSearchResult findProducts(String qualifiedSolrQuery, List<SearchFacetDTO> facets, ProductSearchCriteria searchCriteria, String defaultSort) throws ServiceException { return findProducts(qualifiedSolrQuery, facets, searchCriteria, defaultSort, null); } /** * Given a qualified solr query string (such as "category:2002"), actually performs a solr search. 
It will * take into considering the search criteria to build out facets / pagination / sorting. * * @param qualifiedSolrQuery * @param facets * @param searchCriteria * @return the ProductSearchResult of the search * @throws ServiceException */ protected ProductSearchResult findProducts(String qualifiedSolrQuery, List<SearchFacetDTO> facets, ProductSearchCriteria searchCriteria, String defaultSort, String... filterQueries) throws ServiceException { Map<String, SearchFacetDTO> namedFacetMap = getNamedFacetMap(facets, searchCriteria); // Build the basic query SolrQuery solrQuery = new SolrQuery() .setQuery(qualifiedSolrQuery) .setFields(shs.getProductIdFieldName()) .setRows(searchCriteria.getPageSize()) .setStart((searchCriteria.getPage() - 1) * searchCriteria.getPageSize()); if (filterQueries != null) { solrQuery.setFilterQueries(filterQueries); } solrQuery.addFilterQuery(shs.getNamespaceFieldName() + ":" + shs.getCurrentNamespace()); solrQuery.set("defType", "edismax"); solrQuery.set("qf", buildQueryFieldsString()); // Attach additional restrictions attachSortClause(solrQuery, searchCriteria, defaultSort); attachActiveFacetFilters(solrQuery, namedFacetMap, searchCriteria); attachFacets(solrQuery, namedFacetMap); extensionManager.getProxy().modifySolrQuery(solrQuery, qualifiedSolrQuery, facets, searchCriteria, defaultSort); if (LOG.isTraceEnabled()) { try { LOG.trace(URLDecoder.decode(solrQuery.toString(), "UTF-8")); } catch (Exception e) { LOG.trace("Couldn't UTF-8 URL Decode: " + solrQuery.toString()); } } // Query solr QueryResponse response; try { //solrQuery = new SolrQuery().setQuery("*:*"); response = SolrContext.getServer().query(solrQuery); if (LOG.isTraceEnabled()) { LOG.trace(response.toString()); for (SolrDocument doc : response.getResults()) { LOG.trace(doc); } } } catch (SolrServerException e) { throw new ServiceException("Could not perform search", e); } // Get the facets setFacetResults(namedFacetMap, response); sortFacetResults(namedFacetMap); // Get the products List<Product> products = getProducts(response); ProductSearchResult result = new ProductSearchResult(); result.setFacets(facets); result.setProducts(products); setPagingAttributes(result, response, searchCriteria); return result; } @Override public List<SearchFacetDTO> getSearchFacets() { return buildSearchFacetDTOs(searchFacetDao.readAllSearchFacets()); } @Override public List<SearchFacetDTO> getCategoryFacets(Category category) { List<CategorySearchFacet> categorySearchFacets = category.getCumulativeSearchFacets(); List<SearchFacet> searchFacets = new ArrayList<SearchFacet>(); for (CategorySearchFacet categorySearchFacet : categorySearchFacets) { searchFacets.add(categorySearchFacet.getSearchFacet()); } return buildSearchFacetDTOs(searchFacets); } /** * Sets up the sorting criteria. This will support sorting by multiple fields at a time * * @param query * @param searchCriteria */ protected void attachSortClause(SolrQuery query, ProductSearchCriteria searchCriteria, String defaultSort) { Map<String, String> solrFieldKeyMap = getSolrFieldKeyMap(searchCriteria); String sortQuery = searchCriteria.getSortQuery(); if (StringUtils.isBlank(sortQuery)) { sortQuery = defaultSort; } if (StringUtils.isNotBlank(sortQuery)) { String[] sortFields = sortQuery.split(","); for (String sortField : sortFields) { String field = sortField.split(" ")[0]; if (solrFieldKeyMap.containsKey(field)) { field = solrFieldKeyMap.get(field); } ORDER order = "desc".equals(sortField.split(" ")[1]) ? 
ORDER.desc : ORDER.asc; if (field != null) { query.addSortField(field, order); } } } } /** * Restricts the query by adding active facet filters. * * @param query * @param namedFacetMap * @param searchCriteria */ protected void attachActiveFacetFilters(SolrQuery query, Map<String, SearchFacetDTO> namedFacetMap, ProductSearchCriteria searchCriteria) { for (Entry<String, String[]> entry : searchCriteria.getFilterCriteria().entrySet()) { String solrKey = null; for (Entry<String, SearchFacetDTO> dtoEntry : namedFacetMap.entrySet()) { if (dtoEntry.getValue().getFacet().getField().getAbbreviation().equals(entry.getKey())) { solrKey = dtoEntry.getKey(); dtoEntry.getValue().setActive(true); } } if (solrKey != null) { String solrTag = getSolrFieldTag(shs.getGlobalFacetTagField(), "tag"); String[] selectedValues = entry.getValue().clone(); for (int i = 0; i < selectedValues.length; i++) { if (selectedValues[i].contains("range[")) { String rangeValue = selectedValues[i].substring(selectedValues[i].indexOf('[') + 1, selectedValues[i].indexOf(']')); String[] rangeValues = StringUtils.split(rangeValue, ':'); if (rangeValues[1].equals("null")) { rangeValues[1] = "*"; } selectedValues[i] = solrKey + ":[" + rangeValues[0] + " TO " + rangeValues[1] + "]"; } else { selectedValues[i] = solrKey + ":\"" + selectedValues[i] + "\""; } } String valueString = StringUtils.join(selectedValues, " OR "); StringBuilder sb = new StringBuilder(); sb.append(solrTag).append("(").append(valueString).append(")"); query.addFilterQuery(sb.toString()); } } } /** * Notifies solr about which facets you want it to determine results and counts for * * @param query * @param namedFacetMap */ protected void attachFacets(SolrQuery query, Map<String, SearchFacetDTO> namedFacetMap) { query.setFacet(true); for (Entry<String, SearchFacetDTO> entry : namedFacetMap.entrySet()) { SearchFacetDTO dto = entry.getValue(); String facetTagField = entry.getValue().isActive() ? shs.getGlobalFacetTagField() : entry.getKey(); // Clone the list - we don't want to remove these facets from the DB List<SearchFacetRange> facetRanges = new ArrayList<SearchFacetRange>(dto.getFacet().getSearchFacetRanges()); if (extensionManager != null) { extensionManager.getProxy().filterSearchFacetRanges(dto, facetRanges); } if (facetRanges != null && facetRanges.size() > 0) { for (SearchFacetRange range : facetRanges) { query.addFacetQuery(getSolrTaggedFieldString(entry.getKey(), facetTagField, "ex", range)); } } else { query.addFacetField(getSolrTaggedFieldString(entry.getKey(), facetTagField, "ex", null)); } } } /** * Builds out the DTOs for facet results from the search. This will then be used by the view layer to * display which values are available given the current constraints as well as the count of the values. 
* * @param namedFacetMap * @param response */ protected void setFacetResults(Map<String, SearchFacetDTO> namedFacetMap, QueryResponse response) { if (response.getFacetFields() != null) { for (FacetField facet : response.getFacetFields()) { String facetFieldName = facet.getName(); SearchFacetDTO facetDTO = namedFacetMap.get(facetFieldName); for (Count value : facet.getValues()) { SearchFacetResultDTO resultDTO = new SearchFacetResultDTO(); resultDTO.setFacet(facetDTO.getFacet()); resultDTO.setQuantity(new Long(value.getCount()).intValue()); resultDTO.setValue(value.getName()); facetDTO.getFacetValues().add(resultDTO); } } } if (response.getFacetQuery() != null) { for (Entry<String, Integer> entry : response.getFacetQuery().entrySet()) { String key = entry.getKey(); String facetFieldName = key.substring(key.indexOf("}") + 1, key.indexOf(':')); SearchFacetDTO facetDTO = namedFacetMap.get(facetFieldName); String minValue = key.substring(key.indexOf("[") + 1, key.indexOf(" TO")); String maxValue = key.substring(key.indexOf(" TO ") + 4, key.indexOf("]")); if (maxValue.equals("*")) { maxValue = null; } SearchFacetResultDTO resultDTO = new SearchFacetResultDTO(); resultDTO.setFacet(facetDTO.getFacet()); resultDTO.setQuantity(entry.getValue()); resultDTO.setMinValue(new BigDecimal(minValue)); resultDTO.setMaxValue(maxValue == null ? null : new BigDecimal(maxValue)); facetDTO.getFacetValues().add(resultDTO); } } } /** * Invoked to sort the facet results. This method will use the natural sorting of the value attribute of the * facet (or, if value is null, the minValue of the facet result). Override this method to customize facet * sorting for your given needs. * * @param namedFacetMap */ protected void sortFacetResults(Map<String, SearchFacetDTO> namedFacetMap) { for (Entry<String, SearchFacetDTO> entry : namedFacetMap.entrySet()) { Collections.sort(entry.getValue().getFacetValues(), new Comparator<SearchFacetResultDTO>() { public int compare(SearchFacetResultDTO o1, SearchFacetResultDTO o2) { if (o1.getValue() != null && o2.getValue() != null) { return o1.getValue().compareTo(o2.getValue()); } else if (o1.getMinValue() != null && o2.getMinValue() != null) { return o1.getMinValue().compareTo(o2.getMinValue()); } return 0; // Don't know how to compare } }); } } /** * Sets the total results, the current page, and the page size on the ProductSearchResult. Total results comes * from solr, while page and page size are duplicates of the searchCriteria conditions for ease of use. * * @param result * @param response * @param searchCriteria */ public void setPagingAttributes(ProductSearchResult result, QueryResponse response, ProductSearchCriteria searchCriteria) { result.setTotalResults(new Long(response.getResults().getNumFound()).intValue()); result.setPage(searchCriteria.getPage()); result.setPageSize(searchCriteria.getPageSize()); } /** * Given a list of product IDs from solr, this method will look up the IDs via the productDao and build out * actual Product instances. It will return a Products that is sorted by the order of the IDs in the passed * in list. 
* * @param response * @return the actual Product instances as a result of the search */ protected List<Product> getProducts(QueryResponse response) { final List<Long> productIds = new ArrayList<Long>(); SolrDocumentList docs = response.getResults(); for (SolrDocument doc : docs) { productIds.add((Long) doc.getFieldValue(shs.getProductIdFieldName())); } List<Product> products = productDao.readProductsByIds(productIds); // We have to sort the products list by the order of the productIds list to maintain sortability in the UI if (products != null) { Collections.sort(products, new Comparator<Product>() { public int compare(Product o1, Product o2) { return new Integer(productIds.indexOf(o1.getId())).compareTo(productIds.indexOf(o2.getId())); } }); } return products; } /** * Create the wrapper DTO around the SearchFacet * * @param searchFacets * @return the wrapper DTO */ protected List<SearchFacetDTO> buildSearchFacetDTOs(List<SearchFacet> searchFacets) { List<SearchFacetDTO> facets = new ArrayList<SearchFacetDTO>(); Map<String, String[]> requestParameters = BroadleafRequestContext.getRequestParameterMap(); for (SearchFacet facet : searchFacets) { if (facetIsAvailable(facet, requestParameters)) { SearchFacetDTO dto = new SearchFacetDTO(); dto.setFacet(facet); dto.setShowQuantity(true); facets.add(dto); } } return facets; } /** * Checks to see if the requiredFacets condition for a given facet is met. * * @param facet * @param request * @return whether or not the facet parameter is available */ protected boolean facetIsAvailable(SearchFacet facet, Map<String, String[]> params) { // Facets are available by default if they have no requiredFacets if (CollectionUtils.isEmpty(facet.getRequiredFacets())) { return true; } // If we have at least one required facet but no active facets, it's impossible for this facet to be available if (MapUtils.isEmpty(params)) { return false; } // We must either match all or just one of the required facets depending on the requiresAllDependentFacets flag int requiredMatches = facet.getRequiresAllDependentFacets() ? facet.getRequiredFacets().size() : 1; int matchesSoFar = 0; for (RequiredFacet requiredFacet : facet.getRequiredFacets()) { if (requiredMatches == matchesSoFar) { return true; } // Check to see if the required facet has a value in the current request parameters for (Entry<String, String[]> entry : params.entrySet()) { String key = entry.getKey(); if (key.equals(requiredFacet.getRequiredFacet().getField().getAbbreviation())) { matchesSoFar++; break; } } } return requiredMatches == matchesSoFar; } /** * Perform any necessary query sanitation here. For example, we disallow open and close parentheses, colons, and we also * ensure that quotes are actual quotes (") and not the URL encoding (&quot;) so that Solr is able to properly handle * the user's intent. * * @param query * @return the sanitized query */ protected String sanitizeQuery(String query) { return query.replace("(", "").replace("%28", "") .replace(")", "").replace("%29", "") .replace(":", "").replace("%3A", "").replace("%3a", "") .replace("&quot;", "\""); // Allow quotes in the query for more finely tuned matches } /** * Returns a field string. 
Given indexField = a and a non-null range, would produce the following String: * a:[minVal TO maxVal] */ protected String getSolrFieldString(String indexField, SearchFacetRange range) { StringBuilder sb = new StringBuilder(); sb.append(indexField); if (range != null) { String minValue = range.getMinValue().toPlainString(); String maxValue = range.getMaxValue() == null ? "*" : range.getMaxValue().toPlainString(); sb.append(":[").append(minValue).append(" TO ").append(maxValue).append("]"); } return sb.toString(); } /** * Returns a fully composed solr field string. Given indexField = a, tag = ex, and a non-null range, * would produce the following String: {!ex=a}a:[minVal TO maxVal] */ protected String getSolrTaggedFieldString(String indexField, String tagField, String tag, SearchFacetRange range) { return getSolrFieldTag(tagField, tag) + getSolrFieldString(indexField, range); } /** * Returns a solr field tag. Given indexField = a, tag = ex, would produce the following String: * {!ex=a} */ protected String getSolrFieldTag(String tagField, String tag) { StringBuilder sb = new StringBuilder(); if (StringUtils.isNotBlank(tag)) { sb.append("{!").append(tag).append("=").append(tagField).append("}"); } return sb.toString(); } /** * @param facets * @param searchCriteria * @return a map of fully qualified solr index field key to the searchFacetDTO object */ protected Map<String, SearchFacetDTO> getNamedFacetMap(List<SearchFacetDTO> facets, final ProductSearchCriteria searchCriteria) { return BLCMapUtils.keyedMap(facets, new TypedClosure<String, SearchFacetDTO>() { public String getKey(SearchFacetDTO facet) { return getSolrFieldKey(facet.getFacet().getField(), searchCriteria); } }); } /** * This method will be used to map a field abbreviation to the appropriate solr index field to use. Typically, * this default implementation that maps to the facet field type will be sufficient. However, there may be * cases where you would want to use a different solr index depending on other currently active facets. In that * case, you would associate that mapping here. For example, for the "price" abbreviation, we would generally * want to use "defaultSku.retailPrice_td". However, if a secondary facet on item condition is selected (such * as "refurbished", we may want to index "price" to "refurbishedSku.retailPrice_td". That mapping occurs here. * * @param fields * @param searchCriteria the searchCriteria in case it is needed to determine the field key * @return the solr field index key to use */ protected String getSolrFieldKey(Field field, ProductSearchCriteria searchCriteria) { return shs.getPropertyNameForFieldFacet(field); } /** * @param searchCriteria * @return a map of abbreviated key to fully qualified solr index field key for all product fields */ protected Map<String, String> getSolrFieldKeyMap(ProductSearchCriteria searchCriteria) { List<Field> fields = fieldDao.readAllProductFields(); Map<String, String> solrFieldKeyMap = new HashMap<String, String>(); for (Field field : fields) { solrFieldKeyMap.put(field.getAbbreviation(), getSolrFieldKey(field, searchCriteria)); } return solrFieldKeyMap; } }
1no label
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_search_service_solr_SolrSearchServiceImpl.java
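Editor's note: the tagged-field helpers near the end of the SolrSearchServiceImpl record above (getSolrFieldTag, getSolrFieldString) are easiest to follow with concrete values. A minimal standalone sketch that mirrors the same string-building logic; the field name "price_td" and the ranges are hypothetical, and no Broadleaf APIs are used:

    public class SolrTagSketch {
        // Mirrors getSolrFieldTag: produces "{!ex=price_td}"
        static String fieldTag(String tagField, String tag) {
            return (tag == null || tag.isEmpty()) ? "" : "{!" + tag + "=" + tagField + "}";
        }

        // Mirrors getSolrFieldString: produces "price_td:[0 TO 100]"; a null max becomes '*'
        static String fieldString(String indexField, String min, String max) {
            return (min == null) ? indexField
                    : indexField + ":[" + min + " TO " + (max == null ? "*" : max) + "]";
        }

        public static void main(String[] args) {
            // Tagged range facet query, as attachFacets would emit it
            System.out.println(fieldTag("price_td", "ex") + fieldString("price_td", "0", "100"));
            // -> {!ex=price_td}price_td:[0 TO 100]
            // Open-ended range: null max is rendered as the Solr wildcard
            System.out.println(fieldTag("price_td", "ex") + fieldString("price_td", "100", null));
            // -> {!ex=price_td}price_td:[100 TO *]
        }
    }

The "{!ex=...}" tag is what lets Solr exclude a facet's own filter when counting that facet, which is why active facets are tagged with the global tag field in attachFacets above.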
407
EventHandler<PortableItemEvent> eventHandler = new EventHandler<PortableItemEvent>() { public void handle(PortableItemEvent portableItemEvent) { E item = includeValue ? (E) getContext().getSerializationService().toObject(portableItemEvent.getItem()) : null; Member member = getContext().getClusterService().getMember(portableItemEvent.getUuid()); ItemEvent<E> itemEvent = new ItemEvent<E>(getName(), portableItemEvent.getEventType(), item, member); if (portableItemEvent.getEventType() == ItemEventType.ADDED) { listener.itemAdded(itemEvent); } else { listener.itemRemoved(itemEvent); } } @Override public void onListenerRegister() { } };
1no label
hazelcast-client_src_main_java_com_hazelcast_client_proxy_AbstractClientCollectionProxy.java
1,104
public class OSQLFunctionMap extends OSQLFunctionMultiValueAbstract<Map<Object, Object>> { public static final String NAME = "map"; public OSQLFunctionMap() { super(NAME, 1, -1); } @SuppressWarnings("unchecked") public Object execute(final OIdentifiable iCurrentRecord, Object iCurrentResult, final Object[] iParameters, OCommandContext iContext) { if (iParameters.length > 2) // IN LINE MODE context = new HashMap<Object, Object>(); if (iParameters.length == 1) { if (iParameters[0] instanceof Map<?, ?>) // INSERT EVERY SINGLE COLLECTION ITEM context.putAll((Map<Object, Object>) iParameters[0]); else throw new IllegalArgumentException("Map function: expected a map or pairs of parameters as key, value"); } else if (iParameters.length % 2 != 0) throw new IllegalArgumentException("Map function: expected a map or pairs of parameters as key, value"); else for (int i = 0; i < iParameters.length; i += 2) { final Object key = iParameters[i]; final Object value = iParameters[i + 1]; if (value != null) { if (iParameters.length <= 2 && context == null) // AGGREGATION MODE (STATEFULL) context = new HashMap<Object, Object>(); context.put(key, value); } } return prepareResult(context); } public String getSyntax() { return "Syntax error: map(<map>|[<key>,<value>]*)"; } public boolean aggregateResults(final Object[] configuredParameters) { return configuredParameters.length <= 2; } @Override public Map<Object, Object> getResult() { final Map<Object, Object> res = context; context = null; return prepareResult(res); } protected Map<Object, Object> prepareResult(Map<Object, Object> res) { if (returnDistributedResult()) { final Map<String, Object> doc = new HashMap<String, Object>(); doc.put("node", getDistributedStorageId()); doc.put("context", res); return Collections.<Object, Object> singletonMap("doc", doc); } else { return res; } } @SuppressWarnings("unchecked") @Override public Object mergeDistributedResult(List<Object> resultsToMerge) { final Map<Long, Map<Object, Object>> chunks = new HashMap<Long, Map<Object, Object>>(); for (Object iParameter : resultsToMerge) { final Map<String, Object> container = (Map<String, Object>) ((Map<Object, Object>) iParameter).get("doc"); chunks.put((Long) container.get("node"), (Map<Object, Object>) container.get("context")); } final Map<Object, Object> result = new HashMap<Object, Object>(); for (Map<Object, Object> chunk : chunks.values()) { result.putAll(chunk); } return result; } }
1no label
core_src_main_java_com_orientechnologies_orient_core_sql_functions_coll_OSQLFunctionMap.java
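Editor's note: the argument convention in OSQLFunctionMap.execute above (either a single map, or an even-length list of alternating keys and values, with null values skipped) can be exercised in isolation. A minimal sketch assuming plain Object[] input instead of the OrientDB command context:

    import java.util.HashMap;
    import java.util.Map;

    public class MapPairsSketch {
        // Same contract as the function: one Map, or key,value,key,value,...
        static Map<Object, Object> toMap(Object... params) {
            Map<Object, Object> result = new HashMap<Object, Object>();
            if (params.length == 1 && params[0] instanceof Map<?, ?>) {
                result.putAll((Map<?, ?>) params[0]);
            } else if (params.length % 2 != 0) {
                throw new IllegalArgumentException("expected a map or pairs of parameters as key, value");
            } else {
                for (int i = 0; i < params.length; i += 2) {
                    if (params[i + 1] != null) {        // null values are skipped, as in the function
                        result.put(params[i], params[i + 1]);
                    }
                }
            }
            return result;
        }

        public static void main(String[] args) {
            System.out.println(toMap("name", "Luke", "surname", "Skywalker"));
            // -> {name=Luke, surname=Skywalker} (iteration order may vary)
        }
    }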
97
private static class TxLockElement { private final Transaction tx; // access to these is guarded by synchronized blocks private int readCount; private int writeCount; private boolean movedOn; TxLockElement( Transaction tx ) { this.tx = tx; } boolean isFree() { return readCount == 0 && writeCount == 0; } }
0true
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_RWLock.java
349
public class JPAPropertiesPersistenceUnitPostProcessor implements org.springframework.orm.jpa.persistenceunit.PersistenceUnitPostProcessor { protected Map<String, String> persistenceUnitProperties = new HashMap<String, String>(); protected Map<String, String> overrideProperties = new HashMap<String, String>(); @Value("${blPU.hibernate.hbm2ddl.auto}") protected String blPUHibernateHbm2ddlAuto; @Value("${blPU.hibernate.dialect}") protected String blPUHibernateDialect; @Value("${blPU.hibernate.show_sql}") protected String blPUHibernateShow_sql; @Value("${blPU.hibernate.cache.use_second_level_cache}") protected String blPUHibernateCacheUse_second_level_cache; @Value("${blPU.hibernate.cache.use_query_cache}") protected String blPUHibernateCacheUse_query_cache; @Value("${blPU.hibernate.hbm2ddl.import_files}") protected String blPUHibernateHbm2ddlImport_files; @Value("${blPU.hibernate.hbm2ddl.import_files_sql_extractor}") protected String blPUHibernateHbm2ddlImport_files_sql_extractor; @Value("${blCMSStorage.hibernate.hbm2ddl.auto}") protected String blCMSStorageHibernateHbm2ddlAuto; @Value("${blCMSStorage.hibernate.dialect}") protected String blCMSStorageHibernateDialect; @Value("${blCMSStorage.hibernate.show_sql}") protected String blCMSStorageHibernateShow_sql; @Value("${blCMSStorage.hibernate.cache.use_second_level_cache}") protected String blCMSStorageHibernateCacheUse_second_level_cache; @Value("${blCMSStorage.hibernate.cache.use_query_cache}") protected String blCMSStorageHibernateCacheUse_query_cache; @Value("${blCMSStorage.hibernate.hbm2ddl.import_files}") protected String blCMSStorageHibernateHbm2ddlImport_files; @Value("${blCMSStorage.hibernate.hbm2ddl.import_files_sql_extractor}") protected String blCMSStorageHibernateHbm2ddlImport_files_sql_extractor; @Value("${blSecurePU.hibernate.hbm2ddl.auto}") protected String blSecurePUHibernateHbm2ddlAuto; @Value("${blSecurePU.hibernate.dialect}") protected String blSecurePUHibernateDialect; @Value("${blSecurePU.hibernate.show_sql}") protected String blSecurePUHibernateShow_sql; @Value("${blSecurePU.hibernate.cache.use_second_level_cache}") protected String blSecurePUHibernateCacheUse_second_level_cache; @Value("${blSecurePU.hibernate.cache.use_query_cache}") protected String blSecurePUHibernateCacheUse_query_cache; @Value("${blSecurePU.hibernate.hbm2ddl.import_files}") protected String blSecurePUHibernateHbm2ddlImport_files; @Value("${blSecurePU.hibernate.hbm2ddl.import_files_sql_extractor}") protected String blSecurePUHibernateHbm2ddlImport_files_sql_extractor; @PostConstruct public void populatePresetProperties() { if (!blPUHibernateHbm2ddlAuto.startsWith("${")) persistenceUnitProperties.put("blPU.hibernate.hbm2ddl.auto", blPUHibernateHbm2ddlAuto); if (!blPUHibernateDialect.startsWith("${")) persistenceUnitProperties.put("blPU.hibernate.dialect", blPUHibernateDialect); if (!blPUHibernateShow_sql.startsWith("${")) persistenceUnitProperties.put("blPU.hibernate.show_sql", blPUHibernateShow_sql); if (!blPUHibernateCacheUse_second_level_cache.startsWith("${")) persistenceUnitProperties.put("blPU.hibernate.cache.use_second_level_cache", blPUHibernateCacheUse_second_level_cache); if (!blPUHibernateCacheUse_query_cache.startsWith("${")) persistenceUnitProperties.put("blPU.hibernate.cache.use_query_cache", blPUHibernateCacheUse_query_cache); if (!blPUHibernateHbm2ddlImport_files.startsWith("${")) persistenceUnitProperties.put("blPU.hibernate.hbm2ddl.import_files", blPUHibernateHbm2ddlImport_files); if 
(!blPUHibernateHbm2ddlImport_files_sql_extractor.startsWith("${")) persistenceUnitProperties.put("blPU.hibernate.hbm2ddl.import_files_sql_extractor", blPUHibernateHbm2ddlImport_files_sql_extractor); if (!blCMSStorageHibernateHbm2ddlAuto.startsWith("${")) persistenceUnitProperties.put("blCMSStorage.hibernate.hbm2ddl.auto", blCMSStorageHibernateHbm2ddlAuto); if (!blCMSStorageHibernateDialect.startsWith("${")) persistenceUnitProperties.put("blCMSStorage.hibernate.dialect", blCMSStorageHibernateDialect); if (!blCMSStorageHibernateShow_sql.startsWith("${")) persistenceUnitProperties.put("blCMSStorage.hibernate.show_sql", blCMSStorageHibernateShow_sql); if (!blCMSStorageHibernateCacheUse_second_level_cache.startsWith("${")) persistenceUnitProperties.put("blCMSStorage.hibernate.cache.use_second_level_cache", blCMSStorageHibernateCacheUse_second_level_cache); if (!blCMSStorageHibernateCacheUse_query_cache.startsWith("${")) persistenceUnitProperties.put("blCMSStorage.hibernate.cache.use_query_cache", blCMSStorageHibernateCacheUse_query_cache); if (!blCMSStorageHibernateHbm2ddlImport_files.startsWith("${")) persistenceUnitProperties.put("blCMSStorage.hibernate.hbm2ddl.import_files", blCMSStorageHibernateHbm2ddlImport_files); if (!blCMSStorageHibernateHbm2ddlImport_files_sql_extractor.startsWith("${")) persistenceUnitProperties.put("blCMSStorage.hibernate.hbm2ddl.import_files_sql_extractor", blCMSStorageHibernateHbm2ddlImport_files_sql_extractor); if (!blSecurePUHibernateHbm2ddlAuto.startsWith("${")) persistenceUnitProperties.put("blSecurePU.hibernate.hbm2ddl.auto", blSecurePUHibernateHbm2ddlAuto); if (!blSecurePUHibernateDialect.startsWith("${")) persistenceUnitProperties.put("blSecurePU.hibernate.dialect", blSecurePUHibernateDialect); if (!blSecurePUHibernateShow_sql.startsWith("${")) persistenceUnitProperties.put("blSecurePU.hibernate.show_sql", blSecurePUHibernateShow_sql); if (!blSecurePUHibernateCacheUse_second_level_cache.startsWith("${")) persistenceUnitProperties.put("blSecurePU.hibernate.cache.use_second_level_cache", blSecurePUHibernateCacheUse_second_level_cache); if (!blSecurePUHibernateCacheUse_query_cache.startsWith("${")) persistenceUnitProperties.put("blSecurePU.hibernate.cache.use_query_cache", blSecurePUHibernateCacheUse_query_cache); if (!blSecurePUHibernateHbm2ddlImport_files.startsWith("${")) persistenceUnitProperties.put("blSecurePU.hibernate.hbm2ddl.import_files", blSecurePUHibernateHbm2ddlImport_files); if (!blSecurePUHibernateHbm2ddlImport_files_sql_extractor.startsWith("${")) persistenceUnitProperties.put("blSecurePU.hibernate.hbm2ddl.import_files_sql_extractor", blSecurePUHibernateHbm2ddlImport_files_sql_extractor); persistenceUnitProperties.putAll(overrideProperties); } @Override public void postProcessPersistenceUnitInfo(MutablePersistenceUnitInfo pui) { if (persistenceUnitProperties != null) { String puName = pui.getPersistenceUnitName() + "."; Set<String> keys = persistenceUnitProperties.keySet(); Properties props = pui.getProperties(); for (String key : keys) { if (key.startsWith(puName)){ String value = persistenceUnitProperties.get(key); String newKey = key.substring(puName.length()); if ("null".equalsIgnoreCase(value)){ props.remove(newKey); } else if (value != null && ! "".equals(value)) { props.put(newKey, value); } } } pui.setProperties(props); } } public void setPersistenceUnitProperties(Map<String, String> properties) { this.overrideProperties = properties; } }
0true
common_src_main_java_org_broadleafcommerce_common_extensibility_jpa_JPAPropertiesPersistenceUnitPostProcessor.java
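Editor's note: the property routing in postProcessPersistenceUnitInfo above is simple but easy to misread: keys prefixed with the persistence unit name are claimed by that unit, the prefix is stripped, and the literal value "null" deletes a pre-existing property. A minimal sketch of that routing over plain maps; the keys are hypothetical and no Spring types are involved:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Properties;

    public class PrefixRoutingSketch {
        public static void main(String[] args) {
            Map<String, String> configured = new HashMap<String, String>();
            configured.put("blPU.hibernate.show_sql", "true");
            configured.put("blPU.hibernate.hbm2ddl.auto", "null");    // "null" means: remove this key
            configured.put("blSecurePU.hibernate.show_sql", "false"); // belongs to another unit

            Properties props = new Properties();
            props.put("hibernate.hbm2ddl.auto", "update");            // pre-existing value to be removed

            String unitPrefix = "blPU.";
            for (Map.Entry<String, String> e : configured.entrySet()) {
                if (!e.getKey().startsWith(unitPrefix)) continue;     // not this unit's key
                String key = e.getKey().substring(unitPrefix.length());
                if ("null".equalsIgnoreCase(e.getValue())) {
                    props.remove(key);
                } else if (e.getValue() != null && !"".equals(e.getValue())) {
                    props.put(key, e.getValue());
                }
            }
            System.out.println(props);  // {hibernate.show_sql=true}
        }
    }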
608
public class OIndexManagerRemote extends OIndexManagerAbstract { private static final String QUERY_DROP = "drop index %s"; public OIndexManagerRemote(final ODatabaseRecord iDatabase) { super(iDatabase); } protected OIndex<?> getRemoteIndexInstance(boolean isMultiValueIndex, String type, String name, Set<String> clustersToIndex, OIndexDefinition indexDefinition, ORID identity, ODocument configuration) { if (isMultiValueIndex) return new OIndexRemoteMultiValue(name, type, identity, indexDefinition, configuration, clustersToIndex); return new OIndexRemoteOneValue(name, type, identity, indexDefinition, configuration, clustersToIndex); } public OIndex<?> createIndex(final String iName, final String iType, final OIndexDefinition iIndexDefinition, final int[] iClusterIdsToIndex, final OProgressListener iProgressListener) { final String createIndexDDL; if (iIndexDefinition != null) { createIndexDDL = iIndexDefinition.toCreateIndexDDL(iName, iType); } else { createIndexDDL = new OSimpleKeyIndexDefinition().toCreateIndexDDL(iName, iType); } acquireExclusiveLock(); try { if (iProgressListener != null) { iProgressListener.onBegin(this, 0); } getDatabase().command(new OCommandSQL(createIndexDDL)).execute(); document.setIdentity(new ORecordId(document.getDatabase().getStorage().getConfiguration().indexMgrRecordId)); if (iProgressListener != null) { iProgressListener.onCompletition(this, true); } reload(); return preProcessBeforeReturn(indexes.get(iName.toLowerCase())); } finally { releaseExclusiveLock(); } } public OIndexManager dropIndex(final String iIndexName) { acquireExclusiveLock(); try { final String text = String.format(QUERY_DROP, iIndexName); getDatabase().command(new OCommandSQL(text)).execute(); // REMOVE THE INDEX LOCALLY indexes.remove(iIndexName.toLowerCase()); reload(); return this; } finally { releaseExclusiveLock(); } } @Override protected void fromStream() { acquireExclusiveLock(); try { clearMetadata(); final Collection<ODocument> idxs = document.field(CONFIG_INDEXES); if (idxs != null) { for (ODocument d : idxs) { try { OIndexInternal<?> newIndex = OIndexes.createIndex(getDatabase(), (String) d.field(OIndexInternal.CONFIG_TYPE), document.<String> field(OIndexInternal.ALGORITHM), document.<String> field(OIndexInternal.VALUE_CONTAINER_ALGORITHM)); OIndexInternal.IndexMetadata newIndexMetadata = newIndex.loadMetadata(d); addIndexInternal(getRemoteIndexInstance(newIndex instanceof OIndexMultiValues, newIndexMetadata.getType(), newIndexMetadata.getName(), newIndexMetadata.getClustersToIndex(), newIndexMetadata.getIndexDefinition(), (ORID) d.field(OIndexAbstract.CONFIG_MAP_RID, OType.LINK), d)); } catch (Exception e) { OLogManager.instance().error(this, "Error on loading of index by configuration: %s", e, d); } } } } finally { releaseExclusiveLock(); } } @Override public ODocument toStream() { throw new UnsupportedOperationException("Remote index cannot be streamed"); } @Override public void recreateIndexes() { throw new UnsupportedOperationException("recreateIndexes()"); } @Override public void waitTillIndexRestore() { } @Override public boolean autoRecreateIndexesAfterCrash() { return false; } }
1no label
core_src_main_java_com_orientechnologies_orient_core_index_OIndexManagerRemote.java
151
static final class Itr<E> implements ListIterator<E>, Enumeration<E> { final StampedLock lock; final ReadMostlyVector<E> list; Object[] items; long seq; int cursor; int fence; int lastRet; Itr(ReadMostlyVector<E> list, int index) { final StampedLock lock = list.lock; long stamp = lock.readLock(); try { this.list = list; this.lock = lock; this.items = list.array; this.fence = list.count; this.cursor = index; this.lastRet = -1; } finally { this.seq = lock.tryConvertToOptimisticRead(stamp); } if (index < 0 || index > fence) throw new ArrayIndexOutOfBoundsException(index); } public boolean hasPrevious() { return cursor > 0; } public int nextIndex() { return cursor; } public int previousIndex() { return cursor - 1; } public boolean hasNext() { return cursor < fence; } public E next() { int i = cursor; Object[] es = items; if (es == null || i < 0 || i >= fence || i >= es.length) throw new NoSuchElementException(); @SuppressWarnings("unchecked") E e = (E)es[i]; lastRet = i; cursor = i + 1; if (!lock.validate(seq)) throw new ConcurrentModificationException(); return e; } public E previous() { int i = cursor - 1; Object[] es = items; if (es == null || i < 0 || i >= fence || i >= es.length) throw new NoSuchElementException(); @SuppressWarnings("unchecked") E e = (E)es[i]; lastRet = i; cursor = i; if (!lock.validate(seq)) throw new ConcurrentModificationException(); return e; } public void remove() { int i = lastRet; if (i < 0) throw new IllegalStateException(); if ((seq = lock.tryConvertToWriteLock(seq)) == 0) throw new ConcurrentModificationException(); try { list.rawRemoveAt(i); fence = list.count; cursor = i; lastRet = -1; } finally { seq = lock.tryConvertToOptimisticRead(seq); } } public void set(E e) { int i = lastRet; Object[] es = items; if (es == null || i < 0 | i >= fence) throw new IllegalStateException(); if ((seq = lock.tryConvertToWriteLock(seq)) == 0) throw new ConcurrentModificationException(); try { es[i] = e; } finally { seq = lock.tryConvertToOptimisticRead(seq); } } public void add(E e) { int i = cursor; if (i < 0) throw new IllegalStateException(); if ((seq = lock.tryConvertToWriteLock(seq)) == 0) throw new ConcurrentModificationException(); try { list.rawAddAt(i, e); items = list.array; fence = list.count; cursor = i + 1; lastRet = -1; } finally { seq = lock.tryConvertToOptimisticRead(seq); } } public boolean hasMoreElements() { return hasNext(); } public E nextElement() { return next(); } }
0true
src_main_java_jsr166e_extra_ReadMostlyVector.java
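Editor's note: the iterator above leans on StampedLock's conversion methods: it takes a real read lock while copying state, downgrades to an optimistic stamp in a finally block, and later validates that stamp before trusting what it read. A minimal sketch of the same pattern using java.util.concurrent.locks.StampedLock, the JDK equivalent of the jsr166e backport used in this record:

    import java.util.concurrent.locks.StampedLock;

    public class OptimisticReadSketch {
        private final StampedLock lock = new StampedLock();
        private int value;

        int read() {
            long stamp = lock.readLock();        // pessimistic read while copying state
            int snapshot;
            try {
                snapshot = value;
            } finally {
                // Downgrade: releases the read lock, returns an optimistic stamp
                stamp = lock.tryConvertToOptimisticRead(stamp);
            }
            if (!lock.validate(stamp)) {         // a writer intervened after the conversion
                throw new java.util.ConcurrentModificationException();
            }
            return snapshot;
        }

        void write(int v) {
            long stamp = lock.writeLock();
            try {
                value = v;
            } finally {
                lock.unlockWrite(stamp);
            }
        }

        public static void main(String[] args) {
            OptimisticReadSketch s = new OptimisticReadSketch();
            s.write(42);
            System.out.println(s.read());  // 42
        }
    }

This is why Itr can hand out elements without holding a lock across calls: each next()/previous() re-validates the stamp and fails fast with ConcurrentModificationException if a writer got in.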
309
static final class Fields { static final XContentBuilderString CLUSTER_NAME = new XContentBuilderString("cluster_name"); static final XContentBuilderString STATUS = new XContentBuilderString("status"); static final XContentBuilderString TIMED_OUT = new XContentBuilderString("timed_out"); static final XContentBuilderString NUMBER_OF_NODES = new XContentBuilderString("number_of_nodes"); static final XContentBuilderString NUMBER_OF_DATA_NODES = new XContentBuilderString("number_of_data_nodes"); static final XContentBuilderString ACTIVE_PRIMARY_SHARDS = new XContentBuilderString("active_primary_shards"); static final XContentBuilderString ACTIVE_SHARDS = new XContentBuilderString("active_shards"); static final XContentBuilderString RELOCATING_SHARDS = new XContentBuilderString("relocating_shards"); static final XContentBuilderString INITIALIZING_SHARDS = new XContentBuilderString("initializing_shards"); static final XContentBuilderString UNASSIGNED_SHARDS = new XContentBuilderString("unassigned_shards"); static final XContentBuilderString VALIDATION_FAILURES = new XContentBuilderString("validation_failures"); static final XContentBuilderString INDICES = new XContentBuilderString("indices"); }
0true
src_main_java_org_elasticsearch_action_admin_cluster_health_ClusterHealthResponse.java
31
static final class ThenCombine<T,U,V> extends Completion { final CompletableFuture<? extends T> src; final CompletableFuture<? extends U> snd; final BiFun<? super T,? super U,? extends V> fn; final CompletableFuture<V> dst; final Executor executor; ThenCombine(CompletableFuture<? extends T> src, CompletableFuture<? extends U> snd, BiFun<? super T,? super U,? extends V> fn, CompletableFuture<V> dst, Executor executor) { this.src = src; this.snd = snd; this.fn = fn; this.dst = dst; this.executor = executor; } public final void run() { final CompletableFuture<? extends T> a; final CompletableFuture<? extends U> b; final BiFun<? super T,? super U,? extends V> fn; final CompletableFuture<V> dst; Object r, s; T t; U u; Throwable ex; if ((dst = this.dst) != null && (fn = this.fn) != null && (a = this.src) != null && (r = a.result) != null && (b = this.snd) != null && (s = b.result) != null && compareAndSet(0, 1)) { if (r instanceof AltResult) { ex = ((AltResult)r).ex; t = null; } else { ex = null; @SuppressWarnings("unchecked") T tr = (T) r; t = tr; } if (ex != null) u = null; else if (s instanceof AltResult) { ex = ((AltResult)s).ex; u = null; } else { @SuppressWarnings("unchecked") U us = (U) s; u = us; } Executor e = executor; V v = null; if (ex == null) { try { if (e != null) e.execute(new AsyncCombine<T,U,V>(t, u, fn, dst)); else v = fn.apply(t, u); } catch (Throwable rex) { ex = rex; } } if (e == null || ex != null) dst.internalComplete(v, ex); } } private static final long serialVersionUID = 5232453952276885070L; }
0true
src_main_java_jsr166e_CompletableFuture.java
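Editor's note: ThenCombine above is the internal completion node behind thenCombine: it fires only once both source futures hold a result, unwraps AltResult-wrapped exceptions, and applies the BiFun. The user-facing behavior, shown with the JDK's java.util.concurrent.CompletableFuture (jsr166e is its pre-Java-8 backport):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutionException;

    public class ThenCombineSketch {
        public static void main(String[] args) throws InterruptedException, ExecutionException {
            CompletableFuture<Integer> a = CompletableFuture.supplyAsync(() -> 2);
            CompletableFuture<Integer> b = CompletableFuture.supplyAsync(() -> 3);

            // Runs the function only after BOTH a and b complete normally;
            // if either completes exceptionally, the combined future does too.
            CompletableFuture<Integer> sum = a.thenCombine(b, (x, y) -> x + y);

            System.out.println(sum.get());  // 5
        }
    }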
2,079
public class PartitionWideEntryBackupOperation extends AbstractMapOperation implements BackupOperation, PartitionAwareOperation { EntryBackupProcessor entryProcessor; public PartitionWideEntryBackupOperation(String name, EntryBackupProcessor entryProcessor) { super(name); this.entryProcessor = entryProcessor; } public PartitionWideEntryBackupOperation() { } public void run() { Map.Entry entry; RecordStore recordStore = mapService.getRecordStore(getPartitionId(), name); Map<Data, Record> records = recordStore.getReadonlyRecordMap(); for (Map.Entry<Data, Record> recordEntry : records.entrySet()) { Data dataKey = recordEntry.getKey(); Record record = recordEntry.getValue(); Object objectKey = mapService.toObject(record.getKey()); Object valueBeforeProcess = mapService.toObject(record.getValue()); if (getPredicate() != null) { QueryEntry queryEntry = new QueryEntry(getNodeEngine().getSerializationService(), dataKey, objectKey, valueBeforeProcess); if (!getPredicate().apply(queryEntry)) { continue; } } entry = new AbstractMap.SimpleEntry(objectKey, valueBeforeProcess); entryProcessor.processBackup(entry); if (entry.getValue() == null){ recordStore.removeBackup(dataKey); } else { recordStore.putBackup(dataKey, entry.getValue()); } } } @Override public boolean returnsResponse() { return true; } protected Predicate getPredicate() { return null; } @Override protected void readInternal(ObjectDataInput in) throws IOException { super.readInternal(in); entryProcessor = in.readObject(); } @Override protected void writeInternal(ObjectDataOutput out) throws IOException { super.writeInternal(out); out.writeObject(entryProcessor); } @Override public Object getResponse() { return true; } @Override public String toString() { return "PartitionWideEntryBackupOperation{}"; } }
1no label
hazelcast_src_main_java_com_hazelcast_map_operation_PartitionWideEntryBackupOperation.java
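Editor's note: the convention driving the loop in PartitionWideEntryBackupOperation above — after the processor runs, a null entry value means delete, anything else means store — is the core of Hazelcast's entry-processor contract. A toy sketch of that contract over a plain map; the Processor interface and values here are hypothetical stand-ins, not Hazelcast types:

    import java.util.AbstractMap;
    import java.util.HashMap;
    import java.util.Map;

    public class EntryProcessorSketch {
        interface Processor { void process(Map.Entry<String, Integer> entry); }

        public static void main(String[] args) {
            Map<String, Integer> store = new HashMap<String, Integer>();
            store.put("a", 1);
            store.put("b", 2);

            // Doubles small values, deletes entries that were >= 2
            Processor p = new Processor() {
                public void process(Map.Entry<String, Integer> entry) {
                    entry.setValue(entry.getValue() >= 2 ? null : entry.getValue() * 2);
                }
            };

            for (Map.Entry<String, Integer> e : new HashMap<String, Integer>(store).entrySet()) {
                Map.Entry<String, Integer> working =
                        new AbstractMap.SimpleEntry<String, Integer>(e.getKey(), e.getValue());
                p.process(working);
                if (working.getValue() == null) {
                    store.remove(working.getKey());                  // null result: like removeBackup
                } else {
                    store.put(working.getKey(), working.getValue()); // else store: like putBackup
                }
            }
            System.out.println(store);  // {a=2}
        }
    }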
22
@Controller("blAdminOfferController") @RequestMapping("/" + AdminOfferController.SECTION_KEY) public class AdminOfferController extends AdminBasicEntityController { protected static final String SECTION_KEY = "offer"; @Override protected String getSectionKey(Map<String, String> pathVars) { //allow external links to work for ToOne items if (super.getSectionKey(pathVars) != null) { return super.getSectionKey(pathVars); } return SECTION_KEY; } @Override @RequestMapping(value = "/{id}", method = RequestMethod.GET) public String viewEntityForm(HttpServletRequest request, HttpServletResponse response, Model model, @PathVariable Map<String, String> pathVars, @PathVariable(value="id") String id) throws Exception { String view = super.viewEntityForm(request, response, model, pathVars, id); modifyModelAttributes(model); return view; } @Override @RequestMapping(value = "/add", method = RequestMethod.GET) public String viewAddEntityForm(HttpServletRequest request, HttpServletResponse response, Model model, @PathVariable Map<String, String> pathVars, @RequestParam(defaultValue = "") String entityType) throws Exception { String view = super.viewAddEntityForm(request, response, model, pathVars, entityType); modifyModelAttributes(model); return view; } @Override @RequestMapping(value = "/add", method = RequestMethod.POST) public String addEntity(HttpServletRequest request, HttpServletResponse response, Model model, @PathVariable Map<String, String> pathVars, @ModelAttribute(value="entityForm") EntityForm entityForm, BindingResult result) throws Exception { String view = super.addEntity(request, response, model, pathVars, entityForm, result); if (result.hasErrors()) { modifyModelAttributes(model); } return view; } /** * Offer field visibility is dependent on other fields in the entity. Mark the form with the appropriate class * so that the Javascript will know to handle this form. * * We also want to tell the UI to make item target criteria required. We cannot manage this at the entity level via an * @AdminPresentation annotation as it is only required when the offer type has a type of {@link OfferType#ORDER_ITEM}. */ protected void modifyModelAttributes(Model model) { model.addAttribute("additionalControllerClasses", "offer-form"); EntityForm form = (EntityForm) model.asMap().get("entityForm"); form.findField("targetItemCriteria").setRequired(true); } }
0true
admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_web_controller_entity_AdminOfferController.java
121
public class ClientReAuthOperation extends AbstractOperation implements UrgentSystemOperation { private String clientUuid; private boolean firstConnection; public ClientReAuthOperation() { } public ClientReAuthOperation(String clientUuid, boolean firstConnection) { this.clientUuid = clientUuid; this.firstConnection = firstConnection; } public void run() throws Exception { ClientEngineImpl service = getService(); Set<ClientEndpoint> endpoints = service.getEndpoints(clientUuid); for (ClientEndpoint endpoint : endpoints) { ClientPrincipal principal = new ClientPrincipal(clientUuid, getCallerUuid()); endpoint.authenticated(principal, firstConnection); } } @Override public boolean returnsResponse() { return false; } @Override public Object getResponse() { return Boolean.TRUE; } @Override public String getServiceName() { return ClientEngineImpl.SERVICE_NAME; } @Override protected void writeInternal(ObjectDataOutput out) throws IOException { super.writeInternal(out); out.writeUTF(clientUuid); out.writeBoolean(firstConnection); } @Override protected void readInternal(ObjectDataInput in) throws IOException { super.readInternal(in); clientUuid = in.readUTF(); firstConnection = in.readBoolean(); } }
1no label
hazelcast_src_main_java_com_hazelcast_client_ClientReAuthOperation.java
558
public final class ClientCancellableDelegatingFuture<V> extends DelegatingFuture<V> { private final ClientContext context; private final String uuid; private final Address target; private final int partitionId; private volatile boolean cancelled; public ClientCancellableDelegatingFuture(ICompletableFuture future, ClientContext context, String uuid, Address target, int partitionId) { super(future, context.getSerializationService()); this.context = context; this.uuid = uuid; this.target = target; this.partitionId = partitionId; } public ClientCancellableDelegatingFuture(ICompletableFuture future, ClientContext context, String uuid, Address target, int partitionId, V defaultValue) { super(future, context.getSerializationService(), defaultValue); this.context = context; this.uuid = uuid; this.target = target; this.partitionId = partitionId; } @Override public boolean cancel(boolean mayInterruptIfRunning) { if (isDone() || cancelled) { return false; } final Future f = invokeCancelRequest(mayInterruptIfRunning); try { final Boolean b = context.getSerializationService().toObject(f.get()); if (b != null && b) { setError(new CancellationException()); cancelled = true; return true; } return false; } catch (Exception e) { throw rethrow(e); } finally { setDone(); } } private Future invokeCancelRequest(boolean mayInterruptIfRunning) { CancellationRequest request; Address address = target; if (target != null) { request = new CancellationRequest(uuid, target, mayInterruptIfRunning); } else { final ClientPartitionService partitionService = context.getPartitionService(); address = partitionService.getPartitionOwner(partitionId); request = new CancellationRequest(uuid, partitionId, mayInterruptIfRunning); } try { return context.getInvocationService().invokeOnTarget(request, address); } catch (Exception e) { throw rethrow(e); } } @Override public boolean isCancelled() { return cancelled; } }
0true
hazelcast-client_src_main_java_com_hazelcast_client_util_ClientCancellableDelegatingFuture.java
136
public class NoSuchLogVersionException extends MissingLogDataException { private long version; public NoSuchLogVersionException( long version ) { super( "No such log version: '" + version + "'. This means a log file that we expected " + "to find was missing. If you are unable to start the database due to this problem, please make " + "sure that the correct logical log files are in the database directory." ); this.version = version; } public long getVersion() { return version; } }
0true
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_NoSuchLogVersionException.java
3,371
final class OperationThread extends Thread { private final int threadId; private final boolean isPartitionSpecific; private final BlockingQueue workQueue; private final Queue priorityWorkQueue; public OperationThread(String name, boolean isPartitionSpecific, int threadId, BlockingQueue workQueue, Queue priorityWorkQueue) { super(node.threadGroup, name); setContextClassLoader(node.getConfigClassLoader()); this.isPartitionSpecific = isPartitionSpecific; this.workQueue = workQueue; this.priorityWorkQueue = priorityWorkQueue; this.threadId = threadId; } @Override public void run() { try { doRun(); } catch (OutOfMemoryError e) { onOutOfMemory(e); } catch (Throwable t) { logger.severe(t); } } private void doRun() { for (; ; ) { Object task; try { task = workQueue.take(); } catch (InterruptedException e) { if (shutdown) { return; } continue; } if (shutdown) { return; } processPriorityMessages(); process(task); } } private void process(Object task) { try { processor.process(task); } catch (Exception e) { logger.severe("Failed to process task: " + task + " on partitionThread:" + getName()); } } private void processPriorityMessages() { for (; ; ) { Object task = priorityWorkQueue.poll(); if (task == null) { return; } process(task); } } public void awaitTermination(int timeout, TimeUnit unit) throws InterruptedException { join(unit.toMillis(timeout)); } }
1no label
hazelcast_src_main_java_com_hazelcast_spi_impl_BasicOperationScheduler.java
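Editor's note: the scheduling discipline in OperationThread above — block on the normal work queue, but drain every pending priority task before processing each normal task — is a small reusable pattern. A self-contained sketch with plain strings in place of Hazelcast operations and a poison pill in place of the shutdown flag:

    import java.util.Queue;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.ConcurrentLinkedQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    public class PriorityDrainSketch {
        public static void main(String[] args) throws InterruptedException {
            BlockingQueue<String> work = new LinkedBlockingQueue<String>();
            Queue<String> priority = new ConcurrentLinkedQueue<String>();

            work.add("task-1");
            priority.add("urgent-1");
            priority.add("urgent-2");
            work.add("task-2");
            work.add("poison");                  // stand-in for the shutdown check

            for (;;) {
                String task = work.take();       // blocks, like the operation thread
                if ("poison".equals(task)) break;
                for (String p; (p = priority.poll()) != null; ) {
                    System.out.println("priority: " + p);  // urgent work jumps the line
                }
                System.out.println("normal:   " + task);
            }
        }
    }

Note the trade-off this implies, visible in doRun() above: priority tasks are only drained when the thread wakes up for a normal task, which is why the real scheduler routes urgent work through a separate wake-up path as well.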
893
private class AsyncAction { private final SearchScrollRequest request; private final ActionListener<SearchResponse> listener; private final ParsedScrollId scrollId; private final DiscoveryNodes nodes; private volatile AtomicArray<ShardSearchFailure> shardFailures; private final AtomicArray<QueryFetchSearchResult> queryFetchResults; private final AtomicInteger successfulOps; private final AtomicInteger counter; private final long startTime = System.currentTimeMillis(); private AsyncAction(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) { this.request = request; this.listener = listener; this.scrollId = scrollId; this.nodes = clusterService.state().nodes(); this.successfulOps = new AtomicInteger(scrollId.getContext().length); this.counter = new AtomicInteger(scrollId.getContext().length); this.queryFetchResults = new AtomicArray<QueryFetchSearchResult>(scrollId.getContext().length); } protected final ShardSearchFailure[] buildShardFailures() { if (shardFailures == null) { return ShardSearchFailure.EMPTY_ARRAY; } List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList(); ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()]; for (int i = 0; i < failures.length; i++) { failures[i] = entries.get(i).value; } return failures; } // we do our best to return the shard failures, but its ok if its not fully concurrently safe // we simply try and return as much as possible protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) { if (shardFailures == null) { shardFailures = new AtomicArray<ShardSearchFailure>(scrollId.getContext().length); } shardFailures.set(shardIndex, failure); } public void start() { if (scrollId.getContext().length == 0) { final InternalSearchResponse internalResponse = new InternalSearchResponse(new InternalSearchHits(InternalSearchHits.EMPTY, Long.parseLong(this.scrollId.getAttributes().get("total_hits")), 0.0f), null, null, null, false); listener.onResponse(new SearchResponse(internalResponse, request.scrollId(), 0, 0, 0l, buildShardFailures())); return; } int localOperations = 0; Tuple<String, Long>[] context = scrollId.getContext(); for (int i = 0; i < context.length; i++) { Tuple<String, Long> target = context[i]; DiscoveryNode node = nodes.get(target.v1()); if (node != null) { if (nodes.localNodeId().equals(node.id())) { localOperations++; } else { executePhase(i, node, target.v2()); } } else { if (logger.isDebugEnabled()) { logger.debug("Node [" + target.v1() + "] not available for scroll request [" + scrollId.getSource() + "]"); } successfulOps.decrementAndGet(); if (counter.decrementAndGet() == 0) { finishHim(); } } } if (localOperations > 0) { if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) { threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() { @Override public void run() { Tuple<String, Long>[] context1 = scrollId.getContext(); for (int i = 0; i < context1.length; i++) { Tuple<String, Long> target = context1[i]; DiscoveryNode node = nodes.get(target.v1()); if (node != null && nodes.localNodeId().equals(node.id())) { executePhase(i, node, target.v2()); } } } }); } else { boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD; Tuple<String, Long>[] context1 = scrollId.getContext(); for (int i = 0; i < context1.length; i++) { final Tuple<String, Long> target = context1[i]; final int shardIndex = i; final DiscoveryNode node = nodes.get(target.v1()); if (node != null && 
nodes.localNodeId().equals(node.id())) { try { if (localAsync) { threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() { @Override public void run() { executePhase(shardIndex, node, target.v2()); } }); } else { executePhase(shardIndex, node, target.v2()); } } catch (Throwable t) { onPhaseFailure(t, target.v2(), shardIndex); } } } } } for (Tuple<String, Long> target : scrollId.getContext()) { DiscoveryNode node = nodes.get(target.v1()); if (node == null) { if (logger.isDebugEnabled()) { logger.debug("Node [" + target.v1() + "] not available for scroll request [" + scrollId.getSource() + "]"); } successfulOps.decrementAndGet(); if (counter.decrementAndGet() == 0) { finishHim(); } } else { } } } void executePhase(final int shardIndex, DiscoveryNode node, final long searchId) { searchService.sendExecuteScan(node, internalScrollSearchRequest(searchId, request), new SearchServiceListener<QueryFetchSearchResult>() { @Override public void onResult(QueryFetchSearchResult result) { queryFetchResults.set(shardIndex, result); if (counter.decrementAndGet() == 0) { finishHim(); } } @Override public void onFailure(Throwable t) { onPhaseFailure(t, searchId, shardIndex); } }); } void onPhaseFailure(Throwable t, long searchId, int shardIndex) { if (logger.isDebugEnabled()) { logger.debug("[{}] Failed to execute query phase", t, searchId); } addShardFailure(shardIndex, new ShardSearchFailure(t)); successfulOps.decrementAndGet(); if (counter.decrementAndGet() == 0) { finishHim(); } } private void finishHim() { try { innerFinishHim(); } catch (Throwable e) { ReduceSearchPhaseException failure = new ReduceSearchPhaseException("fetch", "", e, buildShardFailures()); if (logger.isDebugEnabled()) { logger.debug("failed to reduce search", failure); } listener.onFailure(failure); } } private void innerFinishHim() throws IOException { int numberOfHits = 0; for (AtomicArray.Entry<QueryFetchSearchResult> entry : queryFetchResults.asList()) { numberOfHits += entry.value.queryResult().topDocs().scoreDocs.length; } ScoreDoc[] docs = new ScoreDoc[numberOfHits]; int counter = 0; for (AtomicArray.Entry<QueryFetchSearchResult> entry : queryFetchResults.asList()) { ScoreDoc[] scoreDocs = entry.value.queryResult().topDocs().scoreDocs; for (ScoreDoc scoreDoc : scoreDocs) { scoreDoc.shardIndex = entry.index; docs[counter++] = scoreDoc; } } final InternalSearchResponse internalResponse = searchPhaseController.merge(docs, queryFetchResults, queryFetchResults); ((InternalSearchHits) internalResponse.hits()).totalHits = Long.parseLong(this.scrollId.getAttributes().get("total_hits")); for (AtomicArray.Entry<QueryFetchSearchResult> entry : queryFetchResults.asList()) { if (entry.value.queryResult().topDocs().scoreDocs.length < entry.value.queryResult().size()) { // we found more than we want for this round, remove this from our scrolling queryFetchResults.set(entry.index, null); } } String scrollId = null; if (request.scroll() != null) { // we rebuild the scroll id since we remove shards that we finished scrolling on scrollId = TransportSearchHelper.buildScrollId(this.scrollId.getType(), queryFetchResults, this.scrollId.getAttributes()); // continue moving the total_hits } listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(), System.currentTimeMillis() - startTime, buildShardFailures())); } }
0true
src_main_java_org_elasticsearch_action_search_type_TransportSearchScrollScanAction.java
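Editor's note: the coordination idiom running through AsyncAction above — an AtomicInteger counter initialized to the number of shards, decremented on every response or failure, with whichever thread drives it to zero calling finishHim() — is worth seeing on its own. A minimal sketch with dummy worker threads:

    import java.util.concurrent.atomic.AtomicInteger;

    public class CountdownFinishSketch {
        public static void main(String[] args) {
            final int shards = 3;
            final AtomicInteger counter = new AtomicInteger(shards);
            for (int i = 0; i < shards; i++) {
                final int shard = i;
                new Thread(new Runnable() {
                    public void run() {
                        System.out.println("shard " + shard + " responded");
                        // Exactly one thread performs the final decrement, so the
                        // merge-and-respond step runs once, with all results in place.
                        if (counter.decrementAndGet() == 0) {
                            System.out.println("all shards done: merge and respond");
                        }
                    }
                }).start();
            }
        }
    }

Failures use the same decrement path (see onPhaseFailure above), which is what guarantees the request always terminates even when some shards never answer successfully.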
4,070
public class ParentQuery extends Query { private final Query originalParentQuery; private final String parentType; private final Filter childrenFilter; private Query rewrittenParentQuery; private IndexReader rewriteIndexReader; public ParentQuery(Query parentQuery, String parentType, Filter childrenFilter) { this.originalParentQuery = parentQuery; this.parentType = parentType; this.childrenFilter = childrenFilter; } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null || obj.getClass() != this.getClass()) { return false; } ParentQuery that = (ParentQuery) obj; if (!originalParentQuery.equals(that.originalParentQuery)) { return false; } if (!parentType.equals(that.parentType)) { return false; } if (getBoost() != that.getBoost()) { return false; } return true; } @Override public int hashCode() { int result = originalParentQuery.hashCode(); result = 31 * result + parentType.hashCode(); result = 31 * result + Float.floatToIntBits(getBoost()); return result; } @Override public String toString(String field) { StringBuilder sb = new StringBuilder(); sb.append("ParentQuery[").append(parentType).append("](") .append(originalParentQuery.toString(field)).append(')') .append(ToStringUtils.boost(getBoost())); return sb.toString(); } @Override // See TopChildrenQuery#rewrite public Query rewrite(IndexReader reader) throws IOException { if (rewrittenParentQuery == null) { rewriteIndexReader = reader; rewrittenParentQuery = originalParentQuery.rewrite(reader); } return this; } @Override public void extractTerms(Set<Term> terms) { rewrittenParentQuery.extractTerms(terms); } @Override public Weight createWeight(IndexSearcher searcher) throws IOException { SearchContext searchContext = SearchContext.current(); searchContext.idCache().refresh(searchContext.searcher().getTopReaderContext().leaves()); Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore = searchContext.cacheRecycler().objectFloatMap(-1); ParentUidCollector collector = new ParentUidCollector(uidToScore.v(), searchContext, parentType); final Query parentQuery; if (rewrittenParentQuery == null) { parentQuery = rewrittenParentQuery = searcher.rewrite(originalParentQuery); } else { assert rewriteIndexReader == searcher.getIndexReader(); parentQuery = rewrittenParentQuery; } IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader()); indexSearcher.setSimilarity(searcher.getSimilarity()); indexSearcher.search(parentQuery, collector); if (uidToScore.v().isEmpty()) { uidToScore.release(); return Queries.newMatchNoDocsQuery().createWeight(searcher); } ChildWeight childWeight = new ChildWeight(parentQuery.createWeight(searcher), childrenFilter, searchContext, uidToScore); searchContext.addReleasable(childWeight); return childWeight; } private static class ParentUidCollector extends NoopCollector { private final ObjectFloatOpenHashMap<HashedBytesArray> uidToScore; private final SearchContext searchContext; private final String parentType; private Scorer scorer; private IdReaderTypeCache typeCache; ParentUidCollector(ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, SearchContext searchContext, String parentType) { this.uidToScore = uidToScore; this.searchContext = searchContext; this.parentType = parentType; } @Override public void collect(int doc) throws IOException { if (typeCache == null) { return; } HashedBytesArray parentUid = typeCache.idByDoc(doc); uidToScore.put(parentUid, scorer.score()); } @Override public void setScorer(Scorer scorer) throws IOException { this.scorer = scorer; } @Override public void setNextReader(AtomicReaderContext context) throws IOException { typeCache = searchContext.idCache().reader(context.reader()).type(parentType); } } private class ChildWeight extends Weight implements Releasable { private final Weight parentWeight; private final Filter childrenFilter; private final SearchContext searchContext; private final Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore; private ChildWeight(Weight parentWeight, Filter childrenFilter, SearchContext searchContext, Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore) { this.parentWeight = parentWeight; this.childrenFilter = new ApplyAcceptedDocsFilter(childrenFilter); this.searchContext = searchContext; this.uidToScore = uidToScore; } @Override public Explanation explain(AtomicReaderContext context, int doc) throws IOException { return new Explanation(getBoost(), "not implemented yet..."); } @Override public Query getQuery() { return ParentQuery.this; } @Override public float getValueForNormalization() throws IOException { float sum = parentWeight.getValueForNormalization(); sum *= getBoost() * getBoost(); return sum; } @Override public void normalize(float norm, float topLevelBoost) { } @Override public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException { DocIdSet childrenDocSet = childrenFilter.getDocIdSet(context, acceptDocs); if (DocIdSets.isEmpty(childrenDocSet)) { return null; } IdReaderTypeCache idTypeCache = searchContext.idCache().reader(context.reader()).type(parentType); if (idTypeCache == null) { return null; } return new ChildScorer(this, uidToScore.v(), childrenDocSet.iterator(), idTypeCache); } @Override public boolean release() throws ElasticsearchException { Releasables.release(uidToScore); return true; } } private static class ChildScorer extends Scorer { private final ObjectFloatOpenHashMap<HashedBytesArray> uidToScore; private final DocIdSetIterator childrenIterator; private final IdReaderTypeCache typeCache; private int currentChildDoc = -1; private float currentScore; ChildScorer(Weight weight, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, DocIdSetIterator childrenIterator, IdReaderTypeCache typeCache) { super(weight); this.uidToScore = uidToScore; this.childrenIterator = childrenIterator; this.typeCache = typeCache; } @Override public float score() throws IOException { return currentScore; } @Override public int freq() throws IOException { // We don't have the original child query hit info here... // But the freq of the children could be collected and returned here, but that makes this Scorer more expensive. return 1; } @Override public int docID() { return currentChildDoc; } @Override public int nextDoc() throws IOException { while (true) { currentChildDoc = childrenIterator.nextDoc(); if (currentChildDoc == DocIdSetIterator.NO_MORE_DOCS) { return currentChildDoc; } HashedBytesArray uid = typeCache.parentIdByDoc(currentChildDoc); if (uid == null) { continue; } if (uidToScore.containsKey(uid)) { // Can use lget b/c uidToScore is only used by one thread at a time (via CacheRecycler) currentScore = uidToScore.lget(); return currentChildDoc; } } } @Override public int advance(int target) throws IOException { currentChildDoc = childrenIterator.advance(target); if (currentChildDoc == DocIdSetIterator.NO_MORE_DOCS) { return currentChildDoc; } HashedBytesArray uid = typeCache.parentIdByDoc(currentChildDoc); if (uid == null) { return nextDoc(); } if (uidToScore.containsKey(uid)) { // Can use lget b/c uidToScore is only used by one thread at a time (via CacheRecycler) currentScore = uidToScore.lget(); return currentChildDoc; } else { return nextDoc(); } } @Override public long cost() { return childrenIterator.cost(); } } }
1no label
src_main_java_org_elasticsearch_index_search_child_ParentQuery.java
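The record above implements a two-phase parent-to-child score join: a collector first scores every matching parent document into a uid-to-score map, and a second pass walks the child documents, resolves each child's parent uid, and reuses the parent's score. A minimal, Lucene-free sketch of the join phase (hypothetical names and long ids; the real code keys on HashedBytesArray uids held in recycled hash maps):

import java.util.HashMap;
import java.util.Map;

public class TwoPhaseScoreJoinSketch {
    // Phase 2 of the join: a child inherits its parent's score;
    // children whose parent did not match (no map entry) are filtered out.
    static Map<Long, Float> joinToChildren(Map<Long, Float> parentScores,
                                           Map<Long, Long> childToParent) {
        Map<Long, Float> childScores = new HashMap<>();
        for (Map.Entry<Long, Long> e : childToParent.entrySet()) {
            Float score = parentScores.get(e.getValue());
            if (score != null) {
                childScores.put(e.getKey(), score);
            }
        }
        return childScores;
    }

    public static void main(String[] args) {
        Map<Long, Float> parents = new HashMap<>();
        parents.put(1L, 2.5f);                 // phase 1 result: parent 1 matched with score 2.5
        Map<Long, Long> children = new HashMap<>();
        children.put(10L, 1L);                 // child 10 belongs to parent 1
        children.put(11L, 2L);                 // parent 2 never matched
        System.out.println(joinToChildren(parents, children)); // prints {10=2.5}
    }
}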
515
public abstract class TestThread extends Thread { private volatile Throwable error; protected final Random random = new Random(); public TestThread() { setName(getClass().getName() + "" + ID_GENERATOR.getAndIncrement()); } @Override public final void run() { try { startLatch.await(); doRun(); } catch (Throwable t) { if (stopOnError) { stopTest(); } t.printStackTrace(); this.error = t; } } public final void assertNoError() { assertNull(getName() + " encountered an error", error); } public abstract void doRun() throws Exception; }
0true
hazelcast-client_src_test_java_com_hazelcast_client_stress_StressTestSupport.java
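TestThread above exists because an assertion thrown inside a worker thread never reaches the JUnit runner: the harness catches any Throwable, stores it in a volatile field, and the driving thread re-asserts it after join(). A minimal sketch of that pattern (startLatch, stopOnError, and stopTest in the record belong to the enclosing StressTestSupport class; the names below are hypothetical):

import java.util.concurrent.CountDownLatch;

public class WorkerHarnessSketch {
    static final CountDownLatch startLatch = new CountDownLatch(1);

    static class Worker extends Thread {
        volatile Throwable error;
        @Override public void run() {
            try {
                startLatch.await();            // all workers are released together
                if (2 + 2 != 4) throw new AssertionError("math is broken");
            } catch (Throwable t) {
                error = t;                     // captured for the driving thread
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        Worker w = new Worker();
        w.start();
        startLatch.countDown();                // start signal
        w.join();
        if (w.error != null) throw new AssertionError("worker failed", w.error);
    }
}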
50
@Component("blTimeDTOCustomPersistenceHandler") public class TimeDTOCustomPersistenceHandler extends CustomPersistenceHandlerAdapter { private static final Log LOG = LogFactory.getLog(TimeDTOCustomPersistenceHandler.class); @Override public Boolean canHandleFetch(PersistencePackage persistencePackage) { return canHandleInspect(persistencePackage); } @Override public Boolean canHandleAdd(PersistencePackage persistencePackage) { return canHandleInspect(persistencePackage); } @Override public Boolean canHandleRemove(PersistencePackage persistencePackage) { return canHandleInspect(persistencePackage); } @Override public Boolean canHandleUpdate(PersistencePackage persistencePackage) { return canHandleInspect(persistencePackage); } @Override public Boolean canHandleInspect(PersistencePackage persistencePackage) { String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname(); return TimeDTO.class.getName().equals(ceilingEntityFullyQualifiedClassname); } @Override public DynamicResultSet inspect(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, InspectHelper helper) throws ServiceException { String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname(); try { Map<MergedPropertyType, Map<String, FieldMetadata>> allMergedProperties = new HashMap<MergedPropertyType, Map<String, FieldMetadata>>(); Map<String, FieldMetadata> mergedProperties = dynamicEntityDao.getSimpleMergedProperties(ceilingEntityFullyQualifiedClassname, persistencePackage.getPersistencePerspective()); allMergedProperties.put(MergedPropertyType.PRIMARY, mergedProperties); ClassMetadata mergedMetadata = helper.getMergedClassMetadata(new Class<?>[]{Class.forName(ceilingEntityFullyQualifiedClassname)}, allMergedProperties); DynamicResultSet results = new DynamicResultSet(mergedMetadata); return results; } catch (Exception e) { ServiceException ex = new ServiceException("Unable to retrieve inspection results for " + persistencePackage.getCeilingEntityFullyQualifiedClassname(), e); throw ex; } } }
0true
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_admin_server_handler_TimeDTOCustomPersistenceHandler.java
452
public class OIndexRIDContainer implements Set<OIdentifiable> { public static final String INDEX_FILE_EXTENSION = ".irs"; private final long fileId; private Set<OIdentifiable> underlying; private boolean isEmbedded; private int topThreshold = 80; private int bottomThreshold = 60; public OIndexRIDContainer(String name) { final OStorageLocalAbstract storage = (OStorageLocalAbstract) ODatabaseRecordThreadLocal.INSTANCE.get().getStorage() .getUnderlying(); try { fileId = storage.getDiskCache().openFile(name + INDEX_FILE_EXTENSION); } catch (IOException e) { throw new OSBTreeException("Error during creation of sbtree with name " + name, e); } underlying = new HashSet<OIdentifiable>(); isEmbedded = true; } public OIndexRIDContainer(long fileId, Set<OIdentifiable> underlying) { this.fileId = fileId; this.underlying = underlying; isEmbedded = !(underlying instanceof OIndexRIDContainerSBTree); } public long getFileId() { return fileId; } @Override public int size() { return underlying.size(); } @Override public boolean isEmpty() { return underlying.isEmpty(); } @Override public boolean contains(Object o) { return underlying.contains(o); } @Override public Iterator<OIdentifiable> iterator() { return underlying.iterator(); } @Override public Object[] toArray() { return underlying.toArray(); } @Override public <T> T[] toArray(T[] a) { return underlying.toArray(a); } @Override public boolean add(OIdentifiable oIdentifiable) { final boolean res = underlying.add(oIdentifiable); checkTopThreshold(); return res; } @Override public boolean remove(Object o) { final boolean res = underlying.remove(o); checkBottomThreshold(); return res; } @Override public boolean containsAll(Collection<?> c) { return underlying.containsAll(c); } @Override public boolean addAll(Collection<? extends OIdentifiable> c) { final boolean res = underlying.addAll(c); checkTopThreshold(); return res; } @Override public boolean retainAll(Collection<?> c) { return underlying.retainAll(c); } @Override public boolean removeAll(Collection<?> c) { final boolean res = underlying.removeAll(c); checkBottomThreshold(); return res; } @Override public void clear() { if (isEmbedded) underlying.clear(); else { final OIndexRIDContainerSBTree tree = (OIndexRIDContainerSBTree) underlying; tree.delete(); underlying = new HashSet<OIdentifiable>(); isEmbedded = true; } } public boolean isEmbedded() { return isEmbedded; } public Set<OIdentifiable> getUnderlying() { return underlying; } private void checkTopThreshold() { if (isEmbedded && topThreshold < underlying.size()) convertToSbTree(); } private void checkBottomThreshold() { if (!isEmbedded && bottomThreshold > underlying.size()) convertToEmbedded(); } private void convertToEmbedded() { final OIndexRIDContainerSBTree tree = (OIndexRIDContainerSBTree) underlying; final Set<OIdentifiable> set = new HashSet<OIdentifiable>(tree); tree.delete(); underlying = set; isEmbedded = true; } private void convertToSbTree() { final OIndexRIDContainerSBTree tree = new OIndexRIDContainerSBTree(fileId); tree.addAll(underlying); underlying = tree; isEmbedded = false; } }
0true
core_src_main_java_com_orientechnologies_orient_core_db_record_ridset_sbtree_OIndexRIDContainer.java
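OIndexRIDContainer keeps small RID sets in a plain HashSet and promotes them to a disk-backed SB-tree once they grow past topThreshold (80), demoting again when they shrink below bottomThreshold (60); the gap between the two thresholds is hysteresis that prevents a set hovering around one size from being converted back and forth on every add/remove. A generic in-memory sketch of the same pattern (hypothetical class, not the OrientDB implementation):

import java.util.HashSet;
import java.util.Set;
import java.util.TreeSet;

public class AdaptiveSetSketch<E extends Comparable<E>> {
    private static final int TOP = 80, BOTTOM = 60;   // hysteresis gap avoids thrashing
    private Set<E> underlying = new HashSet<>();
    private boolean embedded = true;

    public boolean add(E e) {
        boolean res = underlying.add(e);
        if (embedded && underlying.size() > TOP) {
            underlying = new TreeSet<>(underlying);   // promote to the "heavy" backend
            embedded = false;
        }
        return res;
    }

    public boolean remove(Object o) {
        boolean res = underlying.remove(o);
        if (!embedded && underlying.size() < BOTTOM) {
            underlying = new HashSet<>(underlying);   // demote back to the cheap backend
            embedded = true;
        }
        return res;
    }
}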
106
{ @Override public Void doWork( State state ) { state.tx.success(); state.tx.finish(); return null; } } );
0true
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestManualAcquireLock.java
1,634
public class OHazelcastDistributedDatabase implements ODistributedDatabase { protected final OHazelcastPlugin manager; protected final OHazelcastDistributedMessageService msgService; protected final String databaseName; protected final static Map<String, IQueue<?>> queues = new HashMap<String, IQueue<?>>(); protected final Lock requestLock; protected volatile ODatabaseDocumentTx database; public static final String NODE_QUEUE_PREFIX = "orientdb.node."; public static final String NODE_QUEUE_REQUEST_POSTFIX = ".request"; public static final String NODE_QUEUE_UNDO_POSTFIX = ".undo"; private static final String NODE_LOCK_PREFIX = "orientdb.reqlock."; protected volatile Class<? extends OAbstractRemoteTask> waitForTaskType; protected AtomicBoolean status = new AtomicBoolean(false); protected Object waitForOnline = new Object(); public OHazelcastDistributedDatabase(final OHazelcastPlugin manager, final OHazelcastDistributedMessageService msgService, final String iDatabaseName) { this.manager = manager; this.msgService = msgService; this.databaseName = iDatabaseName; this.requestLock = manager.getHazelcastInstance().getLock(NODE_LOCK_PREFIX + iDatabaseName); long resyncEvery = manager.getDatabaseConfiguration(databaseName).getResyncEvery(); if (resyncEvery > 0) { resyncEvery *= 1000; // CONVERT SECONDS TO MILLISECONDS // CREATE A TIMER TASK TO RESYNCH Orient.instance().getTimer().schedule(new TimerTask() { @Override public void run() { resynch(); } }, resyncEvery, resyncEvery); } checkLocalNodeInConfiguration(); } @Override public void send2Node(final ODistributedRequest iRequest, final String iTargetNode) { final IQueue<ODistributedRequest> queue = msgService.getQueue(OHazelcastDistributedMessageService.getRequestQueueName( iTargetNode, iRequest.getDatabaseName())); try { queue.offer(iRequest, OGlobalConfiguration.DISTRIBUTED_QUEUE_TIMEOUT.getValueAsLong(), TimeUnit.MILLISECONDS); Orient .instance() .getProfiler() .updateCounter("distributed.replication." + databaseName + ".fixMsgSent", "Number of replication fix messages sent from current node", +1, "distributed.replication.fixMsgSent"); } catch (Throwable e) { throw new ODistributedException("Error on sending distributed request against " + iTargetNode, e); } } @Override public ODistributedResponse send(final ODistributedRequest iRequest) { final String databaseName = iRequest.getDatabaseName(); final String clusterName = iRequest.getClusterName(); final ODistributedConfiguration cfg = manager.getDatabaseConfiguration(databaseName); final ODistributedPartitioningStrategy strategy = manager.getPartitioningStrategy(cfg.getPartitionStrategy(clusterName)); final ODistributedPartition partition = strategy.getPartition(manager, databaseName, clusterName); final Set<String> nodes = partition.getNodes(); if (nodes.isEmpty()) { ODistributedServerLog.error(this, getLocalNodeName(), null, DIRECTION.OUT, "No nodes configured for partition '%s.%s' request: %s", databaseName, clusterName, iRequest); throw new ODistributedException("No nodes configured for partition '" + databaseName + "." + clusterName + "' request: " + iRequest); } final IQueue<ODistributedRequest>[] reqQueues = getRequestQueues(databaseName, nodes); int quorum = calculateQuorum(iRequest, clusterName, cfg, nodes); iRequest.setSenderNodeName(manager.getLocalNodeName()); int availableNodes = 0; for (String node : nodes) { if (manager.isNodeAvailable(node)) availableNodes++; else { if (ODistributedServerLog.isDebugEnabled()) ODistributedServerLog.debug(this, getLocalNodeName(), node, DIRECTION.OUT, "skip listening of response because node '%s' is not online", node); } } final int queueSize = nodes.size(); int expectedSynchronousResponses = quorum > 0 ? Math.min(quorum, availableNodes) : queueSize; final boolean waitLocalNode = nodes.contains(manager.getLocalNodeName()) && cfg.isReadYourWrites(clusterName); // CREATE THE RESPONSE MANAGER final ODistributedResponseManager currentResponseMgr = new ODistributedResponseManager(manager, iRequest, nodes, expectedSynchronousResponses, quorum, waitLocalNode, iRequest.getTask().getSynchronousTimeout(expectedSynchronousResponses), iRequest.getTask().getTotalTimeout(queueSize)); msgService.registerRequest(iRequest.getId(), currentResponseMgr); if (ODistributedServerLog.isDebugEnabled()) ODistributedServerLog.debug(this, getLocalNodeName(), nodes.toString(), DIRECTION.OUT, "request %s", iRequest.getTask()); final long timeout = OGlobalConfiguration.DISTRIBUTED_QUEUE_TIMEOUT.getValueAsLong(); try { requestLock.lock(); try { // LOCK = ASSURE MESSAGES IN THE QUEUE ARE INSERTED SEQUENTIALLY AT CLUSTER LEVEL // BROADCAST THE REQUEST TO ALL THE NODE QUEUES for (IQueue<ODistributedRequest> queue : reqQueues) { queue.offer(iRequest, timeout, TimeUnit.MILLISECONDS); } } finally { requestLock.unlock(); } Orient .instance() .getProfiler() .updateCounter("distributed.replication." + databaseName + ".msgSent", "Number of replication messages sent from current node", +1, "distributed.replication.*.msgSent"); return collectResponses(iRequest, currentResponseMgr); } catch (Throwable e) { throw new ODistributedException("Error on sending distributed request against " + databaseName + (clusterName != null ? ":" + clusterName : ""), e); } } protected void resynch() { final long startTimer = System.currentTimeMillis(); try { send(new OHazelcastDistributedRequest(manager.getLocalNodeName(), databaseName, null, new OResynchTask(), EXECUTION_MODE.RESPONSE)); } catch (ODistributedException e) { // HIDE EXCEPTION IF ANY ERROR ON QUORUM } Orient .instance() .getProfiler() .stopChrono("distributed.replication." + databaseName + ".resynch", "Synchronization time among all the nodes", startTimer, "distributed.replication.*.resynch"); } protected int calculateQuorum(final ODistributedRequest iRequest, final String clusterName, final ODistributedConfiguration cfg, final Set<String> nodes) { final QUORUM_TYPE quorumType = iRequest.getTask().getQuorumType(); final int queueSize = nodes.size(); int quorum = 0; switch (quorumType) { case NONE: quorum = 0; break; case READ: quorum = cfg.getReadQuorum(clusterName); break; case WRITE: quorum = cfg.getWriteQuorum(clusterName); break; case ALL: quorum = queueSize; break; } if (quorum > queueSize) { final boolean failureAvailableNodesLessQuorum = cfg.getFailureAvailableNodesLessQuorum(clusterName); if (failureAvailableNodesLessQuorum) throw new ODistributedException( "Quorum cannot be reached because it is greater than the available nodes and the 'failureAvailableNodesLessQuorum' setting is true"); else { // SET THE QUORUM TO THE AVAILABLE NODE SIZE ODistributedServerLog.debug(this, getLocalNodeName(), nodes.toString(), DIRECTION.OUT, "quorum less than available nodes, downgrade quorum to %d", queueSize); quorum = queueSize; } } return quorum; } protected ODistributedResponse collectResponses(final ODistributedRequest iRequest, final ODistributedResponseManager currentResponseMgr) throws InterruptedException { if (iRequest.getExecutionMode() == EXECUTION_MODE.NO_RESPONSE) return null; final long beginTime = System.currentTimeMillis(); // WAIT FOR THE MINIMUM SYNCHRONOUS RESPONSES (WRITE QUORUM) if (!currentResponseMgr.waitForSynchronousResponses()) { ODistributedServerLog.warn(this, getLocalNodeName(), null, DIRECTION.IN, "timeout (%dms) on waiting for synchronous responses from nodes=%s responsesSoFar=%s request=%s", System.currentTimeMillis() - beginTime, currentResponseMgr.getExpectedNodes(), currentResponseMgr.getRespondingNodes(), iRequest); } if (currentResponseMgr.isWaitForLocalNode() && !currentResponseMgr.isReceivedCurrentNode()) ODistributedServerLog.warn(this, getLocalNodeName(), manager.getLocalNodeName(), DIRECTION.IN, "no response received from local node about request %s", iRequest); // QUORUM REACHED return currentResponseMgr.getResponse(iRequest.getTask().getResultStrategy()); } public OHazelcastDistributedDatabase configureDatabase(final ODatabaseDocumentTx iDatabase, final boolean iRestoreMessages, final boolean iUnqueuePendingMessages) { // CREATE A QUEUE PER DATABASE final String queueName = OHazelcastDistributedMessageService.getRequestQueueName(manager.getLocalNodeName(), databaseName); final IQueue<ODistributedRequest> requestQueue = msgService.getQueue(queueName); if (ODistributedServerLog.isDebugEnabled()) ODistributedServerLog.debug(this, getLocalNodeName(), null, DIRECTION.NONE, "listening for incoming requests on queue: %s", queueName); // UNDO PREVIOUS MESSAGE IF ANY final IMap<Object, Object> undoMap = restoreMessagesBeforeFailure(iRestoreMessages); msgService.checkForPendingMessages(requestQueue, queueName, iUnqueuePendingMessages); // CREATE THREAD LISTENER AGAINST orientdb.node.<node>.<db>.request, ONE PER NODE, THEN DISPATCH THE MESSAGE INTERNALLY USING // THE THREAD ID new Thread(new Runnable() { @Override public void run() { while (!Thread.interrupted()) { String senderNode = null; ODistributedRequest message = null; try { message = readRequest(requestQueue); // SAVE THE MESSAGE IN THE UNDO MAP IN CASE OF FAILURE undoMap.put(databaseName, message); if (message != null) { senderNode = message.getSenderNodeName(); onMessage(message); } // OK: REMOVE THE UNDO BUFFER undoMap.remove(databaseName); } catch (InterruptedException e) { // EXIT CURRENT THREAD Thread.interrupted(); break; } catch (Throwable e) { ODistributedServerLog.error(this, getLocalNodeName(), senderNode, DIRECTION.IN, "error on reading distributed request: %s", e, message != null ? message.getTask() : "-"); } } } }).start(); return this; } public void setOnline() { if (database == null) { // OPEN IT final OServerUserConfiguration replicatorUser = manager.getServerInstance().getUser( ODistributedAbstractPlugin.REPLICATOR_USER); database = (ODatabaseDocumentTx) manager.getServerInstance().openDatabase("document", databaseName, replicatorUser.name, replicatorUser.password); } status.set(true); // WAKE UP ANY WAITERS synchronized (waitForOnline) { waitForOnline.notifyAll(); } } protected void waitForOnline() { synchronized (waitForOnline) { try { waitForOnline.wait(); } catch (InterruptedException e) { Thread.interrupted(); } } } public OHazelcastDistributedDatabase setWaitForTaskType(Class<? extends OAbstractRemoteTask> iTaskType) { waitForTaskType = iTaskType; return this; } protected ODistributedRequest readRequest(final IQueue<ODistributedRequest> requestQueue) throws InterruptedException { ODistributedRequest req = requestQueue.take(); while (waitForTaskType != null) { if (req != null) { if (req.getTask().getClass().equals(waitForTaskType)) { // ARRIVED, RESET IT waitForTaskType = null; return req; } else { // SKIP IT ODistributedServerLog.debug(this, manager.getLocalNodeName(), req.getSenderNodeName(), DIRECTION.OUT, "skip request because the node is not online yet, request=%s sourceNode=%s", req, req.getSenderNodeName()); // READ THE NEXT ONE req = requestQueue.take(); } } } while (!status.get() && req.getTask().isRequireNodeOnline()) { // WAIT UNTIL THE NODE IS ONLINE synchronized (waitForOnline) { waitForOnline.wait(5000); } } return req; } /** * Execute the remote call on the local node and send back the result */ protected void onMessage(final ODistributedRequest iRequest) { OScenarioThreadLocal.INSTANCE.set(RUN_MODE.RUNNING_DISTRIBUTED); try { final OAbstractRemoteTask task = iRequest.getTask(); if (ODistributedServerLog.isDebugEnabled()) ODistributedServerLog.debug(this, manager.getLocalNodeName(), iRequest.getSenderNodeName(), DIRECTION.IN, "request %s", task); // EXECUTE IT LOCALLY final Serializable responsePayload; try { ODatabaseRecordThreadLocal.INSTANCE.set(database); task.setNodeSource(iRequest.getSenderNodeName()); responsePayload = manager.executeOnLocalNode(iRequest, database); } finally { if (database != null) database.getLevel1Cache().clear(); } if (ODistributedServerLog.isDebugEnabled()) ODistributedServerLog.debug(this, manager.getLocalNodeName(), iRequest.getSenderNodeName(), DIRECTION.OUT, "sending back response %s to request %s", responsePayload, task); final OHazelcastDistributedResponse response = new OHazelcastDistributedResponse(iRequest.getId(), manager.getLocalNodeName(), iRequest.getSenderNodeName(), responsePayload); try { // GET THE SENDER'S RESPONSE QUEUE final IQueue<ODistributedResponse> queue = msgService.getQueue(OHazelcastDistributedMessageService .getResponseQueueName(iRequest.getSenderNodeName())); if (!queue.offer(response, OGlobalConfiguration.DISTRIBUTED_QUEUE_TIMEOUT.getValueAsLong(), TimeUnit.MILLISECONDS)) throw new ODistributedException("Timeout on dispatching response to the thread queue " + iRequest.getSenderNodeName()); } catch (Exception e) { throw new ODistributedException("Cannot dispatch response to the thread queue " + iRequest.getSenderNodeName(), e); } } finally { OScenarioThreadLocal.INSTANCE.set(RUN_MODE.DEFAULT); } } @SuppressWarnings("unchecked") protected IQueue<ODistributedRequest>[] getRequestQueues(final String iDatabaseName, final Set<String> nodes) { final IQueue<ODistributedRequest>[] queues = new IQueue[nodes.size()]; int i = 0; for (String node : nodes) queues[i++] = msgService.getQueue(OHazelcastDistributedMessageService.getRequestQueueName(node, iDatabaseName)); return queues; } public void shutdown() { try { database.close(); } catch (Exception e) { } } /** * Composes the undo queue name based on node name. */ protected String getUndoMapName(final String iDatabaseName) { final StringBuilder buffer = new StringBuilder(); buffer.append(NODE_QUEUE_PREFIX); buffer.append(manager.getLocalNodeName()); if (iDatabaseName != null) { buffer.append('.'); buffer.append(iDatabaseName); } buffer.append(NODE_QUEUE_UNDO_POSTFIX); return buffer.toString(); } protected String getLocalNodeName() { return manager.getLocalNodeName(); } protected IMap<Object, Object> restoreMessagesBeforeFailure(final boolean iRestoreMessages) { final IMap<Object, Object> undoMap = manager.getHazelcastInstance().getMap(getUndoMapName(databaseName)); final ODistributedRequest undoRequest = (ODistributedRequest) undoMap.remove(databaseName); if (undoRequest != null && iRestoreMessages) { ODistributedServerLog.warn(this, getLocalNodeName(), null, DIRECTION.NONE, "restore last replication message before the crash for database %s: %s", databaseName, undoRequest); try { onMessage(undoRequest); } catch (Throwable t) { ODistributedServerLog.error(this, getLocalNodeName(), null, DIRECTION.NONE, "error on executing restored message for database %s", t, databaseName); } } return undoMap; } public ODatabaseDocumentTx getDatabase() { return database; } protected void checkLocalNodeInConfiguration() { final String localNode = manager.getLocalNodeName(); // GET DATABASE CFG final ODistributedConfiguration cfg = manager.getDatabaseConfiguration(databaseName); for (String clusterName : cfg.getClusterNames()) { final List<List<String>> partitions = cfg.getPartitions(clusterName); if (partitions != null) for (List<String> partition : partitions) { for (String node : partition) if (node.equals(localNode)) // FOUND: DO NOTHING return; } } // NOT FOUND: ADD THE NODE IN CONFIGURATION. LOOK FOR $newNode TAG boolean dirty = false; for (String clusterName : cfg.getClusterNames()) { final List<List<String>> partitions = cfg.getPartitions(clusterName); if (partitions != null) for (int p = 0; p < partitions.size(); ++p) { List<String> partition = partitions.get(p); for (String node : partition) if (node.equalsIgnoreCase(ODistributedConfiguration.NEW_NODE_TAG)) { ODistributedServerLog.info(this, manager.getLocalNodeName(), null, DIRECTION.NONE, "adding node '%s' in partition: %s.%s.%d", localNode, databaseName, clusterName, p); partition.add(localNode); dirty = true; break; } } } if (dirty) { final ODocument doc = cfg.serialize(); manager.updateCachedDatabaseConfiguration(databaseName, doc); manager.getConfigurationMap().put(OHazelcastPlugin.CONFIG_DATABASE_PREFIX + databaseName, doc); } } protected void removeNodeInConfiguration(final String iNode, final boolean iForce) { // GET DATABASE CFG final ODistributedConfiguration cfg = manager.getDatabaseConfiguration(databaseName); if (!iForce && cfg.isHotAlignment()) // DO NOTHING return; boolean dirty = false; for (String clusterName : cfg.getClusterNames()) { final List<List<String>> partitions = cfg.getPartitions(clusterName); if (partitions != null) { for (int p = 0; p < partitions.size(); ++p) { final List<String> partition = partitions.get(p); for (int n = 0; n < partition.size(); ++n) { final String node = partition.get(n); if (node.equals(iNode)) { // FOUND: REMOVE IT ODistributedServerLog.info(this, manager.getLocalNodeName(), null, DIRECTION.NONE, "removing node '%s' in partition: %s.%s.%d", iNode, databaseName, clusterName, p); partition.remove(n); msgService.removeQueue(OHazelcastDistributedMessageService.getRequestQueueName(iNode, databaseName)); dirty = true; break; } } } } } if (dirty) { final ODocument doc = cfg.serialize(); manager.updateCachedDatabaseConfiguration(databaseName, doc); manager.getConfigurationMap().put(OHazelcastPlugin.CONFIG_DATABASE_PREFIX + databaseName, doc); } } }
1no label
distributed_src_main_java_com_orientechnologies_orient_server_hazelcast_OHazelcastDistributedDatabase.java
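calculateQuorum in the record maps the task's quorum type (NONE/READ/WRITE/ALL) to a node count and, when the requested quorum exceeds the nodes actually present, either fails fast or downgrades the quorum depending on the failureAvailableNodesLessQuorum flag. The decision logic in isolation (a hypothetical stand-alone version, mirroring but not reproducing the OrientDB API):

public class QuorumSketch {
    enum QuorumType { NONE, READ, WRITE, ALL }

    static int quorum(QuorumType type, int readQuorum, int writeQuorum,
                      int totalNodes, boolean failWhenUnreachable) {
        int q;
        switch (type) {
            case READ:  q = readQuorum;  break;
            case WRITE: q = writeQuorum; break;
            case ALL:   q = totalNodes;  break;
            default:    q = 0;           // NONE: fire and forget
        }
        if (q > totalNodes) {
            if (failWhenUnreachable)
                throw new IllegalStateException("quorum " + q + " > available nodes " + totalNodes);
            q = totalNodes;              // downgrade to what is actually reachable
        }
        return q;
    }

    public static void main(String[] args) {
        System.out.println(quorum(QuorumType.WRITE, 1, 3, 2, false)); // prints 2 (downgraded)
    }
}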
483
int indexesSizeTwo = makeDbCall(databaseDocumentTxTwo, new ODbRelatedCall<Integer>() { public Integer call() { return indexManagerTwo.getIndexes().size(); } });
0true
core_src_main_java_com_orientechnologies_orient_core_db_tool_ODatabaseCompare.java
408
public class IdOverrideTableGenerator extends TableGenerator { public static final String ENTITY_NAME_PARAM = "entity_name"; private static final Map<String, Field> FIELD_CACHE = MapUtils.synchronizedMap(new HashMap<String, Field>()); private String entityName; private Field getIdField(Class<?> clazz) { Field response = null; Field[] fields = clazz.getDeclaredFields(); for (Field field : fields) { if (field.getAnnotation(Id.class) != null) { response = field; break; } } if (response == null && clazz.getSuperclass() != null) { response = getIdField(clazz.getSuperclass()); } return response; } @Override public Serializable generate(SessionImplementor session, Object obj) { /* This works around an issue in Hibernate where if the entityPersister is retrieved from the session and used to get the Id, the entity configuration can be recycled, which is messing with the load persister and current persister on some collections. This may be a jrebel thing, but this workaround covers all environments */ String objName = obj.getClass().getName(); if (!FIELD_CACHE.containsKey(objName)) { Field field = getIdField(obj.getClass()); if (field == null) { throw new IllegalArgumentException("Cannot specify IdOverrideTableGenerator for an entity (" + objName + ") that does not have an Id field declared using the @Id annotation."); } field.setAccessible(true); FIELD_CACHE.put(objName, field); } Field field = FIELD_CACHE.get(objName); final Serializable id; try { id = (Serializable) field.get(obj); } catch (IllegalAccessException e) { throw new RuntimeException(e); } if ( id != null ) { return id; } return super.generate(session, obj); } @Override public void configure(Type type, Properties params, Dialect dialect) throws MappingException { if (params.get("table_name") == null) { params.put("table_name", "SEQUENCE_GENERATOR"); } if (params.get("segment_column_name") == null) { params.put("segment_column_name", "ID_NAME"); } if (params.get("value_column_name") == null) { params.put("value_column_name", "ID_VAL"); } if (params.get("increment_size") == null) { params.put("increment_size", 50); } super.configure(type, params, dialect); entityName = (String) params.get(ENTITY_NAME_PARAM); } public String getEntityName() { return entityName; } public void setEntityName(String entityName) { this.entityName = entityName; } }
0true
common_src_main_java_org_broadleafcommerce_common_persistence_IdOverrideTableGenerator.java
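IdOverrideTableGenerator walks up the class hierarchy once per entity class to find the field annotated with @Id, then caches the accessible Field so later id reads skip the reflective scan. The lookup-and-cache step in isolation (a sketch assuming the javax.persistence.Id annotation used by the original; the cache here is a ConcurrentHashMap rather than a synchronized map):

import java.lang.reflect.Field;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import javax.persistence.Id;

public class IdFieldCacheSketch {
    private static final Map<Class<?>, Field> CACHE = new ConcurrentHashMap<>();

    // Walk up the hierarchy until a field annotated with @Id is found;
    // the result is cached per class, so reflection runs only once.
    static Field idField(Class<?> clazz) {
        return CACHE.computeIfAbsent(clazz, c -> {
            for (Class<?> k = c; k != null; k = k.getSuperclass()) {
                for (Field f : k.getDeclaredFields()) {
                    if (f.getAnnotation(Id.class) != null) {
                        f.setAccessible(true);
                        return f;
                    }
                }
            }
            throw new IllegalArgumentException("no @Id field on " + c.getName());
        });
    }
}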
162
public final class OperationFactoryWrapper implements OperationFactory { private OperationFactory opFactory; private String uuid; public OperationFactoryWrapper() { } public OperationFactoryWrapper(OperationFactory opFactory, String uuid) { this.opFactory = opFactory; this.uuid = uuid; } @Override public Operation createOperation() { Operation op = opFactory.createOperation(); op.setCallerUuid(uuid); return op; } @Override public void writeData(ObjectDataOutput out) throws IOException { out.writeUTF(uuid); out.writeObject(opFactory); } @Override public void readData(ObjectDataInput in) throws IOException { uuid = in.readUTF(); opFactory = in.readObject(); } }
0true
hazelcast_src_main_java_com_hazelcast_client_OperationFactoryWrapper.java
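OperationFactoryWrapper is a serializable decorator: it delegates creation to the wrapped factory and stamps the caller's UUID on every operation produced, so the receiving cluster member knows which client issued the call. The decorator shape in isolation (a generic sketch, not the Hazelcast API):

import java.util.function.Supplier;

public class TaggingFactorySketch<T> {
    interface Taggable { void setCallerUuid(String uuid); }

    private final Supplier<? extends T> delegate;
    private final String uuid;

    TaggingFactorySketch(Supplier<? extends T> delegate, String uuid) {
        this.delegate = delegate;
        this.uuid = uuid;
    }

    public T create() {
        T t = delegate.get();                   // real creation stays in the delegate
        if (t instanceof Taggable) {
            ((Taggable) t).setCallerUuid(uuid); // stamp the origin before dispatch
        }
        return t;
    }
}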
1,272
public class FaunusSerializer { // This is volatile to support double-checked locking private static volatile Serializer standardSerializer; private final FaunusSchemaManager types; private final boolean trackState; private final boolean trackPaths; private final Configuration configuration; private static final Logger log = LoggerFactory.getLogger(FaunusSerializer.class); public FaunusSerializer(final Configuration configuration) { Preconditions.checkNotNull(configuration); this.types = FaunusSchemaManager.getTypeManager(configuration); this.configuration = configuration; this.trackState = configuration.get(TitanHadoopConfiguration.PIPELINE_TRACK_STATE); this.trackPaths = configuration.get(TitanHadoopConfiguration.PIPELINE_TRACK_PATHS); } public void writeVertex(final FaunusVertex vertex, final DataOutput out) throws IOException { //Need to write the id up front for the comparator WritableUtils.writeVLong(out, vertex.id); Schema schema = new Schema(); vertex.updateSchema(schema); schema.writeSchema(out); writePathElement(vertex, schema, out); writeEdges(vertex, vertex.inAdjacency, out, Direction.IN, schema); FaunusVertexLabel vl = (FaunusVertexLabel)vertex.getVertexLabel(); out.writeUTF(vl.isDefault()?"":vl.getName()); } public void readVertex(final FaunusVertex vertex, final DataInput in) throws IOException { WritableUtils.readVLong(in); Schema schema = readSchema(in); readPathElement(vertex, schema, in); vertex.inAdjacency = readEdges(vertex, in, Direction.IN, schema); String labelName = in.readUTF(); vertex.setVertexLabel(StringUtils.isBlank(labelName)?FaunusVertexLabel.DEFAULT_VERTEXLABEL: types.getVertexLabel(labelName)); } public void writeEdge(final StandardFaunusEdge edge, final DataOutput out) throws IOException { writePathElement(edge, out); WritableUtils.writeVLong(out, edge.inVertex); WritableUtils.writeVLong(out, edge.outVertex); writeFaunusType(edge.getType(), out); } public void readEdge(final StandardFaunusEdge edge, final DataInput in) throws IOException { readPathElement(edge, in); edge.inVertex = WritableUtils.readVLong(in); edge.outVertex = WritableUtils.readVLong(in); edge.setLabel((FaunusEdgeLabel)readFaunusType(in)); } public void writeProperty(final StandardFaunusProperty property, final DataOutput out) throws IOException { writePathElement(property, out); WritableUtils.writeVLong(out, property.vertexid); serializeObject(out,property.getValue()); writeFaunusType(property.getType(), out); } public void readProperty(final StandardFaunusProperty property, final DataInput in) throws IOException { readPathElement(property, in); property.vertexid = WritableUtils.readVLong(in); property.value = deserializeObject(in); property.setKey((FaunusPropertyKey)readFaunusType(in)); } private void readPathElement(final FaunusPathElement element, final DataInput in) throws IOException { readPathElement(element, null, in); } private void writePathElement(final FaunusPathElement element, final DataOutput out) throws IOException { writePathElement(element, null, out); } private void readPathElement(final FaunusPathElement element, Schema schema, final DataInput in) throws IOException { readElement(element, schema, in); if (trackPaths) { List<List<MicroElement>> paths = readElementPaths(in); element.tracker = new FaunusPathElement.Tracker(paths, (element instanceof FaunusVertex) ? new FaunusVertex.MicroVertex(element.id) : new StandardFaunusEdge.MicroEdge(element.id)); log.trace("readPathElement element={} paths={}", element, paths); } else { element.pathCounter = WritableUtils.readVLong(in); element.tracker = FaunusPathElement.DEFAULT_TRACK; } } private void writePathElement(final FaunusPathElement element, final Schema schema, final DataOutput out) throws IOException { writeElement(element, schema, out); if (trackPaths) writeElementPaths(element.tracker.paths, out); else WritableUtils.writeVLong(out, element.pathCounter); } private void readElement(final FaunusElement element, Schema schema, final DataInput in) throws IOException { element.id = WritableUtils.readVLong(in); if (trackState) element.setLifeCycle(in.readByte()); element.outAdjacency = readEdges(element,in,Direction.OUT,schema); } private void writeElement(final FaunusElement element, final Schema schema, final DataOutput out) throws IOException { Preconditions.checkArgument(trackState || !element.isRemoved()); WritableUtils.writeVLong(out, element.id); if (trackState) out.writeByte(element.getLifeCycle()); writeEdges(element, element.outAdjacency, out, Direction.OUT, schema); } private void serializeObject(final DataOutput out, Object value) throws IOException { final com.thinkaurelius.titan.graphdb.database.serialize.DataOutput o = getStandardSerializer().getDataOutput(40); o.writeClassAndObject(value); final StaticBuffer buffer = o.getStaticBuffer(); WritableUtils.writeVInt(out, buffer.length()); out.write(buffer.as(StaticBuffer.ARRAY_FACTORY)); } private Object deserializeObject(final DataInput in) throws IOException { int byteLength = WritableUtils.readVInt(in); byte[] bytes = new byte[byteLength]; in.readFully(bytes); final ReadBuffer buffer = new ReadArrayBuffer(bytes); return getStandardSerializer().readClassAndObject(buffer); } /** * Return the StandardSerializer singleton shared between all instances of FaunusSerializer. * * If it has not yet been initialized, then the singleton is created using the maximum * Kryo buffer size configured in the calling FaunusSerializer. * * @return */ private Serializer getStandardSerializer() { if (null == standardSerializer) { // N.B. standardSerializer is volatile synchronized (FaunusSerializer.class) { if (null == standardSerializer) { int maxOutputBufSize = configuration.get(KRYO_MAX_OUTPUT_SIZE); standardSerializer = new StandardSerializer(true, maxOutputBufSize); } } } // TODO consider checking whether actual output buffer size matches config, create new StandardSerializer if mismatched? Might not be worth it return standardSerializer; } private <T extends FaunusRelation> Iterable<T> filterDeletedRelations(Iterable<T> elements) { if (trackState) return elements; else return Iterables.filter(elements, new Predicate<T>() { @Override public boolean apply(@Nullable T element) { return !element.isRemoved(); } }); } private SetMultimap<FaunusRelationType, FaunusRelation> readEdges(final FaunusElement element, final DataInput in, final Direction direction, final Schema schema) throws IOException { final SetMultimap<FaunusRelationType, FaunusRelation> adjacency = HashMultimap.create(); int numTypes = WritableUtils.readVInt(in); for (int i = 0; i < numTypes; i++) { FaunusRelationType type; if (schema == null) type = readFaunusType(in); else type = schema.getType(WritableUtils.readVLong(in)); final int size = WritableUtils.readVInt(in); for (int j = 0; j < size; j++) { FaunusRelation relation; if (element instanceof FaunusVertex) { if (type.isEdgeLabel()) { final StandardFaunusEdge edge = new StandardFaunusEdge(configuration); edge.setLabel((FaunusEdgeLabel)type); readPathElement(edge, schema, in); long otherId = WritableUtils.readVLong(in); switch (direction) { case IN: edge.inVertex = element.getLongId(); edge.outVertex = otherId; break; case OUT: edge.outVertex = element.getLongId(); edge.inVertex = otherId; break; default: throw ExceptionFactory.bothIsNotSupported(); } relation = edge; log.trace("readEdges edge={} paths={}", edge, edge.tracker.paths); } else { assert type.isPropertyKey() && direction==Direction.OUT; final StandardFaunusProperty property = new StandardFaunusProperty(configuration); property.setKey((FaunusPropertyKey) type); readPathElement(property, schema, in); property.value = deserializeObject(in); relation = property; } } else { byte lifecycle = trackState?in.readByte():-1; if (type.isEdgeLabel()) { relation = new SimpleFaunusEdge((FaunusEdgeLabel)type,new FaunusVertex(configuration,WritableUtils.readVLong(in))); } else { assert type.isPropertyKey() && direction==Direction.OUT; relation = new SimpleFaunusProperty((FaunusPropertyKey)type,deserializeObject(in)); } if (trackState) relation.setLifeCycle(lifecycle); } adjacency.put(type, relation); } } if (adjacency.isEmpty()) return FaunusElement.EMPTY_ADJACENCY; return adjacency; } private void writeEdges(final FaunusElement element, final SetMultimap<FaunusRelationType, FaunusRelation> edges, final DataOutput out, final Direction direction, final Schema schema) throws IOException { Map<FaunusRelationType, Integer> counts = Maps.newHashMap(); int typeCount = 0; for (FaunusRelationType type : edges.keySet()) { int count = IterablesUtil.size(filterDeletedRelations(edges.get(type))); counts.put(type, count); if (count > 0) typeCount++; } WritableUtils.writeVInt(out, typeCount); for (FaunusRelationType type : edges.keySet()) { if (counts.get(type) == 0) continue; if (schema == null) writeFaunusType(type, out); else WritableUtils.writeVLong(out, schema.getTypeId(type)); WritableUtils.writeVInt(out, counts.get(type)); Iterable<FaunusRelation> subset = filterDeletedRelations(edges.get(type)); for (final FaunusRelation rel : subset) { if (element instanceof FaunusVertex) { assert rel instanceof StandardFaunusRelation; writePathElement((StandardFaunusRelation)rel,schema,out); } else { assert rel instanceof SimpleFaunusRelation; if (trackState) out.writeByte(((SimpleFaunusRelation)rel).getLifeCycle()); } if (rel.isEdge()) { WritableUtils.writeVLong(out, ((FaunusEdge)rel).getVertexId(direction.opposite())); } else { serializeObject(out,((FaunusProperty)rel).getValue()); } } } } private void writeElementPaths(final List<List<MicroElement>> paths, final DataOutput out) throws IOException { if (null == paths) { WritableUtils.writeVInt(out, 0); } else { WritableUtils.writeVInt(out, paths.size()); for (final List<MicroElement> path : paths) { WritableUtils.writeVInt(out, path.size()); for (MicroElement element : path) { if (element instanceof FaunusVertex.MicroVertex) out.writeChar('v'); else out.writeChar('e'); WritableUtils.writeVLong(out, element.getId()); } } } } private List<List<MicroElement>> readElementPaths(final DataInput in) throws IOException { int pathsSize = WritableUtils.readVInt(in); if (pathsSize == 0) return new ArrayList<List<MicroElement>>(); else { final List<List<MicroElement>> paths = new ArrayList<List<MicroElement>>(pathsSize); for (int i = 0; i < pathsSize; i++) { int pathSize = WritableUtils.readVInt(in); final List<MicroElement> path = new ArrayList<MicroElement>(pathSize); for (int j = 0; j < pathSize; j++) { char type = in.readChar(); if (type == 'v') path.add(new FaunusVertex.MicroVertex(WritableUtils.readVLong(in))); else path.add(new StandardFaunusEdge.MicroEdge(WritableUtils.readVLong(in))); } paths.add(path); } return paths; } } private void writeFaunusType(final FaunusRelationType type, final DataOutput out) throws IOException { out.writeByte(type.isPropertyKey()?0:1); out.writeUTF(type.getName()); } private FaunusRelationType readFaunusType(final DataInput in) throws IOException { int type = in.readByte(); String typeName = in.readUTF(); assert type==0 || type==1; if (type==0) return types.getOrCreatePropertyKey(typeName); else return types.getOrCreateEdgeLabel(typeName); } class Schema { private final BiMap<FaunusRelationType, Long> localTypes; private long count = 1; private Schema() { this(8); } private Schema(int size) { this.localTypes = HashBiMap.create(size); } void add(String type) { this.add(types.getRelationType(type)); } void add(FaunusRelationType type) { if (!localTypes.containsKey(type)) localTypes.put(type, count++); } void addAll(Iterable<FaunusRelationType> types) { for (FaunusRelationType type : types) add(type); } long getTypeId(FaunusRelationType type) { Long id = localTypes.get(type); Preconditions.checkArgument(id != null, "Type is not part of the schema: " + type); return id; } FaunusRelationType getType(long id) { FaunusRelationType type = localTypes.inverse().get(id); Preconditions.checkArgument(type != null, "Type is not part of the schema: " + id); return type; } private void add(FaunusRelationType type, long index) { Preconditions.checkArgument(!localTypes.containsValue(index)); localTypes.put(type, index); count = index + 1; } private void writeSchema(final DataOutput out) throws IOException { WritableUtils.writeVInt(out, localTypes.size()); for (Map.Entry<FaunusRelationType, Long> entry : localTypes.entrySet()) { writeFaunusType(entry.getKey(), out); WritableUtils.writeVLong(out, entry.getValue()); } } } private Schema readSchema(final DataInput in) throws IOException { int size = WritableUtils.readVInt(in); Schema schema = new Schema(size); for (int i = 0; i < size; i++) { schema.add(readFaunusType(in), WritableUtils.readVLong(in)); } return schema; } static { WritableComparator.define(FaunusPathElement.class, new Comparator()); } public static class Comparator extends WritableComparator { public Comparator() { super(FaunusPathElement.class); } @Override public int compare(final byte[] element1, final int start1, final int length1, final byte[] element2, final int start2, final int length2) { try { return Long.valueOf(readVLong(element1, start1)).compareTo(readVLong(element2, start2)); } catch (IOException e) { return -1; } } @Override public int compare(final WritableComparable a, final WritableComparable b) { if (a instanceof FaunusElement && b instanceof FaunusElement) return ((Long) (((FaunusElement) a).getLongId())).compareTo(((FaunusElement) b).getLongId()); else return super.compare(a, b); } } //################################################ // Serialization for vanilla Blueprints //################################################ /** * All graph element identifiers must be of the long data type. Implementations of this * interface makes it possible to control the conversion of the identifier in the * VertexToHadoopBinary utility class. * * @author Stephen Mallette (http://stephen.genoprime.com) */ // public static interface ElementIdHandler { // long convertIdentifier(final Element element); // } // // public void writeVertex(final Vertex vertex, final ElementIdHandler elementIdHandler, final DataOutput out) throws IOException { // Schema schema = new Schema(); // //Convert properties and update schema // Multimap<HadoopType, FaunusProperty> properties = getProperties(vertex); // for (HadoopType type : properties.keySet()) schema.add(type); // for (Edge edge : vertex.getEdges(Direction.BOTH)) { // schema.add(edge.getLabel()); // for (String key : edge.getPropertyKeys()) schema.add(key); // } // // WritableUtils.writeVLong(out, elementIdHandler.convertIdentifier(vertex)); // schema.writeSchema(out); // WritableUtils.writeVLong(out, elementIdHandler.convertIdentifier(vertex)); // if (trackState) out.writeByte(ElementState.NEW.getByteValue()); // writeProperties(properties, schema, out); // out.writeBoolean(false); // WritableUtils.writeVLong(out, 0); // writeEdges(vertex, Direction.IN, elementIdHandler, schema, out); // writeEdges(vertex, Direction.OUT, elementIdHandler, schema, out); // // } // // private Multimap<HadoopType, FaunusProperty> getProperties(Element element) { // Multimap<HadoopType, FaunusProperty> properties = HashMultimap.create(); // for (String key : element.getPropertyKeys()) { // HadoopType type = types.get(key); // properties.put(type, new FaunusProperty(type, element.getProperty(key))); // } // return properties; // } // // private void writeEdges(final Vertex vertex, final Direction direction, final ElementIdHandler elementIdHandler, // final Schema schema, final DataOutput out) throws IOException { // final Multiset<String> labelCount = HashMultiset.create(); // for (final Edge edge : vertex.getEdges(direction)) { // labelCount.add(edge.getLabel()); // } // WritableUtils.writeVInt(out, labelCount.elementSet().size()); // for (String label : labelCount.elementSet()) { // HadoopType type = types.get(label); // WritableUtils.writeVLong(out, schema.getTypeId(type)); // WritableUtils.writeVInt(out, labelCount.count(label)); // for (final Edge edge : vertex.getEdges(direction, label)) { // WritableUtils.writeVLong(out, elementIdHandler.convertIdentifier(edge)); // if (trackState) out.writeByte(ElementState.NEW.getByteValue()); // writeProperties(getProperties(edge), schema, out); // out.writeBoolean(false); // WritableUtils.writeVLong(out, 0); // WritableUtils.writeVLong(out, elementIdHandler.convertIdentifier(edge.getVertex(direction.opposite()))); // } // } // } }
1no label
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_FaunusSerializer.java
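getStandardSerializer in the record is the classic double-checked locking idiom: the field is volatile so a fully constructed object is published safely under the Java memory model, the first null check skips the lock on the hot path, and the second check inside the synchronized block prevents two threads from constructing the singleton twice. The idiom in isolation (a generic sketch, not the Titan code):

public class LazySingletonSketch {
    // volatile is essential: without it another thread could observe
    // a reference to a partially constructed instance.
    private static volatile LazySingletonSketch instance;

    private LazySingletonSketch() { }

    public static LazySingletonSketch get() {
        if (instance == null) {                         // first check, lock-free
            synchronized (LazySingletonSketch.class) {
                if (instance == null) {                 // second check, under the lock
                    instance = new LazySingletonSketch();
                }
            }
        }
        return instance;
    }
}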
1,665
@Service("blPersistencePackageFactory") public class PersistencePackageFactoryImpl implements PersistencePackageFactory { @Override public PersistencePackage create(PersistencePackageRequest request) { PersistencePerspective persistencePerspective = new PersistencePerspective(); persistencePerspective.setAdditionalForeignKeys(request.getAdditionalForeignKeys()); persistencePerspective.setAdditionalNonPersistentProperties(new String[] {}); if (request.getForeignKey() != null) { persistencePerspective.addPersistencePerspectiveItem(PersistencePerspectiveItemType.FOREIGNKEY, request.getForeignKey()); } switch (request.getType()) { case STANDARD: persistencePerspective.setOperationTypes(getDefaultOperationTypes()); break; case ADORNED: if (request.getAdornedList() == null) { throw new IllegalArgumentException("ADORNED type requires the adornedList to be set"); } persistencePerspective.setOperationTypes(getOperationTypes(OperationType.ADORNEDTARGETLIST)); persistencePerspective.addPersistencePerspectiveItem(PersistencePerspectiveItemType.ADORNEDTARGETLIST, request.getAdornedList()); break; case MAP: if (request.getMapStructure() == null) { throw new IllegalArgumentException("MAP type requires the mapStructure to be set"); } persistencePerspective.setOperationTypes(getOperationTypes(OperationType.MAP)); persistencePerspective.addPersistencePerspectiveItem(PersistencePerspectiveItemType.MAPSTRUCTURE, request.getMapStructure()); break; } if (request.getOperationTypesOverride() != null) { persistencePerspective.setOperationTypes(request.getOperationTypesOverride()); } PersistencePackage pp = new PersistencePackage(); pp.setCeilingEntityFullyQualifiedClassname(request.getCeilingEntityClassname()); pp.setFetchTypeFullyQualifiedClassname(null); pp.setPersistencePerspective(persistencePerspective); pp.setCustomCriteria(request.getCustomCriteria()); pp.setCsrfToken(null); pp.setValidateUnsubmittedProperties(request.isValidateUnsubmittedProperties()); if (request.getEntity() != null) { pp.setEntity(request.getEntity()); } for (Map.Entry<String, PersistencePackageRequest> subRequest : request.getSubRequests().entrySet()) { pp.getSubPackages().put(subRequest.getKey(), create(subRequest.getValue())); } return pp; } protected OperationTypes getDefaultOperationTypes() { OperationTypes operationTypes = new OperationTypes(); operationTypes.setFetchType(OperationType.BASIC); operationTypes.setRemoveType(OperationType.BASIC); operationTypes.setAddType(OperationType.BASIC); operationTypes.setUpdateType(OperationType.BASIC); operationTypes.setInspectType(OperationType.BASIC); return operationTypes; } protected OperationTypes getOperationTypes(OperationType nonInspectOperationType) { OperationTypes operationTypes = new OperationTypes(); operationTypes.setFetchType(nonInspectOperationType); operationTypes.setRemoveType(nonInspectOperationType); operationTypes.setAddType(nonInspectOperationType); operationTypes.setUpdateType(nonInspectOperationType); operationTypes.setInspectType(OperationType.BASIC); return operationTypes; } }
1no label
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_factory_PersistencePackageFactoryImpl.java
452
public static class AdminPresentationAdornedTargetCollection { public static final String FRIENDLYNAME = "friendlyName"; public static final String SECURITYLEVEL = "securityLevel"; public static final String EXCLUDED = "excluded"; public static final String SHOWIFPROPERTY = "showIfProperty"; public static final String READONLY = "readOnly"; public static final String USESERVERSIDEINSPECTIONCACHE = "useServerSideInspectionCache"; public static final String PARENTOBJECTPROPERTY = "parentObjectProperty"; public static final String PARENTOBJECTIDPROPERTY = "parentObjectIdProperty"; public static final String TARGETOBJECTPROPERTY = "targetObjectProperty"; public static final String MAINTAINEDADORNEDTARGETFIELDS = "maintainedAdornedTargetFields"; public static final String GRIDVISIBLEFIELDS = "gridVisibleFields"; public static final String TARGETOBJECTIDPROPERTY = "targetObjectIdProperty"; public static final String JOINENTITYCLASS = "joinEntityClass"; public static final String SORTPROPERTY = "sortProperty"; public static final String SORTASCENDING = "sortAscending"; public static final String IGNOREADORNEDPROPERTIES = "ignoreAdornedProperties"; public static final String ORDER = "order"; public static final String TAB = "tab"; public static final String TABORDER = "tabOrder"; public static final String CUSTOMCRITERIA = "customCriteria"; public static final String CURRENCYCODEFIELD = "currencyCodeField"; public static final String OPERATIONTYPES = "operationTypes"; }
0true
common_src_main_java_org_broadleafcommerce_common_presentation_override_PropertyType.java
144
public final class HazelcastClient implements HazelcastInstance { static { OutOfMemoryErrorDispatcher.setClientHandler(new ClientOutOfMemoryHandler()); } private static final AtomicInteger CLIENT_ID = new AtomicInteger(); private static final ConcurrentMap<Integer, HazelcastClientProxy> CLIENTS = new ConcurrentHashMap<Integer, HazelcastClientProxy>(5); private final int id = CLIENT_ID.getAndIncrement(); private final String instanceName; private final ClientConfig config; private final ThreadGroup threadGroup; private final LifecycleServiceImpl lifecycleService; private final SerializationServiceImpl serializationService; private final ClientConnectionManager connectionManager; private final ClientClusterServiceImpl clusterService; private final ClientPartitionServiceImpl partitionService; private final ClientInvocationServiceImpl invocationService; private final ClientExecutionServiceImpl executionService; private final ClientTransactionManager transactionManager; private final ProxyManager proxyManager; private final ConcurrentMap<String, Object> userContext; private final LoadBalancer loadBalancer; public final ClientProperties clientProperties; private HazelcastClient(ClientConfig config) { this.config = config; final GroupConfig groupConfig = config.getGroupConfig(); instanceName = "hz.client_" + id + (groupConfig != null ? "_" + groupConfig.getName() : ""); threadGroup = new ThreadGroup(instanceName); lifecycleService = new LifecycleServiceImpl(this); clientProperties = new ClientProperties(config); SerializationService ss; try { String partitioningStrategyClassName = System.getProperty(GroupProperties.PROP_PARTITIONING_STRATEGY_CLASS); final PartitioningStrategy partitioningStrategy; if (partitioningStrategyClassName != null && partitioningStrategyClassName.length() > 0) { partitioningStrategy = ClassLoaderUtil.newInstance(config.getClassLoader(), partitioningStrategyClassName); } else { partitioningStrategy = new DefaultPartitioningStrategy(); } ss = new SerializationServiceBuilder() .setManagedContext(new HazelcastClientManagedContext(this, config.getManagedContext())) .setClassLoader(config.getClassLoader()) .setConfig(config.getSerializationConfig()) .setPartitioningStrategy(partitioningStrategy) .build(); } catch (Exception e) { throw ExceptionUtil.rethrow(e); } serializationService = (SerializationServiceImpl) ss; proxyManager = new ProxyManager(this); executionService = new ClientExecutionServiceImpl(instanceName, threadGroup, Thread.currentThread().getContextClassLoader(), config.getExecutorPoolSize()); transactionManager = new ClientTransactionManager(this); LoadBalancer lb = config.getLoadBalancer(); if (lb == null) { lb = new RoundRobinLB(); } loadBalancer = lb; connectionManager = new ClientConnectionManagerImpl(this, loadBalancer); clusterService = new ClientClusterServiceImpl(this); invocationService = new ClientInvocationServiceImpl(this); userContext = new ConcurrentHashMap<String, Object>(); proxyManager.init(config); partitionService = new ClientPartitionServiceImpl(this); } public static HazelcastInstance newHazelcastClient() { return newHazelcastClient(new XmlClientConfigBuilder().build()); } public static HazelcastInstance newHazelcastClient(ClientConfig config) { if (config == null) { config = new XmlClientConfigBuilder().build(); } final ClassLoader tccl = Thread.currentThread().getContextClassLoader(); HazelcastClientProxy proxy; try { Thread.currentThread().setContextClassLoader(HazelcastClient.class.getClassLoader()); final HazelcastClient client = new HazelcastClient(config); client.start(); OutOfMemoryErrorDispatcher.register(client); proxy = new HazelcastClientProxy(client); CLIENTS.put(client.id, proxy); } finally { Thread.currentThread().setContextClassLoader(tccl); } return proxy; } public static Collection<HazelcastInstance> getAllHazelcastClients() { return Collections.<HazelcastInstance>unmodifiableCollection(CLIENTS.values()); } public static void shutdownAll() { for (HazelcastClientProxy proxy : CLIENTS.values()) { try { proxy.client.getLifecycleService().shutdown(); } catch (Exception ignored) { } proxy.client = null; } CLIENTS.clear(); } private void start() { lifecycleService.setStarted(); connectionManager.start(); try { clusterService.start(); } catch (IllegalStateException e) { //there was an authentication failure (todo: perhaps use an AuthenticationException // ??) lifecycleService.shutdown(); throw e; } loadBalancer.init(getCluster(), config); partitionService.start(); } public Config getConfig() { throw new UnsupportedOperationException("Client cannot access cluster config!"); } @Override public String getName() { return instanceName; } @Override public <E> IQueue<E> getQueue(String name) { return getDistributedObject(QueueService.SERVICE_NAME, name); } @Override public <E> ITopic<E> getTopic(String name) { return getDistributedObject(TopicService.SERVICE_NAME, name); } @Override public <E> ISet<E> getSet(String name) { return getDistributedObject(SetService.SERVICE_NAME, name); } @Override public <E> IList<E> getList(String name) { return getDistributedObject(ListService.SERVICE_NAME, name); } @Override public <K, V> IMap<K, V> getMap(String name) { return getDistributedObject(MapService.SERVICE_NAME, name); } @Override public <K, V> MultiMap<K, V> getMultiMap(String name) { return getDistributedObject(MultiMapService.SERVICE_NAME, name); } @Override public <K, V> ReplicatedMap<K, V> getReplicatedMap(String name) { return getDistributedObject(ReplicatedMapService.SERVICE_NAME, name); } @Override public JobTracker getJobTracker(String name) { return getDistributedObject(MapReduceService.SERVICE_NAME, name); } @Override public ILock getLock(String key) { return getDistributedObject(LockServiceImpl.SERVICE_NAME, key); } @Override @Deprecated public ILock getLock(Object key) { //this method will be deleted in the near future. String name = LockProxy.convertToStringKey(key, serializationService); return getDistributedObject(LockServiceImpl.SERVICE_NAME, name); } @Override public Cluster getCluster() { return new ClientClusterProxy(clusterService); } @Override public Client getLocalEndpoint() { return clusterService.getLocalClient(); } @Override public IExecutorService getExecutorService(String name) { return getDistributedObject(DistributedExecutorService.SERVICE_NAME, name); } public <T> T executeTransaction(TransactionalTask<T> task) throws TransactionException { return transactionManager.executeTransaction(task); } @Override public <T> T executeTransaction(TransactionOptions options, TransactionalTask<T> task) throws TransactionException { return transactionManager.executeTransaction(options, task); } @Override public TransactionContext newTransactionContext() { return transactionManager.newTransactionContext(); } @Override public TransactionContext newTransactionContext(TransactionOptions options) { return transactionManager.newTransactionContext(options); } @Override public IdGenerator getIdGenerator(String name) { return getDistributedObject(IdGeneratorService.SERVICE_NAME, name); } @Override public IAtomicLong getAtomicLong(String name) { return getDistributedObject(AtomicLongService.SERVICE_NAME, name); } @Override public <E> IAtomicReference<E> getAtomicReference(String name) { return getDistributedObject(AtomicReferenceService.SERVICE_NAME, name); } @Override public ICountDownLatch getCountDownLatch(String name) { return getDistributedObject(CountDownLatchService.SERVICE_NAME, name); } @Override public ISemaphore getSemaphore(String name) { return getDistributedObject(SemaphoreService.SERVICE_NAME, name); } @Override public Collection<DistributedObject> getDistributedObjects() { try { GetDistributedObjectsRequest request = new GetDistributedObjectsRequest(); final Future<SerializableCollection> future = invocationService.invokeOnRandomTarget(request); final SerializableCollection serializableCollection = serializationService.toObject(future.get()); for (Data data : serializableCollection) { final DistributedObjectInfo o = serializationService.toObject(data); getDistributedObject(o.getServiceName(), o.getName()); } return (Collection<DistributedObject>) proxyManager.getDistributedObjects(); } catch (Exception e) { throw ExceptionUtil.rethrow(e); } } @Override public String addDistributedObjectListener(DistributedObjectListener distributedObjectListener) { return proxyManager.addDistributedObjectListener(distributedObjectListener); } @Override public boolean removeDistributedObjectListener(String registrationId) { return proxyManager.removeDistributedObjectListener(registrationId); } @Override public PartitionService getPartitionService() { return new PartitionServiceProxy(partitionService); } @Override public ClientService getClientService() { throw new UnsupportedOperationException(); } @Override public LoggingService getLoggingService() { throw new UnsupportedOperationException(); } @Override public LifecycleService getLifecycleService() { return lifecycleService; } @Override @Deprecated public <T extends DistributedObject> T getDistributedObject(String serviceName, Object id) { if (id instanceof String) { return (T) proxyManager.getProxy(serviceName, (String) id); } throw new IllegalArgumentException("'id' must be of type String!"); } @Override public <T extends DistributedObject> T getDistributedObject(String serviceName, String name) { return (T) proxyManager.getProxy(serviceName, name); } @Override public ConcurrentMap<String, Object> getUserContext() { return userContext; } public ClientConfig getClientConfig() { return config; } public SerializationService getSerializationService() { return serializationService; } public ClientConnectionManager getConnectionManager() { return connectionManager; } public ClientClusterService getClientClusterService() { return clusterService; } public ClientExecutionService getClientExecutionService() { return executionService; } public ClientPartitionService getClientPartitionService() { return partitionService; } public ClientInvocationService getInvocationService() { return invocationService; } public ThreadGroup getThreadGroup() { return threadGroup; } @Override public void shutdown() { getLifecycleService().shutdown(); } void doShutdown() { CLIENTS.remove(id); executionService.shutdown(); partitionService.stop(); clusterService.stop(); transactionManager.shutdown(); connectionManager.shutdown(); proxyManager.destroy(); serializationService.destroy(); } }
1no label
hazelcast-client_src_main_java_com_hazelcast_client_HazelcastClient.java
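A minimal usage sketch for the client bootstrap above; the map name, key, and value are illustrative, not part of the source row.

// Boot a client with XML-derived defaults, grab a distributed map proxy, then shut down.
HazelcastInstance client = HazelcastClient.newHazelcastClient();
try {
    IMap<String, String> map = client.getMap("orders"); // resolved via ProxyManager
    map.put("42", "pending");
} finally {
    client.shutdown(); // delegates to LifecycleService.shutdown(), which triggers doShutdown()
}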
55
new Visitor() {
    @Override
    public void visit(Tree.StaticMemberOrTypeExpression that) {
        Tree.TypeArguments tal = that.getTypeArguments();
        Integer startIndex = tal == null ? null : that.getTypeArguments().getStartIndex();
        if (startIndex != null && startIndex2 != null && startIndex.intValue() == startIndex2.intValue()) {
            ProducedReference pr = that.getTarget();
            Declaration d = that.getDeclaration();
            if (d instanceof Functional && pr != null) {
                try {
                    String pref = document.get(that.getStartIndex(), that.getStopIndex() - that.getStartIndex() + 1);
                    addInvocationProposals(offset, pref, cpc, result, d, pr, scope, null, typeArgText, false);
                } catch (BadLocationException e) {
                    e.printStackTrace();
                }
            }
        }
        super.visit(that);
    }

    public void visit(Tree.SimpleType that) {
        Tree.TypeArgumentList tal = that.getTypeArgumentList();
        Integer startIndex = tal == null ? null : tal.getStartIndex();
        if (startIndex != null && startIndex2 != null && startIndex.intValue() == startIndex2.intValue()) {
            Declaration d = that.getDeclarationModel();
            if (d instanceof Functional) {
                try {
                    String pref = document.get(that.getStartIndex(), that.getStopIndex() - that.getStartIndex() + 1);
                    addInvocationProposals(offset, pref, cpc, result, d, that.getTypeModel(), scope, null, typeArgText, false);
                } catch (BadLocationException e) {
                    e.printStackTrace();
                }
            }
        }
        super.visit(that);
    }
}.visit(cpc.getRootNode());
0true
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_TypeArgumentListCompletions.java
1,146
public class ChildSearchShortCircuitBenchmark { public static void main(String[] args) throws Exception { Settings settings = settingsBuilder() .put("index.refresh_interval", "-1") .put("gateway.type", "local") .put(SETTING_NUMBER_OF_SHARDS, 1) .put(SETTING_NUMBER_OF_REPLICAS, 0) .build(); String clusterName = ChildSearchShortCircuitBenchmark.class.getSimpleName(); Node node1 = nodeBuilder().clusterName(clusterName) .settings(settingsBuilder().put(settings).put("name", "node1")) .node(); Client client = node1.client(); long PARENT_COUNT = SizeValue.parseSizeValue("10M").singles(); int BATCH = 100; int QUERY_WARMUP = 5; int QUERY_COUNT = 25; String indexName = "test"; client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet(); try { client.admin().indices().create(createIndexRequest(indexName)).actionGet(); client.admin().indices().preparePutMapping(indexName).setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("child") .startObject("_parent").field("type", "parent").endObject() .endObject().endObject()).execute().actionGet(); Thread.sleep(5000); StopWatch stopWatch = new StopWatch().start(); System.out.println("--> Indexing [" + PARENT_COUNT + "] parent document and some child documents"); long ITERS = PARENT_COUNT / BATCH; int i = 1; int counter = 0; for (; i <= ITERS; i++) { BulkRequestBuilder request = client.prepareBulk(); for (int j = 0; j < BATCH; j++) { counter++; request.add(Requests.indexRequest(indexName).type("parent").id(Integer.toString(counter)) .source(parentSource(counter))); } BulkResponse response = request.execute().actionGet(); if (response.hasFailures()) { System.err.println("--> failures..."); } if (((i * BATCH) % 10000) == 0) { System.out.println("--> Indexed " + (i * BATCH) + "parent docs; took " + stopWatch.stop().lastTaskTime()); stopWatch.start(); } } int id = 0; for (i = 1; i <= PARENT_COUNT; i *= 2) { int parentId = 1; for (int j = 0; j < i; j++) { client.prepareIndex(indexName, "child", Integer.toString(id++)) .setParent(Integer.toString(parentId++)) .setSource(childSource(i)) .execute().actionGet(); } } System.out.println("--> Indexing took " + stopWatch.totalTime()); } catch (Exception e) { System.out.println("--> Index already exists, ignoring indexing phase, waiting for green"); ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet(); if (clusterHealthResponse.isTimedOut()) { System.err.println("--> Timed out waiting for cluster health"); } } client.admin().indices().prepareRefresh().execute().actionGet(); System.out.println("--> Number of docs in index: " + client.prepareCount(indexName).setQuery(matchAllQuery()).execute().actionGet().getCount()); System.out.println("--> Running just child query"); // run just the child query, warm up first for (int i = 1; i <= 10000; i *= 2) { SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(matchQuery("child.field2", i)).execute().actionGet(); System.out.println("--> Warmup took["+ i +"]: " + searchResponse.getTook()); if (searchResponse.getHits().totalHits() != i) { System.err.println("--> mismatch on hits"); } } NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats() .setJvm(true).execute().actionGet(); System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted()); System.out.println("--> Used heap size: " + 
statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed()); // run parent child constant query for (int j = 1; j < QUERY_WARMUP; j *= 2) { SearchResponse searchResponse = client.prepareSearch(indexName) .setQuery( hasChildQuery("child", matchQuery("field2", j)) ) .execute().actionGet(); if (searchResponse.getFailedShards() > 0) { System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures())); } if (searchResponse.getHits().totalHits() != j) { System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + PARENT_COUNT + "]"); } } long totalQueryTime = 0; for (int i = 1; i < PARENT_COUNT; i *= 2) { for (int j = 0; j < QUERY_COUNT; j++) { SearchResponse searchResponse = client.prepareSearch(indexName) .setQuery(filteredQuery(matchAllQuery(), hasChildFilter("child", matchQuery("field2", i)))) .execute().actionGet(); if (searchResponse.getHits().totalHits() != i) { System.err.println("--> mismatch on hits"); } totalQueryTime += searchResponse.getTookInMillis(); } System.out.println("--> has_child filter " + i +" Avg: " + (totalQueryTime / QUERY_COUNT) + "ms"); } statsResponse = client.admin().cluster().prepareNodesStats() .setJvm(true).setIndices(true).execute().actionGet(); System.out.println("--> Id cache size: " + statsResponse.getNodes()[0].getIndices().getIdCache().getMemorySize()); System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed()); totalQueryTime = 0; for (int i = 1; i < PARENT_COUNT; i *= 2) { for (int j = 0; j < QUERY_COUNT; j++) { SearchResponse searchResponse = client.prepareSearch(indexName) .setQuery(hasChildQuery("child", matchQuery("field2", i)).scoreType("max")) .execute().actionGet(); if (searchResponse.getHits().totalHits() != i) { System.err.println("--> mismatch on hits"); } totalQueryTime += searchResponse.getTookInMillis(); } System.out.println("--> has_child query " + i +" Avg: " + (totalQueryTime / QUERY_COUNT) + "ms"); } System.gc(); statsResponse = client.admin().cluster().prepareNodesStats() .setJvm(true).setIndices(true).execute().actionGet(); System.out.println("--> Id cache size: " + statsResponse.getNodes()[0].getIndices().getIdCache().getMemorySize()); System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed()); client.close(); node1.close(); } private static XContentBuilder parentSource(int val) throws IOException { return jsonBuilder().startObject().field("field1", Integer.toString(val)).endObject(); } private static XContentBuilder childSource(int val) throws IOException { return jsonBuilder().startObject().field("field2", Integer.toString(val)).endObject(); } }
0true
src_test_java_org_elasticsearch_benchmark_search_child_ChildSearchShortCircuitBenchmark.java
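For reference, the two lookup shapes the benchmark times, extracted into a standalone sketch; index/type names and the static imports follow the benchmark code above.

// Constant-score path: has_child as a filter wrapped in a filtered query.
SearchResponse filtered = client.prepareSearch("test")
        .setQuery(filteredQuery(matchAllQuery(), hasChildFilter("child", matchQuery("field2", 1))))
        .execute().actionGet();

// Scoring path: has_child as a query with a score type.
SearchResponse scored = client.prepareSearch("test")
        .setQuery(hasChildQuery("child", matchQuery("field2", 1)).scoreType("max"))
        .execute().actionGet();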
646
public class DeleteIndexTemplateResponse extends AcknowledgedResponse {

    DeleteIndexTemplateResponse() {
    }

    DeleteIndexTemplateResponse(boolean acknowledged) {
        super(acknowledged);
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        readAcknowledged(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        writeAcknowledged(out);
    }
}
0true
src_main_java_org_elasticsearch_action_admin_indices_template_delete_DeleteIndexTemplateResponse.java
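A hedged round-trip sketch of the acknowledged-flag serialization above. Both constructors are package-private, so this assumes test code living in the same package; the BytesStreamOutput/BytesStreamInput wiring follows the usual elasticsearch stream idiom of this era.

DeleteIndexTemplateResponse original = new DeleteIndexTemplateResponse(true);
BytesStreamOutput out = new BytesStreamOutput();
original.writeTo(out);                              // writes the acknowledged flag

DeleteIndexTemplateResponse copy = new DeleteIndexTemplateResponse();
copy.readFrom(new BytesStreamInput(out.bytes()));   // reads it back
assert copy.isAcknowledged() == original.isAcknowledged();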
2,908
public static class Employee implements Serializable { long id; String name; String city; int age; boolean active; double salary; Timestamp date; Date createDate; java.sql.Date sqlDate; State state; BigDecimal bigDecimal = new BigDecimal("1.23E3"); public Employee(long id, String name, int age, boolean live, double salary, State state) { this(id,name,age,live,salary); this.state = state; } public Employee(long id, String name, int age, boolean live, double salary) { this(id, name, null, age, live, salary); } public Employee(String name, int age, boolean live, double salary) { this(-1, name, age, live, salary); } public Employee(String name, String city, int age, boolean live, double salary) { this(-1, name, city, age, live, salary); } public Employee(long id, String name, String city, int age, boolean live, double salary) { this.id = id; this.name = name; this.city = city; this.age = age; this.active = live; this.salary = salary; this.createDate = new Date(); this.date = new Timestamp(createDate.getTime()); this.sqlDate = new java.sql.Date(createDate.getTime()); } public Employee() { } public long getId() { return id; } public void setId(long id) { this.id = id; } public Date getCreateDate() { return createDate; } public void setCreateDate(Date createDate) { this.createDate = createDate; } public java.sql.Date getSqlDate() { return sqlDate; } public void setSqlDate(java.sql.Date sqlDate) { this.sqlDate = sqlDate; } public void setName(String name) { this.name = name; } public void setCity(String city) { this.city = city; } public void setAge(int age) { this.age = age; } public void setActive(boolean active) { this.active = active; } public void setSalary(double salary) { this.salary = salary; } public void setDate(Timestamp date) { this.date = date; } public void setBigDecimal(BigDecimal bigDecimal) { this.bigDecimal = bigDecimal; } public BigDecimal getBigDecimal() { return bigDecimal; } public Timestamp getDate() { return date; } public String getName() { return name; } public String getCity() { return city; } public int getAge() { return age; } public double getSalary() { return salary; } public boolean isActive() { return active; } public State getState() { return state; } public void setState(State state) { this.state = state; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Employee employee = (Employee) o; if (active != employee.active) return false; if (age != employee.age) return false; if (Double.compare(employee.salary, salary) != 0) return false; if (name != null ? !name.equals(employee.name) : employee.name != null) return false; return true; } @Override public int hashCode() { int result; long temp; result = name != null ? name.hashCode() : 0; result = 31 * result + age; result = 31 * result + (active ? 1 : 0); temp = salary != +0.0d ? Double.doubleToLongBits(salary) : 0L; result = 31 * result + (int) (temp ^ (temp >>> 32)); return result; } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("Employee"); sb.append("{name='").append(name).append('\''); sb.append(", city=").append(city); sb.append(", age=").append(age); sb.append(", active=").append(active); sb.append(", salary=").append(salary); sb.append('}'); return sb.toString(); } }
1no label
hazelcast_src_test_java_com_hazelcast_query_SampleObjects.java
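A small sketch of the equality contract above: only name, age, active, and salary participate in equals/hashCode, so two employees differing only by id and city still compare equal. Values are illustrative.

Employee a = new Employee(1, "joe", "london", 30, true, 100.0);
Employee b = new Employee(2, "joe", "paris", 30, true, 100.0);
assert a.equals(b);                    // id and city are ignored by equals
assert a.hashCode() == b.hashCode();   // hashCode is consistent with equals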
433
map.addChangeListener(new OMultiValueChangeListener<Object, String>() {
    public void onAfterRecordChanged(final OMultiValueChangeEvent<Object, String> event) {
        Assert.assertEquals(event.getChangeType(), OMultiValueChangeEvent.OChangeType.UPDATE);
        Assert.assertEquals(event.getOldValue(), "value1");
        Assert.assertEquals(event.getKey(), "key1");
        Assert.assertEquals(event.getValue(), "value2");
        changed.value = true;
    }
});
0true
core_src_test_java_com_orientechnologies_orient_core_db_record_TrackedMapTest.java
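For context, a sketch of the mutation that would fire the UPDATE notification asserted above, assuming map is an OTrackedMap with the listener registered:

map.put("key1", "value1");   // initial insert
map.put("key1", "value2");   // overwrite -> onAfterRecordChanged with OChangeType.UPDATE,
                             // oldValue "value1", new value "value2"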
3,861
public class HasParentFilterParser implements FilterParser { public static final String NAME = "has_parent"; @Inject public HasParentFilterParser() { } @Override public String[] names() { return new String[]{NAME, Strings.toCamelCase(NAME)}; } @Override public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException { XContentParser parser = parseContext.parser(); Query query = null; boolean queryFound = false; String parentType = null; boolean cache = false; CacheKeyFilter.Key cacheKey = null; String filterName = null; String currentFieldName = null; XContentParser.Token token; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { if ("query".equals(currentFieldName)) { // TODO handle `query` element before `type` element... String[] origTypes = QueryParseContext.setTypesWithPrevious(parentType == null ? null : new String[]{parentType}); try { query = parseContext.parseInnerQuery(); queryFound = true; } finally { QueryParseContext.setTypes(origTypes); } } else if ("filter".equals(currentFieldName)) { // TODO handle `filter` element before `type` element... String[] origTypes = QueryParseContext.setTypesWithPrevious(parentType == null ? null : new String[]{parentType}); try { Filter innerFilter = parseContext.parseInnerFilter(); query = new XConstantScoreQuery(innerFilter); queryFound = true; } finally { QueryParseContext.setTypes(origTypes); } } else { throw new QueryParsingException(parseContext.index(), "[has_parent] filter does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("type".equals(currentFieldName) || "parent_type".equals(currentFieldName) || "parentType".equals(currentFieldName)) { parentType = parser.text(); } else if ("_scope".equals(currentFieldName)) { throw new QueryParsingException(parseContext.index(), "the [_scope] support in [has_parent] filter has been removed, use a filter as a facet_filter in the relevant global facet"); } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); } else if ("_cache".equals(currentFieldName)) { cache = parser.booleanValue(); } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { cacheKey = new CacheKeyFilter.Key(parser.text()); } else { throw new QueryParsingException(parseContext.index(), "[has_parent] filter does not support [" + currentFieldName + "]"); } } } if (!queryFound) { throw new QueryParsingException(parseContext.index(), "[has_parent] filter requires 'query' field"); } if (query == null) { return null; } if (parentType == null) { throw new QueryParsingException(parseContext.index(), "[has_parent] filter requires 'parent_type' field"); } DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType); if (parentDocMapper == null) { throw new QueryParsingException(parseContext.index(), "[has_parent] filter configured 'parent_type' [" + parentType + "] is not a valid type"); } // wrap the query with type query query = new XFilteredQuery(query, parseContext.cacheFilter(parentDocMapper.typeFilter(), null)); Set<String> parentTypes = new HashSet<String>(5); parentTypes.add(parentType); for (DocumentMapper documentMapper : parseContext.mapperService()) { ParentFieldMapper parentFieldMapper = documentMapper.parentFieldMapper(); if (parentFieldMapper.active()) { DocumentMapper parentTypeDocumentMapper = 
parseContext.mapperService().documentMapper(parentFieldMapper.type()); if (parentTypeDocumentMapper == null) { // Only add this, if this parentFieldMapper (also a parent) isn't a child of another parent. parentTypes.add(parentFieldMapper.type()); } } } Filter parentFilter; if (parentTypes.size() == 1) { DocumentMapper documentMapper = parseContext.mapperService().documentMapper(parentTypes.iterator().next()); parentFilter = parseContext.cacheFilter(documentMapper.typeFilter(), null); } else { XBooleanFilter parentsFilter = new XBooleanFilter(); for (String parentTypeStr : parentTypes) { DocumentMapper documentMapper = parseContext.mapperService().documentMapper(parentTypeStr); Filter filter = parseContext.cacheFilter(documentMapper.typeFilter(), null); parentsFilter.add(filter, BooleanClause.Occur.SHOULD); } parentFilter = parentsFilter; } Filter childrenFilter = parseContext.cacheFilter(new NotFilter(parentFilter), null); Query parentConstantScoreQuery = new ParentConstantScoreQuery(query, parentType, childrenFilter); if (filterName != null) { parseContext.addNamedFilter(filterName, new CustomQueryWrappingFilter(parentConstantScoreQuery)); } boolean deleteByQuery = "delete_by_query".equals(SearchContext.current().source()); if (deleteByQuery) { return new DeleteByQueryWrappingFilter(parentConstantScoreQuery); } else { return new CustomQueryWrappingFilter(parentConstantScoreQuery); } } }
1no label
src_main_java_org_elasticsearch_index_query_HasParentFilterParser.java
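A hedged client-side sketch of the filter this parser accepts. FilterBuilders.hasParentFilter existed in this era of elasticsearch, and the keys it emits mirror what the parser reads above ("parent_type", "query"/"filter", "_name"); the filterName(...) builder method is an assumption, as are the field/value names.

FilterBuilder byParent = FilterBuilders.hasParentFilter("blog",
        QueryBuilders.termQuery("tag", "something"))
        .filterName("by-parent");   // would surface as "_name" in the JSON body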
222
private class PlaceholderResolvingStringValueResolver implements StringValueResolver {

    private final PropertyPlaceholderHelper helper;
    private final PropertyPlaceholderHelper.PlaceholderResolver resolver;

    public PlaceholderResolvingStringValueResolver(Properties props) {
        this.helper = new PropertyPlaceholderHelper("${", "}", ":", true);
        this.resolver = new PropertyPlaceholderConfigurerResolver(props);
    }

    public String resolveStringValue(String strVal) throws BeansException {
        String value = this.helper.replacePlaceholders(strVal, this.resolver);
        return (value.equals("") ? null : value);
    }
}
0true
common_src_main_java_org_broadleafcommerce_common_config_RuntimeEnvironmentPropertiesConfigurer.java
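A sketch of the ${...} resolution configured above: prefix "${", suffix "}", value separator ":" for inline defaults, and unresolvable placeholders ignored. The property names are illustrative.

Properties props = new Properties();
props.setProperty("db.host", "db01");

PropertyPlaceholderHelper helper = new PropertyPlaceholderHelper("${", "}", ":", true);
String resolved = helper.replacePlaceholders("jdbc://${db.host}/${db.name:broadleaf}", props);
// -> "jdbc://db01/broadleaf" (db.name falls back to its ':' default)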
2,581
public final class Packet extends DataAdapter implements SocketWritable, SocketReadable { public static final byte VERSION = 1; public static final int HEADER_OP = 0; public static final int HEADER_RESPONSE = 1; public static final int HEADER_EVENT = 2; public static final int HEADER_WAN_REPLICATION = 3; public static final int HEADER_URGENT = 4; private static final int ST_VERSION = 11; private static final int ST_HEADER = 12; private static final int ST_PARTITION = 13; private short header; private int partitionId; private transient Connection conn; public Packet(SerializationContext context) { super(context); } public Packet(Data value, SerializationContext context) { this(value, -1, context); } public Packet(Data value, int partitionId, SerializationContext context) { super(value, context); this.partitionId = partitionId; } /** * Gets the Connection this Packet was send with. * * @return the Connection. Could be null. */ public Connection getConn() { return conn; } /** * Sets the Connection this Packet is send with. * <p/> * This is done on the reading side of the Packet to make it possible to retrieve information about * the sender of the Packet. * * @param conn the connection. */ public void setConn(final Connection conn) { this.conn = conn; } public void setHeader(int bit) { header |= 1 << bit; } public boolean isHeaderSet(int bit) { return (header & 1 << bit) != 0; } /** * Returns the header of the Packet. The header is used to figure out what the content is of this Packet before * the actual payload needs to be processed. * * @return the header. */ public short getHeader() { return header; } /** * Returns the partition id of this packet. If this packet is not for a particular partition, -1 is returned. * * @return the partition id. */ public int getPartitionId() { return partitionId; } @Override public boolean isUrgent() { return isHeaderSet(HEADER_URGENT); } @Override public boolean writeTo(ByteBuffer destination) { if (!isStatusSet(ST_VERSION)) { if (!destination.hasRemaining()) { return false; } destination.put(VERSION); setStatus(ST_VERSION); } if (!isStatusSet(ST_HEADER)) { if (destination.remaining() < 2) { return false; } destination.putShort(header); setStatus(ST_HEADER); } if (!isStatusSet(ST_PARTITION)) { if (destination.remaining() < 4) { return false; } destination.putInt(partitionId); setStatus(ST_PARTITION); } return super.writeTo(destination); } @Override public boolean readFrom(ByteBuffer source) { if (!isStatusSet(ST_VERSION)) { if (!source.hasRemaining()) { return false; } byte version = source.get(); setStatus(ST_VERSION); if (VERSION != version) { throw new IllegalArgumentException("Packet versions are not matching! This -> " + VERSION + ", Incoming -> " + version); } } if (!isStatusSet(ST_HEADER)) { if (source.remaining() < 2) { return false; } header = source.getShort(); setStatus(ST_HEADER); } if (!isStatusSet(ST_PARTITION)) { if (source.remaining() < 4) { return false; } partitionId = source.getInt(); setStatus(ST_PARTITION); } return super.readFrom(source); } /** * Returns an estimation of the packet, including its payload, in bytes. * * @return the size of the packet. */ public int size() { // 7 = byte(version) + short(header) + int(partitionId) return (data != null ? 
data.totalSize() : 0) + 7; } @Override public String toString() { final StringBuilder sb = new StringBuilder("Packet{"); sb.append("header=").append(header); sb.append(", isResponse=").append(isHeaderSet(Packet.HEADER_RESPONSE)); sb.append(", isOperation=").append(isHeaderSet(Packet.HEADER_OP)); sb.append(", isEvent=").append(isHeaderSet(Packet.HEADER_EVENT)); sb.append(", partitionId=").append(partitionId); sb.append(", conn=").append(conn); sb.append('}'); return sb.toString(); } }
1no label
hazelcast_src_main_java_com_hazelcast_nio_Packet.java
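A sketch of the bit-flag header shown above; setHeader/isHeaderSet operate on single bit positions, so one packet can carry several markers at once. The data, partitionId, and serializationContext variables are placeholders.

Packet packet = new Packet(data, partitionId, serializationContext);
packet.setHeader(Packet.HEADER_OP);
packet.setHeader(Packet.HEADER_URGENT);
assert packet.isUrgent();                          // urgent bit is set
assert packet.isHeaderSet(Packet.HEADER_OP);       // op bit is set
assert !packet.isHeaderSet(Packet.HEADER_EVENT);   // event bit is not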
4,916
public class RestGetAction extends BaseRestHandler {

    @Inject
    public RestGetAction(Settings settings, Client client, RestController controller) {
        super(settings, client);
        controller.registerHandler(GET, "/{index}/{type}/{id}", this);
    }

    @Override
    public void handleRequest(final RestRequest request, final RestChannel channel) {
        final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id"));
        getRequest.listenerThreaded(false);
        getRequest.operationThreaded(true);
        getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh()));
        getRequest.routing(request.param("routing"));
        // order is important, set it after routing, so it will set the routing
        getRequest.parent(request.param("parent"));
        getRequest.preference(request.param("preference"));
        getRequest.realtime(request.paramAsBoolean("realtime", null));

        String sField = request.param("fields");
        if (sField != null) {
            String[] sFields = Strings.splitStringByCommaToArray(sField);
            if (sFields != null) {
                getRequest.fields(sFields);
            }
        }

        getRequest.version(RestActions.parseVersion(request));
        getRequest.versionType(VersionType.fromString(request.param("version_type"), getRequest.versionType()));
        getRequest.fetchSourceContext(FetchSourceContext.parseFromRestRequest(request));

        client.get(getRequest, new ActionListener<GetResponse>() {
            @Override
            public void onResponse(GetResponse response) {
                try {
                    XContentBuilder builder = restContentBuilder(request);
                    response.toXContent(builder, request);
                    if (!response.isExists()) {
                        channel.sendResponse(new XContentRestResponse(request, NOT_FOUND, builder));
                    } else {
                        channel.sendResponse(new XContentRestResponse(request, OK, builder));
                    }
                } catch (Throwable e) {
                    onFailure(e);
                }
            }

            @Override
            public void onFailure(Throwable e) {
                try {
                    channel.sendResponse(new XContentThrowableRestResponse(request, e));
                } catch (IOException e1) {
                    logger.error("Failed to send failure response", e1);
                }
            }
        });
    }
}
1no label
src_main_java_org_elasticsearch_rest_action_get_RestGetAction.java
558
public abstract class OAbstractIndexDefinition extends ODocumentWrapperNoClass implements OIndexDefinition {

    protected OCollate collate = new ODefaultCollate();

    protected OAbstractIndexDefinition() {
        super(new ODocument());
    }

    public OCollate getCollate() {
        return collate;
    }

    public void setCollate(final OCollate collate) {
        if (collate == null)
            throw new IllegalArgumentException("COLLATE cannot be null");
        this.collate = collate;
    }

    public void setCollate(String iCollate) {
        if (iCollate == null)
            iCollate = ODefaultCollate.NAME;
        setCollate(OSQLEngine.getCollate(iCollate));
    }
}
0true
core_src_main_java_com_orientechnologies_orient_core_index_OAbstractIndexDefinition.java
107
public interface Timestamp extends Comparable<Timestamp> {

    /**
     * Returns the length of time since UNIX epoch in the given {@link java.util.concurrent.TimeUnit}.
     *
     * @param unit the unit in which to express the elapsed time
     * @return the time since epoch, converted to the given unit
     */
    public long sinceEpoch(TimeUnit unit);

    /**
     * Returns the native unit used by this Timestamp. The actual time is specified in this unit of time.
     *
     * @return the native {@link TimeUnit} of this timestamp
     */
    public TimeUnit getNativeUnit();
}
0true
titan-core_src_main_java_com_thinkaurelius_titan_core_attribute_Timestamp.java
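A minimal sketch of how an implementation might satisfy this interface, assuming the elapsed time is stored in its native unit; the class and field names are hypothetical, and comparing at nanosecond granularity is a simplification (TimeUnit.convert saturates on overflow).

public final class SimpleTimestamp implements Timestamp {
    private final long time;           // elapsed time since epoch, in nativeUnit
    private final TimeUnit nativeUnit;

    public SimpleTimestamp(long time, TimeUnit nativeUnit) {
        this.time = time;
        this.nativeUnit = nativeUnit;
    }

    public long sinceEpoch(TimeUnit unit) {
        return unit.convert(time, nativeUnit);   // TimeUnit does the conversion
    }

    public TimeUnit getNativeUnit() {
        return nativeUnit;
    }

    public int compareTo(Timestamp other) {
        TimeUnit u = TimeUnit.NANOSECONDS;       // compare at the finest granularity
        return Long.compare(sinceEpoch(u), other.sinceEpoch(u));
    }
}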
23
public interface Generator<T> {
    T get();
}
0true
src_main_java_jsr166e_CompletableFuture.java
835
public class SearchScrollRequest extends ActionRequest<SearchScrollRequest> {

    private String scrollId;
    private Scroll scroll;
    private SearchOperationThreading operationThreading = SearchOperationThreading.THREAD_PER_SHARD;

    public SearchScrollRequest() {
    }

    public SearchScrollRequest(String scrollId) {
        this.scrollId = scrollId;
    }

    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException validationException = null;
        if (scrollId == null) {
            validationException = addValidationError("scrollId is missing", validationException);
        }
        return validationException;
    }

    /**
     * Controls the search operation threading model.
     */
    public SearchOperationThreading operationThreading() {
        return this.operationThreading;
    }

    /**
     * Controls the search operation threading model.
     */
    public SearchScrollRequest operationThreading(SearchOperationThreading operationThreading) {
        this.operationThreading = operationThreading;
        return this;
    }

    /**
     * The scroll id used to scroll the search.
     */
    public String scrollId() {
        return scrollId;
    }

    public SearchScrollRequest scrollId(String scrollId) {
        this.scrollId = scrollId;
        return this;
    }

    /**
     * If set, will enable scrolling of the search request.
     */
    public Scroll scroll() {
        return scroll;
    }

    /**
     * If set, will enable scrolling of the search request.
     */
    public SearchScrollRequest scroll(Scroll scroll) {
        this.scroll = scroll;
        return this;
    }

    /**
     * If set, will enable scrolling of the search request for the specified timeout.
     */
    public SearchScrollRequest scroll(TimeValue keepAlive) {
        return scroll(new Scroll(keepAlive));
    }

    /**
     * If set, will enable scrolling of the search request for the specified timeout.
     */
    public SearchScrollRequest scroll(String keepAlive) {
        return scroll(new Scroll(TimeValue.parseTimeValue(keepAlive, null)));
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        operationThreading = SearchOperationThreading.fromId(in.readByte());
        scrollId = in.readString();
        if (in.readBoolean()) {
            scroll = readScroll(in);
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeByte(operationThreading.id());
        out.writeString(scrollId);
        if (scroll == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            scroll.writeTo(out);
        }
    }
}
0true
src_main_java_org_elasticsearch_action_search_SearchScrollRequest.java
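A sketch of continuing a scroll with the request above; the scroll id would come from an initial search response (previousResponse is a placeholder), and the keep-alive value is illustrative.

SearchScrollRequest scrollRequest = new SearchScrollRequest(previousResponse.getScrollId());
scrollRequest.scroll("1m");   // keep the scroll context alive for another minute
SearchResponse page = client.searchScroll(scrollRequest).actionGet();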
109
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
    public sun.misc.Unsafe run() throws Exception {
        Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
        for (java.lang.reflect.Field f : k.getDeclaredFields()) {
            f.setAccessible(true);
            Object x = f.get(null);
            if (k.isInstance(x))
                return k.cast(x);
        }
        throw new NoSuchFieldError("the Unsafe");
    }
});
0true
src_main_java_jsr166e_CountedCompleter.java
670
sbTree.loadEntriesBetween(rangeFrom, inclusive, rangeTo, inclusive, new OTreeInternal.RangeResultListener<Object, V>() {
    @Override
    public boolean addResult(Map.Entry<Object, V> entry) {
        final Object key = entry.getKey();
        final V value = entry.getValue();
        return addToEntriesResult(transformer, entriesResultListener, key, value);
    }
});
1no label
core_src_main_java_com_orientechnologies_orient_core_index_engine_OSBTreeIndexEngine.java
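A sketch of a range listener that stops the traversal early, assuming (as the callback's boolean return suggests) that returning false halts further results; the limit is illustrative.

final int[] seen = {0};
sbTree.loadEntriesBetween(rangeFrom, true, rangeTo, true,
        new OTreeInternal.RangeResultListener<Object, V>() {
            @Override
            public boolean addResult(Map.Entry<Object, V> entry) {
                seen[0]++;
                return seen[0] < 100;   // stop after 100 entries (assumed semantics)
            }
        });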
749
public class TxnListRemoveRequest extends TxnCollectionRequest {

    public TxnListRemoveRequest() {
    }

    public TxnListRemoveRequest(String name, Data value) {
        super(name, value);
    }

    @Override
    public Object innerCall() throws Exception {
        return getEndpoint().getTransactionContext(txnId).getList(name).remove(value);
    }

    @Override
    public String getServiceName() {
        return ListService.SERVICE_NAME;
    }

    @Override
    public int getClassId() {
        return CollectionPortableHook.TXN_LIST_REMOVE;
    }

    @Override
    public Permission getRequiredPermission() {
        return new ListPermission(name, ActionConstants.ACTION_REMOVE);
    }
}
0true
hazelcast_src_main_java_com_hazelcast_collection_client_TxnListRemoveRequest.java
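Client-side, the request above corresponds to a remove inside a transaction context; a hedged sketch, with the list name and item value illustrative:

TransactionContext ctx = client.newTransactionContext();
ctx.beginTransaction();
try {
    TransactionalList<String> list = ctx.getList("pending");
    list.remove("item-1");   // server side runs TxnListRemoveRequest.innerCall()
    ctx.commitTransaction();
} catch (Exception e) {
    ctx.rollbackTransaction();
    throw ExceptionUtil.rethrow(e);
}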
521
public class BLCCollectionUtils {

    /**
     * Delegates to {@link CollectionUtils#collect(Collection, Transformer)}, but performs the necessary type coercion
     * to allow the returned collection to be correctly casted based on the TypedTransformer.
     *
     * @param inputCollection
     * @param transformer
     * @return the typed, collected Collection
     */
    @SuppressWarnings({ "unchecked", "rawtypes" })
    public static <T> Collection<T> collect(Collection inputCollection, TypedTransformer<T> transformer) {
        return CollectionUtils.collect(inputCollection, transformer);
    }

    /**
     * Delegates to {@link CollectionUtils#select(Collection, org.apache.commons.collections.Predicate)}, but will
     * force the return type to be a List<T>.
     *
     * @param inputCollection
     * @param predicate
     * @return
     */
    public static <T> List<T> selectList(Collection<T> inputCollection, TypedPredicate<T> predicate) {
        ArrayList<T> answer = new ArrayList<T>(inputCollection.size());
        CollectionUtils.select(inputCollection, predicate, answer);
        return answer;
    }
}
0true
common_src_main_java_org_broadleafcommerce_common_util_BLCCollectionUtils.java
240
public class ModuleConfigurationType implements BroadleafEnumerationType, Serializable {

    private static final long serialVersionUID = 1L;

    private static final Map<String, ModuleConfigurationType> TYPES = new LinkedHashMap<String, ModuleConfigurationType>();

    public static final ModuleConfigurationType FULFILLMENT_PRICING = new ModuleConfigurationType("FULFILLMENT_PRICING", "Fulfillment Pricing Module");
    public static final ModuleConfigurationType TAX_CALCULATION = new ModuleConfigurationType("TAX_CALCULATION", "Tax Calculation Module");
    public static final ModuleConfigurationType ADDRESS_VERIFICATION = new ModuleConfigurationType("ADDRESS_VERIFICATION", "Address Verification Module");
    public static final ModuleConfigurationType PAYMENT_PROCESSOR = new ModuleConfigurationType("PAYMENT_PROCESSOR", "Payment Processor Module");
    public static final ModuleConfigurationType CDN_PROVIDER = new ModuleConfigurationType("CDN_PROVIDER", "Content Delivery Network Module");

    public static ModuleConfigurationType getInstance(final String type) {
        return TYPES.get(type);
    }

    private String type;
    private String friendlyType;

    public ModuleConfigurationType() {
        // do nothing
    }

    public ModuleConfigurationType(final String type, final String friendlyType) {
        this.friendlyType = friendlyType;
        setType(type);
    }

    @Override
    public String getType() {
        return type;
    }

    @Override
    public String getFriendlyType() {
        return friendlyType;
    }

    private void setType(final String type) {
        this.type = type;
        if (!TYPES.containsKey(type)) {
            TYPES.put(type, this);
        }
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((type == null) ? 0 : type.hashCode());
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        ModuleConfigurationType other = (ModuleConfigurationType) obj;
        if (type == null) {
            if (other.type != null)
                return false;
        } else if (!type.equals(other.type))
            return false;
        return true;
    }
}
1no label
common_src_main_java_org_broadleafcommerce_common_config_service_type_ModuleConfigurationType.java
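A short sketch of the extensible-enum pattern above: instances self-register in the static TYPES map at construction, so lookups by type string work for both the built-ins and module-defined additions. The "FRAUD_CHECK" value is hypothetical.

ModuleConfigurationType tax = ModuleConfigurationType.getInstance("TAX_CALCULATION");
assert tax == ModuleConfigurationType.TAX_CALCULATION;   // first registration wins

// A module can contribute its own value; constructing it registers it.
ModuleConfigurationType custom = new ModuleConfigurationType("FRAUD_CHECK", "Fraud Check Module"); // hypothetical
assert ModuleConfigurationType.getInstance("FRAUD_CHECK") == custom;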
472
public class BroadleafExternalAuthenticationUserDetails extends User {

    private String firstName;
    private String lastName;
    private String email;

    public BroadleafExternalAuthenticationUserDetails(String username, String password, Collection<? extends GrantedAuthority> authorities) {
        super(username, password, authorities);
    }

    public String getFirstName() {
        return firstName;
    }

    public void setFirstName(String firstName) {
        this.firstName = firstName;
    }

    public String getLastName() {
        return lastName;
    }

    public void setLastName(String lastName) {
        this.lastName = lastName;
    }

    public String getEmail() {
        return email;
    }

    public void setEmail(String email) {
        this.email = email;
    }
}
0true
common_src_main_java_org_broadleafcommerce_common_security_BroadleafExternalAuthenticationUserDetails.java
307
public class ClusterHealthRequestBuilder extends MasterNodeReadOperationRequestBuilder<ClusterHealthRequest, ClusterHealthResponse, ClusterHealthRequestBuilder> {

    public ClusterHealthRequestBuilder(ClusterAdminClient clusterClient) {
        super((InternalClusterAdminClient) clusterClient, new ClusterHealthRequest());
    }

    public ClusterHealthRequestBuilder setIndices(String... indices) {
        request.indices(indices);
        return this;
    }

    public ClusterHealthRequestBuilder setTimeout(TimeValue timeout) {
        request.timeout(timeout);
        return this;
    }

    public ClusterHealthRequestBuilder setTimeout(String timeout) {
        request.timeout(timeout);
        return this;
    }

    public ClusterHealthRequestBuilder setWaitForStatus(ClusterHealthStatus waitForStatus) {
        request.waitForStatus(waitForStatus);
        return this;
    }

    public ClusterHealthRequestBuilder setWaitForGreenStatus() {
        request.waitForGreenStatus();
        return this;
    }

    public ClusterHealthRequestBuilder setWaitForYellowStatus() {
        request.waitForYellowStatus();
        return this;
    }

    public ClusterHealthRequestBuilder setWaitForRelocatingShards(int waitForRelocatingShards) {
        request.waitForRelocatingShards(waitForRelocatingShards);
        return this;
    }

    public ClusterHealthRequestBuilder setWaitForActiveShards(int waitForActiveShards) {
        request.waitForActiveShards(waitForActiveShards);
        return this;
    }

    /**
     * Waits for N number of nodes. Use "12" for exact mapping, ">12" and "<12" for range.
     */
    public ClusterHealthRequestBuilder setWaitForNodes(String waitForNodes) {
        request.waitForNodes(waitForNodes);
        return this;
    }

    public ClusterHealthRequestBuilder setWaitForEvents(Priority waitForEvents) {
        request.waitForEvents(waitForEvents);
        return this;
    }

    @Override
    protected void doExecute(ActionListener<ClusterHealthResponse> listener) {
        ((ClusterAdminClient) client).health(request, listener);
    }
}
0true
src_main_java_org_elasticsearch_action_admin_cluster_health_ClusterHealthRequestBuilder.java
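A sketch of the builder in use, mirroring the health checks elsewhere in this file set; the index name, node range, and timeout are illustrative.

ClusterHealthResponse health = client.admin().cluster().prepareHealth("test")
        .setWaitForGreenStatus()
        .setWaitForNodes(">2")     // range syntax per setWaitForNodes above
        .setTimeout("30s")
        .execute().actionGet();
if (health.isTimedOut()) {
    System.err.println("cluster did not reach green in time");
}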
1,500
public class BalanceConfigurationTests extends ElasticsearchAllocationTestCase { private final ESLogger logger = Loggers.getLogger(BalanceConfigurationTests.class); // TODO maybe we can randomize these numbers somehow final int numberOfNodes = 25; final int numberOfIndices = 12; final int numberOfShards = 2; final int numberOfReplicas = 2; @Test public void testIndexBalance() { /* Tests balance over indices only */ final float indexBalance = 1.0f; final float replicaBalance = 0.0f; final float primaryBalance = 0.0f; final float balanceTreshold = 1.0f; ImmutableSettings.Builder settings = settingsBuilder(); settings.put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, indexBalance); settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, replicaBalance); settings.put(BalancedShardsAllocator.SETTING_PRIMARY_BALANCE_FACTOR, primaryBalance); settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, balanceTreshold); AllocationService strategy = createAllocationService(settings.build()); ClusterState clusterState = initCluster(strategy); assertIndexBalance(logger, clusterState.getRoutingNodes(), numberOfNodes, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold); clusterState = addNode(clusterState, strategy); assertIndexBalance(logger, clusterState.getRoutingNodes(), numberOfNodes + 1, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold); clusterState = removeNodes(clusterState, strategy); assertIndexBalance(logger, clusterState.getRoutingNodes(), (numberOfNodes + 1) - (numberOfNodes + 1) / 2, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold); } @Test public void testReplicaBalance() { /* Tests balance over replicas only */ final float indexBalance = 0.0f; final float replicaBalance = 1.0f; final float primaryBalance = 0.0f; final float balanceTreshold = 1.0f; ImmutableSettings.Builder settings = settingsBuilder(); settings.put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, indexBalance); settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, replicaBalance); settings.put(BalancedShardsAllocator.SETTING_PRIMARY_BALANCE_FACTOR, primaryBalance); settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, balanceTreshold); AllocationService strategy = createAllocationService(settings.build()); ClusterState clusterState = initCluster(strategy); assertReplicaBalance(logger, clusterState.getRoutingNodes(), numberOfNodes, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold); clusterState = addNode(clusterState, strategy); assertReplicaBalance(logger, clusterState.getRoutingNodes(), numberOfNodes + 1, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold); clusterState = removeNodes(clusterState, strategy); assertReplicaBalance(logger, clusterState.getRoutingNodes(), (numberOfNodes + 1) - (numberOfNodes + 1) / 2, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold); } @Test public void testPrimaryBalance() { /* Tests balance over primaries only */ final float indexBalance = 0.0f; final float replicaBalance = 0.0f; final float primaryBalance = 1.0f; final float balanceTreshold = 1.0f; ImmutableSettings.Builder settings = settingsBuilder(); settings.put("cluster.routing.allocation.allow_rebalance", 
ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, indexBalance); settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, replicaBalance); settings.put(BalancedShardsAllocator.SETTING_PRIMARY_BALANCE_FACTOR, primaryBalance); settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, balanceTreshold); AllocationService strategy = createAllocationService(settings.build()); ClusterState clusterstate = initCluster(strategy); assertPrimaryBalance(logger, clusterstate.getRoutingNodes(), numberOfNodes, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold); clusterstate = addNode(clusterstate, strategy); assertPrimaryBalance(logger, clusterstate.getRoutingNodes(), numberOfNodes + 1, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold); clusterstate = removeNodes(clusterstate, strategy); assertPrimaryBalance(logger, clusterstate.getRoutingNodes(), numberOfNodes + 1 - (numberOfNodes + 1) / 2, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold); } private ClusterState initCluster(AllocationService strategy) { MetaData.Builder metaDataBuilder = MetaData.builder(); RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); for (int i = 0; i < numberOfIndices; i++) { IndexMetaData.Builder index = IndexMetaData.builder("test" + i).numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas); metaDataBuilder = metaDataBuilder.put(index); } MetaData metaData = metaDataBuilder.build(); for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) { routingTableBuilder.addAsNew(cursor.value); } RoutingTable routingTable = routingTableBuilder.build(); logger.info("start " + numberOfNodes + " nodes"); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); for (int i = 0; i < numberOfNodes; i++) { nodes.put(newNode("node" + i)); } ClusterState clusterState = ClusterState.builder().nodes(nodes).metaData(metaData).routingTable(routingTable).build(); routingTable = strategy.reroute(clusterState).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); RoutingNodes routingNodes = clusterState.routingNodes(); logger.info("restart all the primary shards, replicas will start initializing"); routingNodes = clusterState.routingNodes(); routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); routingNodes = clusterState.routingNodes(); logger.info("start the replica shards"); routingNodes = clusterState.routingNodes(); routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); routingNodes = clusterState.routingNodes(); logger.info("complete rebalancing"); RoutingTable prev = routingTable; while (true) { routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); routingNodes = clusterState.routingNodes(); if (routingTable == prev) break; prev = routingTable; } return clusterState; } private ClusterState addNode(ClusterState clusterState, AllocationService strategy) { logger.info("now, start 1 more node, check that rebalancing will happen because we set it to always"); clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) .put(newNode("node" + numberOfNodes))) .build(); RoutingTable routingTable = strategy.reroute(clusterState).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); RoutingNodes routingNodes = clusterState.routingNodes(); // move initializing to started RoutingTable prev = routingTable; while (true) { routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); routingNodes = clusterState.routingNodes(); if (routingTable == prev) break; prev = routingTable; } return clusterState; } private ClusterState removeNodes(ClusterState clusterState, AllocationService strategy) { logger.info("Removing half the nodes (" + (numberOfNodes + 1) / 2 + ")"); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); for (int i = (numberOfNodes + 1) / 2; i <= numberOfNodes; i++) { nodes.remove("node" + i); } clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build(); RoutingNodes routingNodes = clusterState.routingNodes(); logger.info("start all the primary shards, replicas will start initializing"); RoutingTable routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); routingNodes = clusterState.routingNodes(); logger.info("start the replica shards"); routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); routingNodes = clusterState.routingNodes(); logger.info("rebalancing"); routingTable = strategy.reroute(clusterState).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); routingNodes = clusterState.routingNodes(); logger.info("complete rebalancing"); RoutingTable prev = routingTable; while (true) { routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); routingNodes = clusterState.routingNodes(); if (routingTable == prev) break; prev = routingTable; } return clusterState; } private void assertReplicaBalance(ESLogger logger, RoutingNodes nodes, int numberOfNodes, int numberOfIndices, int numberOfReplicas, int numberOfShards, float treshold) { final int numShards = numberOfIndices * numberOfShards * (numberOfReplicas + 1); final float avgNumShards = (float) (numShards) / (float) (numberOfNodes); final int minAvgNumberOfShards = Math.round(Math.round(Math.floor(avgNumShards - treshold))); final int maxAvgNumberOfShards = Math.round(Math.round(Math.ceil(avgNumShards + treshold))); for (RoutingNode node : nodes) { // logger.info(node.nodeId() + ": " + node.shardsWithState(INITIALIZING, STARTED).size() + " shards ("+minAvgNumberOfShards+" to "+maxAvgNumberOfShards+")"); assertThat(node.shardsWithState(STARTED).size(), Matchers.greaterThanOrEqualTo(minAvgNumberOfShards)); assertThat(node.shardsWithState(STARTED).size(), Matchers.lessThanOrEqualTo(maxAvgNumberOfShards)); } } private void assertIndexBalance(ESLogger logger, RoutingNodes nodes, int numberOfNodes, int numberOfIndices, int numberOfReplicas, int numberOfShards, 
float treshold) { final int numShards = numberOfShards * (numberOfReplicas + 1); final float avgNumShards = (float) (numShards) / (float) (numberOfNodes); final int minAvgNumberOfShards = Math.round(Math.round(Math.floor(avgNumShards - treshold))); final int maxAvgNumberOfShards = Math.round(Math.round(Math.ceil(avgNumShards + treshold))); for (String index : nodes.getRoutingTable().indicesRouting().keySet()) { for (RoutingNode node : nodes) { // logger.info(node.nodeId() +":"+index+ ": " + node.shardsWithState(index, INITIALIZING, STARTED).size() + " shards ("+minAvgNumberOfShards+" to "+maxAvgNumberOfShards+")"); assertThat(node.shardsWithState(index, STARTED).size(), Matchers.greaterThanOrEqualTo(minAvgNumberOfShards)); assertThat(node.shardsWithState(index, STARTED).size(), Matchers.lessThanOrEqualTo(maxAvgNumberOfShards)); } } } private void assertPrimaryBalance(ESLogger logger, RoutingNodes nodes, int numberOfNodes, int numberOfIndices, int numberOfReplicas, int numberOfShards, float treshold) { final int numShards = numberOfShards; final float avgNumShards = (float) (numShards) / (float) (numberOfNodes); final int minAvgNumberOfShards = Math.round(Math.round(Math.floor(avgNumShards - treshold))); final int maxAvgNumberOfShards = Math.round(Math.round(Math.ceil(avgNumShards + treshold))); for (String index : nodes.getRoutingTable().indicesRouting().keySet()) { for (RoutingNode node : nodes) { int primaries = 0; for (ShardRouting shard : node.shardsWithState(index, STARTED)) { primaries += shard.primary() ? 1 : 0; } // logger.info(node.nodeId() + ": " + primaries + " primaries ("+minAvgNumberOfShards+" to "+maxAvgNumberOfShards+")"); assertThat(primaries, Matchers.greaterThanOrEqualTo(minAvgNumberOfShards)); assertThat(primaries, Matchers.lessThanOrEqualTo(maxAvgNumberOfShards)); } } } @Test public void testPersistedSettings() { ImmutableSettings.Builder settings = settingsBuilder(); settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, 0.2); settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, 0.3); settings.put(BalancedShardsAllocator.SETTING_PRIMARY_BALANCE_FACTOR, 0.5); settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, 2.0); final NodeSettingsService.Listener[] listeners = new NodeSettingsService.Listener[1]; NodeSettingsService service = new NodeSettingsService(settingsBuilder().build()) { @Override public void addListener(Listener listener) { assertNull("addListener was called twice while only one time was expected", listeners[0]); listeners[0] = listener; } }; BalancedShardsAllocator allocator = new BalancedShardsAllocator(settings.build(), service); assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.2f)); assertThat(allocator.getShardBalance(), Matchers.equalTo(0.3f)); assertThat(allocator.getPrimaryBalance(), Matchers.equalTo(0.5f)); assertThat(allocator.getThreshold(), Matchers.equalTo(2.0f)); settings = settingsBuilder(); settings.put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); listeners[0].onRefreshSettings(settings.build()); assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.2f)); assertThat(allocator.getShardBalance(), Matchers.equalTo(0.3f)); assertThat(allocator.getPrimaryBalance(), Matchers.equalTo(0.5f)); assertThat(allocator.getThreshold(), Matchers.equalTo(2.0f)); settings = settingsBuilder(); settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, 0.5); settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, 0.1); 
settings.put(BalancedShardsAllocator.SETTING_PRIMARY_BALANCE_FACTOR, 0.4); settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, 3.0); listeners[0].onRefreshSettings(settings.build()); assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.5f)); assertThat(allocator.getShardBalance(), Matchers.equalTo(0.1f)); assertThat(allocator.getPrimaryBalance(), Matchers.equalTo(0.4f)); assertThat(allocator.getThreshold(), Matchers.equalTo(3.0f)); } @Test public void testNoRebalanceOnPrimaryOverload() { ImmutableSettings.Builder settings = settingsBuilder(); AllocationService strategy = new AllocationService(settings.build(), randomAllocationDeciders(settings.build(), new NodeSettingsService(ImmutableSettings.Builder.EMPTY_SETTINGS), getRandom()), new ShardsAllocators(settings.build(), new NoneGatewayAllocator(), new ShardsAllocator() { @Override public boolean rebalance(RoutingAllocation allocation) { return false; } @Override public boolean move(MutableShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { return false; } @Override public void applyStartedShards(StartedRerouteAllocation allocation) { } @Override public void applyFailedShards(FailedRerouteAllocation allocation) { } /* * // this allocator tries to rebuild this scenario where a rebalance is * // triggered solely by the primary overload on node [1] where a shard * // is rebalanced to node 0 routing_nodes: -----node_id[0][V] --------[test][0], node[0], [R], s[STARTED] --------[test][4], node[0], [R], s[STARTED] -----node_id[1][V] --------[test][0], node[1], [P], s[STARTED] --------[test][1], node[1], [P], s[STARTED] --------[test][3], node[1], [R], s[STARTED] -----node_id[2][V] --------[test][1], node[2], [R], s[STARTED] --------[test][2], node[2], [R], s[STARTED] --------[test][4], node[2], [P], s[STARTED] -----node_id[3][V] --------[test][2], node[3], [P], s[STARTED] --------[test][3], node[3], [P], s[STARTED] ---- unassigned */ @Override public boolean allocateUnassigned(RoutingAllocation allocation) { RoutingNodes.UnassignedShards unassigned = allocation.routingNodes().unassigned(); boolean changed = !unassigned.isEmpty(); for (MutableShardRouting sr : unassigned) { switch (sr.id()) { case 0: if (sr.primary()) { allocation.routingNodes().assign(sr, "node1"); } else { allocation.routingNodes().assign(sr, "node0"); } break; case 1: if (sr.primary()) { allocation.routingNodes().assign(sr, "node1"); } else { allocation.routingNodes().assign(sr, "node2"); } break; case 2: if (sr.primary()) { allocation.routingNodes().assign(sr, "node3"); } else { allocation.routingNodes().assign(sr, "node2"); } break; case 3: if (sr.primary()) { allocation.routingNodes().assign(sr, "node3"); } else { allocation.routingNodes().assign(sr, "node1"); } break; case 4: if (sr.primary()) { allocation.routingNodes().assign(sr, "node2"); } else { allocation.routingNodes().assign(sr, "node0"); } break; } } unassigned.clear(); return changed; } }), ClusterInfoService.EMPTY); MetaData.Builder metaDataBuilder = MetaData.builder(); RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); IndexMetaData.Builder indexMeta = IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1); metaDataBuilder = metaDataBuilder.put(indexMeta); MetaData metaData = metaDataBuilder.build(); for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) { routingTableBuilder.addAsNew(cursor.value); } RoutingTable routingTable = routingTableBuilder.build(); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); for (int i = 0; i < 
4; i++) { DiscoveryNode node = newNode("node" + i); nodes.put(node); } ClusterState clusterState = ClusterState.builder().nodes(nodes).metaData(metaData).routingTable(routingTable).build(); routingTable = strategy.reroute(clusterState).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); RoutingNodes routingNodes = clusterState.routingNodes(); for (RoutingNode routingNode : routingNodes) { for (MutableShardRouting mutableShardRouting : routingNode) { assertThat(mutableShardRouting.state(), Matchers.equalTo(ShardRoutingState.INITIALIZING)); } } strategy = createAllocationService(settings.build()); logger.info("use the new allocator and check if it moves shards"); routingNodes = clusterState.routingNodes(); routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); routingNodes = clusterState.routingNodes(); for (RoutingNode routingNode : routingNodes) { for (MutableShardRouting mutableShardRouting : routingNode) { assertThat(mutableShardRouting.state(), Matchers.equalTo(ShardRoutingState.STARTED)); } } logger.info("start the replica shards"); routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); routingNodes = clusterState.routingNodes(); for (RoutingNode routingNode : routingNodes) { for (MutableShardRouting mutableShardRouting : routingNode) { assertThat(mutableShardRouting.state(), Matchers.equalTo(ShardRoutingState.STARTED)); } } logger.info("rebalancing"); routingTable = strategy.reroute(clusterState).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); routingNodes = clusterState.routingNodes(); for (RoutingNode routingNode : routingNodes) { for (MutableShardRouting mutableShardRouting : routingNode) { assertThat(mutableShardRouting.state(), Matchers.equalTo(ShardRoutingState.STARTED)); } } } }
0true
src_test_java_org_elasticsearch_cluster_routing_allocation_BalanceConfigurationTests.java
150
public interface Action<A> { void apply(A a); }
0true
src_main_java_jsr166e_extra_ReadMostlyVector.java
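A minimal usage sketch for the Action callback interface above, written in the pre-lambda style the jsr166e sources target; the String type argument, the println body, and the assumption that a bulk-traversal method invokes apply once per element are illustrative, not taken from the source:

    Action<String> printer = new Action<String>() {
        public void apply(String s) {
            System.out.println(s); // presumably called once per traversed element
        }
    };
    printer.apply("hello");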
59
@SuppressWarnings("serial") protected static class CountableLock extends ReentrantReadWriteLock { protected int countLocks = 0; public CountableLock() { super(false); } }
0true
commons_src_main_java_com_orientechnologies_common_concur_lock_OLockManager.java
738
public abstract class CollectionRequest extends PartitionClientRequest implements Portable, SecureRequest { protected String serviceName; protected String name; public CollectionRequest() { } public CollectionRequest(String name) { this.name = name; } @Override protected int getPartition() { return getClientEngine().getPartitionService().getPartitionId(StringPartitioningStrategy.getPartitionKey(name)); } @Override public String getServiceName() { return serviceName; } public void setServiceName(String serviceName) { this.serviceName = serviceName; } @Override public int getFactoryId() { return CollectionPortableHook.F_ID; } public void write(PortableWriter writer) throws IOException { writer.writeUTF("s", serviceName); writer.writeUTF("n", name); } public void read(PortableReader reader) throws IOException { serviceName = reader.readUTF("s"); name = reader.readUTF("n"); } @Override public final Permission getRequiredPermission() { final String action = getRequiredAction(); if (ListService.SERVICE_NAME.equals(serviceName)) { return new ListPermission(name, action); } else if (SetService.SERVICE_NAME.equals(serviceName)) { return new SetPermission(name, action); } throw new IllegalArgumentException("No permission mapping exists for service: " + serviceName); } public abstract String getRequiredAction(); }
0true
hazelcast_src_main_java_com_hazelcast_collection_client_CollectionRequest.java
864
private class AsyncAction extends BaseAsyncAction<DfsSearchResult> { final AtomicArray<QuerySearchResult> queryResults; final AtomicArray<FetchSearchResult> fetchResults; final AtomicArray<IntArrayList> docIdsToLoad; private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) { super(request, listener); queryResults = new AtomicArray<QuerySearchResult>(firstResults.length()); fetchResults = new AtomicArray<FetchSearchResult>(firstResults.length()); docIdsToLoad = new AtomicArray<IntArrayList>(firstResults.length()); } @Override protected String firstPhaseName() { return "dfs"; } @Override protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchRequest request, SearchServiceListener<DfsSearchResult> listener) { searchService.sendExecuteDfs(node, request, listener); } @Override protected void moveToSecondPhase() { final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults); final AtomicInteger counter = new AtomicInteger(firstResults.asList().size()); int localOperations = 0; for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) { DfsSearchResult dfsResult = entry.value; DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId()); if (node.id().equals(nodes.localNodeId())) { localOperations++; } else { QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs); executeQuery(entry.index, dfsResult, counter, querySearchRequest, node); } } if (localOperations > 0) { if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) { threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() { @Override public void run() { for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) { DfsSearchResult dfsResult = entry.value; DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId()); if (node.id().equals(nodes.localNodeId())) { QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs); executeQuery(entry.index, dfsResult, counter, querySearchRequest, node); } } } }); } else { boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD; for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) { final DfsSearchResult dfsResult = entry.value; final DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId()); if (node.id().equals(nodes.localNodeId())) { final QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs); try { if (localAsync) { threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() { @Override public void run() { executeQuery(entry.index, dfsResult, counter, querySearchRequest, node); } }); } else { executeQuery(entry.index, dfsResult, counter, querySearchRequest, node); } } catch (Throwable t) { onQueryFailure(t, querySearchRequest, entry.index, dfsResult, counter); } } } } } } void executeQuery(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter, final QuerySearchRequest querySearchRequest, DiscoveryNode node) { searchService.sendExecuteQuery(node, querySearchRequest, new SearchServiceListener<QuerySearchResult>() { @Override public void onResult(QuerySearchResult result) { result.shardTarget(dfsResult.shardTarget()); queryResults.set(shardIndex, result); if (counter.decrementAndGet() == 0) { executeFetchPhase(); } } @Override public void onFailure(Throwable t) { onQueryFailure(t, querySearchRequest, shardIndex, dfsResult, counter); } }); } void 
onQueryFailure(Throwable t, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult, AtomicInteger counter) { if (logger.isDebugEnabled()) { logger.debug("[{}] Failed to execute query phase", t, querySearchRequest.id()); } this.addShardFailure(shardIndex, dfsResult.shardTarget(), t); successulOps.decrementAndGet(); if (counter.decrementAndGet() == 0) { executeFetchPhase(); } } void executeFetchPhase() { try { innerExecuteFetchPhase(); } catch (Throwable e) { listener.onFailure(new ReduceSearchPhaseException("query", "", e, buildShardFailures())); } } void innerExecuteFetchPhase() { sortedShardList = searchPhaseController.sortDocs(queryResults); searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList); if (docIdsToLoad.asList().isEmpty()) { finishHim(); return; } final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size()); int localOperations = 0; for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) { QuerySearchResult queryResult = queryResults.get(entry.index); DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId()); if (node.id().equals(nodes.localNodeId())) { localOperations++; } else { FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(request, queryResult.id(), entry.value); executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node); } } if (localOperations > 0) { if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) { threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() { @Override public void run() { for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) { QuerySearchResult queryResult = queryResults.get(entry.index); DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId()); if (node.id().equals(nodes.localNodeId())) { FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(request, queryResult.id(), entry.value); executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node); } } } }); } else { boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD; for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) { final QuerySearchResult queryResult = queryResults.get(entry.index); final DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId()); if (node.id().equals(nodes.localNodeId())) { final FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(request, queryResult.id(), entry.value); try { if (localAsync) { threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() { @Override public void run() { executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node); } }); } else { executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node); } } catch (Throwable t) { onFetchFailure(t, fetchSearchRequest, entry.index, queryResult.shardTarget(), counter); } } } } } } void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter, final FetchSearchRequest fetchSearchRequest, DiscoveryNode node) { searchService.sendExecuteFetch(node, fetchSearchRequest, new SearchServiceListener<FetchSearchResult>() { @Override public void onResult(FetchSearchResult result) { result.shardTarget(shardTarget); fetchResults.set(shardIndex, result); if (counter.decrementAndGet() == 0) { finishHim(); } } @Override public void onFailure(Throwable t) { onFetchFailure(t, fetchSearchRequest, shardIndex, shardTarget, 
counter); } }); } void onFetchFailure(Throwable t, FetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget, AtomicInteger counter) { if (logger.isDebugEnabled()) { logger.debug("[{}] Failed to execute fetch phase", t, fetchSearchRequest.id()); } this.addShardFailure(shardIndex, shardTarget, t); successulOps.decrementAndGet(); if (counter.decrementAndGet() == 0) { finishHim(); } } void finishHim() { try { innerFinishHim(); } catch (Throwable e) { ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", e, buildShardFailures()); if (logger.isDebugEnabled()) { logger.debug("failed to reduce search", failure); } listener.onFailure(failure); } finally { releaseIrrelevantSearchContexts(queryResults, docIdsToLoad); } } void innerFinishHim() throws Exception { final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults); String scrollId = null; if (request.scroll() != null) { scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null); } listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get(), buildTookInMillis(), buildShardFailures())); } }
0true
src_main_java_org_elasticsearch_action_search_type_TransportSearchDfsQueryThenFetchAction.java
6,019
public final class DirectCandidateGenerator extends CandidateGenerator { private final DirectSpellChecker spellchecker; private final String field; private final SuggestMode suggestMode; private final TermsEnum termsEnum; private final IndexReader reader; private final long dictSize; private final double logBase = 5; private final long frequencyPlateau; private final Analyzer preFilter; private final Analyzer postFilter; private final double nonErrorLikelihood; private final boolean useTotalTermFrequency; private final CharsRef spare = new CharsRef(); private final BytesRef byteSpare = new BytesRef(); private final int numCandidates; public DirectCandidateGenerator(DirectSpellChecker spellchecker, String field, SuggestMode suggestMode, IndexReader reader, double nonErrorLikelihood, int numCandidates) throws IOException { this(spellchecker, field, suggestMode, reader, nonErrorLikelihood, numCandidates, null, null, MultiFields.getTerms(reader, field)); } public DirectCandidateGenerator(DirectSpellChecker spellchecker, String field, SuggestMode suggestMode, IndexReader reader, double nonErrorLikelihood, int numCandidates, Analyzer preFilter, Analyzer postFilter, Terms terms) throws IOException { if (terms == null) { throw new ElasticsearchIllegalArgumentException("generator field [" + field + "] doesn't exist"); } this.spellchecker = spellchecker; this.field = field; this.numCandidates = numCandidates; this.suggestMode = suggestMode; this.reader = reader; final long dictSize = terms.getSumTotalTermFreq(); this.useTotalTermFrequency = dictSize != -1; this.dictSize = dictSize == -1 ? reader.maxDoc() : dictSize; this.preFilter = preFilter; this.postFilter = postFilter; this.nonErrorLikelihood = nonErrorLikelihood; float thresholdFrequency = spellchecker.getThresholdFrequency(); this.frequencyPlateau = thresholdFrequency >= 1.0f ? (int) thresholdFrequency: (int)(dictSize * thresholdFrequency); termsEnum = terms.iterator(null); } /* (non-Javadoc) * @see org.elasticsearch.search.suggest.phrase.CandidateGenerator#isKnownWord(org.apache.lucene.util.BytesRef) */ @Override public boolean isKnownWord(BytesRef term) throws IOException { return frequency(term) > 0; } /* (non-Javadoc) * @see org.elasticsearch.search.suggest.phrase.CandidateGenerator#frequency(org.apache.lucene.util.BytesRef) */ @Override public long frequency(BytesRef term) throws IOException { term = preFilter(term, spare, byteSpare); return internalFrequency(term); } public long internalFrequency(BytesRef term) throws IOException { if (termsEnum.seekExact(term)) { return useTotalTermFrequency ? termsEnum.totalTermFreq() : termsEnum.docFreq(); } return 0; } public String getField() { return field; } /* (non-Javadoc) * @see org.elasticsearch.search.suggest.phrase.CandidateGenerator#drawCandidates(org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.CandidateSet, int) */ @Override public CandidateSet drawCandidates(CandidateSet set) throws IOException { Candidate original = set.originalTerm; BytesRef term = preFilter(original.term, spare, byteSpare); final long frequency = original.frequency; spellchecker.setThresholdFrequency(this.suggestMode == SuggestMode.SUGGEST_ALWAYS ? 
0 : thresholdFrequency(frequency, dictSize)); SuggestWord[] suggestSimilar = spellchecker.suggestSimilar(new Term(field, term), numCandidates, reader, this.suggestMode); List<Candidate> candidates = new ArrayList<Candidate>(suggestSimilar.length); for (int i = 0; i < suggestSimilar.length; i++) { SuggestWord suggestWord = suggestSimilar[i]; BytesRef candidate = new BytesRef(suggestWord.string); postFilter(new Candidate(candidate, internalFrequency(candidate), suggestWord.score, score(suggestWord.freq, suggestWord.score, dictSize), false), spare, byteSpare, candidates); } set.addCandidates(candidates); return set; } protected BytesRef preFilter(final BytesRef term, final CharsRef spare, final BytesRef byteSpare) throws IOException { if (preFilter == null) { return term; } final BytesRef result = byteSpare; SuggestUtils.analyze(preFilter, term, field, new SuggestUtils.TokenConsumer() { @Override public void nextToken() throws IOException { this.fillBytesRef(result); } }, spare); return result; } protected void postFilter(final Candidate candidate, final CharsRef spare, BytesRef byteSpare, final List<Candidate> candidates) throws IOException { if (postFilter == null) { candidates.add(candidate); } else { final BytesRef result = byteSpare; SuggestUtils.analyze(postFilter, candidate.term, field, new SuggestUtils.TokenConsumer() { @Override public void nextToken() throws IOException { this.fillBytesRef(result); if (posIncAttr.getPositionIncrement() > 0 && result.bytesEquals(candidate.term)) { BytesRef term = BytesRef.deepCopyOf(result); long freq = frequency(term); candidates.add(new Candidate(BytesRef.deepCopyOf(term), freq, candidate.stringDistance, score(candidate.frequency, candidate.stringDistance, dictSize), false)); } else { candidates.add(new Candidate(BytesRef.deepCopyOf(result), candidate.frequency, nonErrorLikelihood, score(candidate.frequency, candidate.stringDistance, dictSize), false)); } } }, spare); } } private double score(long frequency, double errorScore, long dictionarySize) { return errorScore * (((double)frequency + 1) / ((double)dictionarySize +1)); } protected long thresholdFrequency(long termFrequency, long dictionarySize) { if (termFrequency > 0) { return (long) Math.max(0, Math.round(termFrequency * (Math.log10(termFrequency - frequencyPlateau) * (1.0 / Math.log10(logBase))) + 1)); } return 0; } public static class CandidateSet { public Candidate[] candidates; public final Candidate originalTerm; public CandidateSet(Candidate[] candidates, Candidate originalTerm) { this.candidates = candidates; this.originalTerm = originalTerm; } public void addCandidates(List<Candidate> candidates) { final Set<Candidate> set = new HashSet<DirectCandidateGenerator.Candidate>(candidates); for (int i = 0; i < this.candidates.length; i++) { set.add(this.candidates[i]); } this.candidates = set.toArray(new Candidate[set.size()]); } public void addOneCandidate(Candidate candidate) { Candidate[] candidates = new Candidate[this.candidates.length + 1]; System.arraycopy(this.candidates, 0, candidates, 0, this.candidates.length); candidates[candidates.length-1] = candidate; this.candidates = candidates; } } public static class Candidate { public static final Candidate[] EMPTY = new Candidate[0]; public final BytesRef term; public final double stringDistance; public final long frequency; public final double score; public final boolean userInput; public Candidate(BytesRef term, long frequency, double stringDistance, double score, boolean userInput) { this.frequency = frequency; this.term = term; 
this.stringDistance = stringDistance; this.score = score; this.userInput = userInput; } @Override public String toString() { return "Candidate [term=" + term.utf8ToString() + ", stringDistance=" + stringDistance + ", frequency=" + frequency + (userInput ? ", userInput" : "" ) + "]"; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((term == null) ? 0 : term.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; Candidate other = (Candidate) obj; if (term == null) { if (other.term != null) return false; } else if (!term.equals(other.term)) return false; return true; } } @Override public Candidate createCandidate(BytesRef term, long frequency, double channelScore, boolean userInput) throws IOException { return new Candidate(term, frequency, channelScore, score(frequency, channelScore, dictSize), userInput); } }
1no label
src_main_java_org_elasticsearch_search_suggest_phrase_DirectCandidateGenerator.java
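The score method in the generator above applies add-one smoothing to the candidate's corpus frequency: score = errorScore * (frequency + 1) / (dictionarySize + 1). As a worked example with assumed values, a candidate with errorScore 0.8, frequency 9, and a dictionary size of 999 scores 0.8 * (10 / 1000) = 0.008; the +1 terms keep unseen candidates (frequency 0) from scoring exactly zero.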
162
public class TestTxEntries { private final Random random = new Random(); private final long refTime = System.currentTimeMillis(); private final int refId = 1; private final int refMaster = 1; private final int refMe = 1; private final long startPosition = 1000; private final String storeDir = "dir"; @Rule public EphemeralFileSystemRule fs = new EphemeralFileSystemRule(); /* * Starts a JVM, executes a tx that fails on prepare and rolls back, * triggering a bug where an extra start entry for that tx is written * in the xa log. */ @Test public void testStartEntryWrittenOnceOnRollback() throws Exception { final GraphDatabaseService db = new TestGraphDatabaseFactory().setFileSystem( fs.get() ).newImpermanentDatabase( storeDir ); createSomeTransactions( db ); EphemeralFileSystemAbstraction snapshot = fs.snapshot( shutdownDb( db ) ); new TestGraphDatabaseFactory().setFileSystem( snapshot ).newImpermanentDatabase( storeDir ).shutdown(); } @Test public void startEntryShouldBeUniqueIfEitherValueChanges() throws Exception { // Positive Xid hashcode assertCorrectChecksumEquality( randomXid( Boolean.TRUE ) ); // Negative Xid hashcode assertCorrectChecksumEquality( randomXid( Boolean.FALSE ) ); } private void assertCorrectChecksumEquality( Xid refXid ) { Start ref = new Start( refXid, refId, refMaster, refMe, startPosition, refTime, 0l ); assertChecksumsEquals( ref, new Start( refXid, refId, refMaster, refMe, startPosition, refTime, 0l ) ); // Different Xids assertChecksumsNotEqual( ref, new Start( randomXid( null ), refId, refMaster, refMe, startPosition, refTime, 0l ) ); // Different master assertChecksumsNotEqual( ref, new Start( refXid, refId, refMaster+1, refMe, startPosition, refTime, 0l ) ); // Different me assertChecksumsNotEqual( ref, new Start( refXid, refId, refMaster, refMe+1, startPosition, refTime, 0l ) ); } private void assertChecksumsNotEqual( Start ref, Start other ) { assertFalse( ref.getChecksum() == other.getChecksum() ); } private void assertChecksumsEquals( Start ref, Start other ) { assertEquals( ref.getChecksum(), other.getChecksum() ); } private Xid randomXid( Boolean trueForPositive ) { while ( true ) { Xid xid = new XidImpl( randomBytes(), randomBytes() ); if ( trueForPositive == null || xid.hashCode() > 0 == trueForPositive.booleanValue() ) { return xid; } } } private byte[] randomBytes() { byte[] bytes = new byte[random.nextInt( 10 )+5]; for ( int i = 0; i < bytes.length; i++ ) { bytes[i] = (byte) random.nextInt( 255 ); } return bytes; } private void createSomeTransactions( GraphDatabaseService db ) { Transaction tx = db.beginTx(); Node node1 = db.createNode(); Node node2 = db.createNode(); node1.createRelationshipTo( node2, DynamicRelationshipType.withName( "relType1" ) ); tx.success(); tx.finish(); tx = db.beginTx(); node1.delete(); tx.success(); try { // Will throw exception, causing the tx to be rolled back. tx.finish(); } catch ( Exception nothingToSeeHereMoveAlong ) { // InvalidRecordException coming, node1 has rels } /* * The damage has already been done. The following just makes sure * the corrupting tx is flushed to disk, since we will exit * uncleanly. */ tx = db.beginTx(); node1.setProperty( "foo", "bar" ); tx.success(); tx.finish(); } }
0true
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_xaframework_TestTxEntries.java
5,475
public class DfsPhase implements SearchPhase { private static ThreadLocal<ObjectOpenHashSet<Term>> cachedTermsSet = new ThreadLocal<ObjectOpenHashSet<Term>>() { @Override protected ObjectOpenHashSet<Term> initialValue() { return new ObjectOpenHashSet<Term>(); } }; @Override public Map<String, ? extends SearchParseElement> parseElements() { return ImmutableMap.of(); } @Override public void preProcess(SearchContext context) { } public void execute(SearchContext context) { final ObjectOpenHashSet<Term> termsSet = cachedTermsSet.get(); try { if (!context.queryRewritten()) { context.updateRewriteQuery(context.searcher().rewrite(context.query())); } if (!termsSet.isEmpty()) { termsSet.clear(); } context.query().extractTerms(new DelegateSet(termsSet)); for (RescoreSearchContext rescoreContext : context.rescore()) { rescoreContext.rescorer().extractTerms(context, rescoreContext, new DelegateSet(termsSet)); } Term[] terms = termsSet.toArray(Term.class); TermStatistics[] termStatistics = new TermStatistics[terms.length]; IndexReaderContext indexReaderContext = context.searcher().getTopReaderContext(); for (int i = 0; i < terms.length; i++) { // LUCENE 4 UPGRADE: cache TermContext? TermContext termContext = TermContext.build(indexReaderContext, terms[i]); termStatistics[i] = context.searcher().termStatistics(terms[i], termContext); } ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics = HppcMaps.newNoNullKeysMap(); for (Term term : terms) { assert term.field() != null : "field is null"; if (!fieldStatistics.containsKey(term.field())) { final CollectionStatistics collectionStatistics = context.searcher().collectionStatistics(term.field()); fieldStatistics.put(term.field(), collectionStatistics); } } context.dfsResult().termsStatistics(terms, termStatistics) .fieldStatistics(fieldStatistics) .maxDoc(context.searcher().getIndexReader().maxDoc()); } catch (Exception e) { throw new DfsPhaseExecutionException(context, "Exception during dfs phase", e); } finally { termsSet.clear(); // don't hold on to terms } } // We need to bridge to JCF world, b/c of Query#extractTerms private static class DelegateSet extends AbstractSet<Term> { private final ObjectOpenHashSet<Term> delegate; private DelegateSet(ObjectOpenHashSet<Term> delegate) { this.delegate = delegate; } @Override public boolean add(Term term) { return delegate.add(term); } @Override public boolean addAll(Collection<? extends Term> terms) { boolean result = false; for (Term term : terms) { result = delegate.add(term); } return result; } @Override public Iterator<Term> iterator() { final Iterator<ObjectCursor<Term>> iterator = delegate.iterator(); return new Iterator<Term>() { @Override public boolean hasNext() { return iterator.hasNext(); } @Override public Term next() { return iterator.next().value; } @Override public void remove() { throw new UnsupportedOperationException(); } }; } @Override public int size() { return delegate.size(); } } }
1no label
src_main_java_org_elasticsearch_search_dfs_DfsPhase.java
358
@SuppressWarnings("unchecked") public class ODatabaseDocumentTx extends ODatabaseRecordWrapperAbstract<ODatabaseRecordTx> implements ODatabaseDocument { public ODatabaseDocumentTx(final String iURL) { super(new ODatabaseRecordTx(iURL, ODocument.RECORD_TYPE)); } public ODatabaseDocumentTx(final ODatabaseRecordTx iSource) { super(iSource); } private void freezeIndexes(final List<OIndexAbstract<?>> indexesToFreeze, boolean throwException) { if (indexesToFreeze != null) { for (OIndexAbstract<?> indexToLock : indexesToFreeze) { indexToLock.freeze(throwException); } } } private void flushIndexes(List<OIndexAbstract<?>> indexesToFlush) { for (OIndexAbstract<?> index : indexesToFlush) { index.flush(); } } private List<OIndexAbstract<?>> prepareIndexesToFreeze(Collection<? extends OIndex<?>> indexes) { List<OIndexAbstract<?>> indexesToFreeze = null; if (indexes != null && !indexes.isEmpty()) { indexesToFreeze = new ArrayList<OIndexAbstract<?>>(indexes.size()); for (OIndex<?> index : indexes) { indexesToFreeze.add((OIndexAbstract<?>) index.getInternal()); } Collections.sort(indexesToFreeze, new Comparator<OIndex<?>>() { public int compare(OIndex<?> o1, OIndex<?> o2) { return o1.getName().compareTo(o2.getName()); } }); } return indexesToFreeze; } private void releaseIndexes(Collection<? extends OIndex<?>> indexesToRelease) { if (indexesToRelease != null) { Iterator<? extends OIndex<?>> it = indexesToRelease.iterator(); while (it.hasNext()) { it.next().getInternal().release(); it.remove(); } } } @Override public void freeze(final boolean throwException) { if (!(getStorage() instanceof OFreezableStorage)) { OLogManager.instance().error(this, "Cannot freeze a non-local storage. " + "If you are using a remote client, please use OServerAdmin instead."); return; } final long startTime = Orient.instance().getProfiler().startChrono(); final Collection<? extends OIndex<?>> indexes = getMetadata().getIndexManager().getIndexes(); final List<OIndexAbstract<?>> indexesToLock = prepareIndexesToFreeze(indexes); freezeIndexes(indexesToLock, true); flushIndexes(indexesToLock); super.freeze(throwException); Orient.instance().getProfiler() .stopChrono("db." + getName() + ".freeze", "Time to freeze the database", startTime, "db.*.freeze"); } @Override public void freeze() { if (!(getStorage() instanceof OFreezableStorage)) { OLogManager.instance().error(this, "Cannot freeze a non-local storage. " + "If you are using a remote client, please use OServerAdmin instead."); return; } final long startTime = Orient.instance().getProfiler().startChrono(); final Collection<? extends OIndex<?>> indexes = getMetadata().getIndexManager().getIndexes(); final List<OIndexAbstract<?>> indexesToLock = prepareIndexesToFreeze(indexes); freezeIndexes(indexesToLock, false); flushIndexes(indexesToLock); super.freeze(); Orient.instance().getProfiler() .stopChrono("db." + getName() + ".freeze", "Time to freeze the database", startTime, "db.*.freeze"); } @Override public void release() { if (!(getStorage() instanceof OFreezableStorage)) { OLogManager.instance().error(this, "Cannot release a non-local storage. " + "If you are using a remote client, please use OServerAdmin instead."); return; } final long startTime = Orient.instance().getProfiler().startChrono(); super.release(); Collection<? extends OIndex<?>> indexes = getMetadata().getIndexManager().getIndexes(); releaseIndexes(indexes); Orient.instance().getProfiler() .stopChrono("db." + getName() + ".release", "Time to release the database", startTime, "db.*.release"); } /** * Creates a new ODocument. */ @Override public ODocument newInstance() { return new ODocument(); } public ODocument newInstance(final String iClassName) { checkSecurity(ODatabaseSecurityResources.CLASS, ORole.PERMISSION_CREATE, iClassName); return new ODocument(iClassName); } public ORecordIteratorClass<ODocument> browseClass(final String iClassName) { return browseClass(iClassName, true); } public ORecordIteratorClass<ODocument> browseClass(final String iClassName, final boolean iPolymorphic) { if (getMetadata().getSchema().getClass(iClassName) == null) throw new IllegalArgumentException("Class '" + iClassName + "' not found in current database"); checkSecurity(ODatabaseSecurityResources.CLASS, ORole.PERMISSION_READ, iClassName); return new ORecordIteratorClass<ODocument>(this, underlying, iClassName, iPolymorphic, true, false); } @Override public ORecordIteratorCluster<ODocument> browseCluster(final String iClusterName) { checkSecurity(ODatabaseSecurityResources.CLUSTER, ORole.PERMISSION_READ, iClusterName); return new ORecordIteratorCluster<ODocument>(this, underlying, getClusterIdByName(iClusterName), true); } @Override public ORecordIteratorCluster<ODocument> browseCluster(String iClusterName, OClusterPosition startClusterPosition, OClusterPosition endClusterPosition, boolean loadTombstones) { checkSecurity(ODatabaseSecurityResources.CLUSTER, ORole.PERMISSION_READ, iClusterName); return new ORecordIteratorCluster<ODocument>(this, underlying, getClusterIdByName(iClusterName), startClusterPosition, endClusterPosition, true, loadTombstones); } /** * Saves a document to the database. Behavior depends on the current running transaction, if any. If no transaction is running then * changes apply immediately. If an Optimistic transaction is running then the record will be changed at commit time. The current * transaction will continue to see the record as modified, while others will not. If a Pessimistic transaction is running, then an * exclusive lock is acquired against the record. The current transaction will continue to see the record as modified, while others * cannot access it since it is locked. * <p/> * If MVCC is enabled and the version of the document is different from the version stored in the database, then an * {@link OConcurrentModificationException} exception is thrown. Before being saved, the document must be valid according to the * constraints declared in the schema, if any (this also works in schema-less mode). To validate the document, * {@link ODocument#validate()} is called. * * @param iRecord * Record to save. * @return The Database instance itself giving a "fluent interface". Useful to call multiple methods in chain. * @throws OConcurrentModificationException * if the version of the document is different from the version contained in the database. * @throws OValidationException * if the document breaks some validation constraints defined in the schema * @see #setMVCC(boolean), {@link #isMVCC()} */ @Override public <RET extends ORecordInternal<?>> RET save(final ORecordInternal<?> iRecord) { return (RET) save(iRecord, OPERATION_MODE.SYNCHRONOUS, false, null, null); } /** * Saves a document to the database. Behavior depends on the current running transaction, if any. If no transaction is running then * changes apply immediately. If an Optimistic transaction is running then the record will be changed at commit time. The current * transaction will continue to see the record as modified, while others will not. If a Pessimistic transaction is running, then an * exclusive lock is acquired against the record. The current transaction will continue to see the record as modified, while others * cannot access it since it is locked. * <p/> * If MVCC is enabled and the version of the document is different from the version stored in the database, then an * {@link OConcurrentModificationException} exception is thrown. Before being saved, the document must be valid according to the * constraints declared in the schema, if any (this also works in schema-less mode). To validate the document, * {@link ODocument#validate()} is called. * * @param iRecord * Record to save. * @param iForceCreate * Flag that indicates that the record should be created. If a record with the current rid already exists, an exception is thrown * @param iRecordCreatedCallback * @param iRecordUpdatedCallback * @return The Database instance itself giving a "fluent interface". Useful to call multiple methods in chain. * @throws OConcurrentModificationException * if the version of the document is different from the version contained in the database. * @throws OValidationException * if the document breaks some validation constraints defined in the schema * @see #setMVCC(boolean), {@link #isMVCC()} */ @Override public <RET extends ORecordInternal<?>> RET save(final ORecordInternal<?> iRecord, final OPERATION_MODE iMode, boolean iForceCreate, final ORecordCallback<? extends Number> iRecordCreatedCallback, ORecordCallback<ORecordVersion> iRecordUpdatedCallback) { if (!(iRecord instanceof ODocument)) return (RET) super.save(iRecord, iMode, iForceCreate, iRecordCreatedCallback, iRecordUpdatedCallback); ODocument doc = (ODocument) iRecord; doc.validate(); doc.convertAllMultiValuesToTrackedVersions(); try { if (iForceCreate || doc.getIdentity().isNew()) { // NEW RECORD if (doc.getClassName() != null) checkSecurity(ODatabaseSecurityResources.CLASS, ORole.PERMISSION_CREATE, doc.getClassName()); if (doc.getSchemaClass() != null && doc.getIdentity().getClusterId() < 0) { // CLASS FOUND: FORCE THE STORING IN THE CLUSTER CONFIGURED String clusterName = getClusterNameById(doc.getSchemaClass().getDefaultClusterId()); return (RET) super.save(doc, clusterName, iMode, iForceCreate, iRecordCreatedCallback, iRecordUpdatedCallback); } } else { // UPDATE: CHECK ACCESS ON SCHEMA CLASS NAME (IF ANY) if (doc.getClassName() != null) checkSecurity(ODatabaseSecurityResources.CLASS, ORole.PERMISSION_UPDATE, doc.getClassName()); } doc = super.save(doc, iMode, iForceCreate, iRecordCreatedCallback, iRecordUpdatedCallback); } catch (OException e) { // PASS THROUGH throw e; } catch (Exception e) { OLogManager.instance().exception("Error on saving record %s of class '%s'", e, ODatabaseException.class, iRecord.getIdentity(), (doc.getClassName() != null ? doc.getClassName() : "?")); } return (RET) doc; } /** * Saves a document specifying a cluster where to store the record. Behavior depends on the current running transaction, if any. If * no transaction is running then changes apply immediately. If an Optimistic transaction is running then the record will be * changed at commit time. The current transaction will continue to see the record as modified, while others will not. If a Pessimistic * transaction is running, then an exclusive lock is acquired against the record. The current transaction will continue to see the * record as modified, while others cannot access it since it is locked. * <p/> * If MVCC is enabled and the version of the document is different from the version stored in the database, then an * {@link OConcurrentModificationException} exception is thrown. Before being saved, the document must be valid according to the * constraints declared in the schema, if any (this also works in schema-less mode). To validate the document, * {@link ODocument#validate()} is called. * * @param iRecord * Record to save * @param iClusterName * Cluster name where to save the record * @return The Database instance itself giving a "fluent interface". Useful to call multiple methods in chain. * @throws OConcurrentModificationException * if the version of the document is different from the version contained in the database. * @throws OValidationException * if the document breaks some validation constraints defined in the schema * @see #setMVCC(boolean), {@link #isMVCC()}, ORecordSchemaAware#validate() */ @Override public <RET extends ORecordInternal<?>> RET save(final ORecordInternal<?> iRecord, final String iClusterName) { return (RET) save(iRecord, iClusterName, OPERATION_MODE.SYNCHRONOUS, false, null, null); } /** * Saves a document specifying a cluster where to store the record. Behavior depends on the current running transaction, if any. If * no transaction is running then changes apply immediately. If an Optimistic transaction is running then the record will be * changed at commit time. The current transaction will continue to see the record as modified, while others will not. If a Pessimistic * transaction is running, then an exclusive lock is acquired against the record. The current transaction will continue to see the * record as modified, while others cannot access it since it is locked. * <p/> * If MVCC is enabled and the version of the document is different from the version stored in the database, then an * {@link OConcurrentModificationException} exception is thrown. Before being saved, the document must be valid according to the * constraints declared in the schema, if any (this also works in schema-less mode). To validate the document, * {@link ODocument#validate()} is called. * * @param iRecord * Record to save * @param iClusterName * Cluster name where to save the record * @param iMode * Mode of save: synchronous (default) or asynchronous * @param iForceCreate * Flag that indicates that the record should be created. If a record with the current rid already exists, an exception is thrown * @param iRecordCreatedCallback * @param iRecordUpdatedCallback * @return The Database instance itself giving a "fluent interface". Useful to call multiple methods in chain. * @throws OConcurrentModificationException * if the version of the document is different from the version contained in the database. * @throws OValidationException * if the document breaks some validation constraints defined in the schema * @see #setMVCC(boolean), {@link #isMVCC()}, ORecordSchemaAware#validate() */ @Override public <RET extends ORecordInternal<?>> RET save(final ORecordInternal<?> iRecord, String iClusterName, final OPERATION_MODE iMode, boolean iForceCreate, final ORecordCallback<? extends Number> iRecordCreatedCallback, ORecordCallback<ORecordVersion> iRecordUpdatedCallback) { if (!(iRecord instanceof ODocument)) return (RET) super.save(iRecord, iClusterName, iMode, iForceCreate, iRecordCreatedCallback, iRecordUpdatedCallback); ODocument doc = (ODocument) iRecord; if (iForceCreate || !doc.getIdentity().isValid()) { if (doc.getClassName() != null) checkSecurity(ODatabaseSecurityResources.CLASS, ORole.PERMISSION_CREATE, doc.getClassName()); if (iClusterName == null && doc.getSchemaClass() != null) // FIND THE RIGHT CLUSTER AS CONFIGURED IN CLASS iClusterName = getClusterNameById(doc.getSchemaClass().getDefaultClusterId()); int id = getClusterIdByName(iClusterName); if (id == -1) throw new IllegalArgumentException("Cluster name " + iClusterName + " is not configured"); final int[] clusterIds; if (doc.getSchemaClass() != null) { // CHECK IF THE CLUSTER IS PART OF THE CONFIGURED CLUSTERS clusterIds = doc.getSchemaClass().getClusterIds(); int i = 0; for (; i < clusterIds.length; ++i) if (clusterIds[i] == id) break; if (i == clusterIds.length) throw new IllegalArgumentException("Cluster name " + iClusterName + " is not configured to store the class " + doc.getClassName()); } else clusterIds = new int[] { id }; } else { // UPDATE: CHECK ACCESS ON SCHEMA CLASS NAME (IF ANY) if (doc.getClassName() != null) checkSecurity(ODatabaseSecurityResources.CLASS, ORole.PERMISSION_UPDATE, doc.getClassName()); } doc.validate(); doc.convertAllMultiValuesToTrackedVersions(); doc = super.save(doc, iClusterName, iMode, iForceCreate, iRecordCreatedCallback, iRecordUpdatedCallback); return (RET) doc; } /** * Deletes a document. Behavior depends on the current running transaction, if any. If no transaction is running then the record is * deleted immediately. If an Optimistic transaction is running then the record will be deleted at commit time. The current * transaction will continue to see the record as deleted, while others will not. If a Pessimistic transaction is running, then an * exclusive lock is acquired against the record. The current transaction will continue to see the record as deleted, while others * cannot access it since it is locked. * <p/> * If MVCC is enabled and the version of the document is different from the version stored in the database, then an * {@link OConcurrentModificationException} exception is thrown. * * @param iRecord * @return The Database instance itself giving a "fluent interface". Useful to call multiple methods in chain. * @see #setMVCC(boolean), {@link #isMVCC()} */ public ODatabaseDocumentTx delete(final ORecordInternal<?> iRecord) { if (iRecord == null) throw new ODatabaseException("Cannot delete null document"); // CHECK ACCESS ON SCHEMA CLASS NAME (IF ANY) if (iRecord instanceof ODocument && ((ODocument) iRecord).getClassName() != null) checkSecurity(ODatabaseSecurityResources.CLASS, ORole.PERMISSION_DELETE, ((ODocument) iRecord).getClassName()); try { underlying.delete(iRecord); } catch (Exception e) { if (iRecord instanceof ODocument) OLogManager.instance().exception("Error on deleting record %s of class '%s'", e, ODatabaseException.class, iRecord.getIdentity(), ((ODocument) iRecord).getClassName()); else OLogManager.instance().exception("Error on deleting record %s", e, ODatabaseException.class, iRecord.getIdentity()); } return this; } /** * Returns the number of records of the class iClassName.
*/ public long countClass(final String iClassName) { final OClass cls = getMetadata().getSchema().getClass(iClassName); if (cls == null) throw new IllegalArgumentException("Class '" + iClassName + "' not found in database"); return cls.count(); } public ODatabaseComplex<ORecordInternal<?>> commit() { try { return underlying.commit(); } finally { getTransaction().close(); } } public ODatabaseComplex<ORecordInternal<?>> rollback() { try { return underlying.rollback(); } finally { getTransaction().close(); } } public String getType() { return TYPE; } @Override public OSBTreeCollectionManager getSbTreeCollectionManager() { return underlying.getSbTreeCollectionManager(); } }
1no label
core_src_main_java_com_orientechnologies_orient_core_db_document_ODatabaseDocumentTx.java
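A short usage sketch for the save path documented above; the plocal URL, the Person class, and the field values are illustrative assumptions, not taken from the source:

    ODatabaseDocumentTx db = new ODatabaseDocumentTx("plocal:/tmp/demo").create();
    try {
        ODocument doc = db.newInstance("Person"); // CREATE permission on the class is checked
        doc.field("name", "Alice");
        db.save(doc); // validated, then stored in the class's default cluster
    } finally {
        db.close();
    }

Per the javadocs above, if MVCC is enabled and a concurrent writer bumped the record version first, save would throw OConcurrentModificationException.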
1,466
public class OSQLFunctionBothE extends OSQLFunctionMove { public static final String NAME = "bothE"; public OSQLFunctionBothE() { super(NAME, 0, 1); } @Override protected Object move(final OrientBaseGraph graph, final OIdentifiable iRecord, final String[] iLabels) { return v2e(graph, iRecord, Direction.BOTH, iLabels); } }
1no label
graphdb_src_main_java_com_orientechnologies_orient_graph_sql_functions_OSQLFunctionBothE.java
473
public class EnhancedTokenBasedRememberMeServices extends TokenBasedRememberMeServices { @Resource(name="blCookieUtils") protected CookieUtils cookieUtils; @Deprecated public EnhancedTokenBasedRememberMeServices() {} public EnhancedTokenBasedRememberMeServices(String key, UserDetailsService userDetailsService) { super(key, userDetailsService); } @Override protected void setCookie(String[] tokens, int maxAge, HttpServletRequest request, HttpServletResponse response) { MockResponse mockResponse = new MockResponse(); super.setCookie(tokens, maxAge, request, mockResponse); Cookie myCookie = mockResponse.getTempCookie(); cookieUtils.setCookieValue(response, myCookie.getName(), myCookie.getValue(), myCookie.getPath(), myCookie.getMaxAge(), myCookie.getSecure()); } private class MockResponse implements HttpServletResponse { private Cookie tempCookie; public void addCookie(Cookie arg0) { this.tempCookie = arg0; } public Cookie getTempCookie() { return tempCookie; } public void addDateHeader(String arg0, long arg1) { //do nothing } public void addHeader(String arg0, String arg1) { //do nothing } public void addIntHeader(String arg0, int arg1) { //do nothing } public boolean containsHeader(String arg0) { return false; } public String encodeRedirectUrl(String arg0) { return null; } public String encodeRedirectURL(String arg0) { return null; } public String encodeUrl(String arg0) { return null; } public String encodeURL(String arg0) { return null; } public void sendError(int arg0, String arg1) throws IOException { //do nothing } public void sendError(int arg0) throws IOException { //do nothing } public void sendRedirect(String arg0) throws IOException { //do nothing } public void setDateHeader(String arg0, long arg1) { //do nothing } public void setHeader(String arg0, String arg1) { //do nothing } public void setIntHeader(String arg0, int arg1) { //do nothing } public void setStatus(int arg0, String arg1) { //do nothing } public void setStatus(int arg0) { //do nothing } public void flushBuffer() throws IOException { //do nothing } public int getBufferSize() { return 0; } public String getCharacterEncoding() { return null; } public String getContentType() { return null; } public Locale getLocale() { return null; } public ServletOutputStream getOutputStream() throws IOException { return null; } public PrintWriter getWriter() throws IOException { return null; } public boolean isCommitted() { return false; } public void reset() { //do nothing } public void resetBuffer() { //do nothing } public void setBufferSize(int arg0) { //do nothing } public void setCharacterEncoding(String arg0) { //do nothing } public void setContentLength(int arg0) { //do nothing } public void setContentType(String arg0) { //do nothing } public void setLocale(Locale arg0) { //do nothing } } }
0true
common_src_main_java_org_broadleafcommerce_common_security_EnhancedTokenBasedRememberMeServices.java
3
public abstract class DaemonRunner<S> { private static final Logger log = LoggerFactory.getLogger(DaemonRunner.class); private Thread killerHook; protected abstract String getDaemonShortName(); protected abstract void killImpl(final S stat) throws IOException; protected abstract S startImpl() throws IOException; protected abstract S readStatusFromDisk(); /** * Read daemon status from disk, then try to start the daemon * if the status file says it is stopped. Do nothing if the * status read from disk says the daemon is running. * * After successfully starting the daemon (no exceptions from * {@link #startImpl()}), register a shutdown hook with the VM * that will call {@link #killImpl(S)} on shutdown. * * @return status representing the daemon, either just-started * or already running */ public synchronized S start() { S stat = readStatusFromDisk(); if (stat != null) { log.info("{} already started", getDaemonShortName()); return stat; } try { stat = startImpl(); } catch (IOException e) { throw new RuntimeException(e); } registerKillerHook(stat); return stat; } /** * Read daemon status from disk, then try to kill the daemon * if the status file says it is running. Do nothing if the * status read from disk says the daemon is stopped. */ public synchronized void stop() { S stat = readStatusFromDisk(); if (null == stat) { log.info("{} is not running", getDaemonShortName()); return; } killAndUnregisterHook(stat); } private synchronized void registerKillerHook(final S stat) { if (null != killerHook) { log.debug("Daemon killer hook already registered: {}", killerHook); return; } killerHook = new Thread() { public void run() { killAndUnregisterHook(stat); } }; Runtime.getRuntime().addShutdownHook(killerHook); log.debug("Registered daemon killer hook: {}", killerHook); } private synchronized void killAndUnregisterHook(final S stat) { try { killImpl(stat); } catch (IOException e) { throw new RuntimeException(e); } if (null != killerHook) { try { Runtime.getRuntime().removeShutdownHook(killerHook); log.debug("Unregistered killer hook: {}", killerHook); } catch (IllegalStateException e) { /* Can receive "java.lang.IllegalStateException: Shutdown in progress" * when called from JVM shutdown (as opposed to called from the stop method). */ log.debug("Could not unregister killer hook: {}", e); } killerHook = null; } } /** * Runs the given command as an external process. Returns normally if the command starts * without throwing an exception and returns exit status 0. Throws an * exception if there's any problem invoking the command or if it does not * return zero exit status. * * Blocks indefinitely while waiting for the command to complete. * * @param argv * passed directly to {@link ProcessBuilder}'s constructor */ protected static void runCommand(String... argv) { final String cmd = Joiner.on(" ").join(argv); log.info("Executing {}", cmd); ProcessBuilder pb = new ProcessBuilder(argv); pb.redirectErrorStream(true); Process startup; try { startup = pb.start(); } catch (IOException e) { throw new RuntimeException(e); } StreamLogger sl = new StreamLogger(startup.getInputStream()); sl.setDaemon(true); sl.start(); try { int exitcode = startup.waitFor(); // wait for script to return if (0 == exitcode) { log.info("Command \"{}\" exited with status 0", cmd); } else { throw new RuntimeException("Command \"" + cmd + "\" exited with status " + exitcode); } } catch (InterruptedException e) { throw new RuntimeException(e); } try { sl.join(1000L); } catch (InterruptedException e) { log.warn("Failed to clean up stdin handler thread after running command \"{}\"", cmd, e); } } /* * This could be retired in favor of ProcessBuilder.Redirect when we move to * source level 1.7. */ private static class StreamLogger extends Thread { private final BufferedReader reader; private static final Logger log = LoggerFactory.getLogger(StreamLogger.class); private StreamLogger(InputStream is) { this.reader = new BufferedReader(new InputStreamReader(is)); } @Override public void run() { String line; try { while (null != (line = reader.readLine())) { log.info("> {}", line); if (Thread.currentThread().isInterrupted()) { break; } } log.info("End of stream."); } catch (IOException e) { log.error("Unexpected IOException while reading stream {}", reader, e); } } } }
0true
titan-test_src_main_java_com_thinkaurelius_titan_DaemonRunner.java
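To show how the lifecycle above is meant to be specialized, here is a hedged sketch of a subclass; the class name, the pid-as-Integer status type, the shell commands, and the hard-coded pid are all assumptions for illustration, not part of the Titan sources:

    public class SleepDaemonRunner extends DaemonRunner<Integer> {
        @Override
        protected String getDaemonShortName() {
            return "sleepd"; // hypothetical daemon name used in log messages
        }
        @Override
        protected Integer readStatusFromDisk() {
            return null; // pretend no status file exists, i.e. the daemon is stopped
        }
        @Override
        protected Integer startImpl() throws IOException {
            // runCommand blocks until the launcher command exits with status 0
            runCommand("/bin/sh", "-c", "nohup sleep 600 >/dev/null 2>&1 & echo started");
            return 1234; // a real implementation would read the daemon's pid from disk
        }
        @Override
        protected void killImpl(Integer pid) throws IOException {
            runCommand("kill", String.valueOf(pid));
        }
    }

Calling start() on this class runs startImpl and then registers the JVM shutdown hook that will invoke killImpl on exit.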
239
highlighter = new XPostingsHighlighter() { @Override protected PassageFormatter getFormatter(String field) { return passageFormatter; } };
0true
src_test_java_org_apache_lucene_search_postingshighlight_XPostingsHighlighterTests.java
343
public class NodesShutdownRequest extends MasterNodeOperationRequest<NodesShutdownRequest> { String[] nodesIds = Strings.EMPTY_ARRAY; TimeValue delay = TimeValue.timeValueSeconds(1); boolean exit = true; NodesShutdownRequest() { } public NodesShutdownRequest(String... nodesIds) { this.nodesIds = nodesIds; } public NodesShutdownRequest nodesIds(String... nodesIds) { this.nodesIds = nodesIds; return this; } /** * The delay for the shutdown to occur. Defaults to <tt>1s</tt>. */ public NodesShutdownRequest delay(TimeValue delay) { this.delay = delay; return this; } public TimeValue delay() { return this.delay; } /** * The delay for the shutdown to occur. Defaults to <tt>1s</tt>. */ public NodesShutdownRequest delay(String delay) { return delay(TimeValue.parseTimeValue(delay, null)); } /** * Should the JVM be exited as well or not. Defaults to <tt>true</tt>. */ public NodesShutdownRequest exit(boolean exit) { this.exit = exit; return this; } /** * Should the JVM be exited as well or not. Defaults to <tt>true</tt>. */ public boolean exit() { return exit; } @Override public ActionRequestValidationException validate() { return null; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); delay = readTimeValue(in); nodesIds = in.readStringArray(); exit = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); delay.writeTo(out); out.writeStringArrayNullable(nodesIds); out.writeBoolean(exit); } }
0true
src_main_java_org_elasticsearch_action_admin_cluster_node_shutdown_NodesShutdownRequest.java
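A brief usage sketch of the fluent setters above; the node ids are illustrative:

    NodesShutdownRequest request = new NodesShutdownRequest("node1", "node2")
            .delay("5s")   // wait five seconds before the shutdown occurs
            .exit(false);  // shut the node down without exiting the JVM
    // validate() always returns null here: the request has no required fields
    assert request.validate() == null;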
904
class ShardSuggestResponse extends BroadcastShardOperationResponse { private final Suggest suggest; ShardSuggestResponse() { this.suggest = new Suggest(); } public ShardSuggestResponse(String index, int shardId, Suggest suggest) { super(index, shardId); this.suggest = suggest; } public Suggest getSuggest() { return this.suggest; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); suggest.readFrom(in); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); suggest.writeTo(out); } }
0true
src_main_java_org_elasticsearch_action_suggest_ShardSuggestResponse.java
180
@Component("blURLHandlerFilter") public class URLHandlerFilter extends OncePerRequestFilter { @Resource(name = "blURLHandlerService") private URLHandlerService urlHandlerService; @Override protected void doFilterInternal(HttpServletRequest request, HttpServletResponse response, FilterChain filterChain) throws ServletException, IOException { String contextPath = request.getContextPath(); String requestURIWithoutContext; if (request.getContextPath() != null) { requestURIWithoutContext = request.getRequestURI().substring(request.getContextPath().length()); } else { requestURIWithoutContext = request.getRequestURI(); } URLHandler handler = urlHandlerService.findURLHandlerByURI(requestURIWithoutContext); if (handler != null) { if (URLRedirectType.FORWARD == handler.getUrlRedirectType()) { request.getRequestDispatcher(handler.getNewURL()).forward(request, response); } else if (URLRedirectType.REDIRECT_PERM == handler.getUrlRedirectType()) { String url = UrlUtil.fixRedirectUrl(contextPath, handler.getNewURL()); response.setStatus(HttpServletResponse.SC_MOVED_PERMANENTLY); response.setHeader( "Location", url); response.setHeader( "Connection", "close" ); } else if (URLRedirectType.REDIRECT_TEMP == handler.getUrlRedirectType()) { String url = UrlUtil.fixRedirectUrl(contextPath, handler.getNewURL()); response.sendRedirect(url); } } else { filterChain.doFilter(request, response); } } /** * If the url does not include "//" then the system will ensure that the application context * is added to the start of the URL. * * @param url * @return */ protected String fixRedirectUrl(String contextPath, String url) { if (url.indexOf("//") < 0) { if (contextPath != null && (! "".equals(contextPath))) { if (! url.startsWith("/")) { url = "/" + url; } if (! url.startsWith(contextPath)) { url = contextPath + url; } } } return url; } }
0true
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_web_URLHandlerFilter.java
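The protected fixRedirectUrl helper above only rewrites URLs that contain no "//"; a few worked examples of its behavior (the context path and URLs are illustrative):

    fixRedirectUrl("/mystore", "checkout");          // -> "/mystore/checkout" (slash and context added)
    fixRedirectUrl("/mystore", "/mystore/checkout"); // -> "/mystore/checkout" (already prefixed, unchanged)
    fixRedirectUrl("/mystore", "http://elsewhere");  // -> "http://elsewhere" (contains "//", left alone)

Note that doFilterInternal above delegates to UrlUtil.fixRedirectUrl rather than this local helper, so the method appears to be an unused duplicate of that logic.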
53
public class SchemaViolationException extends TitanException { public SchemaViolationException(String msg) { super(msg); } public SchemaViolationException(String msg, Object... args) { super(String.format(msg,args)); } }
0true
titan-core_src_main_java_com_thinkaurelius_titan_core_SchemaViolationException.java
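A small sketch of the formatting constructor above; the message text, key, and value are hypothetical:

    String key = "name";
    Object value = "duplicate";
    // the varargs constructor routes through String.format(msg, args)
    throw new SchemaViolationException("Value %s violates a uniqueness constraint on key [%s]", value, key);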
78
{ @Override public void beforeCompletion() { throw secondException; } @Override public void afterCompletion( int status ) { } };
0true
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestTransactionImpl.java
5,760
public class FetchPhase implements SearchPhase {

    private final FetchSubPhase[] fetchSubPhases;

    @Inject
    public FetchPhase(HighlightPhase highlightPhase, ScriptFieldsFetchSubPhase scriptFieldsPhase,
                      PartialFieldsFetchSubPhase partialFieldsPhase, MatchedQueriesFetchSubPhase matchedQueriesPhase,
                      ExplainFetchSubPhase explainPhase, VersionFetchSubPhase versionPhase,
                      FetchSourceSubPhase fetchSourceSubPhase, FieldDataFieldsFetchSubPhase fieldDataFieldsFetchSubPhase) {
        this.fetchSubPhases = new FetchSubPhase[]{scriptFieldsPhase, partialFieldsPhase, matchedQueriesPhase,
                explainPhase, highlightPhase, fetchSourceSubPhase, versionPhase, fieldDataFieldsFetchSubPhase};
    }

    @Override
    public Map<String, ? extends SearchParseElement> parseElements() {
        ImmutableMap.Builder<String, SearchParseElement> parseElements = ImmutableMap.builder();
        parseElements.put("fields", new FieldsParseElement());
        for (FetchSubPhase fetchSubPhase : fetchSubPhases) {
            parseElements.putAll(fetchSubPhase.parseElements());
        }
        return parseElements.build();
    }

    @Override
    public void preProcess(SearchContext context) {
    }

    public void execute(SearchContext context) {
        FieldsVisitor fieldsVisitor;
        List<String> extractFieldNames = null;

        if (!context.hasFieldNames()) {
            if (context.hasPartialFields()) {
                // partial fields need the source, so fetch it
                fieldsVisitor = new UidAndSourceFieldsVisitor();
            } else {
                // no fields specified, default to return source if no explicit indication
                if (!context.hasScriptFields() && !context.hasFetchSourceContext()) {
                    context.fetchSourceContext(new FetchSourceContext(true));
                }
                fieldsVisitor = context.sourceRequested() ? new UidAndSourceFieldsVisitor() : new JustUidFieldsVisitor();
            }
        } else if (context.fieldNames().isEmpty()) {
            if (context.sourceRequested()) {
                fieldsVisitor = new UidAndSourceFieldsVisitor();
            } else {
                fieldsVisitor = new JustUidFieldsVisitor();
            }
        } else {
            boolean loadAllStored = false;
            Set<String> fieldNames = null;
            for (String fieldName : context.fieldNames()) {
                if (fieldName.equals("*")) {
                    loadAllStored = true;
                    continue;
                }
                if (fieldName.equals(SourceFieldMapper.NAME)) {
                    if (context.hasFetchSourceContext()) {
                        context.fetchSourceContext().fetchSource(true);
                    } else {
                        context.fetchSourceContext(new FetchSourceContext(true));
                    }
                    continue;
                }
                FieldMappers x = context.smartNameFieldMappers(fieldName);
                if (x == null) {
                    // Only fail if we know it is a object field, missing paths / fields shouldn't fail.
                    if (context.smartNameObjectMapper(fieldName) != null) {
                        throw new ElasticsearchIllegalArgumentException("field [" + fieldName + "] isn't a leaf field");
                    }
                } else if (x.mapper().fieldType().stored()) {
                    if (fieldNames == null) {
                        fieldNames = new HashSet<String>();
                    }
                    fieldNames.add(x.mapper().names().indexName());
                } else {
                    if (extractFieldNames == null) {
                        extractFieldNames = newArrayList();
                    }
                    extractFieldNames.add(fieldName);
                }
            }
            if (loadAllStored) {
                fieldsVisitor = new AllFieldsVisitor(); // load everything, including _source
            } else if (fieldNames != null) {
                boolean loadSource = extractFieldNames != null || context.sourceRequested();
                fieldsVisitor = new CustomFieldsVisitor(fieldNames, loadSource);
            } else if (extractFieldNames != null || context.sourceRequested()) {
                fieldsVisitor = new UidAndSourceFieldsVisitor();
            } else {
                fieldsVisitor = new JustUidFieldsVisitor();
            }
        }

        InternalSearchHit[] hits = new InternalSearchHit[context.docIdsToLoadSize()];
        FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
        for (int index = 0; index < context.docIdsToLoadSize(); index++) {
            int docId = context.docIdsToLoad()[context.docIdsToLoadFrom() + index];
            loadStoredFields(context, fieldsVisitor, docId);
            fieldsVisitor.postProcess(context.mapperService());

            Map<String, SearchHitField> searchFields = null;
            if (!fieldsVisitor.fields().isEmpty()) {
                searchFields = new HashMap<String, SearchHitField>(fieldsVisitor.fields().size());
                for (Map.Entry<String, List<Object>> entry : fieldsVisitor.fields().entrySet()) {
                    searchFields.put(entry.getKey(), new InternalSearchHitField(entry.getKey(), entry.getValue()));
                }
            }

            DocumentMapper documentMapper = context.mapperService().documentMapper(fieldsVisitor.uid().type());
            Text typeText;
            if (documentMapper == null) {
                typeText = new StringAndBytesText(fieldsVisitor.uid().type());
            } else {
                typeText = documentMapper.typeText();
            }
            InternalSearchHit searchHit = new InternalSearchHit(docId, fieldsVisitor.uid().id(), typeText, searchFields);
            hits[index] = searchHit;

            int readerIndex = ReaderUtil.subIndex(docId, context.searcher().getIndexReader().leaves());
            AtomicReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex);
            int subDoc = docId - subReaderContext.docBase;

            // go over and extract fields that are not mapped / stored
            context.lookup().setNextReader(subReaderContext);
            context.lookup().setNextDocId(subDoc);
            if (fieldsVisitor.source() != null) {
                context.lookup().source().setNextSource(fieldsVisitor.source());
            }
            if (extractFieldNames != null) {
                for (String extractFieldName : extractFieldNames) {
                    List<Object> values = context.lookup().source().extractRawValues(extractFieldName);
                    if (!values.isEmpty()) {
                        if (searchHit.fieldsOrNull() == null) {
                            searchHit.fields(new HashMap<String, SearchHitField>(2));
                        }
                        SearchHitField hitField = searchHit.fields().get(extractFieldName);
                        if (hitField == null) {
                            hitField = new InternalSearchHitField(extractFieldName, new ArrayList<Object>(2));
                            searchHit.fields().put(extractFieldName, hitField);
                        }
                        for (Object value : values) {
                            hitField.values().add(value);
                        }
                    }
                }
            }

            hitContext.reset(searchHit, subReaderContext, subDoc, context.searcher().getIndexReader(), docId, fieldsVisitor);
            for (FetchSubPhase fetchSubPhase : fetchSubPhases) {
                if (fetchSubPhase.hitExecutionNeeded(context)) {
                    fetchSubPhase.hitExecute(context, hitContext);
                }
            }
        }

        for (FetchSubPhase fetchSubPhase : fetchSubPhases) {
            if (fetchSubPhase.hitsExecutionNeeded(context)) {
                fetchSubPhase.hitsExecute(context, hits);
            }
        }

        context.fetchResult().hits(new InternalSearchHits(hits, context.queryResult().topDocs().totalHits,
                context.queryResult().topDocs().getMaxScore()));
    }

    private void loadStoredFields(SearchContext context, FieldsVisitor fieldVisitor, int docId) {
        fieldVisitor.reset();
        try {
            context.searcher().doc(docId, fieldVisitor);
        } catch (IOException e) {
            throw new FetchPhaseExecutionException(context, "Failed to fetch doc id [" + docId + "]", e);
        }
    }
}
1no label
src_main_java_org_elasticsearch_search_fetch_FetchPhase.java
660
@Repository("blProductDao") public class ProductDaoImpl implements ProductDao { @PersistenceContext(unitName="blPU") protected EntityManager em; @Resource(name="blEntityConfiguration") protected EntityConfiguration entityConfiguration; protected Long currentDateResolution = 10000L; protected Date cachedDate = SystemTime.asDate(); @Override public Product save(Product product) { return em.merge(product); } @Override public Product readProductById(Long productId) { return em.find(ProductImpl.class, productId); } @Override public List<Product> readProductsByIds(List<Long> productIds) { if (productIds == null || productIds.size() == 0) { return null; } // Set up the criteria query that specifies we want to return Products CriteriaBuilder builder = em.getCriteriaBuilder(); CriteriaQuery<Product> criteria = builder.createQuery(Product.class); Root<ProductImpl> product = criteria.from(ProductImpl.class); criteria.select(product); // We only want results that match the product IDs criteria.where(product.get("id").as(Long.class).in(productIds)); TypedQuery<Product> query = em.createQuery(criteria); query.setHint(QueryHints.HINT_CACHEABLE, true); query.setHint(QueryHints.HINT_CACHE_REGION, "query.Catalog"); return query.getResultList(); } @Override public List<Product> readProductsByName(String searchName) { TypedQuery<Product> query = em.createNamedQuery("BC_READ_PRODUCTS_BY_NAME", Product.class); query.setParameter("name", searchName + '%'); query.setHint(QueryHints.HINT_CACHEABLE, true); query.setHint(QueryHints.HINT_CACHE_REGION, "query.Catalog"); return query.getResultList(); } @Override public List<Product> readProductsByName(@Nonnull String searchName, @Nonnull int limit, @Nonnull int offset) { TypedQuery<Product> query = em.createNamedQuery("BC_READ_PRODUCTS_BY_NAME", Product.class); query.setParameter("name", searchName + '%'); query.setFirstResult(offset); query.setMaxResults(limit); query.setHint(QueryHints.HINT_CACHEABLE, true); query.setHint(QueryHints.HINT_CACHE_REGION, "query.Catalog"); return query.getResultList(); } protected Date getCurrentDateAfterFactoringInDateResolution() { Date returnDate = SystemTime.getCurrentDateWithinTimeResolution(cachedDate, currentDateResolution); if (returnDate != cachedDate) { if (SystemTime.shouldCacheDate()) { cachedDate = returnDate; } } return returnDate; } @Override public List<Product> readActiveProductsByCategory(Long categoryId) { Date currentDate = getCurrentDateAfterFactoringInDateResolution(); return readActiveProductsByCategoryInternal(categoryId, currentDate); } @Override @Deprecated public List<Product> readActiveProductsByCategory(Long categoryId, Date currentDate) { return readActiveProductsByCategoryInternal(categoryId, currentDate); } protected List<Product> readActiveProductsByCategoryInternal(Long categoryId, Date currentDate) { TypedQuery<Product> query = em.createNamedQuery("BC_READ_ACTIVE_PRODUCTS_BY_CATEGORY", Product.class); query.setParameter("categoryId", categoryId); query.setParameter("currentDate", currentDate); query.setHint(QueryHints.HINT_CACHEABLE, true); query.setHint(QueryHints.HINT_CACHE_REGION, "query.Catalog"); return query.getResultList(); } @Override public List<Product> readFilteredActiveProductsByQuery(String query, ProductSearchCriteria searchCriteria) { Date currentDate = getCurrentDateAfterFactoringInDateResolution(); return readFilteredActiveProductsByQueryInternal(query, currentDate, searchCriteria); } @Override @Deprecated public List<Product> readFilteredActiveProductsByQuery(String query, Date 
currentDate, ProductSearchCriteria searchCriteria) { return readFilteredActiveProductsByQueryInternal(query, currentDate, searchCriteria); } protected List<Product> readFilteredActiveProductsByQueryInternal(String query, Date currentDate, ProductSearchCriteria searchCriteria) { // Set up the criteria query that specifies we want to return Products CriteriaBuilder builder = em.getCriteriaBuilder(); CriteriaQuery<Product> criteria = builder.createQuery(Product.class); // The root of our search is Product since we are searching Root<ProductImpl> product = criteria.from(ProductImpl.class); // We also want to filter on attributes from sku and productAttributes Join<Product, Sku> sku = product.join("defaultSku"); // Product objects are what we want back criteria.select(product); // We only want results that match the search query List<Predicate> restrictions = new ArrayList<Predicate>(); String lq = query.toLowerCase(); restrictions.add( builder.or( builder.like(builder.lower(sku.get("name").as(String.class)), '%' + lq + '%'), builder.like(builder.lower(sku.get("longDescription").as(String.class)), '%' + lq + '%') ) ); attachProductSearchCriteria(searchCriteria, product, sku, restrictions); attachActiveRestriction(currentDate, product, sku, restrictions); attachOrderBy(searchCriteria, product, sku, criteria); // Execute the query with the restrictions criteria.where(restrictions.toArray(new Predicate[restrictions.size()])); TypedQuery<Product> typedQuery = em.createQuery(criteria); //don't cache - not really practical for open ended search return typedQuery.getResultList(); } @Override public List<Product> readFilteredActiveProductsByCategory(Long categoryId, ProductSearchCriteria searchCriteria) { Date currentDate = getCurrentDateAfterFactoringInDateResolution(); return readFilteredActiveProductsByCategoryInternal(categoryId, currentDate, searchCriteria); } @Override @Deprecated public List<Product> readFilteredActiveProductsByCategory(Long categoryId, Date currentDate, ProductSearchCriteria searchCriteria) { return readFilteredActiveProductsByCategoryInternal(categoryId, currentDate, searchCriteria); } protected List<Product> readFilteredActiveProductsByCategoryInternal(Long categoryId, Date currentDate, ProductSearchCriteria searchCriteria) { // Set up the criteria query that specifies we want to return Products CriteriaBuilder builder = em.getCriteriaBuilder(); CriteriaQuery<Product> criteria = builder.createQuery(Product.class); Root<ProductImpl> product = criteria.from(ProductImpl.class); // We want to filter on attributes from product and sku Join<Product, Sku> sku = product.join("defaultSku"); ListJoin<Product, CategoryProductXref> categoryXref = product.joinList("allParentCategoryXrefs"); // Product objects are what we want back criteria.select(product); // We only want results from the selected category List<Predicate> restrictions = new ArrayList<Predicate>(); restrictions.add(builder.equal(categoryXref.get("categoryProductXref").get("category").get("id"), categoryId)); attachProductSearchCriteria(searchCriteria, product, sku, restrictions); attachActiveRestriction(currentDate, product, sku, restrictions); attachOrderBy(searchCriteria, product, sku, criteria); // Execute the query with the restrictions criteria.where(restrictions.toArray(new Predicate[restrictions.size()])); TypedQuery<Product> typedQuery = em.createQuery(criteria); //don't cache - not really practical for open ended search return typedQuery.getResultList(); } protected void attachActiveRestriction(Date currentDate, 
Path<? extends Product> product, Path<? extends Sku> sku, List<Predicate> restrictions) { CriteriaBuilder builder = em.getCriteriaBuilder(); // Add the product archived status flag restriction restrictions.add(builder.or( builder.isNull(product.get("archiveStatus").get("archived")), builder.equal(product.get("archiveStatus").get("archived"), 'N'))); // Add the active start/end date restrictions restrictions.add(builder.lessThan(sku.get("activeStartDate").as(Date.class), currentDate)); restrictions.add(builder.or( builder.isNull(sku.get("activeEndDate")), builder.greaterThan(sku.get("activeEndDate").as(Date.class), currentDate))); } protected void attachOrderBy(ProductSearchCriteria searchCriteria, From<?, ? extends Product> product, Path<? extends Sku> sku, CriteriaQuery<?> criteria) { if (StringUtils.isNotBlank(searchCriteria.getSortQuery())) { CriteriaBuilder builder = em.getCriteriaBuilder(); List<Order> sorts = new ArrayList<Order>(); String sortQueries = searchCriteria.getSortQuery(); for (String sortQuery : sortQueries.split(",")) { String[] sort = sortQuery.split(" "); if (sort.length == 2) { String key = sort[0]; boolean asc = sort[1].toLowerCase().contains("asc"); // Determine whether we should use the product path or the sku path Path<?> pathToUse; if (key.contains("defaultSku.")) { pathToUse = sku; key = key.substring("defaultSku.".length()); } else if (key.contains("product.")) { pathToUse = product; key = key.substring("product.".length()); } else { // We don't know which path this facet is built on - resolves previous bug that attempted // to attach search facet to any query parameter continue; } if (asc) { sorts.add(builder.asc(pathToUse.get(key))); } else { sorts.add(builder.desc(pathToUse.get(key))); } } } criteria.orderBy(sorts.toArray(new Order[sorts.size()])); } } protected void attachProductSearchCriteria(ProductSearchCriteria searchCriteria, From<?, ? extends Product> product, From<?, ? extends Sku> sku, List<Predicate> restrictions) { CriteriaBuilder builder = em.getCriteriaBuilder(); // Build out the filter criteria from the users request for (Entry<String, String[]> entry : searchCriteria.getFilterCriteria().entrySet()) { String key = entry.getKey(); List<String> eqValues = new ArrayList<String>(); List<String[]> rangeValues = new ArrayList<String[]>(); // Determine which path is the appropriate one to use Path<?> pathToUse; if (key.contains("defaultSku.")) { pathToUse = sku; key = key.substring("defaultSku.".length()); } else if (key.contains("productAttributes.")) { pathToUse = product.join("productAttributes"); key = key.substring("productAttributes.".length()); restrictions.add(builder.equal(pathToUse.get("name").as(String.class), key)); key = "value"; } else if (key.contains("product.")) { pathToUse = product; key = key.substring("product.".length()); } else { // We don't know which path this facet is built on - resolves previous bug that attempted // to attach search facet to any query parameter continue; } // Values can be equality checks (ie manufacturer=Dave's) or range checks, which take the form // key=range[minRange:maxRange]. Figure out what type of check this is for (String value : entry.getValue()) { if (value.contains("range[")) { String[] rangeValue = new String[] { value.substring(value.indexOf("[") + 1, value.indexOf(":")), value.substring(value.indexOf(":") + 1, value.indexOf("]")) }; rangeValues.add(rangeValue); } else { eqValues.add(value); } } // Add the equality range restriction with the "in" builder. 
That means that the query string // ?manufacturer=Dave&manufacturer=Bob would match either Dave or Bob if (eqValues.size() > 0) { restrictions.add(pathToUse.get(key).in(eqValues)); } // If we have any range restrictions, we need to build those too. Ranges are also "or"ed together, // such that specifying range[0:5] and range[10:null] for the same field would match items // that were valued between 0 and 5 OR over 10 for that field List<Predicate> rangeRestrictions = new ArrayList<Predicate>(); for (String[] range : rangeValues) { BigDecimal min = new BigDecimal(range[0]); BigDecimal max = null; if (range[1] != null && !range[1].equals("null")) { max = new BigDecimal(range[1]); } Predicate minRange = builder.greaterThan(pathToUse.get(key).as(BigDecimal.class), min); Predicate maxRange = null; if (max != null) { maxRange = builder.lessThan(pathToUse.get(key).as(BigDecimal.class), max); rangeRestrictions.add(builder.and(minRange, maxRange)); } else { rangeRestrictions.add(minRange); } } if (rangeRestrictions.size() > 0) { restrictions.add(builder.or(rangeRestrictions.toArray(new Predicate[rangeRestrictions.size()]))); } } } @Override public List<Product> readActiveProductsByCategory(Long categoryId, int limit, int offset) { Date currentDate = getCurrentDateAfterFactoringInDateResolution(); return readActiveProductsByCategoryInternal(categoryId, currentDate, limit, offset); } @Override @Deprecated public List<Product> readActiveProductsByCategory(Long categoryId, Date currentDate, int limit, int offset) { return readActiveProductsByCategoryInternal(categoryId, currentDate, limit, offset); } public List<Product> readActiveProductsByCategoryInternal(Long categoryId, Date currentDate, int limit, int offset) { TypedQuery<Product> query = em.createNamedQuery("BC_READ_ACTIVE_PRODUCTS_BY_CATEGORY", Product.class); query.setParameter("categoryId", categoryId); query.setParameter("currentDate", currentDate); query.setFirstResult(offset); query.setMaxResults(limit); query.setHint(QueryHints.HINT_CACHEABLE, true); query.setHint(QueryHints.HINT_CACHE_REGION, "query.Catalog"); return query.getResultList(); } @Override public List<Product> readProductsByCategory(Long categoryId) { TypedQuery<Product> query = em.createNamedQuery("BC_READ_PRODUCTS_BY_CATEGORY", Product.class); query.setParameter("categoryId", categoryId); query.setHint(QueryHints.HINT_CACHEABLE, true); query.setHint(QueryHints.HINT_CACHE_REGION, "query.Catalog"); return query.getResultList(); } @Override public List<Product> readProductsByCategory(Long categoryId, int limit, int offset) { TypedQuery<Product> query = em.createNamedQuery("BC_READ_PRODUCTS_BY_CATEGORY", Product.class); query.setParameter("categoryId", categoryId); query.setFirstResult(offset); query.setMaxResults(limit); query.setHint(QueryHints.HINT_CACHEABLE, true); query.setHint(QueryHints.HINT_CACHE_REGION, "query.Catalog"); return query.getResultList(); } @Override public void delete(Product product){ ((Status) product).setArchived('Y'); em.merge(product); } @Override public Product create(ProductType productType) { return (Product) entityConfiguration.createEntityInstance(productType.getType()); } @Override public List<ProductBundle> readAutomaticProductBundles() { Date currentDate = getCurrentDateAfterFactoringInDateResolution(); TypedQuery<ProductBundle> query = em.createNamedQuery("BC_READ_AUTOMATIC_PRODUCT_BUNDLES", ProductBundle.class); query.setParameter("currentDate", currentDate); query.setParameter("autoBundle", Boolean.TRUE); 
query.setHint(QueryHints.HINT_CACHEABLE, true); query.setHint(QueryHints.HINT_CACHE_REGION, "query.Catalog"); return query.getResultList(); } @Override public Long getCurrentDateResolution() { return currentDateResolution; } @Override public void setCurrentDateResolution(Long currentDateResolution) { this.currentDateResolution = currentDateResolution; } @Override public List<Product> findProductByURI(String uri) { String urlKey = uri.substring(uri.lastIndexOf('/')); Query query; query = em.createNamedQuery("BC_READ_PRODUCTS_BY_OUTGOING_URL"); query.setParameter("url", uri); query.setParameter("urlKey", urlKey); query.setParameter("currentDate", getCurrentDateAfterFactoringInDateResolution()); query.setHint(QueryHints.HINT_CACHEABLE, true); query.setHint(QueryHints.HINT_CACHE_REGION, "query.Catalog"); @SuppressWarnings("unchecked") List<Product> results = (List<Product>) query.getResultList(); return results; } @Override public List<Product> readAllActiveProducts(int page, int pageSize) { Date currentDate = getCurrentDateAfterFactoringInDateResolution(); return readAllActiveProductsInternal(page, pageSize, currentDate); } @Override @Deprecated public List<Product> readAllActiveProducts(int page, int pageSize, Date currentDate) { return readAllActiveProductsInternal(page, pageSize, currentDate); } protected List<Product> readAllActiveProductsInternal(int page, int pageSize, Date currentDate) { CriteriaQuery<Product> criteria = getCriteriaForActiveProducts(currentDate); int firstResult = page * pageSize; TypedQuery<Product> query = em.createQuery(criteria); query.setHint(QueryHints.HINT_CACHEABLE, true); query.setHint(QueryHints.HINT_CACHE_REGION, "query.Catalog"); return query.setFirstResult(firstResult).setMaxResults(pageSize).getResultList(); } @Override public List<Product> readAllActiveProducts() { Date currentDate = getCurrentDateAfterFactoringInDateResolution(); return readAllActiveProductsInternal(currentDate); } @Override @Deprecated public List<Product> readAllActiveProducts(Date currentDate) { return readAllActiveProductsInternal(currentDate); } protected List<Product> readAllActiveProductsInternal(Date currentDate) { CriteriaQuery<Product> criteria = getCriteriaForActiveProducts(currentDate); TypedQuery<Product> query = em.createQuery(criteria); query.setHint(QueryHints.HINT_CACHEABLE, true); query.setHint(QueryHints.HINT_CACHE_REGION, "query.Catalog"); return query.getResultList(); } @Override public Long readCountAllActiveProducts() { Date currentDate = getCurrentDateAfterFactoringInDateResolution(); return readCountAllActiveProductsInternal(currentDate); } @Override @Deprecated public Long readCountAllActiveProducts(Date currentDate) { return readCountAllActiveProductsInternal(currentDate); } protected Long readCountAllActiveProductsInternal(Date currentDate) { // Set up the criteria query that specifies we want to return a Long CriteriaBuilder builder = em.getCriteriaBuilder(); CriteriaQuery<Long> criteria = builder.createQuery(Long.class); // The root of our search is Product Root<ProductImpl> product = criteria.from(ProductImpl.class); // We need to filter on active date on the sku Join<Product, Sku> sku = product.join("defaultSku"); // We want the count of products criteria.select(builder.count(product)); // Ensure the product is currently active List<Predicate> restrictions = new ArrayList<Predicate>(); attachActiveRestriction(currentDate, product, sku, restrictions); // Add the restrictions to the criteria query criteria.where(restrictions.toArray(new 
Predicate[restrictions.size()])); TypedQuery<Long> query = em.createQuery(criteria); query.setHint(QueryHints.HINT_CACHEABLE, true); query.setHint(QueryHints.HINT_CACHE_REGION, "query.Catalog"); return query.getSingleResult(); } protected CriteriaQuery<Product> getCriteriaForActiveProducts(Date currentDate) { // Set up the criteria query that specifies we want to return Products CriteriaBuilder builder = em.getCriteriaBuilder(); CriteriaQuery<Product> criteria = builder.createQuery(Product.class); // The root of our search is Product Root<ProductImpl> product = criteria.from(ProductImpl.class); // We need to filter on active date on the sku Join<Product, Sku> sku = product.join("defaultSku"); product.fetch("defaultSku"); // Product objects are what we want back criteria.select(product); // Ensure the product is currently active List<Predicate> restrictions = new ArrayList<Predicate>(); attachActiveRestriction(currentDate, product, sku, restrictions); // Add the restrictions to the criteria query criteria.where(restrictions.toArray(new Predicate[restrictions.size()])); return criteria; } }
1no label
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_catalog_dao_ProductDaoImpl.java
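A small consumer sketch for the DAO above; the paged method signature comes from the source, while the service class and its name are assumptions:

// Hypothetical caller of ProductDaoImpl via its interface.
@Service
public class ProductLookupService {

    @Resource(name = "blProductDao")
    protected ProductDao productDao;

    public List<Product> firstPageByName(String name) {
        // paged variant defined on the DAO: first 20 matches
        return productDao.readProductsByName(name, 20, 0);
    }
}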
37
public class HighlyAvailableGraphDatabase extends InternalAbstractGraphDatabase {

    private RequestContextFactory requestContextFactory;
    private Slaves slaves;
    private ClusterMembers members;
    private DelegateInvocationHandler masterDelegateInvocationHandler;
    private Master master;
    private HighAvailabilityMemberStateMachine memberStateMachine;
    private UpdatePuller updatePuller;
    private LastUpdateTime lastUpdateTime;
    private HighAvailabilityMemberContext memberContext;
    private ClusterClient clusterClient;
    private ClusterMemberEvents clusterEvents;
    private ClusterMemberAvailability clusterMemberAvailability;
    private long stateSwitchTimeoutMillis;
    private final LifeSupport paxosLife = new LifeSupport();
    private DelegateInvocationHandler clusterEventsDelegateInvocationHandler;
    private DelegateInvocationHandler memberContextDelegateInvocationHandler;
    private DelegateInvocationHandler clusterMemberAvailabilityDelegateInvocationHandler;
    private HighAvailabilityModeSwitcher highAvailabilityModeSwitcher;

    public HighlyAvailableGraphDatabase( String storeDir, Map<String, String> params,
            Iterable<KernelExtensionFactory<?>> kernelExtensions, Iterable<CacheProvider> cacheProviders,
            Iterable<TransactionInterceptorProvider> txInterceptorProviders ) {
        this( storeDir, params, new GraphDatabaseDependencies( null,
                Arrays.<Class<?>>asList( GraphDatabaseSettings.class, ClusterSettings.class, HaSettings.class ),
                kernelExtensions, cacheProviders, txInterceptorProviders ) );
    }

    public HighlyAvailableGraphDatabase( String storeDir, Map<String, String> params, Dependencies dependencies ) {
        super( storeDir, params, dependencies );
        run();
    }

    @Override
    protected void create() {
        life.add( new BranchedDataMigrator( storeDir ) );
        masterDelegateInvocationHandler = new DelegateInvocationHandler( Master.class );
        master = (Master) Proxy.newProxyInstance( Master.class.getClassLoader(), new Class[]{Master.class},
                masterDelegateInvocationHandler );

        super.create();

        kernelEventHandlers.registerKernelEventHandler( new HaKernelPanicHandler( xaDataSourceManager,
                (TxManager) txManager, availabilityGuard, logging, masterDelegateInvocationHandler ) );
        life.add( updatePuller = new UpdatePuller( (HaXaDataSourceManager) xaDataSourceManager, master,
                requestContextFactory, txManager, availabilityGuard, lastUpdateTime, config, msgLog ) );

        stateSwitchTimeoutMillis = config.get( HaSettings.state_switch_timeout );

        life.add( paxosLife );
        life.add( new DatabaseAvailability( txManager, availabilityGuard ) );
        life.add( new StartupWaiter() );

        diagnosticsManager.appendProvider( new HighAvailabilityDiagnostics( memberStateMachine, clusterClient ) );
    }

    @Override
    protected AvailabilityGuard createAvailabilityGuard() {
        // 3 conditions: DatabaseAvailability, HighAvailabilityMemberStateMachine, and HA Kernel Panic
        return new AvailabilityGuard( Clock.SYSTEM_CLOCK, 3 );
    }

    @Override
    protected void createDatabaseAvailability() {
        // Skip this, it's done manually in create() to ensure it is as late as possible
    }

    public void start() {
        life.start();
    }

    public void stop() {
        life.stop();
    }

    @Override
    protected org.neo4j.graphdb.Transaction beginTx( ForceMode forceMode ) {
        if (!availabilityGuard.isAvailable( stateSwitchTimeoutMillis )) {
            throw new TransactionFailureException( "Timeout waiting for database to allow new transactions. "
                    + availabilityGuard.describeWhoIsBlocking() );
        }
        return super.beginTx( forceMode );
    }

    @Override
    public IndexManager index() {
        if (!availabilityGuard.isAvailable( stateSwitchTimeoutMillis )) {
            throw new TransactionFailureException( "Timeout waiting for database to allow new transactions. "
                    + availabilityGuard.describeWhoIsBlocking() );
        }
        return super.index();
    }

    @Override
    protected Logging createLogging() {
        Logging loggingService = life.add( new LogbackWeakDependency().tryLoadLogbackService( config,
                NEW_LOGGER_CONTEXT, DEFAULT_TO_CLASSIC ) );
        // Set Netty logger
        InternalLoggerFactory.setDefaultFactory( new NettyLoggerFactory( loggingService ) );
        return loggingService;
    }

    @Override
    protected TransactionStateFactory createTransactionStateFactory() {
        return new TransactionStateFactory( logging ) {
            @Override
            public TransactionState create( Transaction tx ) {
                return new WritableTransactionState( snapshot( lockManager ), nodeManager, logging, tx,
                        snapshot( txHook ), snapshot( txIdGenerator ) );
            }
        };
    }

    @Override
    protected XaDataSourceManager createXaDataSourceManager() {
        XaDataSourceManager toReturn = new HaXaDataSourceManager( logging.getMessagesLog( HaXaDataSourceManager.class ) );
        requestContextFactory = new RequestContextFactory( config.get( ClusterSettings.server_id ), toReturn,
                dependencyResolver );
        return toReturn;
    }

    @Override
    protected RemoteTxHook createTxHook() {
        clusterEventsDelegateInvocationHandler = new DelegateInvocationHandler( ClusterMemberEvents.class );
        memberContextDelegateInvocationHandler = new DelegateInvocationHandler( HighAvailabilityMemberContext.class );
        clusterMemberAvailabilityDelegateInvocationHandler = new DelegateInvocationHandler( ClusterMemberAvailability.class );

        clusterEvents = (ClusterMemberEvents) Proxy.newProxyInstance( ClusterMemberEvents.class.getClassLoader(),
                new Class[]{ClusterMemberEvents.class, Lifecycle.class}, clusterEventsDelegateInvocationHandler );
        memberContext = (HighAvailabilityMemberContext) Proxy.newProxyInstance(
                HighAvailabilityMemberContext.class.getClassLoader(),
                new Class[]{HighAvailabilityMemberContext.class}, memberContextDelegateInvocationHandler );
        clusterMemberAvailability = (ClusterMemberAvailability) Proxy.newProxyInstance(
                ClusterMemberAvailability.class.getClassLoader(),
                new Class[]{ClusterMemberAvailability.class}, clusterMemberAvailabilityDelegateInvocationHandler );

        ElectionCredentialsProvider electionCredentialsProvider = config.get( HaSettings.slave_only ) ?
                new NotElectableElectionCredentialsProvider() :
                new DefaultElectionCredentialsProvider( config.get( ClusterSettings.server_id ),
                        new OnDiskLastTxIdGetter( new File( getStoreDir() ) ),
                        new HighAvailabilityMemberInfoProvider() {
                            @Override
                            public HighAvailabilityMemberState getHighAvailabilityMemberState() {
                                return memberStateMachine.getCurrentState();
                            }
                        } );

        ObjectStreamFactory objectStreamFactory = new ObjectStreamFactory();

        clusterClient = new ClusterClient( ClusterClient.adapt( config ), logging, electionCredentialsProvider,
                objectStreamFactory, objectStreamFactory );
        PaxosClusterMemberEvents localClusterEvents = new PaxosClusterMemberEvents( clusterClient, clusterClient,
                clusterClient, clusterClient, logging,
                new Predicate<PaxosClusterMemberEvents.ClusterMembersSnapshot>() {
                    @Override
                    public boolean accept( PaxosClusterMemberEvents.ClusterMembersSnapshot item ) {
                        for ( MemberIsAvailable member : item.getCurrentAvailableMembers() ) {
                            if ( member.getRoleUri().getScheme().equals( "ha" ) ) {
                                if ( HighAvailabilityModeSwitcher.getServerId( member.getRoleUri() )
                                        == config.get( ClusterSettings.server_id ) ) {
                                    msgLog.error( String.format(
                                            "Instance %s has the same serverId as ours (%d) - will not join this cluster",
                                            member.getRoleUri(), config.get( ClusterSettings.server_id ) ) );
                                    return true;
                                }
                            }
                        }
                        return true;
                    }
                }, new HANewSnapshotFunction(), objectStreamFactory, objectStreamFactory );

        // Force a reelection after we enter the cluster
        // and when that election is finished refresh the snapshot
        clusterClient.addClusterListener( new ClusterListener.Adapter() {
            boolean hasRequestedElection = true; // This ensures that the election result is (at least) from our
                                                 // request or thereafter

            @Override
            public void enteredCluster( ClusterConfiguration clusterConfiguration ) {
                clusterClient.performRoleElections();
            }

            @Override
            public void elected( String role, InstanceId instanceId, URI electedMember ) {
                if ( hasRequestedElection && role.equals( ClusterConfiguration.COORDINATOR ) ) {
                    clusterClient.removeClusterListener( this );
                }
            }
        } );

        HighAvailabilityMemberContext localMemberContext = new SimpleHighAvailabilityMemberContext(
                clusterClient.getServerId() );
        PaxosClusterMemberAvailability localClusterMemberAvailability = new PaxosClusterMemberAvailability(
                clusterClient.getServerId(), clusterClient, clusterClient, logging, objectStreamFactory,
                objectStreamFactory );

        memberContextDelegateInvocationHandler.setDelegate( localMemberContext );
        clusterEventsDelegateInvocationHandler.setDelegate( localClusterEvents );
        clusterMemberAvailabilityDelegateInvocationHandler.setDelegate( localClusterMemberAvailability );

        members = new ClusterMembers( clusterClient, clusterClient, clusterEvents,
                new InstanceId( config.get( ClusterSettings.server_id ) ) );
        memberStateMachine = new HighAvailabilityMemberStateMachine( memberContext, availabilityGuard, members,
                clusterEvents, clusterClient, logging.getMessagesLog( HighAvailabilityMemberStateMachine.class ) );

        HighAvailabilityConsoleLogger highAvailabilityConsoleLogger = new HighAvailabilityConsoleLogger(
                logging.getConsoleLog( HighAvailabilityConsoleLogger.class ),
                new InstanceId( config.get( ClusterSettings.server_id ) ) );
        availabilityGuard.addListener( highAvailabilityConsoleLogger );
        clusterEvents.addClusterMemberListener( highAvailabilityConsoleLogger );
        clusterClient.addClusterListener( highAvailabilityConsoleLogger );

        paxosLife.add( clusterClient );
        paxosLife.add( memberStateMachine );
        paxosLife.add( clusterEvents );
        paxosLife.add( localClusterMemberAvailability );

        DelegateInvocationHandler<RemoteTxHook> txHookDelegate = new DelegateInvocationHandler<>( RemoteTxHook.class );
        RemoteTxHook txHook = (RemoteTxHook) Proxy.newProxyInstance( RemoteTxHook.class.getClassLoader(),
                new Class[]{RemoteTxHook.class}, txHookDelegate );
        new TxHookModeSwitcher( memberStateMachine, txHookDelegate, masterDelegateInvocationHandler,
                new TxHookModeSwitcher.RequestContextFactoryResolver() {
                    @Override
                    public RequestContextFactory get() {
                        return requestContextFactory;
                    }
                }, logging.getMessagesLog( TxHookModeSwitcher.class ), dependencyResolver );
        return txHook;
    }

    @Override
    public void assertSchemaWritesAllowed() throws InvalidTransactionTypeKernelException {
        if (!isMaster()) {
            throw new InvalidTransactionTypeKernelException(
                    "Modifying the database schema can only be done on the master server, this server is a slave. "
                            + "Please issue schema modification commands directly to the master." );
        }
    }

    @Override
    protected TxIdGenerator createTxIdGenerator() {
        DelegateInvocationHandler<TxIdGenerator> txIdGeneratorDelegate = new DelegateInvocationHandler<>( TxIdGenerator.class );
        TxIdGenerator txIdGenerator = (TxIdGenerator) Proxy.newProxyInstance( TxIdGenerator.class.getClassLoader(),
                new Class[]{TxIdGenerator.class}, txIdGeneratorDelegate );
        slaves = life.add( new HighAvailabilitySlaves( members, clusterClient, new DefaultSlaveFactory(
                xaDataSourceManager, logging, monitors, config.get( HaSettings.com_chunk_size ).intValue() ) ) );

        new TxIdGeneratorModeSwitcher( memberStateMachine, txIdGeneratorDelegate,
                (HaXaDataSourceManager) xaDataSourceManager, masterDelegateInvocationHandler, requestContextFactory,
                msgLog, config, slaves, txManager, jobScheduler );
        return txIdGenerator;
    }

    @Override
    protected IdGeneratorFactory createIdGeneratorFactory() {
        idGeneratorFactory = new HaIdGeneratorFactory( masterDelegateInvocationHandler, logging, requestContextFactory );
        highAvailabilityModeSwitcher = new HighAvailabilityModeSwitcher( clusterClient,
                masterDelegateInvocationHandler, clusterMemberAvailability, memberStateMachine, this,
                (HaIdGeneratorFactory) idGeneratorFactory, config, logging, updateableSchemaState,
                kernelExtensions.listFactories(), monitors, requestContextFactory );
        /*
         * We always need the mode switcher and we need it to restart on switchover.
         */
        paxosLife.add( highAvailabilityModeSwitcher );

        /*
         * We don't really switch to master here. We just need to initialize the idGenerator so the initial store
         * can be started (if required). In any case, the rest of the database is in pending state, so nothing will
         * happen until events start arriving and that will set us to the proper state anyway.
         */
        ((HaIdGeneratorFactory) idGeneratorFactory).switchToMaster();
        return idGeneratorFactory;
    }

    @Override
    protected LockManager createLockManager() {
        DelegateInvocationHandler<LockManager> lockManagerDelegate = new DelegateInvocationHandler<>( LockManager.class );
        LockManager lockManager = (LockManager) Proxy.newProxyInstance( LockManager.class.getClassLoader(),
                new Class[]{LockManager.class}, lockManagerDelegate );

        new LockManagerModeSwitcher( memberStateMachine, lockManagerDelegate,
                (HaXaDataSourceManager) xaDataSourceManager, masterDelegateInvocationHandler, requestContextFactory,
                txManager, txHook, availabilityGuard, config );
        return lockManager;
    }

    @Override
    protected TokenCreator createRelationshipTypeCreator() {
        DelegateInvocationHandler<TokenCreator> relationshipTypeCreatorDelegate =
                new DelegateInvocationHandler<>( TokenCreator.class );
        TokenCreator relationshipTypeCreator = (TokenCreator) Proxy.newProxyInstance(
                TokenCreator.class.getClassLoader(), new Class[]{TokenCreator.class}, relationshipTypeCreatorDelegate );

        new RelationshipTypeCreatorModeSwitcher( memberStateMachine, relationshipTypeCreatorDelegate,
                (HaXaDataSourceManager) xaDataSourceManager, masterDelegateInvocationHandler, requestContextFactory,
                logging );
        return relationshipTypeCreator;
    }

    @Override
    protected TokenCreator createPropertyKeyCreator() {
        DelegateInvocationHandler<TokenCreator> propertyKeyCreatorDelegate =
                new DelegateInvocationHandler<>( TokenCreator.class );
        TokenCreator propertyTokenCreator = (TokenCreator) Proxy.newProxyInstance(
                TokenCreator.class.getClassLoader(), new Class[]{TokenCreator.class}, propertyKeyCreatorDelegate );

        new PropertyKeyCreatorModeSwitcher( memberStateMachine, propertyKeyCreatorDelegate,
                (HaXaDataSourceManager) xaDataSourceManager, masterDelegateInvocationHandler, requestContextFactory,
                logging );
        return propertyTokenCreator;
    }

    @Override
    protected TokenCreator createLabelIdCreator() {
        DelegateInvocationHandler<TokenCreator> labelIdCreatorDelegate =
                new DelegateInvocationHandler<>( TokenCreator.class );
        TokenCreator labelIdCreator = (TokenCreator) Proxy.newProxyInstance( TokenCreator.class.getClassLoader(),
                new Class[]{TokenCreator.class}, labelIdCreatorDelegate );

        new LabelTokenCreatorModeSwitcher( memberStateMachine, labelIdCreatorDelegate,
                (HaXaDataSourceManager) xaDataSourceManager, masterDelegateInvocationHandler, requestContextFactory,
                logging );
        return labelIdCreator;
    }

    @Override
    protected Caches createCaches() {
        return new HaCaches( logging.getMessagesLog( Caches.class ), monitors );
    }

    @Override
    protected KernelData createKernelData() {
        this.lastUpdateTime = new LastUpdateTime();
        return new HighlyAvailableKernelData( this, members, new ClusterDatabaseInfoProvider( members,
                new OnDiskLastTxIdGetter( new File( getStoreDir() ) ), lastUpdateTime ) );
    }

    @Override
    protected Factory<byte[]> createXidGlobalIdFactory() {
        final int serverId = config.get( ClusterSettings.server_id );
        return new Factory<byte[]>() {
            @Override
            public byte[] newInstance() {
                return getNewGlobalId( DEFAULT_SEED, serverId );
            }
        };
    }

    @Override
    protected void registerRecovery() {
        memberStateMachine.addHighAvailabilityMemberListener( new HighAvailabilityMemberListener() {
            @Override
            public void masterIsElected( HighAvailabilityMemberChangeEvent event ) {
            }

            @Override
            public void masterIsAvailable( HighAvailabilityMemberChangeEvent event ) {
                if ( event.getOldState().equals( HighAvailabilityMemberState.TO_MASTER )
                        && event.getNewState().equals( HighAvailabilityMemberState.MASTER ) ) {
                    doAfterRecoveryAndStartup( true );
                }
            }

            @Override
            public void slaveIsAvailable( HighAvailabilityMemberChangeEvent event ) {
                if ( event.getOldState().equals( HighAvailabilityMemberState.TO_SLAVE )
                        && event.getNewState().equals( HighAvailabilityMemberState.SLAVE ) ) {
                    doAfterRecoveryAndStartup( false );
                }
            }

            @Override
            public void instanceStops( HighAvailabilityMemberChangeEvent event ) {
            }

            private void doAfterRecoveryAndStartup( boolean isMaster ) {
                try {
                    synchronized ( xaDataSourceManager ) {
                        HighlyAvailableGraphDatabase.this.doAfterRecoveryAndStartup( isMaster );
                    }
                } catch ( Throwable throwable ) {
                    msgLog.error( "Post recovery error", throwable );
                    try {
                        memberStateMachine.stop();
                    } catch ( Throwable throwable1 ) {
                        msgLog.warn( "Could not stop", throwable1 );
                    }
                    try {
                        memberStateMachine.start();
                    } catch ( Throwable throwable1 ) {
                        msgLog.warn( "Could not start", throwable1 );
                    }
                }
            }
        } );
    }

    @Override
    public String toString() {
        return getClass().getSimpleName() + "[" + storeDir + "]";
    }

    public String getInstanceState() {
        return memberStateMachine.getCurrentState().name();
    }

    public String role() {
        return members.getSelf().getHARole();
    }

    public boolean isMaster() {
        return memberStateMachine.getCurrentState() == HighAvailabilityMemberState.MASTER;
    }

    @Override
    public DependencyResolver getDependencyResolver() {
        return new DependencyResolver.Adapter() {
            @Override
            public <T> T resolveDependency( Class<T> type, SelectionStrategy selector ) {
                T result;
                try {
                    result = dependencyResolver.resolveDependency( type, selector );
                } catch ( IllegalArgumentException e ) {
                    if ( ClusterMemberEvents.class.isAssignableFrom( type ) ) {
                        result = type.cast( clusterEvents );
                    } else if ( ClusterMemberAvailability.class.isAssignableFrom( type ) ) {
                        result = type.cast( clusterMemberAvailability );
                    } else if ( UpdatePuller.class.isAssignableFrom( type ) ) {
                        result = type.cast( updatePuller );
                    } else if ( Slaves.class.isAssignableFrom( type ) ) {
                        result = type.cast( slaves );
                    } else if ( ClusterClient.class.isAssignableFrom( type ) ) {
                        result = type.cast( clusterClient );
                    } else if ( BindingNotifier.class.isAssignableFrom( type ) ) {
                        result = type.cast( clusterClient );
                    } else if ( ClusterMembers.class.isAssignableFrom( type ) ) {
                        result = type.cast( members );
                    } else if ( RequestContextFactory.class.isAssignableFrom( type ) ) {
                        result = type.cast( requestContextFactory );
                    } else {
                        throw e;
                    }
                }
                return selector.select( type, option( result ) );
            }
        };
    }

    /**
     * At end of startup, wait for instance to become either master or slave.
     * <p/>
     * This helps users who expect to be able to access the instance after
     * the constructor is run.
     */
    private class StartupWaiter extends LifecycleAdapter {
        @Override
        public void start() throws Throwable {
            availabilityGuard.isAvailable( stateSwitchTimeoutMillis );
        }
    }
}
1no label
enterprise_ha_src_main_java_org_neo4j_kernel_ha_HighlyAvailableGraphDatabase.java
41
@SuppressWarnings("unchecked") public class OMultiValue { /** * Checks if a class is a multi-value type. * * @param iType * Class to check * @return true if it's an array, a collection or a map, otherwise false */ public static boolean isMultiValue(final Class<?> iType) { return (iType.isArray() || Collection.class.isAssignableFrom(iType) || Map.class.isAssignableFrom(iType) || OMultiCollectionIterator.class .isAssignableFrom(iType)); } /** * Checks if the object is a multi-value type. * * @param iObject * Object to check * @return true if it's an array, a collection or a map, otherwise false */ public static boolean isMultiValue(final Object iObject) { return iObject == null ? false : isMultiValue(iObject.getClass()); } public static boolean isIterable(final Object iObject) { return iObject == null ? false : iObject instanceof Iterable<?> ? true : iObject instanceof Iterator<?>; } /** * Returns the size of the multi-value object * * @param iObject * Multi-value object (array, collection or map) * @return the size of the multi value object */ public static int getSize(final Object iObject) { if (iObject == null) return 0; if (iObject instanceof OSizeable) return ((OSizeable) iObject).size(); if (!isMultiValue(iObject)) return 0; if (iObject instanceof Collection<?>) return ((Collection<Object>) iObject).size(); if (iObject instanceof Map<?, ?>) return ((Map<?, Object>) iObject).size(); if (iObject.getClass().isArray()) return Array.getLength(iObject); return 0; } /** * Returns the first item of the Multi-value object (array, collection or map) * * @param iObject * Multi-value object (array, collection or map) * @return The first item if any */ public static Object getFirstValue(final Object iObject) { if (iObject == null) return null; if (!isMultiValue(iObject) || getSize(iObject) == 0) return null; try { if (iObject instanceof List<?>) return ((List<Object>) iObject).get(0); else if (iObject instanceof Collection<?>) return ((Collection<Object>) iObject).iterator().next(); else if (iObject instanceof Map<?, ?>) return ((Map<?, Object>) iObject).values().iterator().next(); else if (iObject.getClass().isArray()) return Array.get(iObject, 0); } catch (Exception e) { // IGNORE IT OLogManager.instance().debug(iObject, "Error on reading the first item of the Multi-value field '%s'", iObject); } return null; } /** * Returns the last item of the Multi-value object (array, collection or map) * * @param iObject * Multi-value object (array, collection or map) * @return The last item if any */ public static Object getLastValue(final Object iObject) { if (iObject == null) return null; if (!isMultiValue(iObject)) return null; try { if (iObject instanceof List<?>) return ((List<Object>) iObject).get(((List<Object>) iObject).size() - 1); else if (iObject instanceof Collection<?>) { Object last = null; for (Object o : (Collection<Object>) iObject) last = o; return last; } else if (iObject instanceof Map<?, ?>) { Object last = null; for (Object o : ((Map<?, Object>) iObject).values()) last = o; return last; } else if (iObject.getClass().isArray()) return Array.get(iObject, Array.getLength(iObject) - 1); } catch (Exception e) { // IGNORE IT OLogManager.instance().debug(iObject, "Error on reading the last item of the Multi-value field '%s'", iObject); } return null; } /** * Returns the iIndex item of the Multi-value object (array, collection or map) * * @param iObject * Multi-value object (array, collection or map) * @param iIndex * integer as the position requested * @return The first item if any */ public 
static Object getValue(final Object iObject, final int iIndex) { if (iObject == null) return null; if (!isMultiValue(iObject)) return null; if (iIndex > getSize(iObject)) return null; try { if (iObject instanceof List<?>) return ((List<?>) iObject).get(iIndex); else if (iObject instanceof Set<?>) { int i = 0; for (Object o : ((Set<?>) iObject)) { if (i++ == iIndex) { return o; } } } else if (iObject instanceof Map<?, ?>) { int i = 0; for (Object o : ((Map<?, ?>) iObject).values()) { if (i++ == iIndex) { return o; } } } else if (iObject.getClass().isArray()) return Array.get(iObject, iIndex); } catch (Exception e) { // IGNORE IT OLogManager.instance().debug(iObject, "Error on reading the first item of the Multi-value field '%s'", iObject); } return null; } /** * Returns an Iterable<Object> object to browse the multi-value instance (array, collection or map) * * @param iObject * Multi-value object (array, collection or map) */ public static Iterable<Object> getMultiValueIterable(final Object iObject) { if (iObject == null) return null; if (iObject instanceof Iterable<?>) return (Iterable<Object>) iObject; else if (iObject instanceof Collection<?>) return ((Collection<Object>) iObject); else if (iObject instanceof Map<?, ?>) return ((Map<?, Object>) iObject).values(); else if (iObject.getClass().isArray()) return new OIterableObjectArray<Object>(iObject); else if (iObject instanceof Iterator<?>) { final List<Object> temp = new ArrayList<Object>(); for (Iterator<Object> it = (Iterator<Object>) iObject; it.hasNext();) temp.add(it.next()); return temp; } return null; } /** * Returns an Iterator<Object> object to browse the multi-value instance (array, collection or map) * * @param iObject * Multi-value object (array, collection or map) */ public static Iterator<Object> getMultiValueIterator(final Object iObject) { if (iObject == null) return null; if (iObject instanceof Iterator<?>) return (Iterator<Object>) iObject; if (!isMultiValue(iObject)) return null; if (iObject instanceof Collection<?>) return ((Collection<Object>) iObject).iterator(); if (iObject instanceof Map<?, ?>) return ((Map<?, Object>) iObject).values().iterator(); if (iObject.getClass().isArray()) return new OIterableObjectArray<Object>(iObject).iterator(); return new OIterableObject<Object>(iObject); } /** * Returns a stringified version of the multi-value object. * * @param iObject * Multi-value object (array, collection or map) * @return a stringified version of the multi-value object. */ public static String toString(final Object iObject) { final StringBuilder sb = new StringBuilder(); if (iObject instanceof Collection<?>) { final Collection<Object> coll = (Collection<Object>) iObject; sb.append('['); for (final Iterator<Object> it = coll.iterator(); it.hasNext();) { try { Object e = it.next(); sb.append(e == iObject ? "(this Collection)" : e); if (it.hasNext()) sb.append(", "); } catch (NoSuchElementException ex) { // IGNORE THIS } } return sb.append(']').toString(); } else if (iObject instanceof Map<?, ?>) { final Map<String, Object> map = (Map<String, Object>) iObject; Entry<String, Object> e; sb.append('{'); for (final Iterator<Entry<String, Object>> it = map.entrySet().iterator(); it.hasNext();) { try { e = it.next(); sb.append(e.getKey()); sb.append(":"); sb.append(e.getValue() == iObject ? 
"(this Map)" : e.getValue()); if (it.hasNext()) sb.append(", "); } catch (NoSuchElementException ex) { // IGNORE THIS } } return sb.append('}').toString(); } return iObject.toString(); } /** * Utility function that add a value to the main object. It takes care about collections/array and single values. * * @param iObject * MultiValue where to add value(s) * @param iToAdd * Single value, array of values or collections of values. Map are not supported. * @return */ public static Object add(final Object iObject, final Object iToAdd) { if (iObject != null) { if (iObject instanceof Collection<?>) { // COLLECTION - ? final Collection<Object> coll = (Collection<Object>) iObject; if (iToAdd instanceof Collection<?>) { // COLLECTION - COLLECTION for (Object o : (Collection<Object>) iToAdd) { if (isMultiValue(o)) add(coll, o); else coll.add(o); } } else if (iToAdd != null && iToAdd.getClass().isArray()) { // ARRAY - COLLECTION for (int i = 0; i < Array.getLength(iToAdd); ++i) { Object o = Array.get(iToAdd, i); if (isMultiValue(o)) add(coll, o); else coll.add(o); } } else if (iToAdd instanceof Map<?, ?>) { // MAP for (Entry<Object, Object> entry : ((Map<Object, Object>) iToAdd).entrySet()) coll.add(entry.getValue()); } else if (iToAdd instanceof Iterable<?>) { // ITERABLE for (Object o : (Iterable<?>) iToAdd) coll.add(o); } else if (iToAdd instanceof Iterator<?>) { // ITERATOR for (Iterator<?> it = (Iterator<?>) iToAdd; it.hasNext();) coll.add(it.next()); } else coll.add(iToAdd); } else if (iObject.getClass().isArray()) { // ARRAY - ? final Object[] copy; if (iToAdd instanceof Collection<?>) { // ARRAY - COLLECTION final int tot = Array.getLength(iObject) + ((Collection<Object>) iToAdd).size(); copy = Arrays.copyOf((Object[]) iObject, tot); final Iterator<Object> it = ((Collection<Object>) iToAdd).iterator(); for (int i = Array.getLength(iObject); i < tot; ++i) copy[i] = it.next(); } else if (iToAdd != null && iToAdd.getClass().isArray()) { // ARRAY - ARRAY final int tot = Array.getLength(iObject) + Array.getLength(iToAdd); copy = Arrays.copyOf((Object[]) iObject, tot); System.arraycopy(iToAdd, 0, iObject, Array.getLength(iObject), Array.getLength(iToAdd)); } else { copy = Arrays.copyOf((Object[]) iObject, Array.getLength(iObject) + 1); copy[copy.length - 1] = iToAdd; } return copy; } else throw new IllegalArgumentException("Object " + iObject + " is not a multi value"); } return iObject; } /** * Utility function that remove a value from the main object. It takes care about collections/array and single values. * * @param iObject * MultiValue where to add value(s) * @param iToRemove * Single value, array of values or collections of values. Map are not supported. * @param iAllOccurrences * True if the all occurrences must be removed or false of only the first one (Like java.util.Collection.remove()) * @return */ public static Object remove(Object iObject, Object iToRemove, final boolean iAllOccurrences) { if (iObject != null) { if (iObject instanceof OMultiCollectionIterator<?>) { final Collection<Object> list = new LinkedList<Object>(); for (Object o : ((OMultiCollectionIterator<?>) iObject)) list.add(o); iObject = list; } if (iToRemove instanceof OMultiCollectionIterator<?>) { // TRANSFORM IN SET ONCE TO OPTIMIZE LOOPS DURING REMOVE final Set<Object> set = new HashSet<Object>(); for (Object o : ((OMultiCollectionIterator<?>) iToRemove)) set.add(o); iToRemove = set; } if (iObject instanceof Collection<?>) { // COLLECTION - ? 
final Collection<Object> coll = (Collection<Object>) iObject; if (iToRemove instanceof Collection<?>) { // COLLECTION - COLLECTION for (Object o : (Collection<Object>) iToRemove) { if (isMultiValue(o)) remove(coll, o, iAllOccurrences); else coll.remove(o); } } else if (iToRemove != null && iToRemove.getClass().isArray()) { // ARRAY - COLLECTION for (int i = 0; i < Array.getLength(iToRemove); ++i) { Object o = Array.get(iToRemove, i); if (isMultiValue(o)) remove(coll, o, iAllOccurrences); else coll.remove(o); } } else if (iToRemove instanceof Map<?, ?>) { // MAP for (Entry<Object, Object> entry : ((Map<Object, Object>) iToRemove).entrySet()) coll.remove(entry.getKey()); } else if (iToRemove instanceof Iterator<?>) { // ITERATOR if (iToRemove instanceof OMultiCollectionIterator<?>) ((OMultiCollectionIterator<?>) iToRemove).reset(); if (iAllOccurrences) { OMultiCollectionIterator<?> it = (OMultiCollectionIterator<?>) iToRemove; batchRemove(coll, it); } else { for (Iterator<?> it = (Iterator<?>) iToRemove; it.hasNext();) { final Object itemToRemove = it.next(); while (coll.remove(itemToRemove)) if (!iAllOccurrences) // REMOVE ONLY THE FIRST ITEM break; // REMOVE ALL THE ITEM } } } else coll.remove(iToRemove); } else if (iObject.getClass().isArray()) { // ARRAY - ? final Object[] copy; if (iToRemove instanceof Collection<?>) { // ARRAY - COLLECTION final int sourceTot = Array.getLength(iObject); final int tot = sourceTot - ((Collection<Object>) iToRemove).size(); copy = new Object[tot]; int k = 0; for (int i = 0; i < sourceTot; ++i) { Object o = Array.get(iObject, i); if (o != null) { boolean found = false; for (Object toRemove : (Collection<Object>) iToRemove) { if (o.equals(toRemove)) { // SKIP found = true; break; } } if (!found) copy[k++] = o; } } } else if (iToRemove != null && iToRemove.getClass().isArray()) { throw new UnsupportedOperationException("Cannot execute remove() against an array"); } else { throw new UnsupportedOperationException("Cannot execute remove() against an array"); } return copy; } else throw new IllegalArgumentException("Object " + iObject + " is not a multi value"); } return iObject; } private static void batchRemove(Collection<Object> coll, Iterator<?> it) { int approximateRemainingSize; if (it instanceof OSizeable) { approximateRemainingSize = ((OSizeable) it).size(); } else { approximateRemainingSize = -1; } while (it.hasNext()) { Set batch = prepareBatch(it, approximateRemainingSize); coll.removeAll(batch); approximateRemainingSize -= batch.size(); } } private static Set prepareBatch(Iterator<?> it, int approximateRemainingSize) { final HashSet batch; if (approximateRemainingSize > -1) { if (approximateRemainingSize > 10000) batch = new HashSet(13400); else batch = new HashSet((int) (approximateRemainingSize / 0.75)); } else { batch = new HashSet(); } int count = 0; while (count < 10000 && it.hasNext()) { batch.add(it.next()); count++; } return batch; } public static Object[] array(final Object iValue) { return array(iValue, Object.class); } public static <T> T[] array(final Object iValue, final Class<? extends T> iClass) { return array(iValue, iClass, null); } public static <T> T[] array(final Object iValue, final Class<? 
extends T> iClass, final OCallable<Object, Object> iCallback) { if (iValue == null) return null; final T[] result; if (isMultiValue(iValue)) { // CREATE STATIC ARRAY AND FILL IT result = (T[]) Array.newInstance(iClass, getSize(iValue)); int i = 0; for (Iterator<T> it = (Iterator<T>) getMultiValueIterator(iValue); it.hasNext(); ++i) result[i] = (T) convert(it.next(), iCallback); } else if (isIterable(iValue)) { // SIZE UNKNOWN: USE A LIST AS TEMPORARY OBJECT final List<T> temp = new ArrayList<T>(); for (Iterator<T> it = (Iterator<T>) getMultiValueIterator(iValue); it.hasNext();) temp.add((T) convert(it.next(), iCallback)); if (iClass.equals(Object.class)) result = (T[]) temp.toArray(); else // CONVERT THEM result = temp.toArray((T[]) Array.newInstance(iClass, getSize(iValue))); } else { result = (T[]) Array.newInstance(iClass, 1); result[0] = (T) (T) convert(iValue, iCallback); } return result; } public static Object convert(final Object iObject, final OCallable<Object, Object> iCallback) { return iCallback != null ? iCallback.call(iObject) : iObject; } public static boolean equals(final Collection<Object> col1, final Collection<Object> col2) { if (col1.size() != col2.size()) return false; return col1.containsAll(col2) && col2.containsAll(col1); } }
0true
commons_src_main_java_com_orientechnologies_common_collection_OMultiValue.java
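A short sketch exercising the static helpers above; the values are illustrative:

// All calls below are defined on OMultiValue.
List<Object> names = new ArrayList<Object>(Arrays.asList("a", "b"));
OMultiValue.isMultiValue(names);                   // true
OMultiValue.getSize(names);                        // 2
OMultiValue.getFirstValue(names);                  // "a"
OMultiValue.add(names, new String[] { "c", "d" }); // array elements are appended one by one
OMultiValue.toString(names);                       // "[a, b, c, d]"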
466
final long indexTwoSize = makeDbCall(databaseDocumentTxTwo, new ODbRelatedCall<Long>() {
    public Long call() {
        return indexTwo.getSize();
    }
});
0true
core_src_main_java_com_orientechnologies_orient_core_db_tool_ODatabaseCompare.java
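The same callback pattern presumably applies to the first database in the comparison; the indexOne and databaseDocumentTxOne names are assumed from the surrounding compare routine and are not shown in this excerpt:

// Hypothetical counterpart to the snippet above.
final long indexOneSize = makeDbCall(databaseDocumentTxOne, new ODbRelatedCall<Long>() {
    public Long call() {
        return indexOne.getSize();
    }
});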