Unnamed: 0 int64 (0–6.45k) | func stringlengths (37–143k) | target class label (2 classes) | project stringlengths (33–157) |
---|---|---|---|
88 |
public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); }
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
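A minimal usage sketch for the `ObjectByObjectToLong` row above: in ConcurrentHashMapV8 this shape is used as the transformer for `reduceToLong`-style bulk operations. The interface is redeclared locally and the map contents are made up, so the example stays self-contained.

```java
import java.util.HashMap;
import java.util.Map;

public class ReducerDemo {
    // local copy of the interface from the row above
    interface ObjectByObjectToLong<A, B> { long apply(A a, B b); }

    public static void main(String[] args) {
        // transformer: combine each key/value pair into a single long
        ObjectByObjectToLong<String, Integer> weigh = new ObjectByObjectToLong<String, Integer>() {
            public long apply(String key, Integer value) {
                return (long) key.length() * value;
            }
        };
        Map<String, Integer> m = new HashMap<String, Integer>();
        m.put("ab", 3);
        m.put("c", 5);
        long sum = 0;
        for (Map.Entry<String, Integer> e : m.entrySet()) {
            sum += weigh.apply(e.getKey(), e.getValue());
        }
        System.out.println(sum); // 2*3 + 1*5 = 11
    }
}
```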
419 |
public final class ClientMapProxy<K, V> extends ClientProxy implements IMap<K, V> {
private final String name;
private volatile ClientNearCache<Data> nearCache;
private final AtomicBoolean nearCacheInitialized = new AtomicBoolean();
public ClientMapProxy(String instanceName, String serviceName, String name) {
super(instanceName, serviceName, name);
this.name = name;
}
@Override
public boolean containsKey(Object key) {
initNearCache();
final Data keyData = toData(key);
if (nearCache != null) {
Object cached = nearCache.get(keyData);
if (cached != null) {
if (cached.equals(ClientNearCache.NULL_OBJECT)) {
return false;
}
return true;
}
}
MapContainsKeyRequest request = new MapContainsKeyRequest(name, keyData);
Boolean result = invoke(request, keyData);
return result;
}
@Override
public boolean containsValue(Object value) {
Data valueData = toData(value);
MapContainsValueRequest request = new MapContainsValueRequest(name, valueData);
Boolean result = invoke(request);
return result;
}
@Override
public V get(Object key) {
initNearCache();
final Data keyData = toData(key);
if (nearCache != null) {
Object cached = nearCache.get(keyData);
if (cached != null) {
if (cached.equals(ClientNearCache.NULL_OBJECT)) {
return null;
}
return (V) cached;
}
}
MapGetRequest request = new MapGetRequest(name, keyData);
final V result = invoke(request, keyData);
if (nearCache != null) {
nearCache.put(keyData, result);
}
return result;
}
@Override
public V put(K key, V value) {
return put(key, value, -1, null);
}
@Override
public V remove(Object key) {
final Data keyData = toData(key);
invalidateNearCache(keyData);
MapRemoveRequest request = new MapRemoveRequest(name, keyData, ThreadUtil.getThreadId());
return invoke(request, keyData);
}
@Override
public boolean remove(Object key, Object value) {
final Data keyData = toData(key);
final Data valueData = toData(value);
invalidateNearCache(keyData);
MapRemoveIfSameRequest request = new MapRemoveIfSameRequest(name, keyData, valueData, ThreadUtil.getThreadId());
Boolean result = invoke(request, keyData);
return result;
}
@Override
public void delete(Object key) {
final Data keyData = toData(key);
invalidateNearCache(keyData);
MapDeleteRequest request = new MapDeleteRequest(name, keyData, ThreadUtil.getThreadId());
invoke(request, keyData);
}
@Override
public void flush() {
MapFlushRequest request = new MapFlushRequest(name);
invoke(request);
}
@Override
public Future<V> getAsync(final K key) {
initNearCache();
final Data keyData = toData(key);
if (nearCache != null) {
Object cached = nearCache.get(keyData);
if (cached != null && !ClientNearCache.NULL_OBJECT.equals(cached)) {
return new CompletedFuture(getContext().getSerializationService(),
cached, getContext().getExecutionService().getAsyncExecutor());
}
}
final MapGetRequest request = new MapGetRequest(name, keyData);
try {
final ICompletableFuture future = getContext().getInvocationService().invokeOnKeyOwner(request, keyData);
final DelegatingFuture<V> delegatingFuture = new DelegatingFuture<V>(future, getContext().getSerializationService());
delegatingFuture.andThen(new ExecutionCallback<V>() {
@Override
public void onResponse(V response) {
if (nearCache != null) {
nearCache.put(keyData, response);
}
}
@Override
public void onFailure(Throwable t) {
}
});
return delegatingFuture;
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
}
@Override
public Future<V> putAsync(final K key, final V value) {
return putAsync(key, value, -1, null);
}
@Override
public Future<V> putAsync(final K key, final V value, final long ttl, final TimeUnit timeunit) {
final Data keyData = toData(key);
final Data valueData = toData(value);
invalidateNearCache(keyData);
MapPutRequest request = new MapPutRequest(name, keyData, valueData,
ThreadUtil.getThreadId(), getTimeInMillis(ttl, timeunit));
try {
final ICompletableFuture future = getContext().getInvocationService().invokeOnKeyOwner(request, keyData);
return new DelegatingFuture<V>(future, getContext().getSerializationService());
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
}
@Override
public Future<V> removeAsync(final K key) {
final Data keyData = toData(key);
invalidateNearCache(keyData);
MapRemoveRequest request = new MapRemoveRequest(name, keyData, ThreadUtil.getThreadId());
try {
final ICompletableFuture future = getContext().getInvocationService().invokeOnKeyOwner(request, keyData);
return new DelegatingFuture<V>(future, getContext().getSerializationService());
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
}
@Override
public boolean tryRemove(K key, long timeout, TimeUnit timeunit) {
final Data keyData = toData(key);
invalidateNearCache(keyData);
MapTryRemoveRequest request = new MapTryRemoveRequest(name, keyData,
ThreadUtil.getThreadId(), timeunit.toMillis(timeout));
Boolean result = invoke(request, keyData);
return result;
}
@Override
public boolean tryPut(K key, V value, long timeout, TimeUnit timeunit) {
final Data keyData = toData(key);
final Data valueData = toData(value);
invalidateNearCache(keyData);
MapTryPutRequest request = new MapTryPutRequest(name, keyData, valueData,
ThreadUtil.getThreadId(), timeunit.toMillis(timeout));
Boolean result = invoke(request, keyData);
return result;
}
@Override
public V put(K key, V value, long ttl, TimeUnit timeunit) {
final Data keyData = toData(key);
final Data valueData = toData(value);
invalidateNearCache(keyData);
MapPutRequest request = new MapPutRequest(name, keyData, valueData,
ThreadUtil.getThreadId(), getTimeInMillis(ttl, timeunit));
return invoke(request, keyData);
}
@Override
public void putTransient(K key, V value, long ttl, TimeUnit timeunit) {
final Data keyData = toData(key);
final Data valueData = toData(value);
invalidateNearCache(keyData);
MapPutTransientRequest request = new MapPutTransientRequest(name, keyData, valueData,
ThreadUtil.getThreadId(), getTimeInMillis(ttl, timeunit));
invoke(request);
}
@Override
public V putIfAbsent(K key, V value) {
return putIfAbsent(key, value, -1, null);
}
@Override
public V putIfAbsent(K key, V value, long ttl, TimeUnit timeunit) {
final Data keyData = toData(key);
final Data valueData = toData(value);
invalidateNearCache(keyData);
MapPutIfAbsentRequest request = new MapPutIfAbsentRequest(name, keyData, valueData,
ThreadUtil.getThreadId(), getTimeInMillis(ttl, timeunit));
return invoke(request, keyData);
}
@Override
public boolean replace(K key, V oldValue, V newValue) {
final Data keyData = toData(key);
final Data oldValueData = toData(oldValue);
final Data newValueData = toData(newValue);
invalidateNearCache(keyData);
MapReplaceIfSameRequest request = new MapReplaceIfSameRequest(name, keyData, oldValueData, newValueData,
ThreadUtil.getThreadId());
Boolean result = invoke(request, keyData);
return result;
}
@Override
public V replace(K key, V value) {
final Data keyData = toData(key);
final Data valueData = toData(value);
invalidateNearCache(keyData);
MapReplaceRequest request = new MapReplaceRequest(name, keyData, valueData,
ThreadUtil.getThreadId());
return invoke(request, keyData);
}
@Override
public void set(K key, V value, long ttl, TimeUnit timeunit) {
final Data keyData = toData(key);
final Data valueData = toData(value);
invalidateNearCache(keyData);
MapSetRequest request = new MapSetRequest(name, keyData, valueData,
ThreadUtil.getThreadId(), getTimeInMillis(ttl, timeunit));
invoke(request, keyData);
}
@Override
public void lock(K key) {
final Data keyData = toData(key);
MapLockRequest request = new MapLockRequest(name, keyData, ThreadUtil.getThreadId());
invoke(request, keyData);
}
@Override
public void lock(K key, long leaseTime, TimeUnit timeUnit) {
final Data keyData = toData(key);
MapLockRequest request = new MapLockRequest(name, keyData,
ThreadUtil.getThreadId(), getTimeInMillis(leaseTime, timeUnit), -1);
invoke(request, keyData);
}
@Override
public boolean isLocked(K key) {
final Data keyData = toData(key);
MapIsLockedRequest request = new MapIsLockedRequest(name, keyData);
Boolean result = invoke(request, keyData);
return result;
}
@Override
public boolean tryLock(K key) {
try {
return tryLock(key, 0, null);
} catch (InterruptedException e) {
return false;
}
}
@Override
public boolean tryLock(K key, long time, TimeUnit timeunit) throws InterruptedException {
final Data keyData = toData(key);
MapLockRequest request = new MapLockRequest(name, keyData,
ThreadUtil.getThreadId(), Long.MAX_VALUE, getTimeInMillis(time, timeunit));
Boolean result = invoke(request, keyData);
return result;
}
@Override
public void unlock(K key) {
final Data keyData = toData(key);
MapUnlockRequest request = new MapUnlockRequest(name, keyData, ThreadUtil.getThreadId(), false);
invoke(request, keyData);
}
@Override
public void forceUnlock(K key) {
final Data keyData = toData(key);
MapUnlockRequest request = new MapUnlockRequest(name, keyData, ThreadUtil.getThreadId(), true);
invoke(request, keyData);
}
@Override
public String addLocalEntryListener(EntryListener<K, V> listener) {
throw new UnsupportedOperationException("Locality is ambiguous for client!!!");
}
@Override
public String addLocalEntryListener(EntryListener<K, V> listener,
Predicate<K, V> predicate, boolean includeValue) {
throw new UnsupportedOperationException("Locality is ambiguous for client!!!");
}
@Override
public String addLocalEntryListener(EntryListener<K, V> listener,
Predicate<K, V> predicate, K key, boolean includeValue) {
throw new UnsupportedOperationException("Locality is ambiguous for client!!!");
}
public String addInterceptor(MapInterceptor interceptor) {
MapAddInterceptorRequest request = new MapAddInterceptorRequest(name, interceptor);
return invoke(request);
}
@Override
public void removeInterceptor(String id) {
MapRemoveInterceptorRequest request = new MapRemoveInterceptorRequest(name, id);
invoke(request);
}
@Override
public String addEntryListener(EntryListener<K, V> listener, boolean includeValue) {
MapAddEntryListenerRequest request = new MapAddEntryListenerRequest(name, includeValue);
EventHandler<PortableEntryEvent> handler = createHandler(listener, includeValue);
return listen(request, handler);
}
@Override
public boolean removeEntryListener(String id) {
final MapRemoveEntryListenerRequest request = new MapRemoveEntryListenerRequest(name, id);
return stopListening(request, id);
}
@Override
public String addEntryListener(EntryListener<K, V> listener, K key, boolean includeValue) {
final Data keyData = toData(key);
MapAddEntryListenerRequest request = new MapAddEntryListenerRequest(name, keyData, includeValue);
EventHandler<PortableEntryEvent> handler = createHandler(listener, includeValue);
return listen(request, keyData, handler);
}
@Override
public String addEntryListener(EntryListener<K, V> listener, Predicate<K, V> predicate, K key, boolean includeValue) {
final Data keyData = toData(key);
MapAddEntryListenerRequest request = new MapAddEntryListenerRequest(name, keyData, includeValue, predicate);
EventHandler<PortableEntryEvent> handler = createHandler(listener, includeValue);
return listen(request, keyData, handler);
}
@Override
public String addEntryListener(EntryListener<K, V> listener, Predicate<K, V> predicate, boolean includeValue) {
MapAddEntryListenerRequest request = new MapAddEntryListenerRequest(name, null, includeValue, predicate);
EventHandler<PortableEntryEvent> handler = createHandler(listener, includeValue);
return listen(request, null, handler);
}
@Override
public EntryView<K, V> getEntryView(K key) {
final Data keyData = toData(key);
MapGetEntryViewRequest request = new MapGetEntryViewRequest(name, keyData);
SimpleEntryView entryView = invoke(request, keyData);
if (entryView == null) {
return null;
}
final Data value = (Data) entryView.getValue();
entryView.setKey(key);
entryView.setValue(toObject(value));
//TODO putCache
return entryView;
}
@Override
public boolean evict(K key) {
final Data keyData = toData(key);
MapEvictRequest request = new MapEvictRequest(name, keyData, ThreadUtil.getThreadId());
Boolean result = invoke(request);
return result;
}
@Override
public Set<K> keySet() {
MapKeySetRequest request = new MapKeySetRequest(name);
MapKeySet mapKeySet = invoke(request);
Set<Data> keySetData = mapKeySet.getKeySet();
Set<K> keySet = new HashSet<K>(keySetData.size());
for (Data data : keySetData) {
final K key = toObject(data);
keySet.add(key);
}
return keySet;
}
@Override
public Map<K, V> getAll(Set<K> keys) {
initNearCache();
Set<Data> keySet = new HashSet<Data>(keys.size());
Map<K, V> result = new HashMap<K, V>();
for (Object key : keys) {
keySet.add(toData(key));
}
if (nearCache != null) {
final Iterator<Data> iterator = keySet.iterator();
while (iterator.hasNext()) {
Data key = iterator.next();
Object cached = nearCache.get(key);
if (cached != null && !ClientNearCache.NULL_OBJECT.equals(cached)) {
result.put((K) toObject(key), (V) cached);
iterator.remove();
}
}
}
if (keySet.isEmpty()) { // every requested key was served from the near cache
return result;
}
MapGetAllRequest request = new MapGetAllRequest(name, keySet);
MapEntrySet mapEntrySet = invoke(request);
Set<Entry<Data, Data>> entrySet = mapEntrySet.getEntrySet();
for (Entry<Data, Data> dataEntry : entrySet) {
final V value = (V) toObject(dataEntry.getValue());
final K key = (K) toObject(dataEntry.getKey());
result.put(key, value);
if (nearCache != null) {
nearCache.put(dataEntry.getKey(), value);
}
}
return result;
}
@Override
public Collection<V> values() {
MapValuesRequest request = new MapValuesRequest(name);
MapValueCollection mapValueCollection = invoke(request);
Collection<Data> collectionData = mapValueCollection.getValues();
Collection<V> collection = new ArrayList<V>(collectionData.size());
for (Data data : collectionData) {
final V value = toObject(data);
collection.add(value);
}
return collection;
}
@Override
public Set<Entry<K, V>> entrySet() {
MapEntrySetRequest request = new MapEntrySetRequest(name);
MapEntrySet result = invoke(request);
Set<Entry<K, V>> entrySet = new HashSet<Entry<K, V>>();
Set<Entry<Data, Data>> entries = result.getEntrySet();
for (Entry<Data, Data> dataEntry : entries) {
Data keyData = dataEntry.getKey();
Data valueData = dataEntry.getValue();
K key = toObject(keyData);
V value = toObject(valueData);
entrySet.add(new AbstractMap.SimpleEntry<K, V>(key, value));
}
return entrySet;
}
@Override
public Set<K> keySet(Predicate predicate) {
PagingPredicate pagingPredicate = null;
if (predicate instanceof PagingPredicate) {
pagingPredicate = (PagingPredicate) predicate;
pagingPredicate.setIterationType(IterationType.KEY);
if (pagingPredicate.getPage() > 0 && pagingPredicate.getAnchor() == null) {
pagingPredicate.previousPage();
keySet(pagingPredicate);
pagingPredicate.nextPage();
}
}
MapQueryRequest request = new MapQueryRequest(name, predicate, IterationType.KEY);
QueryResultSet result = invoke(request);
if (pagingPredicate == null) {
final HashSet<K> keySet = new HashSet<K>();
for (Object o : result) {
final K key = toObject(o);
keySet.add(key);
}
return keySet;
}
final Comparator<Entry> comparator = SortingUtil.newComparator(pagingPredicate.getComparator(), IterationType.KEY);
final SortedQueryResultSet sortedResult = new SortedQueryResultSet(comparator, IterationType.KEY,
pagingPredicate.getPageSize());
final Iterator<Entry> iterator = result.rawIterator();
while (iterator.hasNext()) {
final Entry entry = iterator.next();
final K key = toObject(entry.getKey());
final V value = toObject(entry.getValue());
sortedResult.add(new AbstractMap.SimpleImmutableEntry<K, V>(key, value));
}
PagingPredicateAccessor.setPagingPredicateAnchor(pagingPredicate, sortedResult.last());
return (Set<K>) sortedResult;
}
@Override
public Set<Entry<K, V>> entrySet(Predicate predicate) {
PagingPredicate pagingPredicate = null;
if (predicate instanceof PagingPredicate) {
pagingPredicate = (PagingPredicate) predicate;
pagingPredicate.setIterationType(IterationType.ENTRY);
if (pagingPredicate.getPage() > 0 && pagingPredicate.getAnchor() == null) {
pagingPredicate.previousPage();
entrySet(pagingPredicate);
pagingPredicate.nextPage();
}
}
MapQueryRequest request = new MapQueryRequest(name, predicate, IterationType.ENTRY);
QueryResultSet result = invoke(request);
Set entrySet;
if (pagingPredicate == null) {
entrySet = new HashSet<Entry<K, V>>(result.size());
} else {
entrySet = new SortedQueryResultSet(pagingPredicate.getComparator(), IterationType.ENTRY,
pagingPredicate.getPageSize());
}
for (Object data : result) {
AbstractMap.SimpleImmutableEntry<Data, Data> dataEntry = (AbstractMap.SimpleImmutableEntry<Data, Data>) data;
K key = toObject(dataEntry.getKey());
V value = toObject(dataEntry.getValue());
entrySet.add(new AbstractMap.SimpleEntry<K, V>(key, value));
}
if (pagingPredicate != null) {
PagingPredicateAccessor.setPagingPredicateAnchor(pagingPredicate, ((SortedQueryResultSet) entrySet).last());
}
return entrySet;
}
@Override
public Collection<V> values(Predicate predicate) {
PagingPredicate pagingPredicate = null;
if (predicate instanceof PagingPredicate) {
pagingPredicate = (PagingPredicate) predicate;
pagingPredicate.setIterationType(IterationType.VALUE);
if (pagingPredicate.getPage() > 0 && pagingPredicate.getAnchor() == null) {
pagingPredicate.previousPage();
values(pagingPredicate);
pagingPredicate.nextPage();
}
}
MapQueryRequest request = new MapQueryRequest(name, predicate, IterationType.VALUE);
QueryResultSet result = invoke(request);
if (pagingPredicate == null) {
final ArrayList<V> values = new ArrayList<V>(result.size());
for (Object data : result) {
V value = toObject(data);
values.add(value);
}
return values;
}
List<Entry<Object, V>> valueEntryList = new ArrayList<Entry<Object, V>>(result.size());
final Iterator<Entry> iterator = result.rawIterator();
while (iterator.hasNext()) {
final Entry entry = iterator.next();
K key = toObject(entry.getKey());
V value = toObject(entry.getValue());
valueEntryList.add(new AbstractMap.SimpleImmutableEntry<Object, V>(key, value));
}
Collections.sort(valueEntryList, SortingUtil.newComparator(pagingPredicate.getComparator(), IterationType.VALUE));
if (valueEntryList.size() > pagingPredicate.getPageSize()) {
valueEntryList = valueEntryList.subList(0, pagingPredicate.getPageSize());
}
Entry anchor = null;
if (valueEntryList.size() != 0) {
anchor = valueEntryList.get(valueEntryList.size() - 1);
}
PagingPredicateAccessor.setPagingPredicateAnchor(pagingPredicate, anchor);
final ArrayList<V> values = new ArrayList<V>(valueEntryList.size());
for (Entry<Object, V> objectVEntry : valueEntryList) {
values.add(objectVEntry.getValue());
}
return values;
}
@Override
public Set<K> localKeySet() {
throw new UnsupportedOperationException("Locality is ambiguous for client!!!");
}
@Override
public Set<K> localKeySet(Predicate predicate) {
throw new UnsupportedOperationException("Locality is ambiguous for client!!!");
}
@Override
public void addIndex(String attribute, boolean ordered) {
MapAddIndexRequest request = new MapAddIndexRequest(name, attribute, ordered);
invoke(request);
}
@Override
public LocalMapStats getLocalMapStats() {
initNearCache();
LocalMapStatsImpl localMapStats = new LocalMapStatsImpl();
if (nearCache != null) {
localMapStats.setNearCacheStats(nearCache.getNearCacheStats());
}
return localMapStats;
}
@Override
public Object executeOnKey(K key, EntryProcessor entryProcessor) {
final Data keyData = toData(key);
MapExecuteOnKeyRequest request = new MapExecuteOnKeyRequest(name, entryProcessor, keyData);
return invoke(request, keyData);
}
public void submitToKey(K key, EntryProcessor entryProcessor, final ExecutionCallback callback) {
final Data keyData = toData(key);
final MapExecuteOnKeyRequest request = new MapExecuteOnKeyRequest(name, entryProcessor, keyData);
try {
final ClientCallFuture future = (ClientCallFuture) getContext().getInvocationService().
invokeOnKeyOwner(request, keyData);
future.andThen(callback);
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
}
public Future submitToKey(K key, EntryProcessor entryProcessor) {
final Data keyData = toData(key);
final MapExecuteOnKeyRequest request = new MapExecuteOnKeyRequest(name, entryProcessor, keyData);
try {
final ICompletableFuture future = getContext().getInvocationService().invokeOnKeyOwner(request, keyData);
return new DelegatingFuture(future, getContext().getSerializationService());
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
}
@Override
public Map<K, Object> executeOnEntries(EntryProcessor entryProcessor) {
MapExecuteOnAllKeysRequest request = new MapExecuteOnAllKeysRequest(name, entryProcessor);
MapEntrySet entrySet = invoke(request);
Map<K, Object> result = new HashMap<K, Object>();
for (Entry<Data, Data> dataEntry : entrySet.getEntrySet()) {
final Data keyData = dataEntry.getKey();
final Data valueData = dataEntry.getValue();
K key = toObject(keyData);
result.put(key, toObject(valueData));
}
return result;
}
@Override
public Map<K, Object> executeOnEntries(EntryProcessor entryProcessor, Predicate predicate) {
MapExecuteWithPredicateRequest request = new MapExecuteWithPredicateRequest(name, entryProcessor, predicate);
MapEntrySet entrySet = invoke(request);
Map<K, Object> result = new HashMap<K, Object>();
for (Entry<Data, Data> dataEntry : entrySet.getEntrySet()) {
final Data keyData = dataEntry.getKey();
final Data valueData = dataEntry.getValue();
K key = toObject(keyData);
result.put(key, toObject(valueData));
}
return result;
}
@Override
public Map<K, Object> executeOnKeys(Set<K> keys, EntryProcessor entryProcessor) {
Set<Data> dataKeys = new HashSet<Data>(keys.size());
for (K key : keys) {
dataKeys.add(toData(key));
}
MapExecuteOnKeysRequest request = new MapExecuteOnKeysRequest(name, entryProcessor, dataKeys);
MapEntrySet entrySet = invoke(request);
Map<K, Object> result = new HashMap<K, Object>();
for (Entry<Data, Data> dataEntry : entrySet.getEntrySet()) {
final Data keyData = dataEntry.getKey();
final Data valueData = dataEntry.getValue();
K key = toObject(keyData);
result.put(key, toObject(valueData));
}
return result;
}
@Override
public void set(K key, V value) {
set(key, value, -1, null);
}
@Override
public int size() {
MapSizeRequest request = new MapSizeRequest(name);
Integer result = invoke(request);
return result;
}
@Override
public boolean isEmpty() {
return size() == 0;
}
@Override
public void putAll(Map<? extends K, ? extends V> m) {
MapEntrySet entrySet = new MapEntrySet();
for (Entry<? extends K, ? extends V> entry : m.entrySet()) {
final Data keyData = toData(entry.getKey());
invalidateNearCache(keyData);
entrySet.add(new AbstractMap.SimpleImmutableEntry<Data, Data>(keyData, toData(entry.getValue())));
}
MapPutAllRequest request = new MapPutAllRequest(name, entrySet);
invoke(request);
}
@Override
public void clear() {
MapClearRequest request = new MapClearRequest(name);
invoke(request);
}
@Override
protected void onDestroy() {
destroyNearCache();
}
private void destroyNearCache() {
if (nearCache != null) {
nearCache.destroy();
}
}
@Override
protected void onShutdown() {
destroyNearCache();
}
protected long getTimeInMillis(final long time, final TimeUnit timeunit) {
return timeunit != null ? timeunit.toMillis(time) : time;
}
private EventHandler<PortableEntryEvent> createHandler(final EntryListener<K, V> listener, final boolean includeValue) {
return new EventHandler<PortableEntryEvent>() {
public void handle(PortableEntryEvent event) {
V value = null;
V oldValue = null;
if (includeValue) {
value = toObject(event.getValue());
oldValue = toObject(event.getOldValue());
}
K key = toObject(event.getKey());
Member member = getContext().getClusterService().getMember(event.getUuid());
EntryEvent<K, V> entryEvent = new EntryEvent<K, V>(name, member,
event.getEventType().getType(), key, oldValue, value);
switch (event.getEventType()) {
case ADDED:
listener.entryAdded(entryEvent);
break;
case REMOVED:
listener.entryRemoved(entryEvent);
break;
case UPDATED:
listener.entryUpdated(entryEvent);
break;
case EVICTED:
listener.entryEvicted(entryEvent);
break;
default:
throw new IllegalArgumentException("Not a known event type " + event.getEventType());
}
}
@Override
public void onListenerRegister() {
}
};
}
private void invalidateNearCache(Data key) {
if (nearCache != null) {
nearCache.invalidate(key);
}
}
private void initNearCache() {
if (nearCacheInitialized.compareAndSet(false, true)) {
final NearCacheConfig nearCacheConfig = getContext().getClientConfig().getNearCacheConfig(name);
if (nearCacheConfig == null) {
return;
}
ClientNearCache<Data> nearCacheInternal = new ClientNearCache<Data>(
name, ClientNearCacheType.Map, getContext(), nearCacheConfig);
nearCache = nearCacheInternal;
}
}
@Override
public String toString() {
return "IMap{" + "name='" + getName() + '\'' + '}';
}
}
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_proxy_ClientMapProxy.java
|
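A stripped-down sketch of the read path in `ClientMapProxy.get()` above: consult the near cache first, fall back to the remote call, then populate the cache, using a sentinel so cached nulls are distinguishable from cache misses. All names below are hypothetical stand-ins, not the Hazelcast API.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class NearCacheReadDemo {
    private static final Object NULL_MARKER = new Object(); // stands in for ClientNearCache.NULL_OBJECT
    private final Map<String, Object> nearCache = new ConcurrentHashMap<String, Object>();

    Object get(String key) {
        Object cached = nearCache.get(key);
        if (cached != null) {
            // a cache hit: the marker means "remote said null"
            return cached == NULL_MARKER ? null : cached;
        }
        Object remote = remoteGet(key); // the expensive network round trip
        nearCache.put(key, remote == null ? NULL_MARKER : remote);
        return remote;
    }

    // stub for the MapGetRequest invocation in the real proxy
    Object remoteGet(String key) {
        return key.isEmpty() ? null : "value-for-" + key;
    }

    public static void main(String[] args) {
        NearCacheReadDemo map = new NearCacheReadDemo();
        System.out.println(map.get("a")); // remote fetch, then cached
        System.out.println(map.get("a")); // served from the near cache
    }
}
```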
5,302 |
return new Comparator<Terms.Bucket>() {
@Override
public int compare(Terms.Bucket o1, Terms.Bucket o2) {
double v1 = ((MetricsAggregator.SingleValue) aggregator).metric(((InternalTerms.Bucket) o1).bucketOrd);
double v2 = ((MetricsAggregator.SingleValue) aggregator).metric(((InternalTerms.Bucket) o2).bucketOrd);
// some metrics may return NaN (eg. avg, variance, etc...) in which case we'd like to push all of those to
// the bottom
if (Double.isNaN(v1)) { // == Double.NaN is always false; isNaN is the correct test
return asc ? 1 : -1;
}
return asc ? Double.compare(v1, v2) : Double.compare(v2, v1);
}
};
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_bucket_terms_InternalOrder.java
|
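The comparator above uses `Double.isNaN` rather than `==` because NaN compares unequal to everything, including itself. A small demonstration:

```java
public class NaNDemo {
    public static void main(String[] args) {
        double v = Double.NaN;
        System.out.println(v == Double.NaN);        // false: NaN != NaN under IEEE 754
        System.out.println(Double.isNaN(v));        // true: the correct test
        System.out.println(Double.compare(v, 1.0)); // positive: compare() orders NaN greatest
    }
}
```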
2,765 |
@ChannelHandler.Sharable
public class HttpRequestHandler extends SimpleChannelUpstreamHandler {
private final NettyHttpServerTransport serverTransport;
public HttpRequestHandler(NettyHttpServerTransport serverTransport) {
this.serverTransport = serverTransport;
}
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
HttpRequest request = (HttpRequest) e.getMessage();
// the netty HTTP handling always copies over the buffer to its own buffer, either in NioWorker internally
// when reading, or using a cumulation buffer
serverTransport.dispatchRequest(new NettyHttpRequest(request, e.getChannel()), new NettyHttpChannel(serverTransport, e.getChannel(), request));
super.messageReceived(ctx, e);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
serverTransport.exceptionCaught(ctx, e);
}
}
| 1no label
|
src_main_java_org_elasticsearch_http_netty_HttpRequestHandler.java
|
2,667 |
public class DefaultPortableReader implements PortableReader {
protected final ClassDefinition cd;
private final PortableSerializer serializer;
private final BufferObjectDataInput in;
private final int finalPosition;
private final int offset;
private boolean raw;
public DefaultPortableReader(PortableSerializer serializer, BufferObjectDataInput in, ClassDefinition cd) {
this.in = in;
this.serializer = serializer;
this.cd = cd;
try {
// final position after portable is read
finalPosition = in.readInt();
} catch (IOException e) {
throw new HazelcastSerializationException(e);
}
this.offset = in.position();
}
public int getVersion() {
return cd.getVersion();
}
public boolean hasField(String fieldName) {
return cd.hasField(fieldName);
}
public Set<String> getFieldNames() {
return cd.getFieldNames();
}
public FieldType getFieldType(String fieldName) {
return cd.getFieldType(fieldName);
}
public int getFieldClassId(String fieldName) {
return cd.getFieldClassId(fieldName);
}
public int readInt(String fieldName) throws IOException {
int pos = getPosition(fieldName);
return in.readInt(pos);
}
public long readLong(String fieldName) throws IOException {
int pos = getPosition(fieldName);
return in.readLong(pos);
}
public String readUTF(String fieldName) throws IOException {
final int currentPos = in.position();
try {
int pos = getPosition(fieldName);
in.position(pos);
return in.readUTF();
} finally {
in.position(currentPos);
}
}
public boolean readBoolean(String fieldName) throws IOException {
int pos = getPosition(fieldName);
return in.readBoolean(pos);
}
public byte readByte(String fieldName) throws IOException {
int pos = getPosition(fieldName);
return in.readByte(pos);
}
public char readChar(String fieldName) throws IOException {
int pos = getPosition(fieldName);
return in.readChar(pos);
}
public double readDouble(String fieldName) throws IOException {
int pos = getPosition(fieldName);
return in.readDouble(pos);
}
public float readFloat(String fieldName) throws IOException {
int pos = getPosition(fieldName);
return in.readFloat(pos);
}
public short readShort(String fieldName) throws IOException {
int pos = getPosition(fieldName);
return in.readShort(pos);
}
public byte[] readByteArray(String fieldName) throws IOException {
final int currentPos = in.position();
try {
int pos = getPosition(fieldName);
in.position(pos);
return IOUtil.readByteArray(in);
} finally {
in.position(currentPos);
}
}
public char[] readCharArray(String fieldName) throws IOException {
final int currentPos = in.position();
try {
int pos = getPosition(fieldName);
in.position(pos);
return in.readCharArray();
} finally {
in.position(currentPos);
}
}
public int[] readIntArray(String fieldName) throws IOException {
final int currentPos = in.position();
try {
int pos = getPosition(fieldName);
in.position(pos);
return in.readIntArray();
} finally {
in.position(currentPos);
}
}
public long[] readLongArray(String fieldName) throws IOException {
final int currentPos = in.position();
try {
int pos = getPosition(fieldName);
in.position(pos);
return in.readLongArray();
} finally {
in.position(currentPos);
}
}
public double[] readDoubleArray(String fieldName) throws IOException {
final int currentPos = in.position();
try {
int pos = getPosition(fieldName);
in.position(pos);
return in.readDoubleArray();
} finally {
in.position(currentPos);
}
}
public float[] readFloatArray(String fieldName) throws IOException {
final int currentPos = in.position();
try {
int pos = getPosition(fieldName);
in.position(pos);
return in.readFloatArray();
} finally {
in.position(currentPos);
}
}
public short[] readShortArray(String fieldName) throws IOException {
final int currentPos = in.position();
try {
int pos = getPosition(fieldName);
in.position(pos);
return in.readShortArray();
} finally {
in.position(currentPos);
}
}
public Portable readPortable(String fieldName) throws IOException {
FieldDefinition fd = cd.get(fieldName);
if (fd == null) {
throw throwUnknownFieldException(fieldName);
}
final int currentPos = in.position();
try {
int pos = getPosition(fd);
in.position(pos);
final boolean NULL = in.readBoolean();
if (!NULL) {
final PortableContextAwareInputStream ctxIn = (PortableContextAwareInputStream) in;
try {
ctxIn.setFactoryId(fd.getFactoryId());
ctxIn.setClassId(fd.getClassId());
return serializer.readAndInitialize(in);
} finally {
ctxIn.setFactoryId(cd.getFactoryId());
ctxIn.setClassId(cd.getClassId());
}
}
return null;
} finally {
in.position(currentPos);
}
}
private HazelcastSerializationException throwUnknownFieldException(String fieldName) {
return new HazelcastSerializationException("Unknown field name: '" + fieldName
+ "' for ClassDefinition {id: " + cd.getClassId() + ", version: " + cd.getVersion() + "}");
}
public Portable[] readPortableArray(String fieldName) throws IOException {
FieldDefinition fd = cd.get(fieldName);
if (fd == null) {
throw throwUnknownFieldException(fieldName);
}
final int currentPos = in.position();
try {
int pos = getPosition(fd);
in.position(pos);
final int len = in.readInt();
final Portable[] portables = new Portable[len];
if (len > 0) {
final int offset = in.position();
final PortableContextAwareInputStream ctxIn = (PortableContextAwareInputStream) in;
try {
ctxIn.setFactoryId(fd.getFactoryId());
ctxIn.setClassId(fd.getClassId());
for (int i = 0; i < len; i++) {
final int start = in.readInt(offset + i * 4);
in.position(start);
portables[i] = serializer.readAndInitialize(in);
}
} finally {
ctxIn.setFactoryId(cd.getFactoryId());
ctxIn.setClassId(cd.getClassId());
}
}
return portables;
} finally {
in.position(currentPos);
}
}
protected int getPosition(String fieldName) throws IOException {
if (raw) {
throw new HazelcastSerializationException("Cannot read Portable fields after getRawDataInput() is called!");
}
FieldDefinition fd = cd.get(fieldName);
if (fd == null) {
throw throwUnknownFieldException(fieldName);
}
return getPosition(fd);
}
protected int getPosition(FieldDefinition fd) throws IOException {
return in.readInt(offset + fd.getIndex() * 4);
}
public ObjectDataInput getRawDataInput() throws IOException {
if (!raw) {
int pos = in.readInt(offset + cd.getFieldCount() * 4);
in.position(pos);
}
raw = true;
return in;
}
void end() throws IOException {
in.position(finalPosition);
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_nio_serialization_DefaultPortableReader.java
|
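Nearly every `readXxx` method in `DefaultPortableReader` follows the same save/seek/read/restore pattern around the stream position. A self-contained illustration of that pattern with a plain `java.nio.ByteBuffer`:

```java
import java.nio.ByteBuffer;

public class PositionalReadDemo {
    // read an int at an arbitrary offset without disturbing the cursor,
    // mirroring the try/finally pattern in DefaultPortableReader
    static int readIntAt(ByteBuffer buf, int pos) {
        final int saved = buf.position(); // remember the current cursor
        try {
            buf.position(pos);            // jump to the field's offset
            return buf.getInt();          // relative read at that offset
        } finally {
            buf.position(saved);          // always restore the cursor
        }
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        buf.putInt(0, 7);
        buf.putInt(4, 42);
        System.out.println(readIntAt(buf, 4)); // 42
        System.out.println(buf.position());    // 0: cursor untouched
    }
}
```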
1,015 |
@SuppressWarnings({"unchecked", "rawtypes"})
public class OChainedIndexProxy<T> implements OIndex<T> {
private final OIndex<T> index;
private final List<OIndex<?>> indexChain;
private final OIndex<?> lastIndex;
private final boolean isOneValue;
/**
* Creates proxies that support the maximum number of different operations. When several different indexes that support
* different operations (e.g. indexes of {@code UNIQUE} and {@code FULLTEXT} types) are available, only one index
* of each type is created.
*
* @param index - the index the proxies are created for
* @param longChain - property chain from the query, which should be evaluated
* @param database - current database instance
* @return proxies needed to process the query.
*/
public static <T> Collection<OChainedIndexProxy<T>> createdProxy(OIndex<T> index, OSQLFilterItemField.FieldChain longChain,
ODatabaseComplex<?> database) {
Collection<OChainedIndexProxy<T>> proxies = new ArrayList<OChainedIndexProxy<T>>();
for (List<OIndex<?>> indexChain : getIndexesForChain(index, longChain, database)) {
proxies.add(new OChainedIndexProxy<T>(index, indexChain));
}
return proxies;
}
private OChainedIndexProxy(OIndex<T> index, List<OIndex<?>> indexChain) {
this.index = index;
this.indexChain = Collections.unmodifiableList(indexChain);
lastIndex = indexChain.get(indexChain.size() - 1);
isOneValue = isAllOneValue(indexChain);
}
private boolean isAllOneValue(List<OIndex<?>> indexChain) {
for (OIndex<?> oIndex : indexChain) {
if (!(oIndex.getInternal() instanceof OIndexOneValue))
return false;
}
return true;
}
public String getDatabaseName() {
return index.getDatabaseName();
}
public List<String> getIndexNames() {
final ArrayList<String> names = new ArrayList<String>(indexChain.size());
for (OIndex<?> oIndex : indexChain) {
names.add(oIndex.getName());
}
return names;
}
@Override
public String getName() {
final StringBuilder res = new StringBuilder("IndexChain{");
final List<String> indexNames = getIndexNames();
for (int i = 0; i < indexNames.size(); i++) {
String indexName = indexNames.get(i);
if (i > 0)
res.append(", ");
res.append(indexName);
}
res.append("}");
return res.toString();
}
/**
* {@inheritDoc}
*/
@Override
public T get(Object iKey) {
final Object lastIndexResult = lastIndex.get(iKey);
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
applyTailIndexes(lastIndexResult, new IndexValuesResultListener() {
@Override
public boolean addResult(OIdentifiable value) {
result.add(value);
return true;
}
});
if (isOneValue)
return (T) (result.isEmpty() ? null : result.iterator().next());
return (T) result;
}
public long count(Object iKey) {
return index.count(iKey);
}
/**
* {@inheritDoc}
*/
@Override
public Collection<OIdentifiable> getValuesBetween(Object iRangeFrom, Object iRangeTo) {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
final Object lastIndexValuesBetween = lastIndex.getValuesBetween(iRangeFrom, iRangeTo);
applyTailIndexes(lastIndexValuesBetween, new IndexValuesResultListener() {
@Override
public boolean addResult(OIdentifiable value) {
result.add(value);
return true;
}
});
return result;
}
/**
* {@inheritDoc}
*/
@Override
public Collection<OIdentifiable> getValuesBetween(Object iRangeFrom, boolean iFromInclusive, Object iRangeTo, boolean iToInclusive) {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
final Object lastIndexValuesBetween = lastIndex.getValuesBetween(iRangeFrom, iFromInclusive, iRangeTo, iToInclusive);
applyTailIndexes(lastIndexValuesBetween, new IndexValuesResultListener() {
@Override
public boolean addResult(OIdentifiable value) {
result.add(value);
return true;
}
});
return result;
}
/**
* {@inheritDoc}
*/
@Override
public void getValuesBetween(Object iRangeFrom, boolean iFromInclusive, Object iRangeTo, boolean iToInclusive,
IndexValuesResultListener resultListener) {
final Object result = lastIndex.getValuesBetween(iRangeFrom, iFromInclusive, iRangeTo, iToInclusive);
applyTailIndexes(result, resultListener);
}
@Override
public long count(final Object iRangeFrom, final boolean iFromInclusive, final Object iRangeTo, final boolean iToInclusive,
final int maxValuesToFetch) {
return lastIndex.count(iRangeFrom, iFromInclusive, iRangeTo, iToInclusive, maxValuesToFetch);
}
/**
* {@inheritDoc}
*/
@Override
public Collection<OIdentifiable> getValuesMajor(Object fromKey, boolean isInclusive) {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
final Object lastIndexValuesMajor = lastIndex.getValuesMajor(fromKey, isInclusive);
applyTailIndexes(lastIndexValuesMajor, new IndexValuesResultListener() {
@Override
public boolean addResult(OIdentifiable value) {
result.add(value);
return true;
}
});
return result;
}
/**
* {@inheritDoc}
*/
@Override
public void getValuesMajor(Object fromKey, boolean isInclusive, IndexValuesResultListener resultListener) {
final Object result = lastIndex.getValuesMajor(fromKey, isInclusive);
applyTailIndexes(result, resultListener);
}
/**
* {@inheritDoc}
*/
@Override
public Collection<OIdentifiable> getValuesMinor(Object toKey, boolean isInclusive) {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
final Object lastIndexValuesMinor = lastIndex.getValuesMinor(toKey, isInclusive);
applyTailIndexes(lastIndexValuesMinor, new IndexValuesResultListener() {
@Override
public boolean addResult(OIdentifiable value) {
result.add(value);
return true;
}
});
return result;
}
/**
* {@inheritDoc}
*/
@Override
public void getValuesMinor(Object toKey, boolean isInclusive, IndexValuesResultListener resultListener) {
final Object result = lastIndex.getValuesMinor(toKey, isInclusive);
applyTailIndexes(result, resultListener);
}
/**
* {@inheritDoc}
*/
@Override
public Collection<OIdentifiable> getValues(Collection<?> iKeys) {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
final Object lastIndexResult = lastIndex.getValues(iKeys);
applyTailIndexes(lastIndexResult, new IndexValuesResultListener() {
@Override
public boolean addResult(OIdentifiable value) {
result.add(value);
return true;
}
});
return result;
}
/**
* {@inheritDoc}
*/
@Override
public void getValues(Collection<?> iKeys, IndexValuesResultListener resultListener) {
final Object result = lastIndex.getValues(iKeys);
applyTailIndexes(result, resultListener);
}
/**
* Returns the internal index of the last index in the chain, because the proxy supports every operation that the last index supports.
*/
public OIndexInternal<T> getInternal() {
return (OIndexInternal<T>) lastIndex.getInternal();
}
/**
* {@inheritDoc}
*/
public OIndexDefinition getDefinition() {
return lastIndex.getDefinition();
}
private void applyTailIndexes(final Object result, IndexValuesResultListener resultListener) {
final OIndex<?> previousIndex = indexChain.get(indexChain.size() - 2);
Set<Comparable> currentKeys = prepareKeys(previousIndex, result);
for (int j = indexChain.size() - 2; j > 0; j--) {
Set<Comparable> newKeys = new TreeSet<Comparable>();
final OIndex<?> currentIndex = indexChain.get(j);
for (Comparable currentKey : currentKeys) {
final Object currentResult = currentIndex.get(currentKey);
final Set<Comparable> preparedKeys;
preparedKeys = prepareKeys(indexChain.get(j - 1), currentResult);
newKeys.addAll(preparedKeys);
}
updateStatistic(currentIndex);
currentKeys = newKeys;
}
applyMainIndex(currentKeys, resultListener);
}
private Set<Comparable> convertResult(Object result, Class<?> targetType) {
final Set<Comparable> newKeys;
if (result instanceof Set) {
newKeys = new TreeSet<Comparable>();
for (Object o : ((Set) result)) {
newKeys.add((Comparable) OType.convert(o, targetType));
}
return newKeys;
} else {
return Collections.singleton((Comparable) OType.convert(result, targetType));
}
}
/**
* Performs type conversion of keys for the given index.
*
* @param index - index the keys are prepared for.
* @param keys - keys that should be converted.
* @return keys converted to the necessary type.
*/
private Set<Comparable> prepareKeys(OIndex<?> index, Object keys) {
final Class<?> targetType = index.getKeyTypes()[0].getDefaultJavaType();
return convertResult(keys, targetType);
}
private void applyMainIndex(Iterable<Comparable> currentKeys, IndexValuesResultListener resultListener) {
keysLoop:
for (Comparable key : currentKeys) {
final T result = index.get(index.getDefinition().createValue(key));
if (result instanceof Set) {
for (T o : (Set<T>) result) {
if (!resultListener.addResult((OIdentifiable) o))
break keysLoop;
}
} else {
if (!resultListener.addResult((OIdentifiable) result))
break;
}
}
updateStatistic(index);
}
private static Iterable<List<OIndex<?>>> getIndexesForChain(OIndex<?> index, OSQLFilterItemField.FieldChain fieldChain,
ODatabaseComplex<?> database) {
List<OIndex<?>> baseIndexes = prepareBaseIndexes(index, fieldChain, database);
Collection<OIndex<?>> lastIndexes = prepareLastIndexVariants(index, fieldChain, database);
Collection<List<OIndex<?>>> result = new ArrayList<List<OIndex<?>>>();
for (OIndex<?> lastIndex : lastIndexes) {
final List<OIndex<?>> indexes = new ArrayList<OIndex<?>>(fieldChain.getItemCount());
indexes.addAll(baseIndexes);
indexes.add(lastIndex);
result.add(indexes);
}
return result;
}
private static Collection<OIndex<?>> prepareLastIndexVariants(OIndex<?> index, OSQLFilterItemField.FieldChain fieldChain,
ODatabaseComplex<?> database) {
OClass oClass = database.getMetadata().getSchema().getClass(index.getDefinition().getClassName());
for (int i = 0; i < fieldChain.getItemCount() - 1; i++) {
oClass = oClass.getProperty(fieldChain.getItemName(i)).getLinkedClass();
}
final Set<OIndex<?>> involvedIndexes = new TreeSet<OIndex<?>>(new Comparator<OIndex<?>>() {
public int compare(OIndex<?> o1, OIndex<?> o2) {
return o1.getDefinition().getParamCount() - o2.getDefinition().getParamCount();
}
});
involvedIndexes.addAll(oClass.getInvolvedIndexes(fieldChain.getItemName(fieldChain.getItemCount() - 1)));
final Collection<Class<? extends OIndex>> indexTypes = new HashSet<Class<? extends OIndex>>(3);
final Collection<OIndex<?>> result = new ArrayList<OIndex<?>>();
for (OIndex<?> involvedIndex : involvedIndexes) {
if (!indexTypes.contains(involvedIndex.getInternal().getClass())) {
result.add(involvedIndex);
indexTypes.add(involvedIndex.getInternal().getClass());
}
}
return result;
}
private static List<OIndex<?>> prepareBaseIndexes(OIndex<?> index, OSQLFilterItemField.FieldChain fieldChain,
ODatabaseComplex<?> database) {
List<OIndex<?>> result = new ArrayList<OIndex<?>>(fieldChain.getItemCount() - 1);
result.add(index);
OClass oClass = database.getMetadata().getSchema().getClass(index.getDefinition().getClassName());
oClass = oClass.getProperty(fieldChain.getItemName(0)).getLinkedClass();
for (int i = 1; i < fieldChain.getItemCount() - 1; i++) {
final Set<OIndex<?>> involvedIndexes = oClass.getInvolvedIndexes(fieldChain.getItemName(i));
final OIndex<?> bestIndex = findBestIndex(involvedIndexes);
result.add(bestIndex);
oClass = oClass.getProperty(fieldChain.getItemName(i)).getLinkedClass();
}
return result;
}
private static OIndex<?> findBestIndex(Iterable<OIndex<?>> involvedIndexes) {
OIndex<?> bestIndex = null;
for (OIndex<?> index : involvedIndexes) {
bestIndex = index;
OIndexInternal<?> bestInternalIndex = index.getInternal();
if (bestInternalIndex instanceof OIndexUnique || bestInternalIndex instanceof OIndexNotUnique) {
return index;
}
}
return bestIndex;
}
/**
* Registers statistic information about index usage in {@link OProfiler}.
*
* @param index the index whose usage is being registered.
*/
private void updateStatistic(OIndex<?> index) {
final OProfilerMBean profiler = Orient.instance().getProfiler();
if (profiler.isRecording()) {
Orient.instance().getProfiler()
.updateCounter(profiler.getDatabaseMetric(index.getDatabaseName(), "query.indexUsed"), "Used index in query", +1);
final int paramCount = index.getDefinition().getParamCount();
if (paramCount > 1) {
final String profiler_prefix = profiler.getDatabaseMetric(index.getDatabaseName(), "query.compositeIndexUsed");
profiler.updateCounter(profiler_prefix, "Used composite index in query", +1);
profiler.updateCounter(profiler_prefix + "." + paramCount, "Used composite index in query with " + paramCount + " params",
+1);
}
}
}
public void checkEntry(final OIdentifiable iRecord, final Object iKey) {
index.checkEntry(iRecord, iKey);
}
//
// Following methods are not allowed for proxy.
//
@Override
public OIndex<T> create(String name, OIndexDefinition indexDefinition, String clusterIndexName, Set<String> clustersToIndex,
boolean rebuild, OProgressListener progressListener) {
throw new UnsupportedOperationException("Not allowed operation");
}
@Override
public Collection<ODocument> getEntriesMajor(Object fromKey, boolean isInclusive) {
throw new UnsupportedOperationException("Not allowed operation");
}
@Override
public void getEntriesMajor(Object fromKey, boolean isInclusive, IndexEntriesResultListener resultListener) {
throw new UnsupportedOperationException("Not allowed operation");
}
@Override
public Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive) {
throw new UnsupportedOperationException("Not allowed operation");
}
@Override
public void getEntriesMinor(Object toKey, boolean isInclusive, IndexEntriesResultListener resultListener) {
throw new UnsupportedOperationException("Not allowed operation");
}
@Override
public Collection<ODocument> getEntriesBetween(Object iRangeFrom, Object iRangeTo, boolean iInclusive) {
throw new UnsupportedOperationException("Not allowed operation");
}
@Override
public void getEntriesBetween(Object iRangeFrom, Object iRangeTo, boolean iInclusive, IndexEntriesResultListener resultListener) {
throw new UnsupportedOperationException("Not allowed operation");
}
@Override
public Collection<ODocument> getEntriesBetween(Object iRangeFrom, Object iRangeTo) {
throw new UnsupportedOperationException("Not allowed operation");
}
@Override
public Collection<ODocument> getEntries(Collection<?> iKeys) {
throw new UnsupportedOperationException("Not allowed operation");
}
public void getEntries(Collection<?> iKeys, IndexEntriesResultListener resultListener) {
throw new UnsupportedOperationException("Not allowed operation");
}
public boolean contains(Object iKey) {
throw new UnsupportedOperationException("Not allowed operation");
}
public void unload() {
throw new UnsupportedOperationException("Not allowed operation");
}
public OType[] getKeyTypes() {
throw new UnsupportedOperationException("Not allowed operation");
}
public Iterator<Map.Entry<Object, T>> iterator() {
throw new UnsupportedOperationException("Not allowed operation");
}
public Iterator<Map.Entry<Object, T>> inverseIterator() {
throw new UnsupportedOperationException("Not allowed operation");
}
public Iterator<OIdentifiable> valuesIterator() {
throw new UnsupportedOperationException("Not allowed operation");
}
public Iterator<OIdentifiable> valuesInverseIterator() {
throw new UnsupportedOperationException("Not allowed operation");
}
public OIndex<T> put(Object iKey, OIdentifiable iValue) {
throw new UnsupportedOperationException("Not allowed operation");
}
public boolean remove(Object key) {
throw new UnsupportedOperationException("Not allowed operation");
}
public boolean remove(Object iKey, OIdentifiable iRID) {
throw new UnsupportedOperationException("Not allowed operation");
}
public int remove(OIdentifiable iRID) {
throw new UnsupportedOperationException("Not allowed operation");
}
public OIndex<T> clear() {
throw new UnsupportedOperationException("Not allowed operation");
}
public Iterable<Object> keys() {
throw new UnsupportedOperationException("Not allowed operation");
}
public long getSize() {
throw new UnsupportedOperationException("Not allowed operation");
}
public long getKeySize() {
throw new UnsupportedOperationException("Not allowed operation");
}
@Override
public void flush() {
throw new UnsupportedOperationException("Not allowed operation");
}
public OIndex<T> delete() {
throw new UnsupportedOperationException("Not allowed operation");
}
@Override
public void deleteWithoutIndexLoad(String indexName) {
throw new UnsupportedOperationException("Not allowed operation");
}
public String getType() {
throw new UnsupportedOperationException("Not allowed operation");
}
public boolean isAutomatic() {
throw new UnsupportedOperationException("Not allowed operation");
}
public long rebuild() {
throw new UnsupportedOperationException("Not allowed operation");
}
public long rebuild(OProgressListener iProgressListener) {
throw new UnsupportedOperationException("Not allowed operation");
}
public ODocument getConfiguration() {
throw new UnsupportedOperationException("Not allowed operation");
}
public ORID getIdentity() {
throw new UnsupportedOperationException("Not allowed operation");
}
public void commit(ODocument iDocument) {
throw new UnsupportedOperationException("Not allowed operation");
}
public Set<String> getClusters() {
throw new UnsupportedOperationException("Not allowed operation");
}
public boolean supportsOrderedIterations() {
return false;
}
@Override
public boolean isRebuiding() {
return false;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_OChainedIndexProxy.java
|
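A hypothetical sketch of the chain walk in `applyTailIndexes()` above: a query on a chained property such as `address.city` is answered by reading the last index first, then mapping its results back through the preceding index. Plain maps stand in for the OrientDB indexes.

```java
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;

public class ChainWalkDemo {
    public static void main(String[] args) {
        // last index in the chain: city -> matching address ids
        Map<String, Set<Integer>> cityIndex = Map.of("Rome", Set.of(10, 11));
        // first index in the chain: address id -> documents referencing it
        Map<Integer, Set<Integer>> addressIndex = Map.of(10, Set.of(1), 11, Set.of(2, 3));

        // walk the chain backwards: city match -> addresses -> documents
        Set<Integer> docs = new TreeSet<>();
        for (int addr : cityIndex.getOrDefault("Rome", Set.of())) {
            docs.addAll(addressIndex.getOrDefault(addr, Set.of()));
        }
        System.out.println(docs); // [1, 2, 3]
    }
}
```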
216 |
public class ClientOutSelectorImpl extends ClientAbstractIOSelector {
public ClientOutSelectorImpl(ThreadGroup threadGroup) {
super(threadGroup, "OutSelector");
}
@Override
protected void handleSelectionKey(SelectionKey sk) {
if (sk.isValid() && sk.isWritable()) {
sk.interestOps(sk.interestOps() & ~SelectionKey.OP_WRITE);
final SelectionHandler handler = (SelectionHandler) sk.attachment();
handler.handle();
}
}
}
| 0true
|
hazelcast-client_src_main_java_com_hazelcast_client_connection_nio_ClientOutSelectorImpl.java
|
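The `handleSelectionKey` above drops `OP_WRITE` from the key's interest set once the channel is writable; the operation is plain bit-mask removal, shown standalone below.

```java
import java.nio.channels.SelectionKey;

public class InterestOpsDemo {
    public static void main(String[] args) {
        int ops = SelectionKey.OP_READ | SelectionKey.OP_WRITE;
        ops &= ~SelectionKey.OP_WRITE;                   // stop watching for writability
        System.out.println(ops == SelectionKey.OP_READ); // true: only read interest remains
    }
}
```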
5,057 |
public class SearchContextMissingException extends ElasticsearchException {
private final long id;
public SearchContextMissingException(long id) {
super("No search context found for id [" + id + "]");
this.id = id;
}
public long id() {
return this.id;
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_SearchContextMissingException.java
|
1,972 |
@Entity
@EntityListeners(value = { TemporalTimestampListener.class })
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_CUSTOMER_ADDRESS", uniqueConstraints = @UniqueConstraint(name = "CSTMR_ADDR_UNIQUE_CNSTRNT", columnNames = { "CUSTOMER_ID", "ADDRESS_NAME" }))
@AdminPresentationMergeOverrides(
{
@AdminPresentationMergeOverride(name = "address.firstName", mergeEntries =
@AdminPresentationMergeEntry(propertyType = PropertyType.AdminPresentation.EXCLUDED, booleanOverrideValue = true)),
@AdminPresentationMergeOverride(name = "address.lastName", mergeEntries =
@AdminPresentationMergeEntry(propertyType = PropertyType.AdminPresentation.EXCLUDED, booleanOverrideValue = true)),
@AdminPresentationMergeOverride(name = "address.addressLine1", mergeEntries =
@AdminPresentationMergeEntry(propertyType = PropertyType.AdminPresentation.PROMINENT, booleanOverrideValue = true))
}
)
@AdminPresentationClass(populateToOneFields = PopulateToOneFieldsEnum.TRUE)
public class CustomerAddressImpl implements CustomerAddress {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "CustomerAddressId")
@GenericGenerator(
name="CustomerAddressId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="CustomerAddressImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.profile.core.domain.CustomerAddressImpl")
}
)
@Column(name = "CUSTOMER_ADDRESS_ID")
protected Long id;
@Column(name = "ADDRESS_NAME")
@AdminPresentation(friendlyName = "CustomerAddressImpl_Address_Name", order=1,
group = "CustomerAddressImpl_Identification", groupOrder = 1, prominent = true, gridOrder = 1)
protected String addressName;
@ManyToOne(cascade = {CascadeType.PERSIST, CascadeType.MERGE}, targetEntity = CustomerImpl.class, optional=false)
@JoinColumn(name = "CUSTOMER_ID")
@AdminPresentation(excluded = true, visibility = VisibilityEnum.HIDDEN_ALL)
protected Customer customer;
@ManyToOne(cascade = CascadeType.ALL, targetEntity = AddressImpl.class, optional=false)
@JoinColumn(name = "ADDRESS_ID")
@Index(name="CUSTOMERADDRESS_ADDRESS_INDEX", columnNames={"ADDRESS_ID"})
protected Address address;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public String getAddressName() {
return addressName;
}
@Override
public void setAddressName(String addressName) {
this.addressName = addressName;
}
@Override
public Customer getCustomer() {
return customer;
}
@Override
public void setCustomer(Customer customer) {
this.customer = customer;
}
@Override
public Address getAddress() {
return address;
}
@Override
public void setAddress(Address address) {
this.address = address;
}
@Override
public String toString() {
return (addressName == null)
? address.getFirstName() + " - " + address.getAddressLine1()
: addressName;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((address == null) ? 0 : address.hashCode());
result = prime * result + ((addressName == null) ? 0 : addressName.hashCode());
result = prime * result + ((customer == null) ? 0 : customer.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
CustomerAddressImpl other = (CustomerAddressImpl) obj;
if (id != null && other.id != null) {
return id.equals(other.id);
}
if (address == null) {
if (other.address != null) {
return false;
}
} else if (!address.equals(other.address)) {
return false;
}
if (addressName == null) {
if (other.addressName != null) {
return false;
}
} else if (!addressName.equals(other.addressName)) {
return false;
}
if (customer == null) {
if (other.customer != null) {
return false;
}
} else if (!customer.equals(other.customer)) {
return false;
}
return true;
}
}
| 1no label
|
core_broadleaf-profile_src_main_java_org_broadleafcommerce_profile_core_domain_CustomerAddressImpl.java
|
54 |
@SuppressWarnings("serial")
static final class ForEachKeyTask<K,V>
extends BulkTask<K,V,Void> {
final Action<? super K> action;
ForEachKeyTask
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
Action<? super K> action) {
super(p, b, i, f, t);
this.action = action;
}
public final void compute() {
final Action<? super K> action;
if ((action = this.action) != null) {
for (int i = baseIndex, f, h; batch > 0 &&
(h = ((f = baseLimit) + i) >>> 1) > i;) {
addToPendingCount(1);
new ForEachKeyTask<K,V>
(this, batch >>>= 1, baseLimit = h, f, tab,
action).fork();
}
for (Node<K,V> p; (p = advance()) != null;)
action.apply(p.key);
propagateCompletion();
}
}
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
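`ForEachKeyTask` halves its range and forks the right half while looping on the left, the standard `CountedCompleter` splitting shape. A self-contained sketch of the same shape over a plain array (summing instead of applying an action):

```java
import java.util.concurrent.CountedCompleter;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.atomic.AtomicLong;

public class SplitSumDemo extends CountedCompleter<Void> {
    final long[] a;
    final int lo, hi;
    final AtomicLong sum;

    SplitSumDemo(CountedCompleter<?> parent, long[] a, int lo, int hi, AtomicLong sum) {
        super(parent);
        this.a = a; this.lo = lo; this.hi = hi; this.sum = sum;
    }

    @Override
    public void compute() {
        int l = lo, h = hi;
        while (h - l > 1) {                            // split while the range is divisible
            int mid = (l + h) >>> 1;
            addToPendingCount(1);                      // account for the forked child
            new SplitSumDemo(this, a, mid, h, sum).fork(); // right half runs asynchronously
            h = mid;                                   // keep working on the left half
        }
        for (int i = l; i < h; i++) {
            sum.addAndGet(a[i]);                       // leaf work
        }
        propagateCompletion();                         // complete once all children finish
    }

    public static void main(String[] args) {
        long[] a = {1, 2, 3, 4, 5};
        AtomicLong sum = new AtomicLong();
        ForkJoinPool.commonPool().invoke(new SplitSumDemo(null, a, 0, a.length, sum));
        System.out.println(sum.get()); // 15
    }
}
```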
294 |
public interface ShardOperationFailedException extends Streamable, Serializable {
/**
* The index the operation failed on. Might return <tt>null</tt> if it can't be derived.
*/
String index();
/**
* The shard id the operation failed on. Might return <tt>-1</tt> if it can't be derived.
*/
int shardId();
/**
* The reason of the failure.
*/
String reason();
/**
* The status of the failure.
*/
RestStatus status();
}
| 0true
|
src_main_java_org_elasticsearch_action_ShardOperationFailedException.java
|
831 |
public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, SearchResponse, SearchRequestBuilder> {
private SearchSourceBuilder sourceBuilder;
public SearchRequestBuilder(Client client) {
super((InternalClient) client, new SearchRequest());
}
/**
* Sets the indices the search will be executed on.
*/
public SearchRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}
/**
* The document types to execute the search against. Defaults to executing against
* all types.
*/
public SearchRequestBuilder setTypes(String... types) {
request.types(types);
return this;
}
/**
* The search type to execute, defaults to {@link org.elasticsearch.action.search.SearchType#DEFAULT}.
*/
public SearchRequestBuilder setSearchType(SearchType searchType) {
request.searchType(searchType);
return this;
}
/**
* A string representation of the search type to execute, defaults to {@link SearchType#DEFAULT}. Can be
* one of "dfs_query_then_fetch"/"dfsQueryThenFetch", "dfs_query_and_fetch"/"dfsQueryAndFetch",
* "query_then_fetch"/"queryThenFetch", and "query_and_fetch"/"queryAndFetch".
*/
public SearchRequestBuilder setSearchType(String searchType) throws ElasticsearchIllegalArgumentException {
request.searchType(searchType);
return this;
}
/**
* If set, will enable scrolling of the search request.
*/
public SearchRequestBuilder setScroll(Scroll scroll) {
request.scroll(scroll);
return this;
}
/**
* If set, will enable scrolling of the search request for the specified timeout.
*/
public SearchRequestBuilder setScroll(TimeValue keepAlive) {
request.scroll(keepAlive);
return this;
}
/**
* If set, will enable scrolling of the search request for the specified timeout.
*/
public SearchRequestBuilder setScroll(String keepAlive) {
request.scroll(keepAlive);
return this;
}
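// Illustrative sketch (added comment, not original code): the scroll setters above are
// typically combined with a page size; "client" and the index name are placeholders.
//
//   SearchResponse scrollResp = client.prepareSearch("my_index")
//           .setSearchType(SearchType.SCAN)
//           .setScroll(TimeValue.timeValueMinutes(1))
//           .setSize(100)
//           .execute().actionGet();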
/**
* An optional timeout to control how long search is allowed to take.
*/
public SearchRequestBuilder setTimeout(TimeValue timeout) {
sourceBuilder().timeout(timeout);
return this;
}
/**
* An optional timeout to control how long search is allowed to take.
*/
public SearchRequestBuilder setTimeout(String timeout) {
sourceBuilder().timeout(timeout);
return this;
}
/**
* A comma separated list of routing values to control the shards the search will be executed on.
*/
public SearchRequestBuilder setRouting(String routing) {
request.routing(routing);
return this;
}
/**
* The routing values to control the shards that the search will be executed on.
*/
public SearchRequestBuilder setRouting(String... routing) {
request.routing(routing);
return this;
}
/**
* Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
* <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
* a custom value, which guarantees that the same order will be used across different requests.
*/
public SearchRequestBuilder setPreference(String preference) {
request.preference(preference);
return this;
}
/**
* Controls the search operation threading model.
*/
public SearchRequestBuilder setOperationThreading(SearchOperationThreading operationThreading) {
request.operationThreading(operationThreading);
return this;
}
/**
* Sets the string representation of the operation threading model. Can be one of
* "no_threads", "single_thread" and "thread_per_shard".
*/
public SearchRequestBuilder setOperationThreading(String operationThreading) {
request.operationThreading(operationThreading);
return this;
}
/**
* Specifies what type of requested indices to ignore and how to handle wildcard indices expressions.
*
* For example, whether indices that don't exist should be ignored.
*/
public SearchRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
request().indicesOptions(indicesOptions);
return this;
}
/**
* Constructs a new search source builder with a search query.
*
* @see org.elasticsearch.index.query.QueryBuilders
*/
public SearchRequestBuilder setQuery(QueryBuilder queryBuilder) {
sourceBuilder().query(queryBuilder);
return this;
}
/**
* Constructs a new search source builder with a raw search query.
*/
public SearchRequestBuilder setQuery(String query) {
sourceBuilder().query(query);
return this;
}
/**
* Constructs a new search source builder with a raw search query.
*/
public SearchRequestBuilder setQuery(BytesReference queryBinary) {
sourceBuilder().query(queryBinary);
return this;
}
/**
* Constructs a new search source builder with a raw search query.
*/
public SearchRequestBuilder setQuery(byte[] queryBinary) {
sourceBuilder().query(queryBinary);
return this;
}
/**
* Constructs a new search source builder with a raw search query.
*/
public SearchRequestBuilder setQuery(byte[] queryBinary, int queryBinaryOffset, int queryBinaryLength) {
sourceBuilder().query(queryBinary, queryBinaryOffset, queryBinaryLength);
return this;
}
/**
* Constructs a new search source builder with a raw search query.
*/
public SearchRequestBuilder setQuery(XContentBuilder query) {
sourceBuilder().query(query);
return this;
}
/**
* Constructs a new search source builder with a raw search query.
*/
public SearchRequestBuilder setQuery(Map query) {
sourceBuilder().query(query);
return this;
}
/**
* Sets a filter that will be executed after the query has been executed and only has effect on the search hits
* (not aggregations or facets). This filter is always executed as last filtering mechanism.
*/
public SearchRequestBuilder setPostFilter(FilterBuilder postFilter) {
sourceBuilder().postFilter(postFilter);
return this;
}
/**
* Sets a filter on the query executed that only applies to the search query
* (and not facets for example).
*/
public SearchRequestBuilder setPostFilter(String postFilter) {
sourceBuilder().postFilter(postFilter);
return this;
}
/**
* Sets a filter on the query executed that only applies to the search query
* (and not facets for example).
*/
public SearchRequestBuilder setPostFilter(BytesReference postFilter) {
sourceBuilder().postFilter(postFilter);
return this;
}
/**
* Sets a filter on the query executed that only applies to the search query
* (and not facets for example).
*/
public SearchRequestBuilder setPostFilter(byte[] postFilter) {
sourceBuilder().postFilter(postFilter);
return this;
}
/**
* Sets a filter on the query executed that only applies to the search query
* (and not facets for example).
*/
public SearchRequestBuilder setPostFilter(byte[] postFilter, int postFilterOffset, int postFilterLength) {
sourceBuilder().postFilter(postFilter, postFilterOffset, postFilterLength);
return this;
}
/**
* Sets a filter on the query executed that only applies to the search query
* (and not facets for example).
*/
public SearchRequestBuilder setPostFilter(XContentBuilder postFilter) {
sourceBuilder().postFilter(postFilter);
return this;
}
/**
* Sets a filter on the query executed that only applies to the search query
* (and not facets for example).
*/
public SearchRequestBuilder setPostFilter(Map postFilter) {
sourceBuilder().postFilter(postFilter);
return this;
}
/**
* Sets the minimum score below which docs will be filtered out.
*/
public SearchRequestBuilder setMinScore(float minScore) {
sourceBuilder().minScore(minScore);
return this;
}
/**
* The index to start the search hits from. Defaults to <tt>0</tt>.
*/
public SearchRequestBuilder setFrom(int from) {
sourceBuilder().from(from);
return this;
}
/**
* The number of search hits to return. Defaults to <tt>10</tt>.
*/
public SearchRequestBuilder setSize(int size) {
sourceBuilder().size(size);
return this;
}
/**
* Should each {@link org.elasticsearch.search.SearchHit} be returned with an
* explanation of the hit (ranking).
*/
public SearchRequestBuilder setExplain(boolean explain) {
sourceBuilder().explain(explain);
return this;
}
/**
* Should each {@link org.elasticsearch.search.SearchHit} be returned with its
* version.
*/
public SearchRequestBuilder setVersion(boolean version) {
sourceBuilder().version(version);
return this;
}
/**
* Sets the boost a specific index will receive when the query is executed against it.
*
* @param index The index to apply the boost against
* @param indexBoost The boost to apply to the index
*/
public SearchRequestBuilder addIndexBoost(String index, float indexBoost) {
sourceBuilder().indexBoost(index, indexBoost);
return this;
}
/**
* The stats groups this request will be aggregated under.
*/
public SearchRequestBuilder setStats(String... statsGroups) {
sourceBuilder().stats(statsGroups);
return this;
}
/**
* Sets no fields to be loaded, resulting in only the id and type being returned per hit.
*/
public SearchRequestBuilder setNoFields() {
sourceBuilder().noFields();
return this;
}
/**
* Indicates whether the response should contain the stored _source for every hit
*
* @param fetch whether the stored _source should be returned for every hit
* @return this builder for chaining
*/
public SearchRequestBuilder setFetchSource(boolean fetch) {
sourceBuilder().fetchSource(fetch);
return this;
}
/**
* Indicate that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @param include An optional include (optionally wildcarded) pattern to filter the returned _source
* @param exclude An optional exclude (optionally wildcarded) pattern to filter the returned _source
*/
public SearchRequestBuilder setFetchSource(@Nullable String include, @Nullable String exclude) {
sourceBuilder().fetchSource(include, exclude);
return this;
}
/**
* Indicate that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @param includes An optional list of include (optionally wildcarded) pattern to filter the returned _source
* @param excludes An optional list of exclude (optionally wildcarded) pattern to filter the returned _source
*/
public SearchRequestBuilder setFetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
sourceBuilder().fetchSource(includes, excludes);
return this;
}
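// Illustrative sketch (added comment, not original code): source filtering with wildcard
// include/exclude patterns; the index and field names are hypothetical.
//
//   client.prepareSearch("my_index")
//           .setFetchSource(new String[]{"user.*", "title"}, new String[]{"user.password"})
//           .execute().actionGet();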
/**
* Adds a field to load and return (note, it must be stored) as part of the search request.
* If none are specified, the source of the document will be returned.
*/
public SearchRequestBuilder addField(String field) {
sourceBuilder().field(field);
return this;
}
/**
* Adds a field data based field to load and return. The field does not have to be stored,
* but it is recommended to use non-analyzed or numeric fields.
*
* @param name The field to get from the field data cache
*/
public SearchRequestBuilder addFieldDataField(String name) {
sourceBuilder().fieldDataField(name);
return this;
}
/**
* Adds a script based field to load and return. The field does not have to be stored,
* but it is recommended to use non-analyzed or numeric fields.
*
* @param name The name that will represent this value in the return hit
* @param script The script to use
*/
public SearchRequestBuilder addScriptField(String name, String script) {
sourceBuilder().scriptField(name, script);
return this;
}
/**
* Adds a script based field to load and return. The field does not have to be stored,
* but it is recommended to use non-analyzed or numeric fields.
*
* @param name The name that will represent this value in the return hit
* @param script The script to use
* @param params Parameters that the script can use.
*/
public SearchRequestBuilder addScriptField(String name, String script, Map<String, Object> params) {
sourceBuilder().scriptField(name, script, params);
return this;
}
/**
* Adds a partial field based on _source, with an "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @deprecated since 1.0.0
* use {@link org.elasticsearch.action.search.SearchRequestBuilder#setFetchSource(String, String)} instead
*
* @param name The name of the field
* @param include An optional include (optionally wildcarded) pattern from _source
* @param exclude An optional exclude (optionally wildcarded) pattern from _source
*/
@Deprecated
public SearchRequestBuilder addPartialField(String name, @Nullable String include, @Nullable String exclude) {
sourceBuilder().partialField(name, include, exclude);
return this;
}
/**
* Adds a partial field based on _source, with an "includes" and/or "excludes set which can include simple wildcard
* elements.
*
* @deprecated since 1.0.0
* use {@link org.elasticsearch.action.search.SearchRequestBuilder#setFetchSource(String[], String[])} instead
*
* @param name The name of the field
* @param includes An optional list of includes (optionally wildcarded) patterns from _source
* @param excludes An optional list of excludes (optionally wildcarded) patterns from _source
*/
@Deprecated
public SearchRequestBuilder addPartialField(String name, @Nullable String[] includes, @Nullable String[] excludes) {
sourceBuilder().partialField(name, includes, excludes);
return this;
}
/**
* Adds a script based field to load and return. The field does not have to be stored,
* but it is recommended to use non-analyzed or numeric fields.
*
* @param name The name that will represent this value in the return hit
* @param lang The language of the script
* @param script The script to use
* @param params Parameters that the script can use (can be <tt>null</tt>).
*/
public SearchRequestBuilder addScriptField(String name, String lang, String script, Map<String, Object> params) {
sourceBuilder().scriptField(name, lang, script, params);
return this;
}
/**
* Adds a sort against the given field name and the sort ordering.
*
* @param field The name of the field
* @param order The sort ordering
*/
public SearchRequestBuilder addSort(String field, SortOrder order) {
sourceBuilder().sort(field, order);
return this;
}
/**
* Adds a generic sort builder.
*
* @see org.elasticsearch.search.sort.SortBuilders
*/
public SearchRequestBuilder addSort(SortBuilder sort) {
sourceBuilder().sort(sort);
return this;
}
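// Illustrative sketch (added comment, not original code): combining a field sort with a
// generic SortBuilder; the index and field names are hypothetical.
//
//   client.prepareSearch("my_index")
//           .addSort("timestamp", SortOrder.DESC)
//           .addSort(SortBuilders.scoreSort())
//           .execute().actionGet();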
/**
* Applies when sorting, and controls if scores will be tracked as well. Defaults to
* <tt>false</tt>.
*/
public SearchRequestBuilder setTrackScores(boolean trackScores) {
sourceBuilder().trackScores(trackScores);
return this;
}
/**
* Adds the fields to load and return as part of the search request. If none are specified,
* the source of the document will be returned.
*/
public SearchRequestBuilder addFields(String... fields) {
sourceBuilder().fields(fields);
return this;
}
/**
* Adds a facet to the search operation.
*/
public SearchRequestBuilder addFacet(FacetBuilder facet) {
sourceBuilder().facet(facet);
return this;
}
/**
* Sets a raw (xcontent) binary representation of facets to use.
*/
public SearchRequestBuilder setFacets(BytesReference facets) {
sourceBuilder().facets(facets);
return this;
}
/**
* Sets a raw (xcontent) binary representation of facets to use.
*/
public SearchRequestBuilder setFacets(byte[] facets) {
sourceBuilder().facets(facets);
return this;
}
/**
* Sets a raw (xcontent) binary representation of facets to use.
*/
public SearchRequestBuilder setFacets(byte[] facets, int facetsOffset, int facetsLength) {
sourceBuilder().facets(facets, facetsOffset, facetsLength);
return this;
}
/**
* Sets a raw (xcontent) binary representation of facets to use.
*/
public SearchRequestBuilder setFacets(XContentBuilder facets) {
sourceBuilder().facets(facets);
return this;
}
/**
* Sets a raw (xcontent) binary representation of facets to use.
*/
public SearchRequestBuilder setFacets(Map facets) {
sourceBuilder().facets(facets);
return this;
}
/**
* Adds an aggregation to the search operation.
*/
public SearchRequestBuilder addAggregation(AbstractAggregationBuilder aggregation) {
sourceBuilder().aggregation(aggregation);
return this;
}
/**
* Sets a raw (xcontent) binary representation of aggregations to use.
*/
public SearchRequestBuilder setAggregations(BytesReference aggregations) {
sourceBuilder().aggregations(aggregations);
return this;
}
/**
* Sets a raw (xcontent) binary representation of aggregations to use.
*/
public SearchRequestBuilder setAggregations(byte[] aggregations) {
sourceBuilder().aggregations(aggregations);
return this;
}
/**
* Sets a raw (xcontent) binary representation of aggregations to use.
*/
public SearchRequestBuilder setAggregations(byte[] aggregations, int aggregationsOffset, int aggregationsLength) {
sourceBuilder().aggregations(aggregations, aggregationsOffset, aggregationsLength);
return this;
}
/**
* Sets a raw (xcontent) binary representation of aggregations to use.
*/
public SearchRequestBuilder setAggregations(XContentBuilder aggregations) {
sourceBuilder().aggregations(aggregations);
return this;
}
/**
* Sets a raw (xcontent) binary representation of aggregations to use.
*/
public SearchRequestBuilder setAggregations(Map aggregations) {
sourceBuilder().aggregations(aggregations);
return this;
}
/**
* Adds a field to be highlighted with default fragment size of 100 characters, and
* default number of fragments of 5.
*
* @param name The field to highlight
*/
public SearchRequestBuilder addHighlightedField(String name) {
highlightBuilder().field(name);
return this;
}
/**
* Adds a field to be highlighted with a provided fragment size (in characters), and
* default number of fragments of 5.
*
* @param name The field to highlight
* @param fragmentSize The size of a fragment in characters
*/
public SearchRequestBuilder addHighlightedField(String name, int fragmentSize) {
highlightBuilder().field(name, fragmentSize);
return this;
}
/**
* Adds a field to be highlighted with a provided fragment size (in characters), and
* a provided (maximum) number of fragments.
*
* @param name The field to highlight
* @param fragmentSize The size of a fragment in characters
* @param numberOfFragments The (maximum) number of fragments
*/
public SearchRequestBuilder addHighlightedField(String name, int fragmentSize, int numberOfFragments) {
highlightBuilder().field(name, fragmentSize, numberOfFragments);
return this;
}
/**
* Adds a field to be highlighted with a provided fragment size (in characters),
* a provided (maximum) number of fragments and an offset for the highlight.
*
* @param name The field to highlight
* @param fragmentSize The size of a fragment in characters
* @param numberOfFragments The (maximum) number of fragments
* @param fragmentOffset The offset for the highlight
*/
public SearchRequestBuilder addHighlightedField(String name, int fragmentSize, int numberOfFragments,
int fragmentOffset) {
highlightBuilder().field(name, fragmentSize, numberOfFragments, fragmentOffset);
return this;
}
/**
* Adds a highlighted field.
*/
public SearchRequestBuilder addHighlightedField(HighlightBuilder.Field field) {
highlightBuilder().field(field);
return this;
}
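// Illustrative sketch (added comment, not original code): a minimal highlighting setup
// built from the field methods above; the index and field names are hypothetical.
//
//   client.prepareSearch("my_index")
//           .setQuery(QueryBuilders.matchQuery("body", "distributed search"))
//           .addHighlightedField("body", 150, 3)
//           .setHighlighterPreTags("<em>")
//           .setHighlighterPostTags("</em>")
//           .execute().actionGet();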
/**
* Sets a tag scheme that encapsulates built-in pre and post tags. The allowed schemes
* are <tt>styled</tt> and <tt>default</tt>.
*
* @param schemaName The tag scheme name
*/
public SearchRequestBuilder setHighlighterTagsSchema(String schemaName) {
highlightBuilder().tagsSchema(schemaName);
return this;
}
/**
* Explicitly set the pre tags that will be used for highlighting.
*/
public SearchRequestBuilder setHighlighterPreTags(String... preTags) {
highlightBuilder().preTags(preTags);
return this;
}
/**
* Explicitly set the post tags that will be used for highlighting.
*/
public SearchRequestBuilder setHighlighterPostTags(String... postTags) {
highlightBuilder().postTags(postTags);
return this;
}
/**
* The order of fragments per field. By default, fragments are ordered by their position in the
* highlighted text. Can be set to <tt>score</tt>, in which case fragments are ordered
* by their score.
*/
public SearchRequestBuilder setHighlighterOrder(String order) {
highlightBuilder().order(order);
return this;
}
/**
* Sets the encoder to use for highlighting.
*/
public SearchRequestBuilder setHighlighterEncoder(String encoder) {
highlightBuilder().encoder(encoder);
return this;
}
/**
* Sets a query to be used for highlighting all fields instead of the search query.
*/
public SearchRequestBuilder setHighlighterQuery(QueryBuilder highlightQuery) {
highlightBuilder().highlightQuery(highlightQuery);
return this;
}
public SearchRequestBuilder setHighlighterRequireFieldMatch(boolean requireFieldMatch) {
highlightBuilder().requireFieldMatch(requireFieldMatch);
return this;
}
/**
* The highlighter type to use.
*/
public SearchRequestBuilder setHighlighterType(String type) {
highlightBuilder().highlighterType(type);
return this;
}
/**
* Sets the size of the fragment to return from the beginning of the field if there are no matches to
* highlight and the field doesn't also define noMatchSize.
* @param noMatchSize integer to set or null to leave out of request. default is null.
* @return this builder for chaining
*/
public SearchRequestBuilder setHighlighterNoMatchSize(Integer noMatchSize) {
highlightBuilder().noMatchSize(noMatchSize);
return this;
}
public SearchRequestBuilder setHighlighterOptions(Map<String, Object> options) {
highlightBuilder().options(options);
return this;
}
/**
* Delegates to {@link org.elasticsearch.search.suggest.SuggestBuilder#setText(String)}.
*/
public SearchRequestBuilder setSuggestText(String globalText) {
suggestBuilder().setText(globalText);
return this;
}
/**
* Delegates to {@link org.elasticsearch.search.suggest.SuggestBuilder#addSuggestion(org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder)}.
*/
public SearchRequestBuilder addSuggestion(SuggestBuilder.SuggestionBuilder<?> suggestion) {
suggestBuilder().addSuggestion(suggestion);
return this;
}
/**
* Clears all rescorers on the builder and sets the first one. To use multiple rescore windows use
* {@link #addRescorer(org.elasticsearch.search.rescore.RescoreBuilder.Rescorer, int)}.
* @param rescorer rescorer configuration
* @return this for chaining
*/
public SearchRequestBuilder setRescorer(RescoreBuilder.Rescorer rescorer) {
sourceBuilder().clearRescorers();
return addRescorer(rescorer);
}
/**
* Clears all rescorers on the builder and sets the first one. To use multiple rescore windows use
* {@link #addRescorer(org.elasticsearch.search.rescore.RescoreBuilder.Rescorer, int)}.
* @param rescorer rescorer configuration
* @param window rescore window
* @return this for chaining
*/
public SearchRequestBuilder setRescorer(RescoreBuilder.Rescorer rescorer, int window) {
sourceBuilder().clearRescorers();
return addRescorer(rescorer, window);
}
/**
* Adds a new rescorer.
* @param rescorer rescorer configuration
* @return this for chaining
*/
public SearchRequestBuilder addRescorer(RescoreBuilder.Rescorer rescorer) {
sourceBuilder().addRescorer(new RescoreBuilder().rescorer(rescorer));
return this;
}
/**
* Adds a new rescorer.
* @param rescorer rescorer configuration
* @param window rescore window
* @return this for chaining
*/
public SearchRequestBuilder addRescorer(RescoreBuilder.Rescorer rescorer, int window) {
sourceBuilder().addRescorer(new RescoreBuilder().rescorer(rescorer).windowSize(window));
return this;
}
/**
* Clears all rescorers from the builder.
* @return this for chaining
*/
public SearchRequestBuilder clearRescorers() {
sourceBuilder().clearRescorers();
return this;
}
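// Illustrative sketch (added comment, not original code): rescoring the top hits of each
// shard with a phrase query; RescoreBuilder.queryRescorer is assumed to be the static
// factory for a query rescorer in this codebase.
//
//   client.prepareSearch("my_index")
//           .setQuery(QueryBuilders.matchQuery("body", "quick brown fox"))
//           .setRescorer(RescoreBuilder.queryRescorer(
//                   QueryBuilders.matchPhraseQuery("body", "quick brown fox")), 50)
//           .execute().actionGet();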
/**
* Sets the rescore window for all rescorers that don't specify a window when added.
* @param window rescore window
* @return this for chaining
*/
public SearchRequestBuilder setRescoreWindow(int window) {
sourceBuilder().defaultRescoreWindowSize(window);
return this;
}
/**
* Sets the source of the request as a json string. Note, setting anything other
* than the search type will cause this source to be overridden, consider using
* {@link #setExtraSource(String)}.
*/
public SearchRequestBuilder setSource(String source) {
request.source(source);
return this;
}
/**
* Sets the source of the request as a json string. Allows to set other parameters.
*/
public SearchRequestBuilder setExtraSource(String source) {
request.extraSource(source);
return this;
}
/**
* Sets the source of the request as a json string. Note, setting anything other
* than the search type will cause this source to be overridden, consider using
* {@link #setExtraSource(BytesReference)}.
*/
public SearchRequestBuilder setSource(BytesReference source) {
request.source(source, false);
return this;
}
/**
* Sets the source of the request as a json string. Note, setting anything other
* than the search type will cause this source to be overridden, consider using
* {@link #setExtraSource(BytesReference)}.
*/
public SearchRequestBuilder setSource(BytesReference source, boolean unsafe) {
request.source(source, unsafe);
return this;
}
/**
* Sets the source of the request as a json string. Note, setting anything other
* than the search type will cause this source to be overridden, consider using
* {@link #setExtraSource(byte[])}.
*/
public SearchRequestBuilder setSource(byte[] source) {
request.source(source);
return this;
}
/**
* Sets the source of the request as a json string. Allows to set other parameters.
*/
public SearchRequestBuilder setExtraSource(BytesReference source) {
request.extraSource(source, false);
return this;
}
/**
* Sets the source of the request as a json string. Allows to set other parameters.
*/
public SearchRequestBuilder setExtraSource(BytesReference source, boolean unsafe) {
request.extraSource(source, unsafe);
return this;
}
/**
* Sets the source of the request as a json string. Allows to set other parameters.
*/
public SearchRequestBuilder setExtraSource(byte[] source) {
request.extraSource(source);
return this;
}
/**
* Sets the source of the request as a json string. Note, setting anything other
* than the search type will cause this source to be overridden, consider using
* {@link #setExtraSource(byte[])}.
*/
public SearchRequestBuilder setSource(byte[] source, int offset, int length) {
request.source(source, offset, length);
return this;
}
/**
* Sets the source of the request as a json string. Allows to set other parameters.
*/
public SearchRequestBuilder setExtraSource(byte[] source, int offset, int length) {
request.extraSource(source, offset, length);
return this;
}
/**
* Sets the source of the request from an XContentBuilder. Note, setting anything other
* than the search type will cause this source to be overridden, consider using
* {@link #setExtraSource(XContentBuilder)}.
*/
public SearchRequestBuilder setSource(XContentBuilder builder) {
request.source(builder);
return this;
}
/**
* Sets the source of the request as a json string. Allows to set other parameters.
*/
public SearchRequestBuilder setExtraSource(XContentBuilder builder) {
request.extraSource(builder);
return this;
}
/**
* Sets the source of the request as a map. Note, setting anything other than the
* search type will cause this source to be overridden, consider using
* {@link #setExtraSource(java.util.Map)}.
*/
public SearchRequestBuilder setSource(Map source) {
request.source(source);
return this;
}
public SearchRequestBuilder setExtraSource(Map source) {
request.extraSource(source);
return this;
}
/**
* Sets the source builder to be used with this request. Note, any operations done
* on this request builder before this call are discarded, as the given source builder replaces
* what has been built up until this point.
*/
public SearchRequestBuilder internalBuilder(SearchSourceBuilder sourceBuilder) {
this.sourceBuilder = sourceBuilder;
return this;
}
/**
* Returns the internal search source builder used to construct the request.
*/
public SearchSourceBuilder internalBuilder() {
return sourceBuilder();
}
@Override
public String toString() {
return internalBuilder().toString();
}
@Override
public SearchRequest request() {
if (sourceBuilder != null) {
request.source(sourceBuilder());
}
return request;
}
@Override
protected void doExecute(ActionListener<SearchResponse> listener) {
if (sourceBuilder != null) {
request.source(sourceBuilder());
}
((Client) client).search(request, listener);
}
private SearchSourceBuilder sourceBuilder() {
if (sourceBuilder == null) {
sourceBuilder = new SearchSourceBuilder();
}
return sourceBuilder;
}
private HighlightBuilder highlightBuilder() {
return sourceBuilder().highlighter();
}
private SuggestBuilder suggestBuilder() {
return sourceBuilder().suggest();
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_search_SearchRequestBuilder.java
|
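A hedged end-to-end sketch (an addition, not a dataset record): how the builder above is commonly driven from a connected Client; the index, type and field names are placeholders.

SearchResponse response = client.prepareSearch("my_index")
        .setTypes("my_type")
        .setQuery(QueryBuilders.termQuery("status", "active"))
        .setPostFilter(FilterBuilders.rangeFilter("age").from(18).to(65))
        .setFrom(0).setSize(20)
        .addSort("created", SortOrder.DESC)
        .execute().actionGet();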
118 |
public class OSystemVariableResolver implements OVariableParserListener {
public static final String VAR_BEGIN = "${";
public static final String VAR_END = "}";
private static OSystemVariableResolver instance = new OSystemVariableResolver();
public static String resolveSystemVariables(final String iPath) {
if (iPath == null)
return null;
return (String) OVariableParser.resolveVariables(iPath, VAR_BEGIN, VAR_END, instance);
}
public String resolve(final String variable) {
String resolved = System.getProperty(variable);
if (resolved == null)
// TRY TO FIND THE VARIABLE BETWEEN SYSTEM'S ENVIRONMENT PROPERTIES
resolved = System.getenv(variable);
return resolved;
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_parser_OSystemVariableResolver.java
|
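A minimal sketch (an addition, not a dataset record): "java.io.tmpdir" is a standard JVM system property, so it should resolve; a variable found in neither the system properties nor the environment resolves to null.

String path = OSystemVariableResolver.resolveSystemVariables("${java.io.tmpdir}/databases");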
257 |
public static enum Type {
NONE,
MERGE,
ALL;
public static Type fromString(String type) throws ElasticsearchIllegalArgumentException {
if ("none".equalsIgnoreCase(type)) {
return NONE;
} else if ("merge".equalsIgnoreCase(type)) {
return MERGE;
} else if ("all".equalsIgnoreCase(type)) {
return ALL;
}
throw new ElasticsearchIllegalArgumentException("rate limiting type [" + type + "] not valid, can be one of [all|merge|none]");
}
}
| 0true
|
src_main_java_org_apache_lucene_store_StoreRateLimiting.java
|
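A minimal sketch (an addition, not a dataset record): parsing is case-insensitive, and any value outside all/merge/none throws ElasticsearchIllegalArgumentException.

StoreRateLimiting.Type type = StoreRateLimiting.Type.fromString("Merge"); // returns MERGE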
91 |
public interface ObjectToLong<A> { long apply(A a); }
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
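A minimal sketch (an addition, not a dataset record): the single abstract method makes this a pre-Java-8 functional interface, implemented here with an anonymous class.

ObjectToLong<String> length = new ObjectToLong<String>() {
    public long apply(String s) { return s.length(); }
};
long n = length.apply("hello"); // 5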
91 |
public class ReadTransactionLogWritingTest
{
@Test
public void shouldNotWriteAnyLogCommandInPureReadTransaction() throws Exception
{
// WHEN
executeTransaction( getRelationships() );
executeTransaction( getProperties() );
executeTransaction( getById() );
executeTransaction( getNodesFromRelationship() );
// THEN
int actualCount = countLogEntries();
assertEquals( "There were " + (actualCount-logEntriesWrittenBeforeReadOperations) +
" log entries written during one or more pure read transactions",
logEntriesWrittenBeforeReadOperations, actualCount );
}
public final @Rule DatabaseRule dbr = new ImpermanentDatabaseRule()
{
@Override
protected void configure( GraphDatabaseBuilder builder )
{
builder.setConfig( GraphDatabaseSettings.cache_type, "none" );
}
};
private final Label label = label( "Test" );
private Node node;
private Relationship relationship;
private int logEntriesWrittenBeforeReadOperations;
@Before
public void createDataset()
{
GraphDatabaseAPI db = dbr.getGraphDatabaseAPI();
long nodeId, relationshipId;
try ( Transaction tx = db.beginTx() )
{
node = db.createNode( label );
node.setProperty( "short", 123 );
node.setProperty( "long", longString( 300 ) );
nodeId = node.getId();
relationship = node.createRelationshipTo( db.createNode(), MyRelTypes.TEST );
relationship.setProperty( "short", 123 );
relationship.setProperty( "long", longString( 300 ) );
relationshipId = relationship.getId();
tx.success();
}
db.getDependencyResolver().resolveDependency( XaDataSourceManager.class ).rotateLogicalLogs();
logEntriesWrittenBeforeReadOperations = countLogEntries();
}
private int countLogEntries()
{
GraphDatabaseAPI db = dbr.getGraphDatabaseAPI();
FileSystemAbstraction fs = db.getDependencyResolver().resolveDependency( FileSystemAbstraction.class );
File storeDir = new File( db.getStoreDir() );
try
{
CountingLogHook<LogEntry> logicalLogCounter = new CountingLogHook<>();
filterNeostoreLogicalLog( fs, storeDir.getPath(), logicalLogCounter );
// Not so nice, but there's no other way. We cannot look at the file since log records in the txlog
// are buffered and they get flushed for 2PC (at least up until the commit record).
// If we're going for a restart instead then we can count them but they will however disappear
// for the next session.
int txLogRecordCount = db.getDependencyResolver()
.resolveDependency( TxManager.class ).getTxLog().getRecordCount();
return logicalLogCounter.getCount() + txLogRecordCount;
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
}
private String longString( int length )
{
char[] characters = new char[length];
for ( int i = 0; i < length; i++ )
{
characters[i] = (char) ('a' + i%10);
}
return new String( characters );
}
private void executeTransaction( Runnable runnable )
{
executeTransaction( runnable, true );
executeTransaction( runnable, false );
}
private void executeTransaction( Runnable runnable, boolean success )
{
try ( Transaction tx = dbr.getGraphDatabaseService().beginTx() )
{
runnable.run();
if ( success )
{
tx.success();
}
}
}
private Runnable getRelationships()
{
return new Runnable()
{
@Override
public void run()
{
assertEquals( 1, count( node.getRelationships() ) );
}
};
}
private Runnable getNodesFromRelationship()
{
return new Runnable()
{
@Override
public void run()
{
relationship.getEndNode();
relationship.getStartNode();
relationship.getNodes();
relationship.getOtherNode( node );
}
};
}
private Runnable getById()
{
return new Runnable()
{
@Override
public void run()
{
dbr.getGraphDatabaseService().getNodeById( node.getId() );
dbr.getGraphDatabaseService().getRelationshipById( relationship.getId() );
}
};
}
private Runnable getProperties()
{
return new Runnable()
{
@Override
public void run()
{
getAllProperties( node );
getAllProperties( relationship );
}
private void getAllProperties( PropertyContainer entity )
{
for ( String key : entity.getPropertyKeys() )
{
entity.getProperty( key );
}
}
};
}
}
| 0true
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_ReadTransactionLogWritingTest.java
|
3,118 |
public class SegmentsStats implements Streamable, ToXContent {
private long count;
private long memoryInBytes;
public SegmentsStats() {
}
public void add(long count, long memoryInBytes) {
this.count += count;
this.memoryInBytes += memoryInBytes;
}
public void add(SegmentsStats mergeStats) {
if (mergeStats == null) {
return;
}
this.count += mergeStats.count;
this.memoryInBytes += mergeStats.memoryInBytes;
}
/**
* The segments count.
*/
public long getCount() {
return this.count;
}
/**
* Estimation of the memory used by the segments.
*/
public long getMemoryInBytes() {
return this.memoryInBytes;
}
public ByteSizeValue getMemory() {
return new ByteSizeValue(memoryInBytes);
}
public static SegmentsStats readSegmentsStats(StreamInput in) throws IOException {
SegmentsStats stats = new SegmentsStats();
stats.readFrom(in);
return stats;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.SEGMENTS);
builder.field(Fields.COUNT, count);
builder.byteSizeField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, memoryInBytes);
builder.endObject();
return builder;
}
static final class Fields {
static final XContentBuilderString SEGMENTS = new XContentBuilderString("segments");
static final XContentBuilderString COUNT = new XContentBuilderString("count");
static final XContentBuilderString MEMORY = new XContentBuilderString("memory");
static final XContentBuilderString MEMORY_IN_BYTES = new XContentBuilderString("memory_in_bytes");
}
@Override
public void readFrom(StreamInput in) throws IOException {
count = in.readVLong();
memoryInBytes = in.readLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(count);
out.writeLong(memoryInBytes);
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_engine_SegmentsStats.java
|
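A hedged round-trip sketch (an addition, not a dataset record): BytesStreamOutput and BytesStreamInput are assumed to be the usual in-memory stream pair in this codebase; exact constructors may vary by version.

SegmentsStats stats = new SegmentsStats();
stats.add(5, 1024L);
BytesStreamOutput out = new BytesStreamOutput();
stats.writeTo(out);
SegmentsStats copy = SegmentsStats.readSegmentsStats(new BytesStreamInput(out.bytes()));
// copy.getCount() == 5 and copy.getMemoryInBytes() == 1024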
2,107 |
public class CLibrary {
private static ESLogger logger = Loggers.getLogger(CLibrary.class);
public static final int MCL_CURRENT = 1;
public static final int MCL_FUTURE = 2;
public static final int ENOMEM = 12;
static {
try {
Native.register("c");
} catch (NoClassDefFoundError e) {
logger.warn("jna not found. native methods (mlockall) will be disabled.");
} catch (UnsatisfiedLinkError e) {
logger.debug("unable to link C library. native methods (mlockall) will be disabled.");
}
}
public static native int mlockall(int flags);
public static native int munlockall();
private CLibrary() {
}
}
| 1no label
|
src_main_java_org_elasticsearch_common_jna_CLibrary.java
|
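A hedged caller-side sketch (an addition, not a dataset record): mirrors how the native call is typically guarded; by POSIX convention a non-zero return signals failure (e.g. ENOMEM).

try {
    if (CLibrary.mlockall(CLibrary.MCL_CURRENT) != 0) {
        // locking failed; memory may still be swapped out
    }
} catch (UnsatisfiedLinkError e) {
    // native registration failed earlier, so mlockall is unavailable
}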
160 |
public interface StructuredContentService extends SandBoxItemListener {
/**
* Returns the StructuredContent item associated with the passed in id.
*
* @param contentId - The id of the content item.
* @return The associated structured content item.
*/
public StructuredContent findStructuredContentById(Long contentId);
/**
* Returns the <code>StructuredContentType</code> associated with the passed in id.
*
* @param id - The id of the content type.
* @return The associated <code>StructuredContentType</code>.
*/
public StructuredContentType findStructuredContentTypeById(Long id);
/**
* Returns the <code>StructuredContentType</code> associated with the passed in
* String value.
*
* @param name - The name of the content type.
* @return The associated <code>StructuredContentType</code>.
*/
public StructuredContentType findStructuredContentTypeByName(String name);
/**
*
* @return a list of all <code>StructuredContentType</code>s
*/
public List<StructuredContentType> retrieveAllStructuredContentTypes();
/**
* Returns the fields associated with the passed in contentId.
* This is preferred over direct access from the ContentItem so that the
* two items can be cached distinctly.
*
* @param contentId - The id of the content.
* @return Map of fields for this content id
*/
public Map<String,StructuredContentField> findFieldsByContentId(Long contentId);
/**
* This method is intended to be called solely from the CMS admin. Similar methods
* exist that are intended for other clients (e.g. lookupStructuredContentItemsBy....
* <br>
* Returns content items for the passed in sandbox that match the passed in criteria.
* The criteria acts as a where clause to be used in the search for content items.
* Implementations should automatically add criteria such that no archived items
* are returned from this method.
* <br>
* The SandBox parameter impacts the results as follows. If a <code>SandBoxType</code> of
* production is passed in, only those items in that SandBox are returned.
* <br>
* If a non-production SandBox is passed in, then the method will return the items associated
* with the related production SandBox and then merge in the results of the passed in SandBox.
*
* @param sandbox - the sandbox to find structured content items (null indicates items that are in production for
* sites that are single tenant).
* @param criteria - the criteria used to search for content
* @return the matching content items
*/
public List<StructuredContent> findContentItems(SandBox sandbox, Criteria criteria);
/**
* Finds all content items regardless of the {@link Sandbox} they are a member of
* @return all content items
*/
public List<StructuredContent> findAllContentItems();
/**
* Follows the same rules as {@link #findContentItems(org.broadleafcommerce.common.sandbox.domain.SandBox, org.hibernate.Criteria) findContentItems}.
*
* @return the count of items in this sandbox that match the passed in Criteria
*/
public Long countContentItems(SandBox sandBox, Criteria c);
/**
* This method is intended to be called from within the CMS
* admin only.
*
* Adds the passed in contentItem to the DB.
*
* Creates a sandbox/site if one doesn't already exist.
*/
public StructuredContent addStructuredContent(StructuredContent content, SandBox destinationSandbox);
/**
* This method is intended to be called from within the CMS
* admin only.
*
* Updates the structuredContent according to the following rules:
*
* 1. If sandbox has changed from null to a value
* This means that the user is editing an item in production and
* the edit is taking place in a sandbox.
*
* Clone the item and add it to the new sandbox and set the cloned
* item's originalItemId to the id of the item being updated.
*
* 2. If the sandbox has changed from one value to another
* This means that the user is moving the item from one sandbox
* to another.
*
* Update the siteId for the item to the one associated with the
* new sandbox
*
* 3. If the sandbox has changed from a value to null
* This means that the item is moving from the sandbox to production.
*
* If the item has an originalItemId, then update that item by
* setting its archived flag to true.
*
* Then, update the siteId of the item being updated to be the
* siteId of the original item.
*
* 4. If the sandbox is the same then just update the item.
*/
public StructuredContent updateStructuredContent(StructuredContent content, SandBox sandbox);
/**
* Saves the given <b>type</b> and returns the merged instance
*/
public StructuredContentType saveStructuredContentType(StructuredContentType type);
/**
* If deleting an item where content.originalItemId != null
* then the item is deleted from the database.
*
* If the originalItemId is null, then this method marks
* the items as deleted within the passed in sandbox.
*
* @param content the content item to delete
* @param destinationSandbox the sandbox in which to mark the item deleted
*/
public void deleteStructuredContent(StructuredContent content, SandBox destinationSandbox);
/**
* This method returns content by type.
* <br>
* Returns active content items for the passed in sandbox that match the passed in type.
* <br>
* The SandBox parameter impacts the results as follows. If a <code>SandBoxType</code> of
* production is passed in, only those items in that SandBox are returned.
* <br>
* If a non-production SandBox is passed in, then the method will return the items associated
* with the related production SandBox and then merge in the results of the passed in SandBox.
* <br>
* The secure item is used in cases where the structured content item contains an image path that needs
* to be rewritten to use https.
*
* @param sandBox - the sandbox to find structured content items (null indicates items that are in production for
* sites that are single tenant).
* @param contentType - the type of content to return
* @param count - the max number of content items to return
* @param ruleDTOs - a Map of objects that will be used in MVEL processing.
* @param secure - set to true if the request is being served over https
* @return - The matching items
* @see org.broadleafcommerce.cms.web.structure.DisplayContentTag
*/
public List<StructuredContentDTO> lookupStructuredContentItemsByType(SandBox sandBox, StructuredContentType contentType, Locale locale, Integer count, Map<String,Object> ruleDTOs, boolean secure);
/**
* This method returns content by name only.
* <br>
* Returns active content items for the passed in sandbox that match the passed in type.
* <br>
* The SandBox parameter impacts the results as follows. If a <code>SandBoxType</code> of
* production is passed in, only those items in that SandBox are returned.
* <br>
* If a non-production SandBox is passed in, then the method will return the items associated
* with the related production SandBox and then merge in the results of the passed in SandBox.
*
* @param sandBox - the sandbox to find structured content items (null indicates items that are in production for
* sites that are single tenant).
* @param contentName - the name of content to return
* @param count - the max number of content items to return
* @param ruleDTOs - a Map of objects that will be used in MVEL processing.
* @param secure - set to true if the request is being served over https
* @return - The matching items
* @see org.broadleafcommerce.cms.web.structure.DisplayContentTag
*/
public List<StructuredContentDTO> lookupStructuredContentItemsByName(SandBox sandBox, String contentName, Locale locale, Integer count, Map<String,Object> ruleDTOs, boolean secure);
/**
* This method returns content by name and type.
* <br>
* Returns active content items for the passed in sandbox that match the passed in type.
* <br>
* The SandBox parameter impacts the results as follows. If a <code>SandBoxType</code> of
* production is passed in, only those items in that SandBox are returned.
* <br>
* If a non-production SandBox is passed in, then the method will return the items associated
* with the related production SandBox and then merge in the results of the passed in SandBox.
*
* @param sandBox - the sandbox to find structured content items (null indicates items that are in production for
* sites that are single tenant).
* @param contentType - the type of content to return
* @param contentName - the name of content to return
* @param count - the max number of content items to return
* @param ruleDTOs - a Map of objects that will be used in MVEL processing.
* @param secure - set to true if the request is being served over https
* @return - The matching items
* @see org.broadleafcommerce.cms.web.structure.DisplayContentTag
*/
public List<StructuredContentDTO> lookupStructuredContentItemsByName(SandBox sandBox, StructuredContentType contentType, String contentName, Locale locale, Integer count, Map<String,Object> ruleDTOs, boolean secure);
/**
* Removes the items from cache that match the passed in name and type keys.
* @param nameKey - key for a specific content item
* @param typeKey - key for a type of content item
*/
public void removeItemFromCache(String nameKey, String typeKey);
public boolean isAutomaticallyApproveAndPromoteStructuredContent();
public void setAutomaticallyApproveAndPromoteStructuredContent(boolean automaticallyApproveAndPromoteStructuredContent);
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_service_StructuredContentService.java
|
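A hedged caller-side sketch (an addition, not a dataset record): fetching a few production content items of a given type; the service reference, content type, locale and rule map are placeholders.

List<StructuredContentDTO> items = structuredContentService.lookupStructuredContentItemsByType(
        null,                 // null sandbox: production items for single-tenant sites
        bannerType,           // a StructuredContentType obtained elsewhere
        locale, 5, ruleDTOs, request.isSecure());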
2,890 |
public abstract static class AbstractPredicate implements IndexAwarePredicate, DataSerializable {
protected String attribute;
private transient volatile AttributeType attributeType;
protected AbstractPredicate() {
}
protected AbstractPredicate(String attribute) {
this.attribute = attribute;
}
protected Comparable convert(Map.Entry mapEntry, Comparable entryValue, Comparable attributeValue) {
if (attributeValue == null) {
return null;
}
if (attributeValue instanceof IndexImpl.NullObject) {
return IndexImpl.NULL;
}
AttributeType type = attributeType;
if (type == null) {
QueryableEntry queryableEntry = (QueryableEntry) mapEntry;
type = queryableEntry.getAttributeType(attribute);
attributeType = type;
}
if (type == AttributeType.ENUM) {
// if attribute type is enum, convert given attribute to enum string
return type.getConverter().convert(attributeValue);
} else {
// if given attribute value is already in expected type then there's no need for conversion.
if (entryValue != null && entryValue.getClass().isAssignableFrom(attributeValue.getClass())) {
return attributeValue;
} else if (type != null) {
return type.getConverter().convert(attributeValue);
} else {
throw new QueryException("Unknown attribute type: " + attributeValue.getClass());
}
}
}
@Override
public boolean isIndexed(QueryContext queryContext) {
return getIndex(queryContext) != null;
}
protected Index getIndex(QueryContext queryContext) {
return queryContext.getIndex(attribute);
}
protected Comparable readAttribute(Map.Entry entry) {
QueryableEntry queryableEntry = (QueryableEntry) entry;
Comparable val = queryableEntry.getAttribute(attribute);
if (val != null && val.getClass().isEnum()) {
val = val.toString();
}
return val;
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeUTF(attribute);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
attribute = in.readUTF();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_query_Predicates.java
|
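A hedged usage sketch (an addition, not a dataset record): AbstractPredicate is the base of the built-in predicates, normally reached through the Predicates factory methods; the map name and entry type are placeholders.

IMap<String, Employee> employees = hazelcastInstance.getMap("employees");
Collection<Employee> adults = employees.values(Predicates.greaterThan("age", 18));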
406 |
@Embeddable
public class ArchiveStatus implements Serializable {
@Column(name = "ARCHIVED")
@AdminPresentation(friendlyName = "archived", visibility = VisibilityEnum.HIDDEN_ALL, group = "ArchiveStatus")
protected Character archived = 'N';
public Character getArchived() {
return archived;
}
public void setArchived(Character archived) {
this.archived = archived;
}
}
| 1no label
|
common_src_main_java_org_broadleafcommerce_common_persistence_ArchiveStatus.java
|
279 |
public interface EmailServiceProducer {
public void send(@SuppressWarnings("rawtypes") final HashMap props);
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_email_service_message_EmailServiceProducer.java
|
220 |
public class OConstants {
public static final String ORIENT_VERSION = "1.6.2";
public static final String ORIENT_URL = "www.orientechnologies.com";
public static String getVersion() {
final StringBuilder buffer = new StringBuilder();
buffer.append(OConstants.ORIENT_VERSION);
final String buildNumber = System.getProperty("orientdb.build.number");
if (buildNumber != null) {
buffer.append(" (build ");
buffer.append(buildNumber);
buffer.append(")");
}
return buffer.toString();
}
public static String getBuildNumber() {
final String buildNumber = System.getProperty("orientdb.build.number");
if (buildNumber == null)
return null;
return buildNumber;
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_OConstants.java
|
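A minimal sketch (an addition, not a dataset record): the build number is read from a system property, so setting it changes what getVersion() reports.

System.setProperty("orientdb.build.number", "1234");
String version = OConstants.getVersion(); // "1.6.2 (build 1234)"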
23 |
public class ControlStructureCompletions {
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_ControlStructureCompletions.java
|
669 |
public class DeleteWarmerRequest extends AcknowledgedRequest<DeleteWarmerRequest> {
private String[] names = Strings.EMPTY_ARRAY;
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false);
private String[] indices = Strings.EMPTY_ARRAY;
DeleteWarmerRequest() {
}
/**
* Constructs a new delete warmer request for the specified name(s).
*
* @param names the names (or wildcard expressions) of the warmers to match; null to delete all.
*/
public DeleteWarmerRequest(String... names) {
names(names);
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (CollectionUtils.isEmpty(names)) {
validationException = addValidationError("warmer names are missing", validationException);
} else {
validationException = checkForEmptyString(validationException, names);
}
if (CollectionUtils.isEmpty(indices)) {
validationException = addValidationError("indices are missing", validationException);
} else {
validationException = checkForEmptyString(validationException, indices);
}
return validationException;
}
private ActionRequestValidationException checkForEmptyString(ActionRequestValidationException validationException, String[] strings) {
boolean containsEmptyString = false;
for (String string : strings) {
if (!Strings.hasText(string)) {
containsEmptyString = true;
}
}
if (containsEmptyString) {
validationException = addValidationError("types must not contain empty strings", validationException);
}
return validationException;
}
/**
* The name to delete.
*/
@Nullable
String[] names() {
return names;
}
/**
* The name (or wildcard expression) of the index warmer to delete, or null
* to delete all warmers.
*/
public DeleteWarmerRequest names(@Nullable String... names) {
this.names = names;
return this;
}
/**
* Sets the indices this delete warmer operation will execute on.
*/
public DeleteWarmerRequest indices(String... indices) {
this.indices = indices;
return this;
}
/**
* The indices the warmers will be deleted from.
*/
public String[] indices() {
return indices;
}
public IndicesOptions indicesOptions() {
return indicesOptions;
}
public DeleteWarmerRequest indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
names = in.readStringArray();
indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
readTimeout(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArrayNullable(names);
out.writeStringArrayNullable(indices);
indicesOptions.writeIndicesOptions(out);
writeTimeout(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_warmer_delete_DeleteWarmerRequest.java
|
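A minimal validation sketch (an addition, not a dataset record): both warmer names and indices must be present and non-blank for validate() to pass.

DeleteWarmerRequest ok = new DeleteWarmerRequest("my_warmer").indices("my_index");
// ok.validate() == null
DeleteWarmerRequest bad = new DeleteWarmerRequest(); // neither names nor indices set
// bad.validate() != null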
3,127 |
class FailEngineOnMergeFailure implements MergeSchedulerProvider.FailureListener {
@Override
public void onFailedMerge(MergePolicy.MergeException e) {
failEngine(e);
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_engine_internal_InternalEngine.java
|
359 |
future.andThen(new ExecutionCallback<Integer>() {
@Override
public void onResponse(Integer response) {
result[0] = response.intValue();
semaphore.release();
}
@Override
public void onFailure(Throwable t) {
semaphore.release();
}
});
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_mapreduce_ClientMapReduceTest.java
|
89 |
private enum Symbol {
LATTER, WS, QT, AP, SEP, EOF
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_console_ODFACommandStream.java
|
4,666 |
private final PercolatorType scoringPercolator = new PercolatorType() {
@Override
public byte id() {
return 0x05;
}
@Override
public ReduceResult reduce(List<PercolateShardResponse> shardResults) {
return matchPercolator.reduce(shardResults);
}
@Override
public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context) {
Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate");
try {
MatchAndScore matchAndScore = matchAndScore(logger, context, highlightPhase);
queryBasedPercolating(percolatorSearcher, context, matchAndScore);
List<BytesRef> matches = matchAndScore.matches();
List<Map<String, HighlightField>> hls = matchAndScore.hls();
float[] scores = matchAndScore.scores().toArray();
long count = matchAndScore.counter();
BytesRef[] finalMatches = matches.toArray(new BytesRef[matches.size()]);
return new PercolateShardResponse(finalMatches, hls, count, scores, context, request.index(), request.shardId());
} catch (Throwable e) {
logger.debug("failed to execute", e);
throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
} finally {
percolatorSearcher.release();
}
}
};
| 1no label
|
src_main_java_org_elasticsearch_percolator_PercolatorService.java
|
585 |
lifecycleService.runUnderLifecycleLock(new Runnable() {
public void run() {
lifecycleService.fireLifecycleEvent(MERGING);
final NodeEngineImpl nodeEngine = node.nodeEngine;
final Collection<SplitBrainHandlerService> services = nodeEngine.getServices(SplitBrainHandlerService.class);
final Collection<Runnable> tasks = new LinkedList<Runnable>();
for (SplitBrainHandlerService service : services) {
final Runnable runnable = service.prepareMergeRunnable();
if (runnable != null) {
tasks.add(runnable);
}
}
final Collection<ManagedService> managedServices = nodeEngine.getServices(ManagedService.class);
for (ManagedService service : managedServices) {
service.reset();
}
node.onRestart();
node.connectionManager.restart();
node.rejoin();
final Collection<Future> futures = new LinkedList<Future>();
for (Runnable task : tasks) {
Future f = nodeEngine.getExecutionService().submit("hz:system", task);
futures.add(f);
}
long callTimeout = node.groupProperties.OPERATION_CALL_TIMEOUT_MILLIS.getLong();
for (Future f : futures) {
try {
waitOnFutureInterruptible(f, callTimeout, TimeUnit.MILLISECONDS);
} catch (Exception e) {
logger.severe("While merging...", e);
}
}
lifecycleService.fireLifecycleEvent(MERGED);
}
});
| 1no label
|
hazelcast_src_main_java_com_hazelcast_cluster_ClusterServiceImpl.java
|
61 |
static final class ForwardingNode<K,V> extends Node<K,V> {
final Node<K,V>[] nextTable;
ForwardingNode(Node<K,V>[] tab) {
super(MOVED, null, null, null);
this.nextTable = tab;
}
Node<K,V> find(int h, Object k) {
// loop to avoid arbitrarily deep recursion on forwarding nodes
outer: for (Node<K,V>[] tab = nextTable;;) {
Node<K,V> e; int n;
if (k == null || tab == null || (n = tab.length) == 0 ||
(e = tabAt(tab, (n - 1) & h)) == null)
return null;
for (;;) {
int eh; K ek;
if ((eh = e.hash) == h &&
((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
if (eh < 0) {
if (e instanceof ForwardingNode) {
tab = ((ForwardingNode<K,V>)e).nextTable;
continue outer;
}
else
return e.find(h, k);
}
if ((e = e.next) == null)
return null;
}
}
}
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
617 |
public class IndicesStatsAction extends IndicesAction<IndicesStatsRequest, IndicesStatsResponse, IndicesStatsRequestBuilder> {
public static final IndicesStatsAction INSTANCE = new IndicesStatsAction();
public static final String NAME = "indices/stats";
private IndicesStatsAction() {
super(NAME);
}
@Override
public IndicesStatsResponse newResponse() {
return new IndicesStatsResponse();
}
@Override
public IndicesStatsRequestBuilder newRequestBuilder(IndicesAdminClient client) {
return new IndicesStatsRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_stats_IndicesStatsAction.java
|
5,127 |
public class AggregatorFactories {
public static final AggregatorFactories EMPTY = new Empty();
private final AggregatorFactory[] factories;
public static Builder builder() {
return new Builder();
}
private AggregatorFactories(AggregatorFactory[] factories) {
this.factories = factories;
}
private static Aggregator createAndRegisterContextAware(AggregationContext context, AggregatorFactory factory, Aggregator parent, long estimatedBucketsCount) {
final Aggregator aggregator = factory.create(context, parent, estimatedBucketsCount);
if (aggregator.shouldCollect()) {
context.registerReaderContextAware(aggregator);
}
return aggregator;
}
/**
* Create all aggregators so that they can be consumed with multiple buckets.
*/
public Aggregator[] createSubAggregators(Aggregator parent, final long estimatedBucketsCount) {
Aggregator[] aggregators = new Aggregator[count()];
for (int i = 0; i < factories.length; ++i) {
final AggregatorFactory factory = factories[i];
final Aggregator first = createAndRegisterContextAware(parent.context(), factory, parent, estimatedBucketsCount);
if (first.bucketAggregationMode() == BucketAggregationMode.MULTI_BUCKETS) {
// This aggregator already supports multiple bucket ordinals, can be used directly
aggregators[i] = first;
continue;
}
// the aggregator doesn't support multiple ordinals, let's wrap it so that it does.
aggregators[i] = new Aggregator(first.name(), BucketAggregationMode.MULTI_BUCKETS, AggregatorFactories.EMPTY, 1, first.context(), first.parent()) {
ObjectArray<Aggregator> aggregators;
{
aggregators = BigArrays.newObjectArray(estimatedBucketsCount, context.pageCacheRecycler());
aggregators.set(0, first);
for (long i = 1; i < estimatedBucketsCount; ++i) {
aggregators.set(i, createAndRegisterContextAware(parent.context(), factory, parent, estimatedBucketsCount));
}
}
@Override
public boolean shouldCollect() {
return first.shouldCollect();
}
@Override
protected void doPostCollection() {
for (long i = 0; i < aggregators.size(); ++i) {
final Aggregator aggregator = aggregators.get(i);
if (aggregator != null) {
aggregator.postCollection();
}
}
}
@Override
public void collect(int doc, long owningBucketOrdinal) throws IOException {
aggregators = BigArrays.grow(aggregators, owningBucketOrdinal + 1);
Aggregator aggregator = aggregators.get(owningBucketOrdinal);
if (aggregator == null) {
aggregator = createAndRegisterContextAware(parent.context(), factory, parent, estimatedBucketsCount);
aggregators.set(owningBucketOrdinal, aggregator);
}
aggregator.collect(doc, 0);
}
@Override
public void setNextReader(AtomicReaderContext reader) {
}
@Override
public InternalAggregation buildAggregation(long owningBucketOrdinal) {
return aggregators.get(owningBucketOrdinal).buildAggregation(0);
}
@Override
public InternalAggregation buildEmptyAggregation() {
return first.buildEmptyAggregation();
}
@Override
public void doRelease() {
Releasables.release(aggregators);
}
};
}
return aggregators;
}
public Aggregator[] createTopLevelAggregators(AggregationContext ctx) {
// These aggregators are going to be used with a single bucket ordinal, no need to wrap the PER_BUCKET ones
Aggregator[] aggregators = new Aggregator[factories.length];
for (int i = 0; i < factories.length; i++) {
aggregators[i] = createAndRegisterContextAware(ctx, factories[i], null, 0);
}
return aggregators;
}
public int count() {
return factories.length;
}
void setParent(AggregatorFactory parent) {
for (AggregatorFactory factory : factories) {
factory.parent = parent;
}
}
public void validate() {
for (AggregatorFactory factory : factories) {
factory.validate();
}
}
private final static class Empty extends AggregatorFactories {
private static final AggregatorFactory[] EMPTY_FACTORIES = new AggregatorFactory[0];
private static final Aggregator[] EMPTY_AGGREGATORS = new Aggregator[0];
private Empty() {
super(EMPTY_FACTORIES);
}
@Override
public Aggregator[] createSubAggregators(Aggregator parent, long estimatedBucketsCount) {
return EMPTY_AGGREGATORS;
}
@Override
public Aggregator[] createTopLevelAggregators(AggregationContext ctx) {
return EMPTY_AGGREGATORS;
}
}
public static class Builder {
private List<AggregatorFactory> factories = new ArrayList<AggregatorFactory>();
public Builder add(AggregatorFactory factory) {
factories.add(factory);
return this;
}
public AggregatorFactories build() {
if (factories.isEmpty()) {
return EMPTY;
}
return new AggregatorFactories(factories.toArray(new AggregatorFactory[factories.size()]));
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_AggregatorFactories.java
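The MULTI_BUCKETS wrapper above keeps one delegate aggregator per bucket ordinal in an ObjectArray that is grown on demand and filled lazily in collect(). A minimal sketch of that grow-on-demand delegate array, assuming a plain Java array in place of BigArrays/ObjectArray (the PerBucket name and Supplier-based factory are illustrative):

import java.util.Arrays;
import java.util.function.Supplier;

final class PerBucket<T> {
    private Object[] delegates = new Object[1];
    private final Supplier<T> factory;
    PerBucket(Supplier<T> factory) { this.factory = factory; }
    @SuppressWarnings("unchecked")
    T forBucket(long ordinal) {
        int i = (int) ordinal;
        if (i >= delegates.length) {
            // grow to the next power of two above the requested ordinal
            delegates = Arrays.copyOf(delegates, Integer.highestOneBit(i) << 1);
        }
        if (delegates[i] == null) {
            // create the delegate lazily, only for buckets actually seen
            delegates[i] = factory.get();
        }
        return (T) delegates[i];
    }
}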
|
325 |
public class NodesInfoRequest extends NodesOperationRequest<NodesInfoRequest> {
private boolean settings = true;
private boolean os = true;
private boolean process = true;
private boolean jvm = true;
private boolean threadPool = true;
private boolean network = true;
private boolean transport = true;
private boolean http = true;
private boolean plugin = true;
public NodesInfoRequest() {
}
/**
     * Get information from nodes based on the node ids specified. If none are passed, information
* for all nodes will be returned.
*/
public NodesInfoRequest(String... nodesIds) {
super(nodesIds);
}
/**
* Clears all info flags.
*/
public NodesInfoRequest clear() {
settings = false;
os = false;
process = false;
jvm = false;
threadPool = false;
network = false;
transport = false;
http = false;
plugin = false;
return this;
}
/**
* Sets to return all the data.
*/
public NodesInfoRequest all() {
settings = true;
os = true;
process = true;
jvm = true;
threadPool = true;
network = true;
transport = true;
http = true;
plugin = true;
return this;
}
/**
* Should the node settings be returned.
*/
public boolean settings() {
return this.settings;
}
/**
* Should the node settings be returned.
*/
public NodesInfoRequest settings(boolean settings) {
this.settings = settings;
return this;
}
/**
* Should the node OS be returned.
*/
public boolean os() {
return this.os;
}
/**
* Should the node OS be returned.
*/
public NodesInfoRequest os(boolean os) {
this.os = os;
return this;
}
/**
* Should the node Process be returned.
*/
public boolean process() {
return this.process;
}
/**
* Should the node Process be returned.
*/
public NodesInfoRequest process(boolean process) {
this.process = process;
return this;
}
/**
* Should the node JVM be returned.
*/
public boolean jvm() {
return this.jvm;
}
/**
* Should the node JVM be returned.
*/
public NodesInfoRequest jvm(boolean jvm) {
this.jvm = jvm;
return this;
}
/**
* Should the node Thread Pool info be returned.
*/
public boolean threadPool() {
return this.threadPool;
}
/**
* Should the node Thread Pool info be returned.
*/
public NodesInfoRequest threadPool(boolean threadPool) {
this.threadPool = threadPool;
return this;
}
/**
* Should the node Network be returned.
*/
public boolean network() {
return this.network;
}
/**
* Should the node Network be returned.
*/
public NodesInfoRequest network(boolean network) {
this.network = network;
return this;
}
/**
* Should the node Transport be returned.
*/
public boolean transport() {
return this.transport;
}
/**
* Should the node Transport be returned.
*/
public NodesInfoRequest transport(boolean transport) {
this.transport = transport;
return this;
}
/**
* Should the node HTTP be returned.
*/
public boolean http() {
return this.http;
}
/**
* Should the node HTTP be returned.
*/
public NodesInfoRequest http(boolean http) {
this.http = http;
return this;
}
/**
     * Should information about plugins be returned.
* @param plugin true if you want info
* @return The request
*/
public NodesInfoRequest plugin(boolean plugin) {
this.plugin = plugin;
return this;
}
/**
* @return true if information about plugins is requested
*/
public boolean plugin() {
return plugin;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
settings = in.readBoolean();
os = in.readBoolean();
process = in.readBoolean();
jvm = in.readBoolean();
threadPool = in.readBoolean();
network = in.readBoolean();
transport = in.readBoolean();
http = in.readBoolean();
plugin = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(settings);
out.writeBoolean(os);
out.writeBoolean(process);
out.writeBoolean(jvm);
out.writeBoolean(threadPool);
out.writeBoolean(network);
out.writeBoolean(transport);
out.writeBoolean(http);
out.writeBoolean(plugin);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_info_NodesInfoRequest.java
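A usage sketch against the fluent API shown above: clear all flags, then re-enable only what is needed (every setter returns this, so calls chain; executing the request is elided). Note also that readFrom and writeTo must keep the boolean fields in exactly the same order — a field added to one method but not the other shifts every later field on the wire.

NodesInfoRequest request = new NodesInfoRequest("node1", "node2")
        .clear()     // drop all defaults
        .jvm(true)   // then request only JVM...
        .os(true);   // ...and OS info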
|
241 |
service.submit(runnable, new ExecutionCallback() {
public void onResponse(Object response) {
responseLatch.countDown();
}
public void onFailure(Throwable t) {
}
});
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_executor_ClientExecutorServiceSubmitTest.java
|
747 |
@Test
public class SBTreeWAL extends SBTreeTest {
static {
OGlobalConfiguration.INDEX_TX_MODE.setValue("FULL");
}
private String buildDirectory;
private String actualStorageDir;
private String expectedStorageDir;
private OWriteAheadLog writeAheadLog;
private ODiskCache actualDiskCache;
private ODiskCache expectedDiskCache;
private OLocalPaginatedStorage actualStorage;
private OSBTree<Integer, OIdentifiable> expectedSBTree;
private OLocalPaginatedStorage expectedStorage;
private OStorageConfiguration expectedStorageConfiguration;
private OStorageConfiguration actualStorageConfiguration;
@BeforeClass
@Override
public void beforeClass() {
actualStorage = mock(OLocalPaginatedStorage.class);
actualStorageConfiguration = mock(OStorageConfiguration.class);
expectedStorage = mock(OLocalPaginatedStorage.class);
expectedStorageConfiguration = mock(OStorageConfiguration.class);
}
@AfterClass
@Override
public void afterClass() {
}
@BeforeMethod
public void beforeMethod() throws IOException {
Mockito.reset(actualStorage, expectedStorage, expectedStorageConfiguration, actualStorageConfiguration);
buildDirectory = System.getProperty("buildDirectory", ".");
buildDirectory += "/sbtreeWithWALTest";
createExpectedSBTree();
createActualSBTree();
}
@AfterMethod
@Override
public void afterMethod() throws Exception {
sbTree.delete();
expectedSBTree.delete();
actualDiskCache.delete();
expectedDiskCache.delete();
writeAheadLog.delete();
Assert.assertTrue(new File(actualStorageDir).delete());
Assert.assertTrue(new File(expectedStorageDir).delete());
Assert.assertTrue(new File(buildDirectory).delete());
}
private void createActualSBTree() throws IOException {
actualStorageConfiguration.clusters = new ArrayList<OStorageClusterConfiguration>();
actualStorageConfiguration.fileTemplate = new OStorageSegmentConfiguration();
actualStorageDir = buildDirectory + "/sbtreeWithWALTestActual";
when(actualStorage.getStoragePath()).thenReturn(actualStorageDir);
    when(actualStorage.getName()).thenReturn("sbtreeWithWALTestActual");
File buildDir = new File(buildDirectory);
if (!buildDir.exists())
buildDir.mkdirs();
File actualStorageDirFile = new File(actualStorageDir);
if (!actualStorageDirFile.exists())
actualStorageDirFile.mkdirs();
writeAheadLog = new OWriteAheadLog(6000, -1, 10 * 1024L * OWALPage.PAGE_SIZE, 100L * 1024 * 1024 * 1024, actualStorage);
actualDiskCache = new OReadWriteDiskCache(400L * 1024 * 1024 * 1024, 1648L * 1024 * 1024,
OGlobalConfiguration.DISK_CACHE_PAGE_SIZE.getValueAsInteger() * 1024, 1000000, 100, actualStorage, null, false, false);
OStorageVariableParser variableParser = new OStorageVariableParser(actualStorageDir);
when(actualStorage.getStorageTransaction()).thenReturn(null);
when(actualStorage.getDiskCache()).thenReturn(actualDiskCache);
when(actualStorage.getWALInstance()).thenReturn(writeAheadLog);
when(actualStorage.getVariableParser()).thenReturn(variableParser);
when(actualStorage.getConfiguration()).thenReturn(actualStorageConfiguration);
when(actualStorage.getMode()).thenReturn("rw");
when(actualStorageConfiguration.getDirectory()).thenReturn(actualStorageDir);
sbTree = new OSBTree<Integer, OIdentifiable>(".sbt", 1, true);
sbTree.create("actualSBTree", OIntegerSerializer.INSTANCE, OLinkSerializer.INSTANCE, null, actualStorage);
}
private void createExpectedSBTree() {
expectedStorageConfiguration.clusters = new ArrayList<OStorageClusterConfiguration>();
expectedStorageConfiguration.fileTemplate = new OStorageSegmentConfiguration();
expectedStorageDir = buildDirectory + "/sbtreeWithWALTestExpected";
when(expectedStorage.getStoragePath()).thenReturn(expectedStorageDir);
    when(expectedStorage.getName()).thenReturn("sbtreeWithWALTestExpected");
File buildDir = new File(buildDirectory);
if (!buildDir.exists())
buildDir.mkdirs();
File expectedStorageDirFile = new File(expectedStorageDir);
if (!expectedStorageDirFile.exists())
expectedStorageDirFile.mkdirs();
expectedDiskCache = new OReadWriteDiskCache(400L * 1024 * 1024 * 1024, 1648L * 1024 * 1024,
OGlobalConfiguration.DISK_CACHE_PAGE_SIZE.getValueAsInteger() * 1024, 1000000, 100, expectedStorage, null, false, false);
OStorageVariableParser variableParser = new OStorageVariableParser(expectedStorageDir);
when(expectedStorage.getStorageTransaction()).thenReturn(null);
when(expectedStorage.getDiskCache()).thenReturn(expectedDiskCache);
when(expectedStorage.getWALInstance()).thenReturn(null);
when(expectedStorage.getVariableParser()).thenReturn(variableParser);
when(expectedStorage.getConfiguration()).thenReturn(expectedStorageConfiguration);
when(expectedStorage.getMode()).thenReturn("rw");
when(expectedStorageConfiguration.getDirectory()).thenReturn(expectedStorageDir);
expectedSBTree = new OSBTree<Integer, OIdentifiable>(".sbt", 1, true);
expectedSBTree.create("expectedSBTree", OIntegerSerializer.INSTANCE, OLinkSerializer.INSTANCE, null, expectedStorage);
}
@Override
public void testKeyPut() throws Exception {
super.testKeyPut();
assertFileRestoreFromWAL();
}
@Override
public void testKeyPutRandomUniform() throws Exception {
super.testKeyPutRandomUniform();
assertFileRestoreFromWAL();
}
@Override
public void testKeyPutRandomGaussian() throws Exception {
super.testKeyPutRandomGaussian();
assertFileRestoreFromWAL();
}
@Override
public void testKeyDeleteRandomUniform() throws Exception {
super.testKeyDeleteRandomUniform();
assertFileRestoreFromWAL();
}
@Override
public void testKeyDeleteRandomGaussian() throws Exception {
super.testKeyDeleteRandomGaussian();
assertFileRestoreFromWAL();
}
@Override
public void testKeyDelete() throws Exception {
super.testKeyDelete();
assertFileRestoreFromWAL();
}
@Override
public void testKeyAddDelete() throws Exception {
super.testKeyAddDelete();
assertFileRestoreFromWAL();
}
@Override
public void testAddKeyValuesInTwoBucketsAndMakeFirstEmpty() throws Exception {
super.testAddKeyValuesInTwoBucketsAndMakeFirstEmpty();
assertFileRestoreFromWAL();
}
@Override
public void testAddKeyValuesInTwoBucketsAndMakeLastEmpty() throws Exception {
super.testAddKeyValuesInTwoBucketsAndMakeLastEmpty();
assertFileRestoreFromWAL();
}
@Override
public void testAddKeyValuesAndRemoveFirstMiddleAndLastPages() throws Exception {
super.testAddKeyValuesAndRemoveFirstMiddleAndLastPages();
assertFileRestoreFromWAL();
}
@Test(enabled = false)
@Override
public void testValuesMajor() {
super.testValuesMajor();
}
@Test(enabled = false)
@Override
public void testValuesMinor() {
super.testValuesMinor();
}
@Test(enabled = false)
@Override
public void testValuesBetween() {
super.testValuesBetween();
}
private void assertFileRestoreFromWAL() throws IOException {
sbTree.close();
writeAheadLog.close();
expectedSBTree.close();
actualDiskCache.clear();
restoreDataFromWAL();
expectedDiskCache.clear();
assertFileContentIsTheSame(expectedSBTree.getName(), sbTree.getName());
}
private void restoreDataFromWAL() throws IOException {
OWriteAheadLog log = new OWriteAheadLog(4, -1, 10 * 1024L * OWALPage.PAGE_SIZE, 100L * 1024 * 1024 * 1024, actualStorage);
OLogSequenceNumber lsn = log.begin();
List<OWALRecord> atomicUnit = new ArrayList<OWALRecord>();
boolean atomicChangeIsProcessed = false;
while (lsn != null) {
OWALRecord walRecord = log.read(lsn);
atomicUnit.add(walRecord);
if (!atomicChangeIsProcessed) {
Assert.assertTrue(walRecord instanceof OAtomicUnitStartRecord);
atomicChangeIsProcessed = true;
} else if (walRecord instanceof OAtomicUnitEndRecord) {
atomicChangeIsProcessed = false;
for (OWALRecord restoreRecord : atomicUnit) {
if (restoreRecord instanceof OAtomicUnitStartRecord || restoreRecord instanceof OAtomicUnitEndRecord)
continue;
final OUpdatePageRecord updatePageRecord = (OUpdatePageRecord) restoreRecord;
final long fileId = updatePageRecord.getFileId();
final long pageIndex = updatePageRecord.getPageIndex();
if (!expectedDiskCache.isOpen(fileId))
expectedDiskCache.openFile(fileId);
final OCacheEntry cacheEntry = expectedDiskCache.load(fileId, pageIndex, true);
final OCachePointer cachePointer = cacheEntry.getCachePointer();
cachePointer.acquireExclusiveLock();
try {
ODurablePage durablePage = new ODurablePage(cachePointer.getDataPointer(), ODurablePage.TrackMode.NONE);
durablePage.restoreChanges(updatePageRecord.getChanges());
durablePage.setLsn(updatePageRecord.getLsn());
cacheEntry.markDirty();
} finally {
cachePointer.releaseExclusiveLock();
expectedDiskCache.release(cacheEntry);
}
}
atomicUnit.clear();
} else {
Assert.assertTrue(walRecord instanceof OUpdatePageRecord);
}
lsn = log.next(lsn);
}
Assert.assertTrue(atomicUnit.isEmpty());
log.close();
}
private void assertFileContentIsTheSame(String expectedBTree, String actualBTree) throws IOException {
File expectedFile = new File(expectedStorageDir, expectedBTree + ".sbt");
RandomAccessFile fileOne = new RandomAccessFile(expectedFile, "r");
RandomAccessFile fileTwo = new RandomAccessFile(new File(actualStorageDir, actualBTree + ".sbt"), "r");
Assert.assertEquals(fileOne.length(), fileTwo.length());
byte[] expectedContent = new byte[OClusterPage.PAGE_SIZE];
byte[] actualContent = new byte[OClusterPage.PAGE_SIZE];
fileOne.seek(OAbstractFile.HEADER_SIZE);
fileTwo.seek(OAbstractFile.HEADER_SIZE);
int bytesRead = fileOne.read(expectedContent);
while (bytesRead >= 0) {
fileTwo.readFully(actualContent, 0, bytesRead);
Assert.assertEquals(expectedContent, actualContent);
expectedContent = new byte[OClusterPage.PAGE_SIZE];
actualContent = new byte[OClusterPage.PAGE_SIZE];
bytesRead = fileOne.read(expectedContent);
}
fileOne.close();
fileTwo.close();
}
}
| 1no label
|
core_src_test_java_com_orientechnologies_orient_core_index_sbtree_local_SBTreeWAL.java
|
467 |
new ODbRelatedCall<Iterator<Map.Entry<Object, Object>>>() {
public Iterator<Map.Entry<Object, Object>> call() {
return indexOne.iterator();
}
});
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_db_tool_ODatabaseCompare.java
|
16 |
public class CodeCompletions {
private static boolean forceExplicitTypeArgs(Declaration d,
OccurrenceLocation ol) {
if (ol==EXTENDS) {
return true;
}
else {
//TODO: this is a pretty limited implementation
// for now, but eventually we could do
// something much more sophisticated to
// guess if explicit type args will be
// necessary (variance, etc)
if (d instanceof Functional) {
List<ParameterList> pls = ((Functional) d).getParameterLists();
return pls.isEmpty() ||
pls.get(0).getParameters().isEmpty();
}
else {
return false;
}
}
}
static String getTextForDocLink(CeylonParseController cpc,
Declaration decl) {
Package pkg = decl.getUnit().getPackage();
String qname = decl.getQualifiedNameString();
// handle language package or same module and package
Unit unit = cpc.getRootNode().getUnit();
if (pkg!=null &&
(Module.LANGUAGE_MODULE_NAME.equals(pkg.getNameAsString())
|| (unit!=null && pkg.equals(unit.getPackage())))) {
if (decl.isToplevel()) {
return decl.getNameAsString();
}
else { // not top level in language module
int loc = qname.indexOf("::");
if (loc>=0) {
return qname.substring(loc + 2);
}
else {
return qname;
}
}
}
else {
return qname;
}
}
public static String getTextFor(Declaration dec, Unit unit) {
StringBuilder result = new StringBuilder();
result.append(escapeName(dec, unit));
appendTypeParameters(dec, result);
return result.toString();
}
public static String getPositionalInvocationTextFor(
Declaration dec, OccurrenceLocation ol,
ProducedReference pr, Unit unit, boolean includeDefaulted,
String typeArgs) {
StringBuilder result = new StringBuilder(escapeName(dec, unit));
if (typeArgs!=null) {
result.append(typeArgs);
}
else if (forceExplicitTypeArgs(dec, ol)) {
appendTypeParameters(dec, result);
}
appendPositionalArgs(dec, pr, unit, result, includeDefaulted, false);
appendSemiToVoidInvocation(result, dec);
return result.toString();
}
public static String getNamedInvocationTextFor(Declaration dec,
ProducedReference pr, Unit unit, boolean includeDefaulted,
String typeArgs) {
StringBuilder result = new StringBuilder(escapeName(dec, unit));
if (typeArgs!=null) {
result.append(typeArgs);
}
else if (forceExplicitTypeArgs(dec, null)) {
appendTypeParameters(dec, result);
}
appendNamedArgs(dec, pr, unit, result, includeDefaulted, false);
appendSemiToVoidInvocation(result, dec);
return result.toString();
}
private static void appendSemiToVoidInvocation(StringBuilder result,
Declaration dd) {
if ((dd instanceof Method) && ((Method) dd).isDeclaredVoid() &&
((Method) dd).getParameterLists().size()==1) {
result.append(';');
}
}
public static String getDescriptionFor(Declaration dec, Unit unit) {
StringBuilder result = new StringBuilder(dec.getName(unit));
appendTypeParameters(dec, result);
return result.toString();
}
public static String getPositionalInvocationDescriptionFor(
Declaration dec, OccurrenceLocation ol,
ProducedReference pr, Unit unit, boolean includeDefaulted,
String typeArgs) {
StringBuilder result = new StringBuilder(dec.getName(unit));
if (typeArgs!=null) {
result.append(typeArgs);
}
else if (forceExplicitTypeArgs(dec, ol)) {
appendTypeParameters(dec, result);
}
appendPositionalArgs(dec, pr, unit, result,
includeDefaulted, true);
return result.toString();
}
public static String getNamedInvocationDescriptionFor(
Declaration dec, ProducedReference pr,
Unit unit, boolean includeDefaulted, String typeArgs) {
StringBuilder result = new StringBuilder(dec.getName(unit));
if (typeArgs!=null) {
result.append(typeArgs);
}
else if (forceExplicitTypeArgs(dec, null)) {
appendTypeParameters(dec, result);
}
appendNamedArgs(dec, pr, unit, result,
includeDefaulted, true);
return result.toString();
}
public static String getRefinementTextFor(Declaration d,
ProducedReference pr, Unit unit, boolean isInterface,
ClassOrInterface ci, String indent, boolean containsNewline) {
return getRefinementTextFor(d, pr, unit, isInterface, ci,
indent, containsNewline, true);
}
public static String getRefinementTextFor(Declaration d,
ProducedReference pr, Unit unit, boolean isInterface,
ClassOrInterface ci, String indent, boolean containsNewline,
boolean preamble) {
StringBuilder result = new StringBuilder();
if (preamble) {
result.append("shared actual ");
if (isVariable(d) && !isInterface) {
result.append("variable ");
}
}
appendDeclarationHeaderText(d, pr, unit, result);
appendTypeParameters(d, result);
appendParametersText(d, pr, unit, result);
if (d instanceof Class) {
result.append(extraIndent(extraIndent(indent, containsNewline),
containsNewline))
.append(" extends super.").append(escapeName(d));
appendPositionalArgs(d, pr, unit, result, true, false);
}
appendConstraints(d, pr, unit, indent, containsNewline, result);
appendImplText(d, pr, isInterface, unit, indent, result, ci);
return result.toString();
}
private static void appendConstraints(Declaration d, ProducedReference pr,
Unit unit, String indent, boolean containsNewline,
StringBuilder result) {
if (d instanceof Functional) {
for (TypeParameter tp: ((Functional) d).getTypeParameters()) {
List<ProducedType> sts = tp.getSatisfiedTypes();
if (!sts.isEmpty()) {
result.append(extraIndent(extraIndent(indent, containsNewline),
containsNewline))
.append("given ").append(tp.getName())
.append(" satisfies ");
boolean first = true;
for (ProducedType st: sts) {
if (first) {
first = false;
}
else {
result.append("&");
}
result.append(st.substitute(pr.getTypeArguments())
.getProducedTypeName(unit));
}
}
}
}
}
static String getInlineFunctionTextFor(Parameter p,
ProducedReference pr, Unit unit, String indent) {
StringBuilder result = new StringBuilder();
appendNamedArgumentHeader(p, pr, result, false);
appendTypeParameters(p.getModel(), result);
appendParametersText(p.getModel(), pr, unit, result);
if (p.isDeclaredVoid()) {
result.append(" {}");
}
else {
result.append(" => nothing;");
}
return result.toString();
}
public static boolean isVariable(Declaration d) {
return d instanceof TypedDeclaration &&
((TypedDeclaration) d).isVariable();
}
static String getRefinementDescriptionFor(Declaration d,
ProducedReference pr, Unit unit) {
StringBuilder result = new StringBuilder("shared actual ");
if (isVariable(d)) {
result.append("variable ");
}
appendDeclarationHeaderDescription(d, pr, unit, result);
appendTypeParameters(d, result);
appendParametersDescription(d, pr, unit, result);
/*result.append(" - refine declaration in ")
.append(((Declaration) d.getContainer()).getName());*/
return result.toString();
}
static String getInlineFunctionDescriptionFor(Parameter p,
ProducedReference pr, Unit unit) {
StringBuilder result = new StringBuilder();
appendNamedArgumentHeader(p, pr, result, true);
appendTypeParameters(p.getModel(), result);
appendParametersDescription(p.getModel(), pr, unit, result);
return result.toString();
}
public static String getLabelDescriptionFor(Declaration d) {
StringBuilder result = new StringBuilder();
if (d!=null) {
appendDeclarationAnnotations(d, result);
appendDeclarationHeaderDescription(d, d.getUnit(), result);
appendTypeParameters(d, result, true);
appendParametersDescription(d, result, null);
}
return result.toString();
}
private static void appendDeclarationAnnotations(Declaration d,
StringBuilder result) {
if (d.isActual()) result.append("actual ");
if (d.isFormal()) result.append("formal ");
if (d.isDefault()) result.append("default ");
if (isVariable(d)) result.append("variable ");
}
public static String getDocDescriptionFor(Declaration d,
ProducedReference pr, Unit unit) {
StringBuilder result = new StringBuilder();
appendDeclarationHeaderDescription(d, pr, unit, result);
appendTypeParameters(d, pr, result, true, unit);
appendParametersDescription(d, pr, unit, result);
return result.toString();
}
public static StyledString getQualifiedDescriptionFor(Declaration d) {
StyledString result = new StyledString();
if (d!=null) {
appendDeclarationDescription(d, result);
if (d.isClassOrInterfaceMember()) {
Declaration ci = (Declaration) d.getContainer();
result.append(ci.getName(), Highlights.TYPE_ID_STYLER).append('.');
appendMemberName(d, result);
}
else {
appendDeclarationName(d, result);
}
appendTypeParameters(d, result, true);
appendParametersDescription(d, result);
if (d instanceof TypedDeclaration) {
if (EditorsUI.getPreferenceStore().getBoolean(DISPLAY_RETURN_TYPES)) {
TypedDeclaration td = (TypedDeclaration) d;
if (!td.isParameter() &&
!td.isDynamicallyTyped() &&
!(td instanceof Method && ((Method) td).isDeclaredVoid())) {
ProducedType t = td.getType();
if (t!=null) {
result.append(" ∊ ");
appendTypeName(result, t, Highlights.ARROW_STYLER);
}
}
}
}
/*result.append(" - refines declaration in ")
.append(((Declaration) d.getContainer()).getName());*/
}
return result;
}
public static StyledString getStyledDescriptionFor(Declaration d) {
StyledString result = new StyledString();
if (d!=null) {
appendDeclarationAnnotations(d, result);
appendDeclarationDescription(d, result);
appendDeclarationName(d, result);
appendTypeParameters(d, result, true);
appendParametersDescription(d, result);
if (d instanceof TypedDeclaration) {
if (EditorsUI.getPreferenceStore().getBoolean(DISPLAY_RETURN_TYPES)) {
TypedDeclaration td = (TypedDeclaration) d;
if (!td.isParameter() &&
!td.isDynamicallyTyped() &&
!(td instanceof Method && ((Method) td).isDeclaredVoid())) {
ProducedType t = td.getType();
if (t!=null) {
result.append(" ∊ ");
appendTypeName(result, t, Highlights.ARROW_STYLER);
}
}
}
}
/*result.append(" - refines declaration in ")
.append(((Declaration) d.getContainer()).getName());*/
}
return result;
}
private static void appendDeclarationAnnotations(Declaration d,
StyledString result) {
if (d.isActual()) result.append("actual ", Highlights.ANN_STYLER);
if (d.isFormal()) result.append("formal ", Highlights.ANN_STYLER);
if (d.isDefault()) result.append("default ", Highlights.ANN_STYLER);
if (isVariable(d)) result.append("variable ", Highlights.ANN_STYLER);
}
public static void appendPositionalArgs(Declaration dec,
Unit unit, StringBuilder result, boolean includeDefaulted,
boolean descriptionOnly) {
appendPositionalArgs(dec, dec.getReference(),
unit, result, includeDefaulted,
descriptionOnly);
}
private static void appendPositionalArgs(Declaration d, ProducedReference pr,
Unit unit, StringBuilder result, boolean includeDefaulted,
boolean descriptionOnly) {
if (d instanceof Functional) {
List<Parameter> params = getParameters((Functional) d,
includeDefaulted, false);
if (params.isEmpty()) {
result.append("()");
}
else {
boolean paramTypes = descriptionOnly &&
EditorsUI.getPreferenceStore().getBoolean(DISPLAY_PARAMETER_TYPES);
result.append("(");
for (Parameter p: params) {
ProducedTypedReference typedParameter =
pr.getTypedParameter(p);
if (p.getModel() instanceof Functional) {
if (p.isDeclaredVoid()) {
result.append("void ");
}
appendParameters(p.getModel(),
typedParameter,
unit, result,
descriptionOnly);
if (p.isDeclaredVoid()) {
result.append(" {}");
}
else {
result.append(" => ")
.append("nothing");
}
}
else {
ProducedType pt = typedParameter.getType();
if (descriptionOnly && paramTypes && !isTypeUnknown(pt)) {
if (p.isSequenced()) {
pt = unit.getSequentialElementType(pt);
}
result.append(pt.getProducedTypeName(unit));
if (p.isSequenced()) {
result.append(p.isAtLeastOne()?'+':'*');
}
result.append(" ");
}
else if (p.isSequenced()) {
result.append("*");
}
result.append(descriptionOnly || p.getModel()==null ?
p.getName() : escapeName(p.getModel()));
}
result.append(", ");
}
result.setLength(result.length()-2);
result.append(")");
}
}
}
static void appendSuperArgsText(Declaration d, ProducedReference pr,
Unit unit, StringBuilder result, boolean includeDefaulted) {
if (d instanceof Functional) {
List<Parameter> params = getParameters((Functional) d,
includeDefaulted, false);
if (params.isEmpty()) {
result.append("()");
}
else {
result.append("(");
for (Parameter p: params) {
if (p.isSequenced()) {
result.append("*");
}
result.append(escapeName(p.getModel()))
.append(", ");
}
result.setLength(result.length()-2);
result.append(")");
}
}
}
private static List<Parameter> getParameters(Functional fd,
boolean includeDefaults, boolean namedInvocation) {
List<ParameterList> plists = fd.getParameterLists();
if (plists==null || plists.isEmpty()) {
return Collections.<Parameter>emptyList();
}
else {
return CompletionUtil.getParameters(plists.get(0),
includeDefaults, namedInvocation);
}
}
private static void appendNamedArgs(Declaration d, ProducedReference pr,
Unit unit, StringBuilder result, boolean includeDefaulted,
boolean descriptionOnly) {
if (d instanceof Functional) {
List<Parameter> params = getParameters((Functional) d,
includeDefaulted, true);
if (params.isEmpty()) {
result.append(" {}");
}
else {
boolean paramTypes = descriptionOnly &&
EditorsUI.getPreferenceStore().getBoolean(DISPLAY_PARAMETER_TYPES);
result.append(" { ");
for (Parameter p: params) {
String name = descriptionOnly ?
p.getName() :
escapeName(p.getModel());
if (p.getModel() instanceof Functional) {
if (p.isDeclaredVoid()) {
result.append("void ");
}
else {
if (paramTypes && !isTypeUnknown(p.getType())) {
result.append(p.getType().getProducedTypeName(unit)).append(" ");
}
else {
result.append("function ");
}
}
result.append(name);
appendParameters(p.getModel(),
pr.getTypedParameter(p),
unit, result,
descriptionOnly);
if (descriptionOnly) {
result.append("; ");
}
else if (p.isDeclaredVoid()) {
result.append(" {} ");
}
else {
result.append(" => ")
//.append(CeylonQuickFixAssistant.defaultValue(p.getUnit(), p.getType()))
.append("nothing; ");
}
}
else {
if (p==params.get(params.size()-1) &&
!isTypeUnknown(p.getType()) &&
unit.isIterableParameterType(p.getType())) {
// result.append(" ");
}
else {
if (descriptionOnly && paramTypes && !isTypeUnknown(p.getType())) {
result.append(p.getType().getProducedTypeName(unit)).append(" ");
}
result.append(name)
.append(" = ")
//.append(CeylonQuickFixAssistant.defaultValue(p.getUnit(), p.getType()))
.append("nothing")
.append("; ");
}
}
}
result.append("}");
}
}
}
private static void appendTypeParameters(Declaration d,
StringBuilder result) {
appendTypeParameters(d, result, false);
}
private static void appendTypeParameters(Declaration d,
StringBuilder result, boolean variances) {
if (d instanceof Generic) {
List<TypeParameter> types =
((Generic) d).getTypeParameters();
if (!types.isEmpty()) {
result.append("<");
for (TypeParameter tp: types) {
if (variances) {
if (tp.isCovariant()) {
result.append("out ");
}
if (tp.isContravariant()) {
result.append("in ");
}
}
result.append(tp.getName()).append(", ");
}
result.setLength(result.length()-2);
result.append(">");
}
}
}
private static void appendTypeParameters(Declaration d,
ProducedReference pr, StringBuilder result,
boolean variances, Unit unit) {
if (d instanceof Generic) {
List<TypeParameter> types =
((Generic) d).getTypeParameters();
if (!types.isEmpty()) {
result.append("<");
boolean first = true;
for (TypeParameter tp: types) {
if (first) {
first = false;
}
else {
result.append(", ");
}
ProducedType arg = pr==null ?
null : pr.getTypeArguments().get(tp);
if (arg == null) {
if (variances) {
if (tp.isCovariant()) {
result.append("out ");
}
if (tp.isContravariant()) {
result.append("in ");
}
}
result.append(tp.getName());
}
else {
if (pr instanceof ProducedType) {
if (variances) {
SiteVariance variance =
((ProducedType) pr).getVarianceOverrides().get(tp);
if (variance==SiteVariance.IN) {
result.append("in ");
}
if (variance==SiteVariance.OUT) {
result.append("out ");
}
}
}
result.append(arg.getProducedTypeName(unit));
}
}
result.append(">");
}
}
}
private static void appendTypeParameters(Declaration d,
StyledString result, boolean variances) {
if (d instanceof Generic) {
List<TypeParameter> types =
((Generic) d).getTypeParameters();
if (!types.isEmpty()) {
result.append("<");
int len = types.size(), i = 0;
for (TypeParameter tp: types) {
if (variances) {
if (tp.isCovariant()) {
result.append("out ", Highlights.KW_STYLER);
}
if (tp.isContravariant()) {
result.append("in ", Highlights.KW_STYLER);
}
}
result.append(tp.getName(), Highlights.TYPE_STYLER);
if (++i<len) result.append(", ");
}
result.append(">");
}
}
}
private static void appendDeclarationHeaderDescription(Declaration d,
Unit unit, StringBuilder result) {
appendDeclarationHeader(d, null, unit, result, true);
}
private static void appendDeclarationHeaderDescription(Declaration d,
ProducedReference pr, Unit unit, StringBuilder result) {
appendDeclarationHeader(d, pr, unit, result, true);
}
private static void appendDeclarationHeaderText(Declaration d,
ProducedReference pr, Unit unit, StringBuilder result) {
appendDeclarationHeader(d, pr, unit, result, false);
}
private static void appendDeclarationHeader(Declaration d,
ProducedReference pr, Unit unit,
StringBuilder result,
boolean descriptionOnly) {
if (d instanceof Class) {
if (d.isAnonymous()) {
result.append("object");
}
else {
result.append("class");
}
}
else if (d instanceof Interface) {
result.append("interface");
}
else if (d instanceof TypeAlias) {
result.append("alias");
}
else if (d instanceof TypedDeclaration) {
TypedDeclaration td = (TypedDeclaration) d;
boolean isSequenced = d.isParameter() &&
((MethodOrValue) d).getInitializerParameter()
.isSequenced();
ProducedType type;
if (pr == null) {
type = td.getType();
}
else {
type = pr.getType();
}
if (isSequenced && type!=null) {
type = unit.getIteratedType(type);
}
if (type==null) {
type = new UnknownType(unit).getType();
}
String typeName = type.getProducedTypeName(unit);
if (td.isDynamicallyTyped()) {
result.append("dynamic");
}
else if (td instanceof Value &&
type.getDeclaration().isAnonymous()) {
result.append("object");
}
else if (d instanceof Method) {
if (((Functional) d).isDeclaredVoid()) {
result.append("void");
}
else {
result.append(typeName);
}
}
else {
result.append(typeName);
}
if (isSequenced) {
if (((MethodOrValue) d).getInitializerParameter()
.isAtLeastOne()) {
result.append("+");
}
else {
result.append("*");
}
}
}
result.append(" ")
.append(descriptionOnly ?
d.getName() : escapeName(d));
}
private static void appendNamedArgumentHeader(Parameter p,
ProducedReference pr, StringBuilder result,
boolean descriptionOnly) {
if (p.getModel() instanceof Functional) {
Functional fp = (Functional) p.getModel();
result.append(fp.isDeclaredVoid() ? "void" : "function");
}
else {
result.append("value");
}
result.append(" ")
.append(descriptionOnly ?
p.getName() : escapeName(p.getModel()));
}
private static void appendDeclarationDescription(Declaration d,
StyledString result) {
if (d instanceof Class) {
if (d.isAnonymous()) {
result.append("object", Highlights.KW_STYLER);
}
else {
result.append("class", Highlights.KW_STYLER);
}
}
else if (d instanceof Interface) {
result.append("interface", Highlights.KW_STYLER);
}
else if (d instanceof TypeAlias) {
result.append("alias", Highlights.KW_STYLER);
}
else if (d.isParameter()) {
TypedDeclaration td = (TypedDeclaration) d;
ProducedType type = td.getType();
if (td.isDynamicallyTyped()) {
result.append("dynamic", Highlights.KW_STYLER);
}
else if (type!=null) {
boolean isSequenced = //d.isParameter() &&
((MethodOrValue) d).getInitializerParameter()
.isSequenced();
if (isSequenced) {
type = d.getUnit().getIteratedType(type);
}
/*if (td instanceof Value &&
td.getTypeDeclaration().isAnonymous()) {
result.append("object", KW_STYLER);
}
else*/ if (d instanceof Method) {
if (((Functional)d).isDeclaredVoid()) {
result.append("void", Highlights.KW_STYLER);
}
else {
appendTypeName(result, type);
}
}
else {
appendTypeName(result, type);
}
if (isSequenced) {
result.append("*");
}
}
}
else if (d instanceof Value) {
Value v = (Value) d;
if (v.isDynamicallyTyped()) {
result.append("dynamic", Highlights.KW_STYLER);
}
else if (v.getTypeDeclaration()!=null &&
v.getTypeDeclaration().isAnonymous()) {
result.append("object", Highlights.KW_STYLER);
}
else {
result.append("value", Highlights.KW_STYLER);
}
}
else if (d instanceof Method) {
Method m = (Method) d;
if (m.isDynamicallyTyped()) {
result.append("dynamic", Highlights.KW_STYLER);
}
else if (m.isDeclaredVoid()) {
result.append("void", Highlights.KW_STYLER);
}
else {
result.append("function", Highlights.KW_STYLER);
}
}
else if (d instanceof Setter) {
result.append("assign", Highlights.KW_STYLER);
}
result.append(" ");
}
private static void appendMemberName(Declaration d, StyledString result) {
String name = d.getName();
if (name != null) {
if (d instanceof TypeDeclaration) {
result.append(name, Highlights.TYPE_STYLER);
}
else {
result.append(name, Highlights.MEMBER_STYLER);
}
}
}
private static void appendDeclarationName(Declaration d, StyledString result) {
String name = d.getName();
if (name != null) {
if (d instanceof TypeDeclaration) {
result.append(name, Highlights.TYPE_STYLER);
}
else {
result.append(name, Highlights.ID_STYLER);
}
}
}
/*private static void appendPackage(Declaration d, StringBuilder result) {
if (d.isToplevel()) {
result.append(" - ").append(getPackageLabel(d));
}
if (d.isClassOrInterfaceMember()) {
result.append(" - ");
ClassOrInterface td = (ClassOrInterface) d.getContainer();
result.append( td.getName() );
appendPackage(td, result);
}
}*/
private static void appendImplText(Declaration d, ProducedReference pr,
boolean isInterface, Unit unit, String indent, StringBuilder result,
ClassOrInterface ci) {
if (d instanceof Method) {
if (ci!=null && !ci.isAnonymous()) {
if (d.getName().equals("equals")) {
List<ParameterList> pl = ((Method) d).getParameterLists();
if (!pl.isEmpty()) {
List<Parameter> ps = pl.get(0).getParameters();
if (!ps.isEmpty()) {
appendEqualsImpl(unit, indent, result, ci, ps);
return;
}
}
}
}
if (!d.isFormal()) {
result.append(" => super.").append(d.getName());
appendSuperArgsText(d, pr, unit, result, true);
result.append(";");
}
else {
if (((Functional) d).isDeclaredVoid()) {
result.append(" {}");
}
else {
result.append(" => nothing;");
}
}
}
else if (d instanceof Value) {
if (ci!=null && !ci.isAnonymous()) {
if (d.getName().equals("hash")) {
appendHashImpl(unit, indent, result, ci);
return;
}
}
if (isInterface/*||d.isParameter()*/) {
//interfaces can't have references,
//so generate a setter for variables
if (d.isFormal()) {
result.append(" => nothing;");
}
else {
result.append(" => super.")
.append(d.getName()).append(";");
}
if (isVariable(d)) {
result.append(indent)
.append("assign ").append(d.getName())
.append(" {}");
}
}
else {
                //we can have references here, so use = instead
//of => for variables
String arrow = isVariable(d) ? " = " : " => ";
if (d.isFormal()) {
result.append(arrow).append("nothing;");
}
else {
result.append(arrow)
.append("super.").append(d.getName())
.append(";");
}
}
}
else {
//TODO: in the case of a class, formal member refinements!
result.append(" {}");
}
}
private static void appendHashImpl(Unit unit, String indent,
StringBuilder result, ClassOrInterface ci) {
result.append(" {")
.append(indent).append(getDefaultIndent())
.append("variable value hash = 1;")
.append(indent).append(getDefaultIndent());
String ind = indent+getDefaultIndent();
appendMembersToHash(unit, ind, result, ci);
result.append("return hash;")
.append(indent)
.append("}");
}
private static void appendEqualsImpl(Unit unit, String indent,
StringBuilder result, ClassOrInterface ci, List<Parameter> ps) {
Parameter p = ps.get(0);
result.append(" {")
.append(indent).append(getDefaultIndent())
.append("if (is ").append(ci.getName()).append(" ").append(p.getName()).append(") {")
.append(indent).append(getDefaultIndent()).append(getDefaultIndent())
.append("return ");
String ind = indent+getDefaultIndent()+getDefaultIndent()+getDefaultIndent();
appendMembersToEquals(unit, ind, result, ci, p);
result.append(indent).append(getDefaultIndent())
.append("}")
.append(indent).append(getDefaultIndent())
.append("else {")
.append(indent).append(getDefaultIndent()).append(getDefaultIndent())
.append("return false;")
.append(indent).append(getDefaultIndent())
.append("}")
.append(indent)
.append("}");
}
    private static boolean isObjectField(Declaration m) {
        // parenthesized: && binds tighter than ||, so without these parens
        // a null name would still reach equals("string") and throw an NPE
        return m.getName()!=null &&
                (m.getName().equals("hash") ||
                 m.getName().equals("string"));
    }
private static void appendMembersToEquals(Unit unit, String indent,
StringBuilder result, ClassOrInterface ci, Parameter p) {
boolean found = false;
for (Declaration m: ci.getMembers()) {
if (m instanceof Value &&
!isObjectField(m)) {
Value value = (Value) m;
if (!value.isTransient()) {
if (!unit.getNullValueDeclaration().getType()
.isSubtypeOf(value.getType())) {
result.append(value.getName())
.append("==")
.append(p.getName())
.append(".")
.append(value.getName())
.append(" && ")
.append(indent);
found = true;
}
}
}
}
if (found) {
result.setLength(result.length()-4-indent.length());
result.append(";");
}
else {
result.append("true;");
}
}
private static void appendMembersToHash(Unit unit, String indent,
StringBuilder result, ClassOrInterface ci) {
for (Declaration m: ci.getMembers()) {
if (m instanceof Value &&
!isObjectField(m)) {
Value value = (Value) m;
if (!value.isTransient()) {
if (!unit.getNullValueDeclaration().getType()
.isSubtypeOf(value.getType())) {
result.append("hash = 31*hash + ")
.append(value.getName())
.append(".hash;")
.append(indent);
}
}
}
}
}
private static String extraIndent(String indent, boolean containsNewline) {
return containsNewline ? indent + getDefaultIndent() : indent;
}
public static void appendParametersDescription(Declaration d, StringBuilder result,
CeylonParseController cpc) {
appendParameters(d, null, d.getUnit(), result, cpc, true);
}
public static void appendParametersText(Declaration d, ProducedReference pr,
Unit unit, StringBuilder result) {
appendParameters(d, pr, unit, result, null, false);
}
private static void appendParametersDescription(Declaration d, ProducedReference pr,
Unit unit, StringBuilder result) {
appendParameters(d, pr, unit, result, null, true);
}
private static void appendParameters(Declaration d, ProducedReference pr,
Unit unit, StringBuilder result, boolean descriptionOnly) {
appendParameters(d, pr, unit, result, null, descriptionOnly);
}
private static void appendParameters(Declaration d, ProducedReference pr,
Unit unit, StringBuilder result, CeylonParseController cpc,
boolean descriptionOnly) {
if (d instanceof Functional) {
List<ParameterList> plists = ((Functional) d).getParameterLists();
if (plists!=null) {
for (ParameterList params: plists) {
if (params.getParameters().isEmpty()) {
result.append("()");
}
else {
result.append("(");
for (Parameter p: params.getParameters()) {
appendParameter(result, pr, p, unit,
descriptionOnly);
if (cpc!=null) {
result.append(getDefaultValueDescription(p, cpc));
}
result.append(", ");
}
result.setLength(result.length()-2);
result.append(")");
}
}
}
}
}
public static void appendParameterText(StringBuilder result,
ProducedReference pr, Parameter p, Unit unit) {
appendParameter(result, pr, p, unit, false);
}
private static void appendParameter(StringBuilder result,
ProducedReference pr, Parameter p, Unit unit,
boolean descriptionOnly) {
if (p.getModel() == null) {
result.append(p.getName());
}
else {
ProducedTypedReference ppr = pr==null ?
null : pr.getTypedParameter(p);
appendDeclarationHeader(p.getModel(), ppr, unit, result,
descriptionOnly);
appendParameters(p.getModel(), ppr, unit, result,
descriptionOnly);
}
}
public static void appendParameterContextInfo(StringBuilder result,
ProducedReference pr, Parameter p, Unit unit,
boolean namedInvocation, boolean isListedValues) {
if (p.getModel() == null) {
result.append(p.getName());
}
else {
            ProducedTypedReference ppr = pr==null ?
                    null : pr.getTypedParameter(p);
            String typeName;
            // callers are expected to pass a non-null pr here: a null pr
            // makes ppr null, and the dereference below would throw an NPE
            ProducedType type = ppr.getType();
if (isListedValues && namedInvocation) {
ProducedType et = unit.getIteratedType(type);
typeName = et.getProducedTypeName(unit);
if (unit.isEntryType(et)) {
typeName = '<' + typeName + '>';
}
typeName += unit.isNonemptyIterableType(type) ? '+' : '*';
}
else if (p.isSequenced() && !namedInvocation) {
ProducedType et = unit.getSequentialElementType(type);
typeName = et.getProducedTypeName(unit);
if (unit.isEntryType(et)) {
typeName = '<' + typeName + '>';
}
typeName += p.isAtLeastOne() ? '+' : '*';
}
else {
typeName = type.getProducedTypeName(unit);
}
result.append(typeName).append(" ").append(p.getName());
appendParametersDescription(p.getModel(), ppr, unit, result);
}
if (namedInvocation && !isListedValues) {
result.append(p.getModel() instanceof Method ?
" => ... " : " = ... " );
}
}
private static void appendParametersDescription(Declaration d, StyledString result) {
if (d instanceof Functional) {
List<ParameterList> plists = ((Functional) d).getParameterLists();
if (plists!=null) {
for (ParameterList params: plists) {
if (params.getParameters().isEmpty()) {
result.append("()");
}
else {
result.append("(");
int len = params.getParameters().size(), i=0;
for (Parameter p: params.getParameters()) {
if (p.getModel()==null) {
result.append(p.getName());
}
else {
appendDeclarationDescription(p.getModel(), result);
appendDeclarationName(p.getModel(), result);
appendParametersDescription(p.getModel(), result);
/*result.append(p.getType().getProducedTypeName(), TYPE_STYLER)
.append(" ").append(p.getName(), ID_STYLER);
if (p instanceof FunctionalParameter) {
result.append("(");
FunctionalParameter fp = (FunctionalParameter) p;
List<Parameter> fpl = fp.getParameterLists().get(0).getParameters();
int len2 = fpl.size(), j=0;
for (Parameter pp: fpl) {
result.append(pp.getType().getProducedTypeName(), TYPE_STYLER)
.append(" ").append(pp.getName(), ID_STYLER);
if (++j<len2) result.append(", ");
}
result.append(")");
}*/
}
if (++i<len) result.append(", ");
}
result.append(")");
}
}
}
}
}
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_CodeCompletions.java
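CodeCompletions repeatedly uses the same separator-trimming idiom: append "elem, " unconditionally for every element, then cut the trailing ", " with a single setLength call instead of tracking a first/last flag. A self-contained sketch of the idiom:

static String joinArgs(java.util.List<String> args) {
    if (args.isEmpty()) {
        return "()";
    }
    StringBuilder sb = new StringBuilder("(");
    for (String a : args) {
        sb.append(a).append(", ");
    }
    sb.setLength(sb.length() - 2); // drop the trailing ", "
    return sb.append(")").toString();
}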
|
480 |
public class AnalyzeAction extends IndicesAction<AnalyzeRequest, AnalyzeResponse, AnalyzeRequestBuilder> {
public static final AnalyzeAction INSTANCE = new AnalyzeAction();
public static final String NAME = "indices/analyze";
private AnalyzeAction() {
super(NAME);
}
@Override
public AnalyzeResponse newResponse() {
return new AnalyzeResponse();
}
@Override
public AnalyzeRequestBuilder newRequestBuilder(IndicesAdminClient client) {
return new AnalyzeRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_analyze_AnalyzeAction.java
|
435 |
public class ClientTopicProxy<E> extends ClientProxy implements ITopic<E> {
private final String name;
private volatile Data key;
public ClientTopicProxy(String instanceName, String serviceName, String objectId) {
super(instanceName, serviceName, objectId);
this.name = objectId;
}
@Override
public void publish(E message) {
SerializationService serializationService = getContext().getSerializationService();
final Data data = serializationService.toData(message);
PublishRequest request = new PublishRequest(name, data);
invoke(request);
}
@Override
public String addMessageListener(final MessageListener<E> listener) {
AddMessageListenerRequest request = new AddMessageListenerRequest(name);
EventHandler<PortableMessage> handler = new EventHandler<PortableMessage>() {
@Override
public void handle(PortableMessage event) {
SerializationService serializationService = getContext().getSerializationService();
ClientClusterService clusterService = getContext().getClusterService();
E messageObject = serializationService.toObject(event.getMessage());
Member member = clusterService.getMember(event.getUuid());
Message<E> message = new Message<E>(name, messageObject, event.getPublishTime(), member);
listener.onMessage(message);
}
@Override
public void onListenerRegister() {
}
};
return listen(request, getKey(), handler);
}
@Override
public boolean removeMessageListener(String registrationId) {
final RemoveMessageListenerRequest request = new RemoveMessageListenerRequest(name, registrationId);
return stopListening(request, registrationId);
}
@Override
public LocalTopicStats getLocalTopicStats() {
        throw new UnsupportedOperationException("Locality is ambiguous for client!");
}
@Override
protected void onDestroy() {
}
private Data getKey() {
if (key == null) {
key = getContext().getSerializationService().toData(name);
}
return key;
}
@Override
protected <T> T invoke(ClientRequest req) {
return super.invoke(req, getKey());
}
@Override
public String toString() {
return "ITopic{" + "name='" + getName() + '\'' + '}';
}
}
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_proxy_ClientTopicProxy.java
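A hedged usage sketch against the ITopic API shown above; `client` is an assumed HazelcastInstance handle, and getPublishingMember() is the assumed accessor for the Member the proxy passes into each Message:

ITopic<String> topic = client.getTopic("news");
String id = topic.addMessageListener(new MessageListener<String>() {
    public void onMessage(Message<String> m) {
        // getPublishingMember() is an assumption; the snippet above only
        // shows the Member being passed into the Message constructor
        System.out.println(m.getMessageObject() + " from " + m.getPublishingMember());
    }
});
topic.publish("hello");
topic.removeMessageListener(id); // unregister with the id returned above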
|
198 |
Analyzer analyzer = new Analyzer() {
@Override
protected TokenStreamComponents createComponents(String fieldName,
Reader reader) {
Tokenizer t = new WhitespaceTokenizer(Lucene.VERSION, reader);
return new TokenStreamComponents(t, new UniqueTokenFilter(t));
}
};
| 0true
|
src_test_java_org_apache_lucene_analysis_miscellaneous_UniqueTokenFilterTests.java
|
661 |
class ShardValidateQueryRequest extends BroadcastShardOperationRequest {
private BytesReference source;
private String[] types = Strings.EMPTY_ARRAY;
private boolean explain;
private long nowInMillis;
@Nullable
private String[] filteringAliases;
ShardValidateQueryRequest() {
}
public ShardValidateQueryRequest(String index, int shardId, @Nullable String[] filteringAliases, ValidateQueryRequest request) {
super(index, shardId, request);
this.source = request.source();
this.types = request.types();
this.explain = request.explain();
this.filteringAliases = filteringAliases;
this.nowInMillis = request.nowInMillis;
}
public BytesReference source() {
return source;
}
public String[] types() {
return this.types;
}
public boolean explain() {
return this.explain;
}
public String[] filteringAliases() {
return filteringAliases;
}
public long nowInMillis() {
return this.nowInMillis;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
source = in.readBytesReference();
int typesSize = in.readVInt();
if (typesSize > 0) {
types = new String[typesSize];
for (int i = 0; i < typesSize; i++) {
types[i] = in.readString();
}
}
int aliasesSize = in.readVInt();
if (aliasesSize > 0) {
filteringAliases = new String[aliasesSize];
for (int i = 0; i < aliasesSize; i++) {
filteringAliases[i] = in.readString();
}
}
explain = in.readBoolean();
nowInMillis = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBytesReference(source);
out.writeVInt(types.length);
for (String type : types) {
out.writeString(type);
}
if (filteringAliases != null) {
out.writeVInt(filteringAliases.length);
for (String alias : filteringAliases) {
out.writeString(alias);
}
} else {
out.writeVInt(0);
}
out.writeBoolean(explain);
out.writeVLong(nowInMillis);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_validate_query_ShardValidateQueryRequest.java
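The readFrom/writeTo pair above must stay symmetric field by field; a round-trip test makes drift visible immediately. A minimal sketch, assuming the BytesStreamOutput/BytesStreamInput classes from org.elasticsearch.common.io.stream (their constructors vary across versions) and an `original` request built elsewhere:

BytesStreamOutput out = new BytesStreamOutput();
original.writeTo(out);                          // serialize
ShardValidateQueryRequest copy = new ShardValidateQueryRequest();
copy.readFrom(new BytesStreamInput(out.bytes())); // deserialize in the same order
assert copy.explain() == original.explain();
assert copy.nowInMillis() == original.nowInMillis();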
|
2,857 |
@edu.umd.cs.findbugs.annotations.SuppressWarnings("EI_EXPOSE_REP")
public class ReplicaSyncResponse extends Operation
implements PartitionAwareOperation, BackupOperation, UrgentSystemOperation {
private byte[] data;
private long[] replicaVersions;
private boolean compressed;
public ReplicaSyncResponse() {
}
public ReplicaSyncResponse(byte[] data, long[] replicaVersions, boolean compressed) {
this.data = data;
this.replicaVersions = replicaVersions;
this.compressed = compressed;
}
@Override
public void beforeRun() throws Exception {
}
@Override
public void run() throws Exception {
NodeEngineImpl nodeEngine = (NodeEngineImpl) getNodeEngine();
InternalPartitionServiceImpl partitionService = (InternalPartitionServiceImpl) nodeEngine.getPartitionService();
SerializationService serializationService = nodeEngine.getSerializationService();
int partitionId = getPartitionId();
int replicaIndex = getReplicaIndex();
BufferObjectDataInput in = null;
try {
if (data != null && data.length > 0) {
logApplyReplicaSync(partitionId, replicaIndex);
byte[] taskData = compressed ? IOUtil.decompress(data) : data;
in = serializationService.createObjectDataInput(taskData);
int size = in.readInt();
for (int i = 0; i < size; i++) {
Operation op = (Operation) serializationService.readObject(in);
try {
ErrorLoggingResponseHandler responseHandler
= new ErrorLoggingResponseHandler(nodeEngine.getLogger(op.getClass()));
op.setNodeEngine(nodeEngine)
.setPartitionId(partitionId)
.setReplicaIndex(replicaIndex)
.setResponseHandler(responseHandler);
op.beforeRun();
op.run();
op.afterRun();
} catch (Throwable e) {
logException(op, e);
}
}
}
} finally {
closeResource(in);
partitionService.finalizeReplicaSync(partitionId, replicaVersions);
}
}
private void logException(Operation op, Throwable e) {
NodeEngineImpl nodeEngine = (NodeEngineImpl) getNodeEngine();
ILogger logger = nodeEngine.getLogger(getClass());
Level level = nodeEngine.isActive() ? Level.WARNING : Level.FINEST;
if (logger.isLoggable(level)) {
logger.log(level, "While executing " + op, e);
}
}
private void logApplyReplicaSync(int partitionId, int replicaIndex) {
NodeEngineImpl nodeEngine = (NodeEngineImpl) getNodeEngine();
ILogger logger = nodeEngine.getLogger(getClass());
if (logger.isFinestEnabled()) {
logger.finest("Applying replica sync for partition: " + partitionId + ", replica: " + replicaIndex);
}
}
@Override
public void afterRun() throws Exception {
}
@Override
public boolean returnsResponse() {
return false;
}
@Override
public Object getResponse() {
return null;
}
@Override
public boolean validatesTarget() {
return true;
}
@Override
public void logError(Throwable e) {
ReplicaErrorLogger.log(e, getLogger());
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
IOUtil.writeByteArray(out, data);
out.writeLongArray(replicaVersions);
out.writeBoolean(compressed);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
data = IOUtil.readByteArray(in);
replicaVersions = in.readLongArray();
compressed = in.readBoolean();
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("ReplicaSyncResponse");
sb.append("{partition=").append(getPartitionId());
sb.append(", replica=").append(getReplicaIndex());
sb.append(", version=").append(Arrays.toString(replicaVersions));
sb.append('}');
return sb.toString();
}
private static final class ErrorLoggingResponseHandler implements ResponseHandler {
private final ILogger logger;
private ErrorLoggingResponseHandler(ILogger logger) {
this.logger = logger;
}
@Override
public void sendResponse(final Object obj) {
if (obj instanceof Throwable) {
Throwable t = (Throwable) obj;
logger.severe(t);
}
}
@Override
public boolean isLocal() {
return true;
}
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_partition_impl_ReplicaSyncResponse.java
|
1,451 |
public class TimestampsRegionCache extends LocalRegionCache implements RegionCache {
public TimestampsRegionCache(final String name, final HazelcastInstance hazelcastInstance) {
super(name, hazelcastInstance, null);
}
@Override
public boolean put(Object key, Object value, Object currentVersion) {
return update(key, value, currentVersion, null, null);
}
@Override
protected MessageListener<Object> createMessageListener() {
return new MessageListener<Object>() {
public void onMessage(final Message<Object> message) {
final Timestamp ts = (Timestamp) message.getMessageObject();
final Object key = ts.getKey();
for (;;) {
final Value value = cache.get(key);
final Long current = value != null ? (Long) value.getValue() : null;
if (current != null) {
if (ts.getTimestamp() > current) {
if (cache.replace(key, value, new Value(value.getVersion(),
ts.getTimestamp(), Clock.currentTimeMillis()))) {
return;
}
} else {
return;
}
} else {
if (cache.putIfAbsent(key, new Value(null, ts.getTimestamp(),
Clock.currentTimeMillis())) == null) {
return;
}
}
}
}
};
}
@Override
protected Object createMessage(final Object key, final Object value, final Object currentVersion) {
return new Timestamp(key, (Long) value);
}
final void cleanup() {
}
}
| 1no label
|
hazelcast-hibernate_hazelcast-hibernate3_src_main_java_com_hazelcast_hibernate_local_TimestampsRegionCache.java
|
140 |
private class MyDistributedObjectListener implements DistributedObjectListener {
@Override
public void distributedObjectCreated(DistributedObjectEvent event) {
send(event);
}
@Override
public void distributedObjectDestroyed(DistributedObjectEvent event) {
}
private void send(DistributedObjectEvent event) {
if (endpoint.live()) {
PortableDistributedObjectEvent portableEvent = new PortableDistributedObjectEvent(
event.getEventType(), event.getDistributedObject().getName(), event.getServiceName());
endpoint.sendEvent(portableEvent, getCallId());
}
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_client_DistributedObjectListenerRequest.java
|
1,484 |
public class RoutingTable implements Iterable<IndexRoutingTable> {
public static final RoutingTable EMPTY_ROUTING_TABLE = builder().build();
private final long version;
// index to IndexRoutingTable map
private final ImmutableMap<String, IndexRoutingTable> indicesRouting;
RoutingTable(long version, Map<String, IndexRoutingTable> indicesRouting) {
this.version = version;
this.indicesRouting = ImmutableMap.copyOf(indicesRouting);
}
/**
* Returns the version of the {@link RoutingTable}.
*
* @return version of the {@link RoutingTable}
*/
public long version() {
return this.version;
}
@Override
public UnmodifiableIterator<IndexRoutingTable> iterator() {
return indicesRouting.values().iterator();
}
public boolean hasIndex(String index) {
return indicesRouting.containsKey(index);
}
public IndexRoutingTable index(String index) {
return indicesRouting.get(index);
}
public Map<String, IndexRoutingTable> indicesRouting() {
return indicesRouting;
}
public Map<String, IndexRoutingTable> getIndicesRouting() {
return indicesRouting();
}
public RoutingNodes routingNodes(ClusterState state) {
return new RoutingNodes(state);
}
public RoutingTable validateRaiseException(MetaData metaData) throws RoutingValidationException {
RoutingTableValidation validation = validate(metaData);
if (!validation.valid()) {
throw new RoutingValidationException(validation);
}
return this;
}
public RoutingTableValidation validate(MetaData metaData) {
RoutingTableValidation validation = new RoutingTableValidation();
for (IndexRoutingTable indexRoutingTable : this) {
indexRoutingTable.validate(validation, metaData);
}
return validation;
}
public List<ShardRouting> shardsWithState(ShardRoutingState... states) {
List<ShardRouting> shards = newArrayList();
for (IndexRoutingTable indexRoutingTable : this) {
shards.addAll(indexRoutingTable.shardsWithState(states));
}
return shards;
}
/**
* All the shards (replicas) for the provided indices.
*
* @param indices The indices to return all the shards (replicas), can be <tt>null</tt> or empty array to indicate all indices
* @return All the shards matching the specific index
* @throws IndexMissingException If an index passed does not exist
*/
public List<ShardRouting> allShards(String... indices) throws IndexMissingException {
List<ShardRouting> shards = Lists.newArrayList();
if (indices == null || indices.length == 0) {
indices = indicesRouting.keySet().toArray(new String[indicesRouting.keySet().size()]);
}
for (String index : indices) {
IndexRoutingTable indexRoutingTable = index(index);
if (indexRoutingTable == null) {
throw new IndexMissingException(new Index(index));
}
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
shards.add(shardRouting);
}
}
}
return shards;
}
/**
* All the shards (primary + replicas) for the provided indices grouped (each group is a single element, consisting
* of the shard). This is handy for components that expect group iterators, but in some cases still want
* to iterate over all the shards (and not just one shard per replication group).
*
* @param indices The indices to return all the shards (replicas), can be <tt>null</tt> or empty array to indicate all indices
* @return All the shards grouped into a single shard element group each
* @throws IndexMissingException If an index passed does not exist
* @see IndexRoutingTable#groupByAllIt()
*/
public GroupShardsIterator allShardsGrouped(String... indices) throws IndexMissingException {
// use list here since we need to maintain identity across shards
ArrayList<ShardIterator> set = new ArrayList<ShardIterator>();
if (indices == null || indices.length == 0) {
indices = indicesRouting.keySet().toArray(new String[indicesRouting.keySet().size()]);
}
for (String index : indices) {
IndexRoutingTable indexRoutingTable = index(index);
if (indexRoutingTable == null) {
continue;
// we simply ignore indices that don't exist (this makes sense for the operations that currently use this method)
// throw new IndexMissingException(new Index(index));
}
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
set.add(shardRouting.shardsIt());
}
}
}
return new GroupShardsIterator(set);
}
public GroupShardsIterator allActiveShardsGrouped(String[] indices, boolean includeEmpty) throws IndexMissingException {
// use list here since we need to maintain identity across shards
ArrayList<ShardIterator> set = new ArrayList<ShardIterator>();
if (indices == null || indices.length == 0) {
indices = indicesRouting.keySet().toArray(new String[indicesRouting.keySet().size()]);
}
for (String index : indices) {
IndexRoutingTable indexRoutingTable = index(index);
if (indexRoutingTable == null) {
continue;
// we simply ignore indices that don't exist (this makes sense for the operations that currently use this method)
// throw new IndexMissingException(new Index(index));
}
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
if (shardRouting.active()) {
set.add(shardRouting.shardsIt());
} else if (includeEmpty) { // we need this for counting properly, just make it an empty one
set.add(new PlainShardIterator(shardRouting.shardId(), ImmutableList.<ShardRouting>of()));
}
}
}
}
return new GroupShardsIterator(set);
}
public GroupShardsIterator allAssignedShardsGrouped(String[] indices, boolean includeEmpty) throws IndexMissingException {
// use list here since we need to maintain identity across shards
ArrayList<ShardIterator> set = new ArrayList<ShardIterator>();
if (indices == null || indices.length == 0) {
indices = indicesRouting.keySet().toArray(new String[indicesRouting.keySet().size()]);
}
for (String index : indices) {
IndexRoutingTable indexRoutingTable = index(index);
if (indexRoutingTable == null) {
continue;
// we simply ignore indices that don't exist (this makes sense for the operations that currently use this method)
// throw new IndexMissingException(new Index(index));
}
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
if (shardRouting.assignedToNode()) {
set.add(shardRouting.shardsIt());
} else if (includeEmpty) { // we need this for counting properly, just make it an empty one
set.add(new PlainShardIterator(shardRouting.shardId(), ImmutableList.<ShardRouting>of()));
}
}
}
}
return new GroupShardsIterator(set);
}
/**
* All the *active* primary shards for the provided indices grouped (each group is a single element, consisting
* of the primary shard). This is handy for components that expect group iterators, but in some cases still want
* to iterate over all primary shards (and not just one shard per replication group).
*
* @param indices The indices to return all the shards (replicas), can be <tt>null</tt> or empty array to indicate all indices
* @return All the primary shards grouped into a single shard element group each
* @throws IndexMissingException If an index passed does not exist
* @see IndexRoutingTable#groupByAllIt()
*/
public GroupShardsIterator activePrimaryShardsGrouped(String[] indices, boolean includeEmpty) throws IndexMissingException {
// use list here since we need to maintain identity across shards
ArrayList<ShardIterator> set = new ArrayList<ShardIterator>();
if (indices == null || indices.length == 0) {
indices = indicesRouting.keySet().toArray(new String[indicesRouting.keySet().size()]);
}
for (String index : indices) {
IndexRoutingTable indexRoutingTable = index(index);
if (indexRoutingTable == null) {
throw new IndexMissingException(new Index(index));
}
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
ShardRouting primary = indexShardRoutingTable.primaryShard();
if (primary.active()) {
set.add(primary.shardsIt());
} else if (includeEmpty) { // we need this for counting properly, just make it an empty one
set.add(new PlainShardIterator(primary.shardId(), ImmutableList.<ShardRouting>of()));
}
}
}
return new GroupShardsIterator(set);
}
public static Builder builder() {
return new Builder();
}
public static Builder builder(RoutingTable routingTable) {
return new Builder(routingTable);
}
public static class Builder {
private long version;
private final Map<String, IndexRoutingTable> indicesRouting = newHashMap();
public Builder() {
}
public Builder(RoutingTable routingTable) {
version = routingTable.version;
for (IndexRoutingTable indexRoutingTable : routingTable) {
indicesRouting.put(indexRoutingTable.index(), indexRoutingTable);
}
}
public Builder updateNodes(RoutingNodes routingNodes) {
// this is being called without pre-initializing the routing table, so we must copy over the version as well
this.version = routingNodes.routingTable().version();
Map<String, IndexRoutingTable.Builder> indexRoutingTableBuilders = newHashMap();
for (RoutingNode routingNode : routingNodes) {
for (MutableShardRouting shardRoutingEntry : routingNode) {
// every relocating shard has a double entry, ignore the target one.
if (shardRoutingEntry.state() == ShardRoutingState.INITIALIZING && shardRoutingEntry.relocatingNodeId() != null)
continue;
String index = shardRoutingEntry.index();
IndexRoutingTable.Builder indexBuilder = indexRoutingTableBuilders.get(index);
if (indexBuilder == null) {
indexBuilder = new IndexRoutingTable.Builder(index);
indexRoutingTableBuilders.put(index, indexBuilder);
}
IndexShardRoutingTable refData = routingNodes.routingTable().index(shardRoutingEntry.index()).shard(shardRoutingEntry.id());
indexBuilder.addShard(refData, shardRoutingEntry);
}
}
for (MutableShardRouting shardRoutingEntry : Iterables.concat(routingNodes.unassigned(), routingNodes.ignoredUnassigned())) {
String index = shardRoutingEntry.index();
IndexRoutingTable.Builder indexBuilder = indexRoutingTableBuilders.get(index);
if (indexBuilder == null) {
indexBuilder = new IndexRoutingTable.Builder(index);
indexRoutingTableBuilders.put(index, indexBuilder);
}
IndexShardRoutingTable refData = routingNodes.routingTable().index(shardRoutingEntry.index()).shard(shardRoutingEntry.id());
indexBuilder.addShard(refData, shardRoutingEntry);
}
for (ShardId shardId : routingNodes.getShardsToClearPostAllocationFlag()) {
IndexRoutingTable.Builder indexRoutingBuilder = indexRoutingTableBuilders.get(shardId.index().name());
if (indexRoutingBuilder != null) {
indexRoutingBuilder.clearPostAllocationFlag(shardId);
}
}
for (IndexRoutingTable.Builder indexBuilder : indexRoutingTableBuilders.values()) {
add(indexBuilder);
}
return this;
}
public Builder updateNumberOfReplicas(int numberOfReplicas, String... indices) throws IndexMissingException {
if (indices == null || indices.length == 0) {
indices = indicesRouting.keySet().toArray(new String[indicesRouting.keySet().size()]);
}
for (String index : indices) {
IndexRoutingTable indexRoutingTable = indicesRouting.get(index);
if (indexRoutingTable == null) {
// ignore index missing failure, its closed...
continue;
}
int currentNumberOfReplicas = indexRoutingTable.shards().get(0).size() - 1; // remove the required primary
IndexRoutingTable.Builder builder = new IndexRoutingTable.Builder(index);
// re-add all the shards
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
builder.addIndexShard(indexShardRoutingTable);
}
if (currentNumberOfReplicas < numberOfReplicas) {
// now, add "empty" ones
for (int i = 0; i < (numberOfReplicas - currentNumberOfReplicas); i++) {
builder.addReplica();
}
} else if (currentNumberOfReplicas > numberOfReplicas) {
int delta = currentNumberOfReplicas - numberOfReplicas;
if (delta <= 0) {
// unreachable here (delta is always positive in this branch); kept as a defensive guard
} else {
for (int i = 0; i < delta; i++) {
builder.removeReplica();
}
}
}
indicesRouting.put(index, builder.build());
}
return this;
}
public Builder addAsNew(IndexMetaData indexMetaData) {
if (indexMetaData.state() == IndexMetaData.State.OPEN) {
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.index())
.initializeAsNew(indexMetaData);
add(indexRoutingBuilder);
}
return this;
}
public Builder addAsRecovery(IndexMetaData indexMetaData) {
if (indexMetaData.state() == IndexMetaData.State.OPEN) {
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.index())
.initializeAsRecovery(indexMetaData);
add(indexRoutingBuilder);
}
return this;
}
public Builder addAsRestore(IndexMetaData indexMetaData, RestoreSource restoreSource) {
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.index())
.initializeAsRestore(indexMetaData, restoreSource);
add(indexRoutingBuilder);
return this;
}
public Builder addAsNewRestore(IndexMetaData indexMetaData, RestoreSource restoreSource) {
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.index())
.initializeAsNewRestore(indexMetaData, restoreSource);
add(indexRoutingBuilder);
return this;
}
public Builder add(IndexRoutingTable indexRoutingTable) {
indexRoutingTable.validate();
indicesRouting.put(indexRoutingTable.index(), indexRoutingTable);
return this;
}
public Builder add(IndexRoutingTable.Builder indexRoutingTableBuilder) {
add(indexRoutingTableBuilder.build());
return this;
}
public Builder remove(String index) {
indicesRouting.remove(index);
return this;
}
public Builder version(long version) {
this.version = version;
return this;
}
public RoutingTable build() {
// normalize the versions right before we build it...
for (IndexRoutingTable indexRoutingTable : indicesRouting.values()) {
indicesRouting.put(indexRoutingTable.index(), indexRoutingTable.normalizeVersions());
}
return new RoutingTable(version, indicesRouting);
}
public static RoutingTable readFrom(StreamInput in) throws IOException {
Builder builder = new Builder();
builder.version = in.readLong();
int size = in.readVInt();
for (int i = 0; i < size; i++) {
IndexRoutingTable index = IndexRoutingTable.Builder.readFrom(in);
builder.add(index);
}
return builder.build();
}
public static void writeTo(RoutingTable table, StreamOutput out) throws IOException {
out.writeLong(table.version);
out.writeVInt(table.indicesRouting.size());
for (IndexRoutingTable index : table.indicesRouting.values()) {
IndexRoutingTable.Builder.writeTo(index, out);
}
}
}
public String prettyPrint() {
StringBuilder sb = new StringBuilder("routing_table:\n");
for (IndexRoutingTable indexRoutingTable : indicesRouting.values()) {
sb.append(indexRoutingTable.prettyPrint()).append('\n');
}
return sb.toString();
}
}
| 1no label
|
src_main_java_org_elasticsearch_cluster_routing_RoutingTable.java
|
2,159 |
public class TxnSetOperation extends BasePutOperation implements MapTxnOperation {
private long version;
private transient boolean shouldBackup;
private String ownerUuid;
public TxnSetOperation() {
}
public TxnSetOperation(String name, Data dataKey, Data value, long version) {
super(name, dataKey, value);
this.version = version;
}
public TxnSetOperation(String name, Data dataKey, Data value, long version, long ttl) {
super(name, dataKey, value);
this.version = version;
this.ttl = ttl;
}
@Override
public boolean shouldWait() {
return !recordStore.canAcquireLock(dataKey, ownerUuid, getThreadId());
}
@Override
public void run() {
recordStore.unlock(dataKey, ownerUuid, getThreadId());
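// the set is applied only when the key has no record yet or the record's version still matches the one read in the transaction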
Record record = recordStore.getRecord(dataKey);
if (record == null || version == record.getVersion()) {
recordStore.set(dataKey, dataValue, ttl);
shouldBackup = true;
}
}
public long getVersion() {
return version;
}
public void setVersion(long version) {
this.version = version;
}
@Override
public void setOwnerUuid(String ownerUuid) {
this.ownerUuid = ownerUuid;
}
@Override
public Object getResponse() {
return Boolean.TRUE;
}
public boolean shouldNotify() {
return true;
}
public Operation getBackupOperation() {
RecordInfo replicationInfo = mapService.createRecordInfo(recordStore.getRecord(dataKey));
return new PutBackupOperation(name, dataKey, dataValue, replicationInfo, true);
}
public void onWaitExpire() {
final ResponseHandler responseHandler = getResponseHandler();
responseHandler.sendResponse(false);
}
@Override
public boolean shouldBackup() {
return shouldBackup;
}
public WaitNotifyKey getNotifiedKey() {
return getWaitKey();
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeLong(version);
out.writeUTF(ownerUuid);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
version = in.readLong();
ownerUuid = in.readUTF();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_tx_TxnSetOperation.java
|
4,678 |
final static class Match extends QueryCollector {
final PercolateContext context;
final HighlightPhase highlightPhase;
final List<BytesRef> matches = new ArrayList<BytesRef>();
final List<Map<String, HighlightField>> hls = new ArrayList<Map<String, HighlightField>>();
final boolean limit;
final int size;
long counter = 0;
Match(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase) {
super(logger, context);
this.limit = context.limit;
this.size = context.size;
this.context = context;
this.highlightPhase = highlightPhase;
}
@Override
public void collect(int doc) throws IOException {
final Query query = getQuery(doc);
if (query == null) {
// no query registered for this id; skip (possibly worth logging)
return;
}
// run the query
try {
collector.reset();
if (context.highlight() != null) {
context.parsedQuery(new ParsedQuery(query, ImmutableMap.<String, Filter>of()));
context.hitContext().cache().clear();
}
searcher.search(query, collector);
if (collector.exists()) {
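// record the match (and its highlight fields) only while under the size limit, but always count the total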
if (!limit || counter < size) {
matches.add(values.copyShared());
if (context.highlight() != null) {
highlightPhase.hitExecute(context, context.hitContext());
hls.add(context.hitContext().hit().getHighlightFields());
}
}
counter++;
if (facetAndAggregatorCollector != null) {
facetAndAggregatorCollector.collect(doc);
}
}
} catch (IOException e) {
logger.warn("[" + spare.bytes.utf8ToString() + "] failed to execute query", e);
}
}
long counter() {
return counter;
}
List<BytesRef> matches() {
return matches;
}
List<Map<String, HighlightField>> hls() {
return hls;
}
}
| 1no label
|
src_main_java_org_elasticsearch_percolator_QueryCollector.java
|
4,474 |
public class RecoverySettings extends AbstractComponent {
public static final String INDICES_RECOVERY_FILE_CHUNK_SIZE = "indices.recovery.file_chunk_size";
public static final String INDICES_RECOVERY_TRANSLOG_OPS = "indices.recovery.translog_ops";
public static final String INDICES_RECOVERY_TRANSLOG_SIZE = "indices.recovery.translog_size";
public static final String INDICES_RECOVERY_COMPRESS = "indices.recovery.compress";
public static final String INDICES_RECOVERY_CONCURRENT_STREAMS = "indices.recovery.concurrent_streams";
public static final String INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS = "indices.recovery.concurrent_small_file_streams";
public static final String INDICES_RECOVERY_MAX_BYTES_PER_SEC = "indices.recovery.max_bytes_per_sec";
public static final long SMALL_FILE_CUTOFF_BYTES = ByteSizeValue.parseBytesSizeValue("5mb").bytes();
/**
* Use {@link #INDICES_RECOVERY_MAX_BYTES_PER_SEC} instead
*/
@Deprecated
public static final String INDICES_RECOVERY_MAX_SIZE_PER_SEC = "indices.recovery.max_size_per_sec";
private volatile ByteSizeValue fileChunkSize;
private volatile boolean compress;
private volatile int translogOps;
private volatile ByteSizeValue translogSize;
private volatile int concurrentStreams;
private volatile int concurrentSmallFileStreams;
private final ThreadPoolExecutor concurrentStreamPool;
private final ThreadPoolExecutor concurrentSmallFileStreamPool;
private volatile ByteSizeValue maxBytesPerSec;
private volatile SimpleRateLimiter rateLimiter;
@Inject
public RecoverySettings(Settings settings, NodeSettingsService nodeSettingsService) {
super(settings);
this.fileChunkSize = componentSettings.getAsBytesSize("file_chunk_size", settings.getAsBytesSize("index.shard.recovery.file_chunk_size", new ByteSizeValue(512, ByteSizeUnit.KB)));
this.translogOps = componentSettings.getAsInt("translog_ops", settings.getAsInt("index.shard.recovery.translog_ops", 1000));
this.translogSize = componentSettings.getAsBytesSize("translog_size", settings.getAsBytesSize("index.shard.recovery.translog_size", new ByteSizeValue(512, ByteSizeUnit.KB)));
this.compress = componentSettings.getAsBoolean("compress", true);
this.concurrentStreams = componentSettings.getAsInt("concurrent_streams", settings.getAsInt("index.shard.recovery.concurrent_streams", 3));
this.concurrentStreamPool = EsExecutors.newScaling(0, concurrentStreams, 60, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[recovery_stream]"));
this.concurrentSmallFileStreams = componentSettings.getAsInt("concurrent_small_file_streams", settings.getAsInt("index.shard.recovery.concurrent_small_file_streams", 2));
this.concurrentSmallFileStreamPool = EsExecutors.newScaling(0, concurrentSmallFileStreams, 60, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[small_file_recovery_stream]"));
this.maxBytesPerSec = componentSettings.getAsBytesSize("max_bytes_per_sec", componentSettings.getAsBytesSize("max_size_per_sec", new ByteSizeValue(20, ByteSizeUnit.MB)));
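// a max_bytes_per_sec of zero or less disables recovery throttling entirely (no rate limiter)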
if (maxBytesPerSec.bytes() <= 0) {
rateLimiter = null;
} else {
rateLimiter = new SimpleRateLimiter(maxBytesPerSec.mbFrac());
}
logger.debug("using max_bytes_per_sec[{}], concurrent_streams [{}], file_chunk_size [{}], translog_size [{}], translog_ops [{}], and compress [{}]",
maxBytesPerSec, concurrentStreams, fileChunkSize, translogSize, translogOps, compress);
nodeSettingsService.addListener(new ApplySettings());
}
public void close() {
concurrentStreamPool.shutdown();
try {
concurrentStreamPool.awaitTermination(1, TimeUnit.SECONDS);
} catch (InterruptedException e) {
// that's fine...
}
concurrentStreamPool.shutdownNow();
}
public ByteSizeValue fileChunkSize() {
return fileChunkSize;
}
public boolean compress() {
return compress;
}
public int translogOps() {
return translogOps;
}
public ByteSizeValue translogSize() {
return translogSize;
}
public int concurrentStreams() {
return concurrentStreams;
}
public ThreadPoolExecutor concurrentStreamPool() {
return concurrentStreamPool;
}
public ThreadPoolExecutor concurrentSmallFileStreamPool() {
return concurrentSmallFileStreamPool;
}
public RateLimiter rateLimiter() {
return rateLimiter;
}
class ApplySettings implements NodeSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
ByteSizeValue maxSizePerSec = settings.getAsBytesSize(INDICES_RECOVERY_MAX_BYTES_PER_SEC, settings.getAsBytesSize(INDICES_RECOVERY_MAX_SIZE_PER_SEC, RecoverySettings.this.maxBytesPerSec));
if (!Objects.equal(maxSizePerSec, RecoverySettings.this.maxBytesPerSec)) {
logger.info("updating [{}] from [{}] to [{}]", INDICES_RECOVERY_MAX_BYTES_PER_SEC, RecoverySettings.this.maxBytesPerSec, maxSizePerSec);
RecoverySettings.this.maxBytesPerSec = maxSizePerSec;
if (maxSizePerSec.bytes() <= 0) {
rateLimiter = null;
} else if (rateLimiter != null) {
rateLimiter.setMbPerSec(maxSizePerSec.mbFrac());
} else {
rateLimiter = new SimpleRateLimiter(maxSizePerSec.mbFrac());
}
}
ByteSizeValue fileChunkSize = settings.getAsBytesSize(INDICES_RECOVERY_FILE_CHUNK_SIZE, RecoverySettings.this.fileChunkSize);
if (!fileChunkSize.equals(RecoverySettings.this.fileChunkSize)) {
logger.info("updating [indices.recovery.file_chunk_size] from [{}] to [{}]", RecoverySettings.this.fileChunkSize, fileChunkSize);
RecoverySettings.this.fileChunkSize = fileChunkSize;
}
int translogOps = settings.getAsInt(INDICES_RECOVERY_TRANSLOG_OPS, RecoverySettings.this.translogOps);
if (translogOps != RecoverySettings.this.translogOps) {
logger.info("updating [indices.recovery.translog_ops] from [{}] to [{}]", RecoverySettings.this.translogOps, translogOps);
RecoverySettings.this.translogOps = translogOps;
}
ByteSizeValue translogSize = settings.getAsBytesSize(INDICES_RECOVERY_TRANSLOG_SIZE, RecoverySettings.this.translogSize);
if (!translogSize.equals(RecoverySettings.this.translogSize)) {
logger.info("updating [indices.recovery.translog_size] from [{}] to [{}]", RecoverySettings.this.translogSize, translogSize);
RecoverySettings.this.translogSize = translogSize;
}
boolean compress = settings.getAsBoolean(INDICES_RECOVERY_COMPRESS, RecoverySettings.this.compress);
if (compress != RecoverySettings.this.compress) {
logger.info("updating [indices.recovery.compress] from [{}] to [{}]", RecoverySettings.this.compress, compress);
RecoverySettings.this.compress = compress;
}
int concurrentStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_STREAMS, RecoverySettings.this.concurrentStreams);
if (concurrentStreams != RecoverySettings.this.concurrentStreams) {
logger.info("updating [indices.recovery.concurrent_streams] from [{}] to [{}]", RecoverySettings.this.concurrentStreams, concurrentStreams);
RecoverySettings.this.concurrentStreams = concurrentStreams;
RecoverySettings.this.concurrentStreamPool.setMaximumPoolSize(concurrentStreams);
}
int concurrentSmallFileStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, RecoverySettings.this.concurrentSmallFileStreams);
if (concurrentSmallFileStreams != RecoverySettings.this.concurrentSmallFileStreams) {
logger.info("updating [indices.recovery.concurrent_small_file_streams] from [{}] to [{}]", RecoverySettings.this.concurrentSmallFileStreams, concurrentSmallFileStreams);
RecoverySettings.this.concurrentSmallFileStreams = concurrentSmallFileStreams;
RecoverySettings.this.concurrentSmallFileStreamPool.setMaximumPoolSize(concurrentSmallFileStreams);
}
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_indices_recovery_RecoverySettings.java
|
514 |
public class IndicesExistsAction extends IndicesAction<IndicesExistsRequest, IndicesExistsResponse, IndicesExistsRequestBuilder> {
public static final IndicesExistsAction INSTANCE = new IndicesExistsAction();
public static final String NAME = "indices/exists";
private IndicesExistsAction() {
super(NAME);
}
@Override
public IndicesExistsResponse newResponse() {
return new IndicesExistsResponse();
}
@Override
public IndicesExistsRequestBuilder newRequestBuilder(IndicesAdminClient client) {
return new IndicesExistsRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_exists_indices_IndicesExistsAction.java
|
2,193 |
public class MultiPhrasePrefixQuery extends Query {
private String field;
private ArrayList<Term[]> termArrays = new ArrayList<Term[]>();
private ArrayList<Integer> positions = new ArrayList<Integer>();
private int maxExpansions = Integer.MAX_VALUE;
private int slop = 0;
/**
* Sets the phrase slop for this query.
*
* @see org.apache.lucene.search.PhraseQuery#setSlop(int)
*/
public void setSlop(int s) {
slop = s;
}
public void setMaxExpansions(int maxExpansions) {
this.maxExpansions = maxExpansions;
}
/**
* Returns the phrase slop for this query.
*
* @see org.apache.lucene.search.PhraseQuery#getSlop()
*/
public int getSlop() {
return slop;
}
/**
* Add a single term at the next position in the phrase.
*
* @see org.apache.lucene.search.PhraseQuery#add(Term)
*/
public void add(Term term) {
add(new Term[]{term});
}
/**
* Add multiple terms at the next position in the phrase. Any of the terms
* may match.
*
* @see org.apache.lucene.search.PhraseQuery#add(Term)
*/
public void add(Term[] terms) {
int position = 0;
if (positions.size() > 0)
position = positions.get(positions.size() - 1).intValue() + 1;
add(terms, position);
}
/**
* Allows to specify the relative position of terms within the phrase.
*
* @param terms
* @param position
* @see org.apache.lucene.search.PhraseQuery#add(Term, int)
*/
public void add(Term[] terms, int position) {
if (termArrays.size() == 0)
field = terms[0].field();
for (int i = 0; i < terms.length; i++) {
if (terms[i].field() != field) {
throw new IllegalArgumentException(
"All phrase terms must be in the same field (" + field + "): "
+ terms[i]);
}
}
termArrays.add(terms);
positions.add(Integer.valueOf(position));
}
/**
* Returns a List of the terms in the multiphrase.
* Do not modify the List or its contents.
*/
public List<Term[]> getTermArrays() {
return Collections.unmodifiableList(termArrays);
}
/**
* Returns the relative positions of terms in this phrase.
*/
public int[] getPositions() {
int[] result = new int[positions.size()];
for (int i = 0; i < positions.size(); i++)
result[i] = positions.get(i).intValue();
return result;
}
@Override
public Query rewrite(IndexReader reader) throws IOException {
if (termArrays.isEmpty()) {
return new MatchNoDocsQuery();
}
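// copy every position except the last one verbatim; the trailing position is expanded to all indexed terms sharing the prefix (capped by maxExpansions)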
MultiPhraseQuery query = new MultiPhraseQuery();
query.setSlop(slop);
int sizeMinus1 = termArrays.size() - 1;
for (int i = 0; i < sizeMinus1; i++) {
query.add(termArrays.get(i), positions.get(i));
}
Term[] suffixTerms = termArrays.get(sizeMinus1);
int position = positions.get(sizeMinus1);
ObjectOpenHashSet<Term> terms = new ObjectOpenHashSet<Term>();
for (Term term : suffixTerms) {
getPrefixTerms(terms, term, reader);
if (terms.size() > maxExpansions) {
break;
}
}
if (terms.isEmpty()) {
return Queries.newMatchNoDocsQuery();
}
query.add(terms.toArray(Term.class), position);
return query.rewrite(reader);
}
private void getPrefixTerms(ObjectOpenHashSet<Term> terms, final Term prefix, final IndexReader reader) throws IOException {
// SlowCompositeReaderWrapper could be used... but this would merge all terms from each segment into one terms
// instance, which is very expensive. Therefore I think it is better to iterate over each leaf individually.
TermsEnum termsEnum = null;
List<AtomicReaderContext> leaves = reader.leaves();
for (AtomicReaderContext leaf : leaves) {
Terms _terms = leaf.reader().terms(field);
if (_terms == null) {
continue;
}
termsEnum = _terms.iterator(termsEnum);
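// seek to the first term >= the prefix in this segment, then walk forward while terms still start with the prefix bytes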
TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(prefix.bytes());
if (TermsEnum.SeekStatus.END == seekStatus) {
continue;
}
for (BytesRef term = termsEnum.term(); term != null; term = termsEnum.next()) {
if (!StringHelper.startsWith(term, prefix.bytes())) {
break;
}
terms.add(new Term(field, BytesRef.deepCopyOf(term)));
if (terms.size() >= maxExpansions) {
return;
}
}
}
}
@Override
public final String toString(String f) {
StringBuilder buffer = new StringBuilder();
if (field == null || !field.equals(f)) {
buffer.append(field);
buffer.append(":");
}
buffer.append("\"");
Iterator<Term[]> i = termArrays.iterator();
while (i.hasNext()) {
Term[] terms = i.next();
if (terms.length > 1) {
buffer.append("(");
for (int j = 0; j < terms.length; j++) {
buffer.append(terms[j].text());
if (j < terms.length - 1)
buffer.append(" ");
}
buffer.append(")");
} else {
buffer.append(terms[0].text());
}
if (i.hasNext())
buffer.append(" ");
}
buffer.append("\"");
if (slop != 0) {
buffer.append("~");
buffer.append(slop);
}
buffer.append(ToStringUtils.boost(getBoost()));
return buffer.toString();
}
/**
* Returns true if <code>o</code> is equal to this.
*/
@Override
public boolean equals(Object o) {
if (!(o instanceof MultiPhrasePrefixQuery)) return false;
MultiPhrasePrefixQuery other = (MultiPhrasePrefixQuery) o;
return this.getBoost() == other.getBoost()
&& this.slop == other.slop
&& termArraysEquals(this.termArrays, other.termArrays)
&& this.positions.equals(other.positions);
}
/**
* Returns a hash code value for this object.
*/
@Override
public int hashCode() {
return Float.floatToIntBits(getBoost())
^ slop
^ termArraysHashCode()
^ positions.hashCode()
^ 0x4AC65113;
}
// Breakout calculation of the termArrays hashcode
private int termArraysHashCode() {
int hashCode = 1;
for (final Term[] termArray : termArrays) {
hashCode = 31 * hashCode
+ (termArray == null ? 0 : Arrays.hashCode(termArray));
}
return hashCode;
}
// Breakout calculation of the termArrays equals
private boolean termArraysEquals(List<Term[]> termArrays1, List<Term[]> termArrays2) {
if (termArrays1.size() != termArrays2.size()) {
return false;
}
ListIterator<Term[]> iterator1 = termArrays1.listIterator();
ListIterator<Term[]> iterator2 = termArrays2.listIterator();
while (iterator1.hasNext()) {
Term[] termArray1 = iterator1.next();
Term[] termArray2 = iterator2.next();
if (!(termArray1 == null ? termArray2 == null : Arrays.equals(termArray1,
termArray2))) {
return false;
}
}
return true;
}
public String getField() {
return field;
}
}
| 1no label
|
src_main_java_org_elasticsearch_common_lucene_search_MultiPhrasePrefixQuery.java
|
196 |
public class UniqueTokenFilter extends TokenFilter {
private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class);
private final PositionIncrementAttribute posIncAttribute = addAttribute(PositionIncrementAttribute.class);
// use a fixed version, as we don't care about case sensitivity.
private final CharArraySet previous = new CharArraySet(Version.LUCENE_31, 8, false);
private final boolean onlyOnSamePosition;
public UniqueTokenFilter(TokenStream in) {
this(in, false);
}
public UniqueTokenFilter(TokenStream in, boolean onlyOnSamePosition) {
super(in);
this.onlyOnSamePosition = onlyOnSamePosition;
}
@Override
public final boolean incrementToken() throws IOException {
while (input.incrementToken()) {
final char term[] = termAttribute.buffer();
final int length = termAttribute.length();
boolean duplicate;
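// when onlyOnSamePosition is set, the seen-set resets each time the position advances, so only tokens stacked at the same position count as duplicates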
if (onlyOnSamePosition) {
final int posIncrement = posIncAttribute.getPositionIncrement();
if (posIncrement > 0) {
previous.clear();
}
duplicate = (posIncrement == 0 && previous.contains(term, 0, length));
} else {
duplicate = previous.contains(term, 0, length);
}
// clone the term, and add to the set of seen terms.
char saved[] = new char[length];
System.arraycopy(term, 0, saved, 0, length);
previous.add(saved);
if (!duplicate) {
return true;
}
}
return false;
}
@Override
public final void reset() throws IOException {
super.reset();
previous.clear();
}
}
| 0true
|
src_main_java_org_apache_lucene_analysis_miscellaneous_UniqueTokenFilter.java
|
296 |
public class OTraverseContext extends OBasicCommandContext {
private Set<ORID> history = new HashSet<ORID>();
private List<OTraverseAbstractProcess<?>> stack = new ArrayList<OTraverseAbstractProcess<?>>();
private int depth = -1;
public void push(final OTraverseAbstractProcess<?> iProcess) {
stack.add(iProcess);
}
public Map<String, Object> getVariables() {
final HashMap<String, Object> map = new HashMap<String, Object>();
map.put("depth", depth);
map.put("path", getPath());
map.put("stack", stack);
// DELEGATE
map.putAll(super.getVariables());
return map;
}
public Object getVariable(final String iName) {
final String name = iName.trim().toUpperCase();
if ("DEPTH".startsWith(name))
return depth;
else if (name.startsWith("PATH"))
return ODocumentHelper.getFieldValue(getPath(), iName.substring("PATH".length()));
else if (name.startsWith("STACK"))
return ODocumentHelper.getFieldValue(stack, iName.substring("STACK".length()));
else if (name.startsWith("HISTORY"))
return ODocumentHelper.getFieldValue(history, iName.substring("HISTORY".length()));
else
// DELEGATE
return super.getVariable(iName);
}
public OTraverseAbstractProcess<?> pop() {
if (stack.isEmpty())
throw new IllegalStateException("Traverse stack is empty");
return stack.remove(stack.size() - 1);
}
public OTraverseAbstractProcess<?> peek() {
return stack.isEmpty() ? null : stack.get(stack.size() - 1);
}
public OTraverseAbstractProcess<?> peek(final int iFromLast) {
return stack.size() + iFromLast < 0 ? null : stack.get(stack.size() + iFromLast);
}
public void reset() {
stack.clear();
}
public boolean isAlreadyTraversed(final OIdentifiable identity) {
return history.contains(identity.getIdentity());
}
public void addTraversed(final OIdentifiable identity) {
history.add(identity.getIdentity());
}
public int incrementDepth() {
return ++depth;
}
public int decrementDepth() {
return --depth;
}
public String getPath() {
final StringBuilder buffer = new StringBuilder();
for (OTraverseAbstractProcess<?> process : stack) {
final String status = process.getStatus();
if (status != null) {
if (buffer.length() > 0 && !status.startsWith("["))
buffer.append('.');
buffer.append(status);
}
}
return buffer.toString();
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_command_traverse_OTraverseContext.java
|
2,585 |
public class SocketAcceptor implements Runnable {
private final ServerSocketChannel serverSocketChannel;
private final TcpIpConnectionManager connectionManager;
private final ILogger logger;
public SocketAcceptor(ServerSocketChannel serverSocketChannel, TcpIpConnectionManager connectionManager) {
this.serverSocketChannel = serverSocketChannel;
this.connectionManager = connectionManager;
this.logger = connectionManager.ioService.getLogger(this.getClass().getName());
}
public void run() {
Selector selector = null;
try {
if (logger.isFinestEnabled()) {
log(Level.FINEST, "Starting SocketAcceptor on " + serverSocketChannel);
}
selector = Selector.open();
serverSocketChannel.configureBlocking(false);
serverSocketChannel.register(selector, SelectionKey.OP_ACCEPT);
while (connectionManager.isLive()) {
// block until new connection or interruption.
final int keyCount = selector.select();
if (Thread.currentThread().isInterrupted()) {
break;
}
if (keyCount == 0) {
continue;
}
final Set<SelectionKey> setSelectedKeys = selector.selectedKeys();
final Iterator<SelectionKey> it = setSelectedKeys.iterator();
while (it.hasNext()) {
final SelectionKey sk = it.next();
it.remove();
// of course it is acceptable!
if (sk.isValid() && sk.isAcceptable()) {
acceptSocket();
}
}
}
} catch (OutOfMemoryError e) {
OutOfMemoryErrorDispatcher.onOutOfMemory(e);
} catch (IOException e) {
log(Level.SEVERE, e.getClass().getName() + ": " + e.getMessage(), e);
} finally {
closeSelector(selector);
}
}
private void closeSelector(Selector selector) {
if (selector != null) {
try {
if (logger.isFinestEnabled()) {
logger.finest("Closing selector " + Thread.currentThread().getName());
}
selector.close();
} catch (final Exception ignored) {
}
}
}
private void acceptSocket() {
if (!connectionManager.isLive()) {
return;
}
SocketChannelWrapper socketChannelWrapper = null;
try {
final SocketChannel socketChannel = serverSocketChannel.accept();
if (socketChannel != null) {
socketChannelWrapper = connectionManager.wrapSocketChannel(socketChannel, false);
}
} catch (Exception e) {
if (e instanceof ClosedChannelException && !connectionManager.isLive()) {
// ClosedChannelException
// or AsynchronousCloseException
// or ClosedByInterruptException
logger.finest("Terminating socket acceptor thread...", e);
} else {
String error = "Unexpected error while accepting connection! "
+ e.getClass().getName() + ": " + e.getMessage();
log(Level.WARNING, error);
try {
serverSocketChannel.close();
} catch (Exception ignore) {
}
connectionManager.ioService.onFatalError(e);
}
}
if (socketChannelWrapper != null) {
final SocketChannelWrapper socketChannel = socketChannelWrapper;
log(Level.INFO, "Accepting socket connection from " + socketChannel.socket().getRemoteSocketAddress());
final MemberSocketInterceptor memberSocketInterceptor = connectionManager.getMemberSocketInterceptor();
if (memberSocketInterceptor == null) {
configureAndAssignSocket(socketChannel, null);
} else {
connectionManager.ioService.executeAsync(new Runnable() {
public void run() {
configureAndAssignSocket(socketChannel, memberSocketInterceptor);
}
});
}
}
}
private void configureAndAssignSocket(SocketChannelWrapper socketChannel, MemberSocketInterceptor memberSocketInterceptor) {
try {
connectionManager.initSocket(socketChannel.socket());
if (memberSocketInterceptor != null) {
log(Level.FINEST, "Calling member socket interceptor: " + memberSocketInterceptor + " for " + socketChannel);
memberSocketInterceptor.onAccept(socketChannel.socket());
}
socketChannel.configureBlocking(false);
connectionManager.assignSocketChannel(socketChannel);
} catch (Exception e) {
log(Level.WARNING, e.getClass().getName() + ": " + e.getMessage(), e);
IOUtil.closeResource(socketChannel);
}
}
private void log(Level level, String message) {
log(level, message, null);
}
private void log(Level level, String message, Exception e) {
logger.log(level, message, e);
connectionManager.ioService.getSystemLogService().logConnection(message);
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_nio_SocketAcceptor.java
|
580 |
class ShardOptimizeRequest extends BroadcastShardOperationRequest {
private boolean waitForMerge = OptimizeRequest.Defaults.WAIT_FOR_MERGE;
private int maxNumSegments = OptimizeRequest.Defaults.MAX_NUM_SEGMENTS;
private boolean onlyExpungeDeletes = OptimizeRequest.Defaults.ONLY_EXPUNGE_DELETES;
private boolean flush = OptimizeRequest.Defaults.FLUSH;
ShardOptimizeRequest() {
}
public ShardOptimizeRequest(String index, int shardId, OptimizeRequest request) {
super(index, shardId, request);
waitForMerge = request.waitForMerge();
maxNumSegments = request.maxNumSegments();
onlyExpungeDeletes = request.onlyExpungeDeletes();
flush = request.flush();
}
boolean waitForMerge() {
return waitForMerge;
}
int maxNumSegments() {
return maxNumSegments;
}
public boolean onlyExpungeDeletes() {
return onlyExpungeDeletes;
}
public boolean flush() {
return flush;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
waitForMerge = in.readBoolean();
maxNumSegments = in.readInt();
onlyExpungeDeletes = in.readBoolean();
flush = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(waitForMerge);
out.writeInt(maxNumSegments);
out.writeBoolean(onlyExpungeDeletes);
out.writeBoolean(flush);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_optimize_ShardOptimizeRequest.java
|
289 |
public class OJSScriptFormatter implements OScriptFormatter {
public String getFunctionDefinition(final OFunction f) {
final StringBuilder fCode = new StringBuilder();
fCode.append("function ");
fCode.append(f.getName());
fCode.append('(');
int i = 0;
if (f.getParameters() != null)
for (String p : f.getParameters()) {
if (i++ > 0)
fCode.append(',');
fCode.append(p);
}
fCode.append(") {\n");
fCode.append(f.getCode());
fCode.append("\n}\n");
return fCode.toString();
}
@Override
public String getFunctionInvoke(final OFunction iFunction, final Object[] iArgs) {
final StringBuilder code = new StringBuilder();
code.append(iFunction.getName());
code.append('(');
if (iArgs != null) {
int i = 0;
for (Object a : iArgs) {
if (i++ > 0)
code.append(',');
code.append(a);
}
}
code.append(");");
return code.toString();
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_command_script_formatter_OJSScriptFormatter.java
|
5,352 |
public class InternalAvg extends MetricsAggregation.SingleValue implements Avg {
public final static Type TYPE = new Type("avg");
public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
@Override
public InternalAvg readResult(StreamInput in) throws IOException {
InternalAvg result = new InternalAvg();
result.readFrom(in);
return result;
}
};
public static void registerStreams() {
AggregationStreams.registerStream(STREAM, TYPE.stream());
}
private double sum;
private long count;
InternalAvg() {} // for serialization
public InternalAvg(String name, double sum, long count) {
super(name);
this.sum = sum;
this.count = count;
}
@Override
public double value() {
return getValue();
}
public double getValue() {
return sum / count;
}
@Override
public Type type() {
return TYPE;
}
@Override
public InternalAvg reduce(ReduceContext reduceContext) {
List<InternalAggregation> aggregations = reduceContext.aggregations();
if (aggregations.size() == 1) {
return (InternalAvg) aggregations.get(0);
}
InternalAvg reduced = null;
for (InternalAggregation aggregation : aggregations) {
if (reduced == null) {
reduced = (InternalAvg) aggregation;
} else {
reduced.count += ((InternalAvg) aggregation).count;
reduced.sum += ((InternalAvg) aggregation).sum;
}
}
return reduced;
}
@Override
public void readFrom(StreamInput in) throws IOException {
name = in.readString();
valueFormatter = ValueFormatterStreams.readOptional(in);
sum = in.readDouble();
count = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
ValueFormatterStreams.writeOptional(valueFormatter, out);
out.writeDouble(sum);
out.writeVLong(count);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(name);
builder.field(CommonFields.VALUE, count != 0 ? getValue() : null);
if (count != 0 && valueFormatter != null) {
builder.field(CommonFields.VALUE_AS_STRING, valueFormatter.format(getValue()));
}
builder.endObject();
return builder;
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_metrics_avg_InternalAvg.java
|
546 |
refreshAction.execute(Requests.refreshRequest(request.indices()), new ActionListener<RefreshResponse>() {
@Override
public void onResponse(RefreshResponse refreshResponse) {
removeMapping();
}
@Override
public void onFailure(Throwable e) {
removeMapping();
}
protected void removeMapping() {
DeleteMappingClusterStateUpdateRequest clusterStateUpdateRequest = new DeleteMappingClusterStateUpdateRequest()
.indices(request.indices()).types(request.types())
.ackTimeout(request.timeout())
.masterNodeTimeout(request.masterNodeTimeout());
metaDataMappingService.removeMapping(clusterStateUpdateRequest, new ClusterStateUpdateListener() {
@Override
public void onResponse(ClusterStateUpdateResponse response) {
listener.onResponse(new DeleteMappingResponse(response.isAcknowledged()));
}
@Override
public void onFailure(Throwable t) {
listener.onFailure(t);
}
});
}
});
| 1no label
|
src_main_java_org_elasticsearch_action_admin_indices_mapping_delete_TransportDeleteMappingAction.java
|
743 |
public class ExplainRequest extends SingleShardOperationRequest<ExplainRequest> {
private static final XContentType contentType = Requests.CONTENT_TYPE;
private String type = "_all";
private String id;
private String routing;
private String preference;
private BytesReference source;
private boolean sourceUnsafe;
private String[] fields;
private FetchSourceContext fetchSourceContext;
private String[] filteringAlias = Strings.EMPTY_ARRAY;
long nowInMillis;
ExplainRequest() {
}
public ExplainRequest(String index, String type, String id) {
this.index = index;
this.type = type;
this.id = id;
}
public String type() {
return type;
}
public ExplainRequest type(String type) {
this.type = type;
return this;
}
public String id() {
return id;
}
public ExplainRequest id(String id) {
this.id = id;
return this;
}
public String routing() {
return routing;
}
public ExplainRequest routing(String routing) {
this.routing = routing;
return this;
}
/**
* Simply sets the routing, since the parent is only used to get to the right shard.
*/
public ExplainRequest parent(String parent) {
this.routing = parent;
return this;
}
public String preference() {
return preference;
}
public ExplainRequest preference(String preference) {
this.preference = preference;
return this;
}
public BytesReference source() {
return source;
}
public boolean sourceUnsafe() {
return sourceUnsafe;
}
public ExplainRequest source(QuerySourceBuilder sourceBuilder) {
this.source = sourceBuilder.buildAsBytes(contentType);
this.sourceUnsafe = false;
return this;
}
public ExplainRequest source(BytesReference source, boolean unsafe) {
this.source = source;
this.sourceUnsafe = unsafe;
return this;
}
/**
* Allows setting the {@link FetchSourceContext} for this request, controlling if and how _source should be returned.
*/
public ExplainRequest fetchSourceContext(FetchSourceContext context) {
this.fetchSourceContext = context;
return this;
}
public FetchSourceContext fetchSourceContext() {
return fetchSourceContext;
}
public String[] fields() {
return fields;
}
public ExplainRequest fields(String[] fields) {
this.fields = fields;
return this;
}
public String[] filteringAlias() {
return filteringAlias;
}
public ExplainRequest filteringAlias(String[] filteringAlias) {
if (filteringAlias != null) {
this.filteringAlias = filteringAlias;
}
return this;
}
@Override
protected void beforeLocalFork() {
if (sourceUnsafe) {
source = source.copyBytesArray();
sourceUnsafe = false;
}
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = super.validate();
if (type == null) {
validationException = ValidateActions.addValidationError("type is missing", validationException);
}
if (id == null) {
validationException = ValidateActions.addValidationError("id is missing", validationException);
}
if (source == null) {
validationException = ValidateActions.addValidationError("source is missing", validationException);
}
return validationException;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
type = in.readString();
id = in.readString();
routing = in.readOptionalString();
preference = in.readOptionalString();
source = in.readBytesReference();
sourceUnsafe = false;
filteringAlias = in.readStringArray();
if (in.readBoolean()) {
fields = in.readStringArray();
}
fetchSourceContext = FetchSourceContext.optionalReadFromStream(in);
nowInMillis = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(type);
out.writeString(id);
out.writeOptionalString(routing);
out.writeOptionalString(preference);
out.writeBytesReference(source);
out.writeStringArray(filteringAlias);
if (fields != null) {
out.writeBoolean(true);
out.writeStringArray(fields);
} else {
out.writeBoolean(false);
}
FetchSourceContext.optionalWriteToStream(fetchSourceContext, out);
out.writeVLong(nowInMillis);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_explain_ExplainRequest.java
|
339 |
static class EntListener implements EntryListener<Integer, Deal>, Serializable {
private final CountDownLatch _gateAdd;
private final CountDownLatch _gateRemove;
private final CountDownLatch _gateEvict;
private final CountDownLatch _gateUpdate;
EntListener(CountDownLatch gateAdd, CountDownLatch gateRemove, CountDownLatch gateEvict, CountDownLatch gateUpdate) {
_gateAdd = gateAdd;
_gateRemove = gateRemove;
_gateEvict = gateEvict;
_gateUpdate = gateUpdate;
}
@Override
public void entryAdded(EntryEvent<Integer, Deal> arg0) {
_gateAdd.countDown();
}
@Override
public void entryEvicted(EntryEvent<Integer, Deal> arg0) {
_gateEvict.countDown();
}
@Override
public void entryRemoved(EntryEvent<Integer, Deal> arg0) {
_gateRemove.countDown();
}
@Override
public void entryUpdated(EntryEvent<Integer, Deal> arg0) {
_gateUpdate.countDown();
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTest.java
|
100 |
public interface PageField extends Serializable {
public Long getId();
public void setId(Long id);
public String getFieldKey();
public void setFieldKey(String fieldKey);
public Page getPage();
public void setPage(Page page);
public String getValue();
public void setValue(String value);
public PageField cloneEntity();
public AdminAuditable getAuditable();
public void setAuditable(AdminAuditable auditable);
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_domain_PageField.java
|
336 |
new Thread() {
public void run() {
try {
if (!tempMap.tryLock("key1", 2, TimeUnit.SECONDS)) {
latch.countDown();
}
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTest.java
|
575 |
public class OptimizeAction extends IndicesAction<OptimizeRequest, OptimizeResponse, OptimizeRequestBuilder> {
public static final OptimizeAction INSTANCE = new OptimizeAction();
public static final String NAME = "indices/optimize";
private OptimizeAction() {
super(NAME);
}
@Override
public OptimizeResponse newResponse() {
return new OptimizeResponse();
}
@Override
public OptimizeRequestBuilder newRequestBuilder(IndicesAdminClient client) {
return new OptimizeRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_optimize_OptimizeAction.java
|
38 |
@Component("blInventoryTypeOptionsExtensionListener")
public class InventoryTypeEnumOptionsExtensionListener extends AbstractRuleBuilderEnumOptionsExtensionListener {
@Override
protected Map<String, Class<? extends BroadleafEnumerationType>> getValuesToGenerate() {
Map<String, Class<? extends BroadleafEnumerationType>> map =
new HashMap<String, Class<? extends BroadleafEnumerationType>>();
map.put("blcOptions_InventoryType", InventoryType.class);
return map;
}
}
| 0true
|
admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_web_rulebuilder_service_options_InventoryTypeEnumOptionsExtensionListener.java
|
265 |
public class AppendCallable implements Callable<String>, DataSerializable{
public static final String APPENDAGE = ":CallableResult";
private String msg;
public AppendCallable() {
}
public AppendCallable(String msg) {
this.msg = msg;
}
public String call() throws Exception {
return msg + APPENDAGE;
}
public void writeData(ObjectDataOutput out) throws IOException {
out.writeUTF(msg);
}
public void readData(ObjectDataInput in) throws IOException {
msg = in.readUTF();
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_executor_tasks_AppendCallable.java
|
160 |
public class OStringSerializer implements OBinarySerializer<String> {
public static final OStringSerializer INSTANCE = new OStringSerializer();
public static final byte ID = 13;
public int getObjectSize(final String object, Object... hints) {
return object.length() * 2 + OIntegerSerializer.INT_SIZE;
}
public void serialize(final String object, final byte[] stream, int startPosition, Object... hints) {
int length = object.length();
OIntegerSerializer.INSTANCE.serialize(length, stream, startPosition);
startPosition += OIntegerSerializer.INT_SIZE;
char[] stringContent = new char[length];
object.getChars(0, length, stringContent, 0);
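// encode each char as two bytes, low byte first (little-endian)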
for (char character : stringContent) {
stream[startPosition] = (byte) character;
startPosition++;
stream[startPosition] = (byte) (character >>> 8);
startPosition++;
}
}
public String deserialize(final byte[] stream, int startPosition) {
int len = OIntegerSerializer.INSTANCE.deserialize(stream, startPosition);
char[] buffer = new char[len];
startPosition += OIntegerSerializer.INT_SIZE;
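// decode each little-endian byte pair back into a char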
for (int i = 0; i < len; i++) {
buffer[i] = (char) ((0xFF & stream[startPosition]) | ((0xFF & stream[startPosition + 1]) << 8));
startPosition += 2;
}
return new String(buffer);
}
public int getObjectSize(byte[] stream, int startPosition) {
return OIntegerSerializer.INSTANCE.deserialize(stream, startPosition) * 2 + OIntegerSerializer.INT_SIZE;
}
public byte getId() {
return ID;
}
public int getObjectSizeNative(byte[] stream, int startPosition) {
return OIntegerSerializer.INSTANCE.deserializeNative(stream, startPosition) * 2 + OIntegerSerializer.INT_SIZE;
}
public void serializeNative(String object, byte[] stream, int startPosition, Object... hints) {
int length = object.length();
OIntegerSerializer.INSTANCE.serializeNative(length, stream, startPosition);
startPosition += OIntegerSerializer.INT_SIZE;
char[] stringContent = new char[length];
object.getChars(0, length, stringContent, 0);
for (char character : stringContent) {
stream[startPosition] = (byte) character;
startPosition++;
stream[startPosition] = (byte) (character >>> 8);
startPosition++;
}
}
public String deserializeNative(byte[] stream, int startPosition) {
int len = OIntegerSerializer.INSTANCE.deserializeNative(stream, startPosition);
char[] buffer = new char[len];
startPosition += OIntegerSerializer.INT_SIZE;
for (int i = 0; i < len; i++) {
buffer[i] = (char) ((0xFF & stream[startPosition]) | ((0xFF & stream[startPosition + 1]) << 8));
startPosition += 2;
}
return new String(buffer);
}
@Override
public void serializeInDirectMemory(String object, ODirectMemoryPointer pointer, long offset, Object... hints) {
int length = object.length();
pointer.setInt(offset, length);
offset += OIntegerSerializer.INT_SIZE;
byte[] binaryData = new byte[length * 2];
char[] stringContent = new char[length];
object.getChars(0, length, stringContent, 0);
int counter = 0;
for (char character : stringContent) {
binaryData[counter] = (byte) character;
counter++;
binaryData[counter] = (byte) (character >>> 8);
counter++;
}
pointer.set(offset, binaryData, 0, binaryData.length);
}
@Override
public String deserializeFromDirectMemory(ODirectMemoryPointer pointer, long offset) {
int len = pointer.getInt(offset);
final char[] buffer = new char[len];
offset += OIntegerSerializer.INT_SIZE;
byte[] binaryData = pointer.get(offset, buffer.length * 2);
for (int i = 0; i < len; i++)
buffer[i] = (char) ((0xFF & binaryData[i << 1]) | ((0xFF & binaryData[(i << 1) + 1]) << 8));
return new String(buffer);
}
@Override
public int getObjectSizeInDirectMemory(ODirectMemoryPointer pointer, long offset) {
return pointer.getInt(offset) * 2 + OIntegerSerializer.INT_SIZE;
}
public boolean isFixedLength() {
return false;
}
public int getFixedLength() {
return 0;
}
@Override
public String preprocess(String value, Object... hints) {
return value;
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_serialization_types_OStringSerializer.java
|
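A round-trip sketch of the serializer above: a 4-byte length prefix followed by each char as two bytes, low byte first, written into a plain byte array:

import com.orientechnologies.common.serialization.types.OStringSerializer;

public class OStringSerializerRoundTrip {
    public static void main(String[] args) {
        final OStringSerializer serializer = OStringSerializer.INSTANCE;
        final String value = "OrientDB";
        // getObjectSize = INT_SIZE length prefix + 2 bytes per character.
        final byte[] stream = new byte[serializer.getObjectSize(value)];
        serializer.serialize(value, stream, 0);
        final String restored = serializer.deserialize(stream, 0);
        System.out.println(value.equals(restored)); // true
    }
}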
91 |
mapClient.addEntryListener(new EntryAdapter<Integer, GenericEvent>() {
public void entryAdded(EntryEvent<Integer, GenericEvent> event) {
adds++;
}
public void entryEvicted(EntryEvent<Integer, GenericEvent> event) {
if (event.getValue() == null) evictionsNull++;
}
}, true);
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_ClientEntryListenerDisconnectTest.java
|
1,522 |
public class ValueGroupCountMapReduce {
public static final String PROPERTY = Tokens.makeNamespace(ValueGroupCountMapReduce.class) + ".property";
public static final String CLASS = Tokens.makeNamespace(ValueGroupCountMapReduce.class) + ".class";
public static final String TYPE = Tokens.makeNamespace(ValueGroupCountMapReduce.class) + ".type";
public enum Counters {
PROPERTIES_COUNTED
}
public static Configuration createConfiguration(final Class<? extends Element> klass, final String key, final Class<? extends Writable> type) {
final Configuration configuration = new EmptyConfiguration();
configuration.setClass(CLASS, klass, Element.class);
configuration.set(PROPERTY, key);
configuration.setClass(TYPE, type, Writable.class);
return configuration;
}
public static class Map extends Mapper<NullWritable, FaunusVertex, WritableComparable, LongWritable> {
private String property;
private WritableHandler handler;
private boolean isVertex;
// making use of in-map aggregation/combiner
private CounterMap<Object> map;
private int mapSpillOver;
private SafeMapperOutputs outputs;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
this.map = new CounterMap<Object>();
this.mapSpillOver = context.getConfiguration().getInt(Tokens.TITAN_HADOOP_PIPELINE_MAP_SPILL_OVER, Tokens.DEFAULT_MAP_SPILL_OVER);
this.property = context.getConfiguration().get(PROPERTY);
this.isVertex = context.getConfiguration().getClass(CLASS, Element.class, Element.class).equals(Vertex.class);
this.handler = new WritableHandler(context.getConfiguration().getClass(TYPE, Text.class, WritableComparable.class));
this.outputs = new SafeMapperOutputs(context);
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, WritableComparable, LongWritable>.Context context) throws IOException, InterruptedException {
if (this.isVertex) {
if (value.hasPaths()) {
this.map.incr(ElementPicker.getProperty(value, this.property), value.pathCount());
DEFAULT_COMPAT.incrementContextCounter(context, Counters.PROPERTIES_COUNTED, 1L);
}
} else {
for (final Edge e : value.getEdges(Direction.OUT)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
this.map.incr(ElementPicker.getProperty(edge, this.property), edge.pathCount());
DEFAULT_COMPAT.incrementContextCounter(context, Counters.PROPERTIES_COUNTED, 1L);
}
}
}
            // protect against memory explosion
if (this.map.size() > this.mapSpillOver) {
this.dischargeMap(context);
}
this.outputs.write(Tokens.GRAPH, NullWritable.get(), value);
}
private final LongWritable longWritable = new LongWritable();
public void dischargeMap(final Mapper<NullWritable, FaunusVertex, WritableComparable, LongWritable>.Context context) throws IOException, InterruptedException {
for (final java.util.Map.Entry<Object, Long> entry : this.map.entrySet()) {
this.longWritable.set(entry.getValue());
context.write(this.handler.set(entry.getKey()), this.longWritable);
}
this.map.clear();
}
@Override
public void cleanup(final Mapper<NullWritable, FaunusVertex, WritableComparable, LongWritable>.Context context) throws IOException, InterruptedException {
this.dischargeMap(context);
this.outputs.close();
}
}
public static class Combiner extends Reducer<WritableComparable, LongWritable, WritableComparable, LongWritable> {
private final LongWritable longWritable = new LongWritable();
@Override
public void reduce(final WritableComparable key, final Iterable<LongWritable> values, final Reducer<WritableComparable, LongWritable, WritableComparable, LongWritable>.Context context) throws IOException, InterruptedException {
long totalCount = 0;
for (final LongWritable token : values) {
totalCount = totalCount + token.get();
}
this.longWritable.set(totalCount);
context.write(key, this.longWritable);
}
}
public static class Reduce extends Reducer<WritableComparable, LongWritable, WritableComparable, LongWritable> {
private SafeReducerOutputs outputs;
@Override
public void setup(final Reducer.Context context) throws IOException, InterruptedException {
this.outputs = new SafeReducerOutputs(context);
}
private final LongWritable longWritable = new LongWritable();
@Override
public void reduce(final WritableComparable key, final Iterable<LongWritable> values, final Reducer<WritableComparable, LongWritable, WritableComparable, LongWritable>.Context context) throws IOException, InterruptedException {
long totalCount = 0;
for (final LongWritable token : values) {
totalCount = totalCount + token.get();
}
this.longWritable.set(totalCount);
this.outputs.write(Tokens.SIDEEFFECT, key, this.longWritable);
}
@Override
public void cleanup(final Reducer<WritableComparable, LongWritable, WritableComparable, LongWritable>.Context context) throws IOException, InterruptedException {
this.outputs.close();
}
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_sideeffect_ValueGroupCountMapReduce.java
|
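A sketch of wiring this job up by hand, assuming Hadoop 2's Job.getInstance; in practice the Titan-Hadoop pipeline assembles and chains these jobs itself (including the SafeMapperOutputs side-effect wiring), so the job name and key class below are illustrative assumptions:

import com.tinkerpop.blueprints.Vertex;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;

public class ValueGroupCountSetup {
    public static Job buildJob() throws Exception {
        // Count path-weighted occurrences of the "name" property across vertices.
        Configuration conf = ValueGroupCountMapReduce.createConfiguration(Vertex.class, "name", Text.class);
        Job job = Job.getInstance(conf, "value-group-count");
        job.setMapperClass(ValueGroupCountMapReduce.Map.class);
        job.setCombinerClass(ValueGroupCountMapReduce.Combiner.class);
        job.setReducerClass(ValueGroupCountMapReduce.Reduce.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        return job;
    }
}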
15 |
@Scope("prototype")
@Component("blForeignSkuFieldPersistenceProvider")
public class ForeignSkuFieldPersistenceProvider extends FieldPersistenceProviderAdapter {
@Override
public FieldProviderResponse extractValue(ExtractValueRequest extractValueRequest, Property property) {
if (!canHandleExtraction(extractValueRequest, property)) {
return FieldProviderResponse.NOT_HANDLED;
}
try {
String val = extractValueRequest.getFieldManager().getFieldValue(extractValueRequest.getRequestedValue(),
extractValueRequest.getMetadata().getForeignKeyProperty()).toString();
String displayVal = null;
if (!StringUtils.isEmpty(extractValueRequest.getMetadata().getForeignKeyDisplayValueProperty())) {
String nameProperty = extractValueRequest.getMetadata().getForeignKeyDisplayValueProperty();
Sku sku = (Sku) extractValueRequest.getRequestedValue();
displayVal = extractValueRequest.getRecordHelper().getStringValueFromGetter(sku, nameProperty);
}
extractValueRequest.setDisplayVal(displayVal);
property.setValue(val);
property.setDisplayValue(displayVal);
} catch (Exception e) {
return FieldProviderResponse.NOT_HANDLED;
}
return FieldProviderResponse.HANDLED_BREAK;
}
protected boolean canHandleExtraction(ExtractValueRequest extractValueRequest, Property property) {
String fkc = extractValueRequest.getMetadata().getForeignKeyClass();
String rvc = null;
if (extractValueRequest.getRequestedValue() != null) {
rvc = extractValueRequest.getRequestedValue().getClass().getName();
}
return (SkuImpl.class.getName().equals(fkc) || Sku.class.getName().equals(fkc)) &&
(SkuImpl.class.getName().equals(rvc) || Sku.class.getName().equals(rvc)) &&
extractValueRequest.getMetadata().getFieldType().equals(SupportedFieldType.ADDITIONAL_FOREIGN_KEY);
}
}
| 0true
|
admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_server_service_persistence_module_provider_ForeignSkuFieldPersistenceProvider.java
|
1,579 |
public class BatchPersistencePackage implements Serializable {
protected PersistencePackage[] persistencePackages;
public PersistencePackage[] getPersistencePackages() {
return persistencePackages;
}
public void setPersistencePackages(PersistencePackage[] persistencePackages) {
this.persistencePackages = persistencePackages;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof BatchPersistencePackage)) return false;
BatchPersistencePackage that = (BatchPersistencePackage) o;
if (!Arrays.equals(persistencePackages, that.persistencePackages)) return false;
return true;
}
@Override
public int hashCode() {
return persistencePackages != null ? Arrays.hashCode(persistencePackages) : 0;
}
}
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_dto_BatchPersistencePackage.java
|
401 |
public enum ClientNearCacheType {
/**
* java.util.concurrent.ConcurrentMap implementation
*/
Map,
/**
* com.hazelcast.core.ReplicatedMap implementation
*/
ReplicatedMap
}
| 0true
|
hazelcast-client_src_main_java_com_hazelcast_client_nearcache_ClientNearCacheType.java
|
29 |
static final class ThenAcceptBoth<T,U> extends Completion {
final CompletableFuture<? extends T> src;
final CompletableFuture<? extends U> snd;
final BiAction<? super T,? super U> fn;
final CompletableFuture<Void> dst;
final Executor executor;
ThenAcceptBoth(CompletableFuture<? extends T> src,
CompletableFuture<? extends U> snd,
BiAction<? super T,? super U> fn,
CompletableFuture<Void> dst,
Executor executor) {
this.src = src; this.snd = snd;
this.fn = fn; this.dst = dst;
this.executor = executor;
}
public final void run() {
final CompletableFuture<? extends T> a;
final CompletableFuture<? extends U> b;
final BiAction<? super T,? super U> fn;
final CompletableFuture<Void> dst;
Object r, s; T t; U u; Throwable ex;
if ((dst = this.dst) != null &&
(fn = this.fn) != null &&
(a = this.src) != null &&
(r = a.result) != null &&
(b = this.snd) != null &&
(s = b.result) != null &&
compareAndSet(0, 1)) {
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
if (ex != null)
u = null;
else if (s instanceof AltResult) {
ex = ((AltResult)s).ex;
u = null;
}
else {
@SuppressWarnings("unchecked") U us = (U) s;
u = us;
}
Executor e = executor;
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncAcceptBoth<T,U>(t, u, fn, dst));
else
fn.accept(t, u);
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(null, ex);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
| 0true
|
src_main_java_jsr166e_CompletableFuture.java
|
148 |
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
| 0true
|
src_main_java_jsr166e_extra_AtomicDoubleArray.java
|
232 |
XPostingsHighlighter highlighter = new XPostingsHighlighter() {
@Override
protected PassageFormatter getFormatter(String field) {
return new PassageFormatter() {
PassageFormatter defaultFormatter = new DefaultPassageFormatter();
@Override
public String[] format(Passage passages[], String content) {
// Just turns the String snippet into a length 2
// array of String
return new String[] {"blah blah", defaultFormatter.format(passages, content).toString()};
}
};
}
};
| 0true
|
src_test_java_org_apache_lucene_search_postingshighlight_XPostingsHighlighterTests.java
|
408 |
public class DeleteSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder<DeleteSnapshotRequest, DeleteSnapshotResponse, DeleteSnapshotRequestBuilder> {
/**
* Constructs delete snapshot request builder
*
* @param clusterAdminClient cluster admin client
*/
public DeleteSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient) {
super((InternalClusterAdminClient) clusterAdminClient, new DeleteSnapshotRequest());
}
/**
* Constructs delete snapshot request builder with specified repository and snapshot names
*
* @param clusterAdminClient cluster admin client
* @param repository repository name
* @param snapshot snapshot name
*/
public DeleteSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient, String repository, String snapshot) {
super((InternalClusterAdminClient) clusterAdminClient, new DeleteSnapshotRequest(repository, snapshot));
}
/**
* Sets the repository name
*
* @param repository repository name
* @return this builder
*/
public DeleteSnapshotRequestBuilder setRepository(String repository) {
request.repository(repository);
return this;
}
/**
* Sets the snapshot name
*
* @param snapshot snapshot name
* @return this builder
*/
public DeleteSnapshotRequestBuilder setSnapshot(String snapshot) {
request.snapshot(snapshot);
return this;
}
@Override
protected void doExecute(ActionListener<DeleteSnapshotResponse> listener) {
((ClusterAdminClient) client).deleteSnapshot(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_snapshots_delete_DeleteSnapshotRequestBuilder.java
|
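A usage sketch via the fluent client API; the repository and snapshot names are illustrative assumptions:

import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse;
import org.elasticsearch.client.Client;

public class DeleteSnapshotDemo {
    static void deleteSnapshot(Client client) {
        // Equivalent to new DeleteSnapshotRequestBuilder(client.admin().cluster(), "my_repository", "snapshot_1").
        DeleteSnapshotResponse response = client.admin().cluster()
                .prepareDeleteSnapshot("my_repository", "snapshot_1")
                .execute().actionGet();
        System.out.println("acknowledged: " + response.isAcknowledged());
    }
}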
417 |
public class ClientListProxy<E> extends AbstractClientCollectionProxy<E> implements IList<E> {
public ClientListProxy(String instanceName, String serviceName, String name) {
super(instanceName, serviceName, name);
}
public boolean addAll(int index, Collection<? extends E> c) {
throwExceptionIfNull(c);
final List<Data> valueList = new ArrayList<Data>(c.size());
for (E e : c) {
throwExceptionIfNull(e);
valueList.add(toData(e));
}
final ListAddAllRequest request = new ListAddAllRequest(getName(), valueList, index);
final Boolean result = invoke(request);
return result;
}
public E get(int index) {
final ListGetRequest request = new ListGetRequest(getName(), index);
return invoke(request);
}
public E set(int index, E element) {
throwExceptionIfNull(element);
final Data value = toData(element);
final ListSetRequest request = new ListSetRequest(getName(), index, value);
return invoke(request);
}
public void add(int index, E element) {
throwExceptionIfNull(element);
final Data value = toData(element);
final ListAddRequest request = new ListAddRequest(getName(), value, index);
invoke(request);
}
public E remove(int index) {
final ListRemoveRequest request = new ListRemoveRequest(getName(), index);
return invoke(request);
}
public int indexOf(Object o) {
return indexOfInternal(o, false);
}
public int lastIndexOf(Object o) {
return indexOfInternal(o, true);
}
private int indexOfInternal(Object o, boolean last) {
throwExceptionIfNull(o);
final Data value = toData(o);
final ListIndexOfRequest request = new ListIndexOfRequest(getName(), value, last);
final Integer result = invoke(request);
return result;
}
public ListIterator<E> listIterator() {
return listIterator(0);
}
public ListIterator<E> listIterator(int index) {
return subList(-1, -1).listIterator(index);
}
public List<E> subList(int fromIndex, int toIndex) {
final ListSubRequest request = new ListSubRequest(getName(), fromIndex, toIndex);
final SerializableCollection result = invoke(request);
final Collection<Data> collection = result.getCollection();
final List<E> list = new ArrayList<E>(collection.size());
for (Data value : collection) {
list.add((E) toObject(value));
}
return list;
}
@Override
public String toString() {
return "IList{" + "name='" + getName() + '\'' + '}';
}
}
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_proxy_ClientListProxy.java
|
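Application code never touches the proxy class directly; a sketch against a running cluster, with the list name as an illustrative assumption:

import com.hazelcast.client.HazelcastClient;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IList;

public class ClientListDemo {
    public static void main(String[] args) {
        HazelcastInstance client = HazelcastClient.newHazelcastClient();
        IList<String> list = client.getList("demo-list"); // backed by ClientListProxy
        list.add("a");
        list.add(0, "b");                 // ListAddRequest with an explicit index
        System.out.println(list.get(0));  // "b", served by ListGetRequest
        System.out.println(list.subList(0, 2));
        client.getLifecycleService().shutdown();
    }
}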
1,619 |
class UpdateTask extends PrioritizedRunnable {
public final String source;
public final ClusterStateUpdateTask updateTask;
public final long addedAt = System.currentTimeMillis();
UpdateTask(String source, Priority priority, ClusterStateUpdateTask updateTask) {
super(priority);
this.source = source;
this.updateTask = updateTask;
}
@Override
public void run() {
if (!lifecycle.started()) {
logger.debug("processing [{}]: ignoring, cluster_service not started", source);
return;
}
logger.debug("processing [{}]: execute", source);
ClusterState previousClusterState = clusterState;
ClusterState newClusterState;
try {
newClusterState = updateTask.execute(previousClusterState);
} catch (Throwable e) {
if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder("failed to execute cluster state update, state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
sb.append(previousClusterState.nodes().prettyPrint());
sb.append(previousClusterState.routingTable().prettyPrint());
sb.append(previousClusterState.readOnlyRoutingNodes().prettyPrint());
logger.trace(sb.toString(), e);
}
updateTask.onFailure(source, e);
return;
}
if (previousClusterState == newClusterState) {
logger.debug("processing [{}]: no change in cluster_state", source);
if (updateTask instanceof AckedClusterStateUpdateTask) {
//no need to wait for ack if nothing changed, the update can be counted as acknowledged
((AckedClusterStateUpdateTask) updateTask).onAllNodesAcked(null);
}
if (updateTask instanceof ProcessedClusterStateUpdateTask) {
((ProcessedClusterStateUpdateTask) updateTask).clusterStateProcessed(source, previousClusterState, newClusterState);
}
return;
}
try {
Discovery.AckListener ackListener = new NoOpAckListener();
if (newClusterState.nodes().localNodeMaster()) {
// only the master controls the version numbers
Builder builder = ClusterState.builder(newClusterState).version(newClusterState.version() + 1);
if (previousClusterState.routingTable() != newClusterState.routingTable()) {
builder.routingTable(RoutingTable.builder(newClusterState.routingTable()).version(newClusterState.routingTable().version() + 1));
}
if (previousClusterState.metaData() != newClusterState.metaData()) {
builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1));
}
newClusterState = builder.build();
if (updateTask instanceof AckedClusterStateUpdateTask) {
final AckedClusterStateUpdateTask ackedUpdateTask = (AckedClusterStateUpdateTask) updateTask;
if (ackedUpdateTask.ackTimeout() == null || ackedUpdateTask.ackTimeout().millis() == 0) {
ackedUpdateTask.onAckTimeout();
} else {
try {
ackListener = new AckCountDownListener(ackedUpdateTask, newClusterState.version(), newClusterState.nodes(), threadPool);
} catch (EsRejectedExecutionException ex) {
if (logger.isDebugEnabled()) {
logger.debug("Couldn't schedule timeout thread - node might be shutting down", ex);
}
//timeout straightaway, otherwise we could wait forever as the timeout thread has not started
ackedUpdateTask.onAckTimeout();
}
}
}
} else {
if (previousClusterState.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK) && !newClusterState.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK)) {
                        // force an update; it's a fresh update from the master as we transition from not having a master to having one
                        // use fresh instances of routing table and metadata to remove the chance that the version might be the same
Builder builder = ClusterState.builder(newClusterState);
builder.routingTable(RoutingTable.builder(newClusterState.routingTable()));
builder.metaData(MetaData.builder(newClusterState.metaData()));
newClusterState = builder.build();
logger.debug("got first state from fresh master [{}]", newClusterState.nodes().masterNodeId());
} else if (newClusterState.version() < previousClusterState.version()) {
// we got this cluster state from the master, filter out based on versions (don't call listeners)
logger.debug("got old cluster state [" + newClusterState.version() + "<" + previousClusterState.version() + "] from source [" + source + "], ignoring");
return;
}
}
if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder("cluster state updated:\nversion [").append(newClusterState.version()).append("], source [").append(source).append("]\n");
sb.append(newClusterState.nodes().prettyPrint());
sb.append(newClusterState.routingTable().prettyPrint());
sb.append(newClusterState.readOnlyRoutingNodes().prettyPrint());
logger.trace(sb.toString());
} else if (logger.isDebugEnabled()) {
logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), source);
}
ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(source, newClusterState, previousClusterState);
// new cluster state, notify all listeners
final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta();
if (nodesDelta.hasChanges() && logger.isInfoEnabled()) {
String summary = nodesDelta.shortSummary();
if (summary.length() > 0) {
logger.info("{}, reason: {}", summary, source);
}
}
// TODO, do this in parallel (and wait)
for (DiscoveryNode node : nodesDelta.addedNodes()) {
if (!nodeRequiresConnection(node)) {
continue;
}
try {
transportService.connectToNode(node);
} catch (Throwable e) {
// the fault detection will detect it as failed as well
logger.warn("failed to connect to node [" + node + "]", e);
}
}
// if we are the master, publish the new state to all nodes
// we publish here before we send a notification to all the listeners, since if it fails
// we don't want to notify
if (newClusterState.nodes().localNodeMaster()) {
logger.debug("publishing cluster state version {}", newClusterState.version());
discoveryService.publish(newClusterState, ackListener);
}
// update the current cluster state
clusterState = newClusterState;
logger.debug("set local cluster state to version {}", newClusterState.version());
for (ClusterStateListener listener : priorityClusterStateListeners) {
listener.clusterChanged(clusterChangedEvent);
}
for (ClusterStateListener listener : clusterStateListeners) {
listener.clusterChanged(clusterChangedEvent);
}
for (ClusterStateListener listener : lastClusterStateListeners) {
listener.clusterChanged(clusterChangedEvent);
}
if (!nodesDelta.removedNodes().isEmpty()) {
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
for (DiscoveryNode node : nodesDelta.removedNodes()) {
transportService.disconnectFromNode(node);
}
}
});
}
//manual ack only from the master at the end of the publish
if (newClusterState.nodes().localNodeMaster()) {
try {
ackListener.onNodeAck(localNode(), null);
} catch (Throwable t) {
logger.debug("error while processing ack for master node [{}]", t, newClusterState.nodes().localNode());
}
}
if (updateTask instanceof ProcessedClusterStateUpdateTask) {
((ProcessedClusterStateUpdateTask) updateTask).clusterStateProcessed(source, previousClusterState, newClusterState);
}
logger.debug("processing [{}]: done applying updated cluster_state (version: {})", source, newClusterState.version());
} catch (Throwable t) {
StringBuilder sb = new StringBuilder("failed to apply updated cluster state:\nversion [").append(newClusterState.version()).append("], source [").append(source).append("]\n");
sb.append(newClusterState.nodes().prettyPrint());
sb.append(newClusterState.routingTable().prettyPrint());
sb.append(newClusterState.readOnlyRoutingNodes().prettyPrint());
logger.warn(sb.toString(), t);
// TODO: do we want to call updateTask.onFailure here?
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_cluster_service_InternalClusterService.java
|
297 |
public class TransportActionNodeProxy<Request extends ActionRequest, Response extends ActionResponse> extends AbstractComponent {
protected final TransportService transportService;
private final GenericAction<Request, Response> action;
private final TransportRequestOptions transportOptions;
@Inject
public TransportActionNodeProxy(Settings settings, GenericAction<Request, Response> action, TransportService transportService) {
super(settings);
this.action = action;
this.transportService = transportService;
this.transportOptions = action.transportOptions(settings);
}
public ActionFuture<Response> execute(DiscoveryNode node, Request request) throws ElasticsearchException {
PlainActionFuture<Response> future = newFuture();
request.listenerThreaded(false);
execute(node, request, future);
return future;
}
public void execute(DiscoveryNode node, final Request request, final ActionListener<Response> listener) {
ActionRequestValidationException validationException = request.validate();
if (validationException != null) {
listener.onFailure(validationException);
return;
}
transportService.sendRequest(node, action.name(), request, transportOptions, new BaseTransportResponseHandler<Response>() {
@Override
public Response newInstance() {
return action.newResponse();
}
@Override
public String executor() {
if (request.listenerThreaded()) {
return ThreadPool.Names.GENERIC;
}
return ThreadPool.Names.SAME;
}
@Override
public void handleResponse(Response response) {
listener.onResponse(response);
}
@Override
public void handleException(TransportException exp) {
listener.onFailure(exp);
}
});
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_TransportActionNodeProxy.java
|
245 |
private static class AnalyzingComparator implements Comparator<BytesRef> {
private final boolean hasPayloads;
public AnalyzingComparator(boolean hasPayloads) {
this.hasPayloads = hasPayloads;
}
private final ByteArrayDataInput readerA = new ByteArrayDataInput();
private final ByteArrayDataInput readerB = new ByteArrayDataInput();
private final BytesRef scratchA = new BytesRef();
private final BytesRef scratchB = new BytesRef();
@Override
public int compare(BytesRef a, BytesRef b) {
// First by analyzed form:
readerA.reset(a.bytes, a.offset, a.length);
scratchA.length = readerA.readShort();
scratchA.bytes = a.bytes;
scratchA.offset = readerA.getPosition();
readerB.reset(b.bytes, b.offset, b.length);
scratchB.bytes = b.bytes;
scratchB.length = readerB.readShort();
scratchB.offset = readerB.getPosition();
int cmp = scratchA.compareTo(scratchB);
if (cmp != 0) {
return cmp;
}
readerA.skipBytes(scratchA.length);
readerB.skipBytes(scratchB.length);
// Next by cost:
long aCost = readerA.readInt();
long bCost = readerB.readInt();
if (aCost < bCost) {
return -1;
} else if (aCost > bCost) {
return 1;
}
// Finally by surface form:
if (hasPayloads) {
scratchA.length = readerA.readShort();
scratchA.offset = readerA.getPosition();
scratchB.length = readerB.readShort();
scratchB.offset = readerB.getPosition();
} else {
scratchA.offset = readerA.getPosition();
scratchA.length = a.length - scratchA.offset;
scratchB.offset = readerB.getPosition();
scratchB.length = b.length - scratchB.offset;
}
return scratchA.compareTo(scratchB);
}
}
| 0true
|
src_main_java_org_apache_lucene_search_suggest_analyzing_XAnalyzingSuggester.java
|
119 |
static final class WorkQueue {
/**
* Capacity of work-stealing queue array upon initialization.
* Must be a power of two; at least 4, but should be larger to
* reduce or eliminate cacheline sharing among queues.
* Currently, it is much larger, as a partial workaround for
* the fact that JVMs often place arrays in locations that
* share GC bookkeeping (especially cardmarks) such that
* per-write accesses encounter serious memory contention.
*/
static final int INITIAL_QUEUE_CAPACITY = 1 << 13;
/**
* Maximum size for queue arrays. Must be a power of two less
* than or equal to 1 << (31 - width of array entry) to ensure
* lack of wraparound of index calculations, but defined to a
* value a bit less than this to help users trap runaway
* programs before saturating systems.
*/
static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M
// Heuristic padding to ameliorate unfortunate memory placements
volatile long pad00, pad01, pad02, pad03, pad04, pad05, pad06;
volatile int eventCount; // encoded inactivation count; < 0 if inactive
int nextWait; // encoded record of next event waiter
int nsteals; // number of steals
int hint; // steal index hint
short poolIndex; // index of this queue in pool
final short mode; // 0: lifo, > 0: fifo, < 0: shared
volatile int qlock; // 1: locked, -1: terminate; else 0
volatile int base; // index of next slot for poll
int top; // index of next slot for push
ForkJoinTask<?>[] array; // the elements (initially unallocated)
final ForkJoinPool pool; // the containing pool (may be null)
final ForkJoinWorkerThread owner; // owning thread or null if shared
volatile Thread parker; // == owner during call to park; else null
volatile ForkJoinTask<?> currentJoin; // task being joined in awaitJoin
ForkJoinTask<?> currentSteal; // current non-local task being executed
volatile Object pad10, pad11, pad12, pad13, pad14, pad15, pad16, pad17;
volatile Object pad18, pad19, pad1a, pad1b, pad1c, pad1d;
WorkQueue(ForkJoinPool pool, ForkJoinWorkerThread owner, int mode,
int seed) {
this.pool = pool;
this.owner = owner;
this.mode = (short)mode;
this.hint = seed; // store initial seed for runWorker
// Place indices in the center of array (that is not yet allocated)
base = top = INITIAL_QUEUE_CAPACITY >>> 1;
}
/**
* Returns the approximate number of tasks in the queue.
*/
final int queueSize() {
int n = base - top; // non-owner callers must read base first
return (n >= 0) ? 0 : -n; // ignore transient negative
}
/**
* Provides a more accurate estimate of whether this queue has
* any tasks than does queueSize, by checking whether a
* near-empty queue has at least one unclaimed task.
*/
final boolean isEmpty() {
ForkJoinTask<?>[] a; int m, s;
int n = base - (s = top);
return (n >= 0 ||
(n == -1 &&
((a = array) == null ||
(m = a.length - 1) < 0 ||
U.getObject
(a, (long)((m & (s - 1)) << ASHIFT) + ABASE) == null)));
}
/**
* Pushes a task. Call only by owner in unshared queues. (The
* shared-queue version is embedded in method externalPush.)
*
* @param task the task. Caller must ensure non-null.
* @throws RejectedExecutionException if array cannot be resized
*/
final void push(ForkJoinTask<?> task) {
ForkJoinTask<?>[] a; ForkJoinPool p;
int s = top, n;
if ((a = array) != null) { // ignore if queue removed
int m = a.length - 1;
U.putOrderedObject(a, ((m & s) << ASHIFT) + ABASE, task);
if ((n = (top = s + 1) - base) <= 2)
(p = pool).signalWork(p.workQueues, this);
else if (n >= m)
growArray();
}
}
/**
* Initializes or doubles the capacity of array. Call either
* by owner or with lock held -- it is OK for base, but not
* top, to move while resizings are in progress.
*/
final ForkJoinTask<?>[] growArray() {
ForkJoinTask<?>[] oldA = array;
int size = oldA != null ? oldA.length << 1 : INITIAL_QUEUE_CAPACITY;
if (size > MAXIMUM_QUEUE_CAPACITY)
throw new RejectedExecutionException("Queue capacity exceeded");
int oldMask, t, b;
ForkJoinTask<?>[] a = array = new ForkJoinTask<?>[size];
if (oldA != null && (oldMask = oldA.length - 1) >= 0 &&
(t = top) - (b = base) > 0) {
int mask = size - 1;
do {
ForkJoinTask<?> x;
int oldj = ((b & oldMask) << ASHIFT) + ABASE;
int j = ((b & mask) << ASHIFT) + ABASE;
x = (ForkJoinTask<?>)U.getObjectVolatile(oldA, oldj);
if (x != null &&
U.compareAndSwapObject(oldA, oldj, x, null))
U.putObjectVolatile(a, j, x);
} while (++b != t);
}
return a;
}
/**
* Takes next task, if one exists, in LIFO order. Call only
* by owner in unshared queues.
*/
final ForkJoinTask<?> pop() {
ForkJoinTask<?>[] a; ForkJoinTask<?> t; int m;
if ((a = array) != null && (m = a.length - 1) >= 0) {
for (int s; (s = top - 1) - base >= 0;) {
long j = ((m & s) << ASHIFT) + ABASE;
if ((t = (ForkJoinTask<?>)U.getObject(a, j)) == null)
break;
if (U.compareAndSwapObject(a, j, t, null)) {
top = s;
return t;
}
}
}
return null;
}
/**
* Takes a task in FIFO order if b is base of queue and a task
* can be claimed without contention. Specialized versions
* appear in ForkJoinPool methods scan and tryHelpStealer.
*/
final ForkJoinTask<?> pollAt(int b) {
ForkJoinTask<?> t; ForkJoinTask<?>[] a;
if ((a = array) != null) {
int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) != null &&
base == b && U.compareAndSwapObject(a, j, t, null)) {
U.putOrderedInt(this, QBASE, b + 1);
return t;
}
}
return null;
}
/**
* Takes next task, if one exists, in FIFO order.
*/
final ForkJoinTask<?> poll() {
ForkJoinTask<?>[] a; int b; ForkJoinTask<?> t;
while ((b = base) - top < 0 && (a = array) != null) {
int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
if (t != null) {
if (U.compareAndSwapObject(a, j, t, null)) {
U.putOrderedInt(this, QBASE, b + 1);
return t;
}
}
else if (base == b) {
if (b + 1 == top)
break;
Thread.yield(); // wait for lagging update (very rare)
}
}
return null;
}
/**
* Takes next task, if one exists, in order specified by mode.
*/
final ForkJoinTask<?> nextLocalTask() {
return mode == 0 ? pop() : poll();
}
/**
* Returns next task, if one exists, in order specified by mode.
*/
final ForkJoinTask<?> peek() {
ForkJoinTask<?>[] a = array; int m;
if (a == null || (m = a.length - 1) < 0)
return null;
int i = mode == 0 ? top - 1 : base;
int j = ((i & m) << ASHIFT) + ABASE;
return (ForkJoinTask<?>)U.getObjectVolatile(a, j);
}
/**
* Pops the given task only if it is at the current top.
* (A shared version is available only via FJP.tryExternalUnpush)
*/
final boolean tryUnpush(ForkJoinTask<?> t) {
ForkJoinTask<?>[] a; int s;
if ((a = array) != null && (s = top) != base &&
U.compareAndSwapObject
(a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) {
top = s;
return true;
}
return false;
}
/**
* Removes and cancels all known tasks, ignoring any exceptions.
*/
final void cancelAll() {
ForkJoinTask.cancelIgnoringExceptions(currentJoin);
ForkJoinTask.cancelIgnoringExceptions(currentSteal);
for (ForkJoinTask<?> t; (t = poll()) != null; )
ForkJoinTask.cancelIgnoringExceptions(t);
}
// Specialized execution methods
/**
* Polls and runs tasks until empty.
*/
final void pollAndExecAll() {
for (ForkJoinTask<?> t; (t = poll()) != null;)
t.doExec();
}
/**
* Executes a top-level task and any local tasks remaining
* after execution.
*/
final void runTask(ForkJoinTask<?> task) {
if ((currentSteal = task) != null) {
task.doExec();
ForkJoinTask<?>[] a = array;
int md = mode;
++nsteals;
currentSteal = null;
if (md != 0)
pollAndExecAll();
else if (a != null) {
int s, m = a.length - 1;
while ((s = top - 1) - base >= 0) {
long i = ((m & s) << ASHIFT) + ABASE;
ForkJoinTask<?> t = (ForkJoinTask<?>)U.getObject(a, i);
if (t == null)
break;
if (U.compareAndSwapObject(a, i, t, null)) {
top = s;
t.doExec();
}
}
}
}
}
/**
* If present, removes from queue and executes the given task,
* or any other cancelled task. Returns (true) on any CAS
* or consistency check failure so caller can retry.
*
* @return false if no progress can be made, else true
*/
final boolean tryRemoveAndExec(ForkJoinTask<?> task) {
boolean stat;
ForkJoinTask<?>[] a; int m, s, b, n;
if (task != null && (a = array) != null && (m = a.length - 1) >= 0 &&
(n = (s = top) - (b = base)) > 0) {
boolean removed = false, empty = true;
stat = true;
for (ForkJoinTask<?> t;;) { // traverse from s to b
long j = ((--s & m) << ASHIFT) + ABASE;
t = (ForkJoinTask<?>)U.getObject(a, j);
if (t == null) // inconsistent length
break;
else if (t == task) {
if (s + 1 == top) { // pop
if (!U.compareAndSwapObject(a, j, task, null))
break;
top = s;
removed = true;
}
else if (base == b) // replace with proxy
removed = U.compareAndSwapObject(a, j, task,
new EmptyTask());
break;
}
else if (t.status >= 0)
empty = false;
else if (s + 1 == top) { // pop and throw away
if (U.compareAndSwapObject(a, j, t, null))
top = s;
break;
}
if (--n == 0) {
if (!empty && base == b)
stat = false;
break;
}
}
if (removed)
task.doExec();
}
else
stat = false;
return stat;
}
/**
* Tries to poll for and execute the given task or any other
* task in its CountedCompleter computation.
*/
final boolean pollAndExecCC(CountedCompleter<?> root) {
ForkJoinTask<?>[] a; int b; Object o; CountedCompleter<?> t, r;
if ((b = base) - top < 0 && (a = array) != null) {
long j = (((a.length - 1) & b) << ASHIFT) + ABASE;
if ((o = U.getObjectVolatile(a, j)) == null)
return true; // retry
if (o instanceof CountedCompleter) {
for (t = (CountedCompleter<?>)o, r = t;;) {
if (r == root) {
if (base == b &&
U.compareAndSwapObject(a, j, t, null)) {
U.putOrderedInt(this, QBASE, b + 1);
t.doExec();
}
return true;
}
else if ((r = r.completer) == null)
break; // not part of root computation
}
}
}
return false;
}
/**
* Tries to pop and execute the given task or any other task
* in its CountedCompleter computation.
*/
final boolean externalPopAndExecCC(CountedCompleter<?> root) {
ForkJoinTask<?>[] a; int s; Object o; CountedCompleter<?> t, r;
if (base - (s = top) < 0 && (a = array) != null) {
long j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE;
if ((o = U.getObject(a, j)) instanceof CountedCompleter) {
for (t = (CountedCompleter<?>)o, r = t;;) {
if (r == root) {
if (U.compareAndSwapInt(this, QLOCK, 0, 1)) {
if (top == s && array == a &&
U.compareAndSwapObject(a, j, t, null)) {
top = s - 1;
qlock = 0;
t.doExec();
}
else
qlock = 0;
}
return true;
}
else if ((r = r.completer) == null)
break;
}
}
}
return false;
}
/**
* Internal version
*/
final boolean internalPopAndExecCC(CountedCompleter<?> root) {
ForkJoinTask<?>[] a; int s; Object o; CountedCompleter<?> t, r;
if (base - (s = top) < 0 && (a = array) != null) {
long j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE;
if ((o = U.getObject(a, j)) instanceof CountedCompleter) {
for (t = (CountedCompleter<?>)o, r = t;;) {
if (r == root) {
if (U.compareAndSwapObject(a, j, t, null)) {
top = s - 1;
t.doExec();
}
return true;
}
else if ((r = r.completer) == null)
break;
}
}
}
return false;
}
/**
* Returns true if owned and not known to be blocked.
*/
final boolean isApparentlyUnblocked() {
Thread wt; Thread.State s;
return (eventCount >= 0 &&
(wt = owner) != null &&
(s = wt.getState()) != Thread.State.BLOCKED &&
s != Thread.State.WAITING &&
s != Thread.State.TIMED_WAITING);
}
// Unsafe mechanics
private static final sun.misc.Unsafe U;
private static final long QBASE;
private static final long QLOCK;
private static final int ABASE;
private static final int ASHIFT;
static {
try {
U = getUnsafe();
Class<?> k = WorkQueue.class;
Class<?> ak = ForkJoinTask[].class;
QBASE = U.objectFieldOffset
(k.getDeclaredField("base"));
QLOCK = U.objectFieldOffset
(k.getDeclaredField("qlock"));
ABASE = U.arrayBaseOffset(ak);
int scale = U.arrayIndexScale(ak);
if ((scale & (scale - 1)) != 0)
throw new Error("data type scale not a power of two");
ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
} catch (Exception e) {
throw new Error(e);
}
}
}
| 0true
|
src_main_java_jsr166e_ForkJoinPool.java
|
27 |
@Service("blCategoryFieldService")
public class CategoryFieldServiceImpl extends AbstractRuleBuilderFieldService {
@Override
public void init() {
fields.add(new FieldData.Builder()
.label("rule_categoryName")
.name("name")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_categoryId")
.name("id")
.operators("blcOperators_Numeric")
.options("[]")
.type(SupportedFieldType.ID)
.build());
fields.add(new FieldData.Builder()
.label("rule_categoryUrl")
.name("url")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_categoryLongDescription")
.name("longDescription")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
}
@Override
public String getName() {
return RuleIdentifier.CATEGORY;
}
@Override
public String getDtoClassName() {
return "org.broadleafcommerce.core.catalog.domain.CategoryImpl";
}
}
| 0true
|
admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_web_rulebuilder_service_CategoryFieldServiceImpl.java
|
21 |
@Controller("blAdminCategoryController")
@RequestMapping("/" + AdminCategoryController.SECTION_KEY)
public class AdminCategoryController extends AdminBasicEntityController {
protected static final String SECTION_KEY = "category";
@Resource(name = "blCatalogService")
protected CatalogService catalogService;
@Override
protected String getSectionKey(Map<String, String> pathVars) {
//allow external links to work for ToOne items
if (super.getSectionKey(pathVars) != null) {
return super.getSectionKey(pathVars);
}
return SECTION_KEY;
}
@SuppressWarnings("unchecked")
@RequestMapping(value = "", method = RequestMethod.GET)
public String viewEntityList(HttpServletRequest request, HttpServletResponse response, Model model,
@PathVariable Map<String, String> pathVars,
@RequestParam MultiValueMap<String, String> requestParams) throws Exception {
super.viewEntityList(request, response, model, pathVars, requestParams);
List<Category> parentCategories = catalogService.findAllParentCategories();
model.addAttribute("parentCategories", parentCategories);
List<EntityFormAction> mainActions = (List<EntityFormAction>) model.asMap().get("mainActions");
mainActions.add(new EntityFormAction("CategoryTreeView")
.withButtonClass("show-category-tree-view")
.withDisplayText("Category_Tree_View"));
mainActions.add(new EntityFormAction("CategoryListView")
.withButtonClass("show-category-list-view active")
.withDisplayText("Category_List_View"));
model.addAttribute("viewType", "categoryTree");
return "modules/defaultContainer";
}
}
| 0true
|
admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_web_controller_entity_AdminCategoryController.java
|
749 |
public class GetRequestBuilder extends SingleShardOperationRequestBuilder<GetRequest, GetResponse, GetRequestBuilder> {
public GetRequestBuilder(Client client) {
super((InternalClient) client, new GetRequest());
}
public GetRequestBuilder(Client client, @Nullable String index) {
super((InternalClient) client, new GetRequest(index));
}
/**
* Sets the type of the document to fetch. If set to <tt>null</tt>, will use just the id to fetch the
* first document matching it.
*/
public GetRequestBuilder setType(@Nullable String type) {
request.type(type);
return this;
}
/**
* Sets the id of the document to fetch.
*/
public GetRequestBuilder setId(String id) {
request.id(id);
return this;
}
/**
     * Sets the parent id of this document. Will simply set the routing to this value, as it is only
     * used for routing with get requests.
*/
public GetRequestBuilder setParent(String parent) {
request.parent(parent);
return this;
}
/**
     * Controls the shard routing of the request. The shard is computed from this value
     * instead of from the id.
*/
public GetRequestBuilder setRouting(String routing) {
request.routing(routing);
return this;
}
/**
* Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
* <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
* a custom value, which guarantees that the same order will be used across different requests.
*/
public GetRequestBuilder setPreference(String preference) {
request.preference(preference);
return this;
}
/**
* Explicitly specify the fields that will be returned. By default, the <tt>_source</tt>
* field will be returned.
*/
public GetRequestBuilder setFields(String... fields) {
request.fields(fields);
return this;
}
/**
* Indicates whether the response should contain the stored _source
*
     * @param fetch <tt>true</tt> if the stored <tt>_source</tt> should be returned with the response
     * @return this builder
*/
public GetRequestBuilder setFetchSource(boolean fetch) {
FetchSourceContext context = request.fetchSourceContext();
if (context == null) {
request.fetchSourceContext(new FetchSourceContext(fetch));
}
else {
context.fetchSource(fetch);
}
return this;
}
/**
* Indicate that _source should be returned, with an "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @param include An optional include (optionally wildcarded) pattern to filter the returned _source
* @param exclude An optional exclude (optionally wildcarded) pattern to filter the returned _source
*/
public GetRequestBuilder setFetchSource(@Nullable String include, @Nullable String exclude) {
return setFetchSource(
include == null? Strings.EMPTY_ARRAY : new String[] {include},
exclude == null? Strings.EMPTY_ARRAY : new String[] {exclude});
}
/**
* Indicate that _source should be returned, with an "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @param includes An optional list of include (optionally wildcarded) pattern to filter the returned _source
* @param excludes An optional list of exclude (optionally wildcarded) pattern to filter the returned _source
*/
public GetRequestBuilder setFetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
FetchSourceContext context = request.fetchSourceContext();
if (context == null) {
request.fetchSourceContext(new FetchSourceContext(includes, excludes));
}
else {
context.fetchSource(true);
context.includes(includes);
context.excludes(excludes);
}
return this;
}
/**
* Should a refresh be executed before this get operation causing the operation to
* return the latest value. Note, heavy get should not set this to <tt>true</tt>. Defaults
* to <tt>false</tt>.
*/
public GetRequestBuilder setRefresh(boolean refresh) {
request.refresh(refresh);
return this;
}
public GetRequestBuilder setRealtime(Boolean realtime) {
request.realtime(realtime);
return this;
}
/**
* Sets the version, which will cause the get operation to only be performed if a matching
* version exists and no changes happened on the doc since then.
*/
public GetRequestBuilder setVersion(long version) {
request.version(version);
return this;
}
/**
* Sets the versioning type. Defaults to {@link org.elasticsearch.index.VersionType#INTERNAL}.
*/
public GetRequestBuilder setVersionType(VersionType versionType) {
request.versionType(versionType);
return this;
}
@Override
protected void doExecute(ActionListener<GetResponse> listener) {
((Client) client).get(request, listener);
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_get_GetRequestBuilder.java
|
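A usage sketch through Client.prepareGet; the index, type, and id values are illustrative assumptions:

import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.Client;

public class GetRequestDemo {
    static void fetch(Client client) {
        GetResponse response = client.prepareGet("twitter", "tweet", "1")
                .setFields("user", "message")   // otherwise only _source is returned
                .setRealtime(true)
                .execute().actionGet();
        if (response.isExists()) {
            System.out.println(response.getField("user").getValue());
        }
    }
}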
238 |
public interface SystemPropertiesService {
public SystemProperty saveSystemProperty(SystemProperty systemProperty);
public void deleteSystemProperty(SystemProperty systemProperty);
public List<SystemProperty> findAllSystemProperties();
public SystemProperty findSystemPropertyByName(String name);
/**
* This method should not persist anything to the database. It should simply return the correct implementation of
* the SystemProperty interface.
     * @return a new, unpersisted SystemProperty instance
*/
public SystemProperty createNewSystemProperty();
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_config_service_SystemPropertiesService.java
|
5,068 |
static class FieldDataWarmer extends IndicesWarmer.Listener {
@Override
public TerminationHandle warm(final IndexShard indexShard, IndexMetaData indexMetaData, final WarmerContext context, ThreadPool threadPool) {
final MapperService mapperService = indexShard.mapperService();
final Map<String, FieldMapper<?>> warmUp = new HashMap<String, FieldMapper<?>>();
boolean parentChild = false;
for (DocumentMapper docMapper : mapperService) {
for (FieldMapper<?> fieldMapper : docMapper.mappers().mappers()) {
if (fieldMapper instanceof ParentFieldMapper) {
ParentFieldMapper parentFieldMapper = (ParentFieldMapper) fieldMapper;
if (parentFieldMapper.active()) {
parentChild = true;
}
}
final FieldDataType fieldDataType = fieldMapper.fieldDataType();
if (fieldDataType == null) {
continue;
}
if (fieldDataType.getLoading() != Loading.EAGER) {
continue;
}
final String indexName = fieldMapper.names().indexName();
if (warmUp.containsKey(indexName)) {
continue;
}
warmUp.put(indexName, fieldMapper);
}
}
final IndexFieldDataService indexFieldDataService = indexShard.indexFieldDataService();
final Executor executor = threadPool.executor(executor());
final CountDownLatch latch = new CountDownLatch(context.newSearcher().reader().leaves().size() * warmUp.size() + (parentChild ? 1 : 0));
for (final AtomicReaderContext ctx : context.newSearcher().reader().leaves()) {
for (final FieldMapper<?> fieldMapper : warmUp.values()) {
executor.execute(new Runnable() {
@Override
public void run() {
try {
final long start = System.nanoTime();
indexFieldDataService.getForField(fieldMapper).load(ctx);
if (indexShard.warmerService().logger().isTraceEnabled()) {
indexShard.warmerService().logger().trace("warmed fielddata for [{}], took [{}]", fieldMapper.names().name(), TimeValue.timeValueNanos(System.nanoTime() - start));
}
} catch (Throwable t) {
indexShard.warmerService().logger().warn("failed to warm-up fielddata for [{}]", t, fieldMapper.names().name());
} finally {
latch.countDown();
}
}
});
}
}
if (parentChild) {
executor.execute(new Runnable() {
@Override
public void run() {
try {
final long start = System.nanoTime();
indexShard.indexService().cache().idCache().refresh(context.newSearcher().reader().leaves());
if (indexShard.warmerService().logger().isTraceEnabled()) {
indexShard.warmerService().logger().trace("warmed id_cache, took [{}]", TimeValue.timeValueNanos(System.nanoTime() - start));
}
} catch (Throwable t) {
indexShard.warmerService().logger().warn("failed to warm-up id cache", t);
} finally {
latch.countDown();
}
}
});
}
return new TerminationHandle() {
@Override
public void awaitTermination() throws InterruptedException {
latch.await();
}
};
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_SearchService.java
|
135 |
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
| 0true
|
src_main_java_jsr166e_StampedLock.java
|
52 |
static final class EntrySpliterator<K,V> extends Traverser<K,V>
implements ConcurrentHashMapSpliterator<Map.Entry<K,V>> {
final ConcurrentHashMapV8<K,V> map; // To export MapEntry
long est; // size estimate
EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
long est, ConcurrentHashMapV8<K,V> map) {
super(tab, size, index, limit);
this.map = map;
this.est = est;
}
public ConcurrentHashMapSpliterator<Map.Entry<K,V>> trySplit() {
int i, f, h;
return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
f, est >>>= 1, map);
}
public void forEachRemaining(Action<? super Map.Entry<K,V>> action) {
if (action == null) throw new NullPointerException();
for (Node<K,V> p; (p = advance()) != null; )
action.apply(new MapEntry<K,V>(p.key, p.val, map));
}
public boolean tryAdvance(Action<? super Map.Entry<K,V>> action) {
if (action == null) throw new NullPointerException();
Node<K,V> p;
if ((p = advance()) == null)
return false;
action.apply(new MapEntry<K,V>(p.key, p.val, map));
return true;
}
public long estimateSize() { return est; }
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
2,947 |
public class StemmerTokenFilterFactory extends AbstractTokenFilterFactory {
private String language;
@Inject
public StemmerTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
super(index, indexSettings, name, settings);
this.language = Strings.capitalize(settings.get("language", settings.get("name", "porter")));
}
@Override
public TokenStream create(TokenStream tokenStream) {
if ("arabic".equalsIgnoreCase(language)) {
return new ArabicStemFilter(tokenStream);
} else if ("armenian".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new ArmenianStemmer());
} else if ("basque".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new BasqueStemmer());
} else if ("brazilian".equalsIgnoreCase(language)) {
return new BrazilianStemFilter(tokenStream);
} else if ("bulgarian".equalsIgnoreCase(language)) {
return new BulgarianStemFilter(tokenStream);
} else if ("catalan".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new CatalanStemmer());
} else if ("czech".equalsIgnoreCase(language)) {
return new CzechStemFilter(tokenStream);
} else if ("danish".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new DanishStemmer());
} else if ("dutch".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new DutchStemmer());
} else if ("english".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new EnglishStemmer());
} else if ("finnish".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new FinnishStemmer());
} else if ("french".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new FrenchStemmer());
} else if ("german".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new GermanStemmer());
} else if ("german2".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new German2Stemmer());
} else if ("hungarian".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new HungarianStemmer());
} else if ("italian".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new ItalianStemmer());
} else if ("kp".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new KpStemmer());
} else if ("kstem".equalsIgnoreCase(language)) {
return new KStemFilter(tokenStream);
} else if ("lovins".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new LovinsStemmer());
} else if ("latvian".equalsIgnoreCase(language)) {
return new LatvianStemFilter(tokenStream);
} else if ("norwegian".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new NorwegianStemmer());
} else if ("minimal_norwegian".equalsIgnoreCase(language) || "minimalNorwegian".equals(language)) {
return new NorwegianMinimalStemFilter(tokenStream);
} else if ("porter".equalsIgnoreCase(language)) {
return new PorterStemFilter(tokenStream);
} else if ("porter2".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new PorterStemmer());
} else if ("portuguese".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new PortugueseStemmer());
} else if ("romanian".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new RomanianStemmer());
} else if ("russian".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new RussianStemmer());
} else if ("spanish".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new SpanishStemmer());
} else if ("swedish".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new SwedishStemmer());
} else if ("turkish".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new TurkishStemmer());
} else if ("minimal_english".equalsIgnoreCase(language) || "minimalEnglish".equalsIgnoreCase(language)) {
return new EnglishMinimalStemFilter(tokenStream);
} else if ("possessive_english".equalsIgnoreCase(language) || "possessiveEnglish".equalsIgnoreCase(language)) {
return new EnglishPossessiveFilter(version, tokenStream);
} else if ("light_finish".equalsIgnoreCase(language) || "lightFinish".equalsIgnoreCase(language)) {
// leaving this for backward compatibility
return new FinnishLightStemFilter(tokenStream);
} else if ("light_finnish".equalsIgnoreCase(language) || "lightFinnish".equalsIgnoreCase(language)) {
return new FinnishLightStemFilter(tokenStream);
} else if ("light_french".equalsIgnoreCase(language) || "lightFrench".equalsIgnoreCase(language)) {
return new FrenchLightStemFilter(tokenStream);
} else if ("minimal_french".equalsIgnoreCase(language) || "minimalFrench".equalsIgnoreCase(language)) {
return new FrenchMinimalStemFilter(tokenStream);
} else if ("light_german".equalsIgnoreCase(language) || "lightGerman".equalsIgnoreCase(language)) {
return new GermanLightStemFilter(tokenStream);
} else if ("minimal_german".equalsIgnoreCase(language) || "minimalGerman".equalsIgnoreCase(language)) {
return new GermanMinimalStemFilter(tokenStream);
} else if ("hindi".equalsIgnoreCase(language)) {
return new HindiStemFilter(tokenStream);
} else if ("light_hungarian".equalsIgnoreCase(language) || "lightHungarian".equalsIgnoreCase(language)) {
return new HungarianLightStemFilter(tokenStream);
} else if ("indonesian".equalsIgnoreCase(language)) {
return new IndonesianStemFilter(tokenStream);
} else if ("light_italian".equalsIgnoreCase(language) || "lightItalian".equalsIgnoreCase(language)) {
return new ItalianLightStemFilter(tokenStream);
} else if ("light_portuguese".equalsIgnoreCase(language) || "lightPortuguese".equalsIgnoreCase(language)) {
return new PortugueseLightStemFilter(tokenStream);
} else if ("minimal_portuguese".equalsIgnoreCase(language) || "minimalPortuguese".equalsIgnoreCase(language)) {
return new PortugueseMinimalStemFilter(tokenStream);
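// NOTE: unreachable, since "portuguese" is already matched by the SnowballFilter branch above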
} else if ("portuguese".equalsIgnoreCase(language)) {
return new PortugueseStemFilter(tokenStream);
} else if ("light_russian".equalsIgnoreCase(language) || "lightRussian".equalsIgnoreCase(language)) {
return new RussianLightStemFilter(tokenStream);
} else if ("light_spanish".equalsIgnoreCase(language) || "lightSpanish".equalsIgnoreCase(language)) {
return new SpanishLightStemFilter(tokenStream);
} else if ("light_swedish".equalsIgnoreCase(language) || "lightSwedish".equalsIgnoreCase(language)) {
return new SwedishLightStemFilter(tokenStream);
} else if ("greek".equalsIgnoreCase(language)) {
return new GreekStemFilter(tokenStream);
}
return new SnowballFilter(tokenStream, language);
}
}
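// --- Usage sketch (illustrative; not part of the original factory) ---
// Assumes Lucene's WhitespaceTokenizer and java.io.StringReader are available;
// shows how the language dispatch in create(...) above would be exercised.
class StemmerFactoryUsageSketch {
TokenStream stem(StemmerTokenFilterFactory factory, Version version) {
TokenStream tokens = new WhitespaceTokenizer(version, new StringReader("searching searched"));
return factory.create(tokens); // with language "porter" this returns a PorterStemFilter
}
}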
| 1no label
|
src_main_java_org_elasticsearch_index_analysis_StemmerTokenFilterFactory.java
|
4,070 |
public class ParentQuery extends Query {
private final Query originalParentQuery;
private final String parentType;
private final Filter childrenFilter;
private Query rewrittenParentQuery;
private IndexReader rewriteIndexReader;
public ParentQuery(Query parentQuery, String parentType, Filter childrenFilter) {
this.originalParentQuery = parentQuery;
this.parentType = parentType;
this.childrenFilter = childrenFilter;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || obj.getClass() != this.getClass()) {
return false;
}
ParentQuery that = (ParentQuery) obj;
if (!originalParentQuery.equals(that.originalParentQuery)) {
return false;
}
if (!parentType.equals(that.parentType)) {
return false;
}
if (getBoost() != that.getBoost()) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = originalParentQuery.hashCode();
result = 31 * result + parentType.hashCode();
result = 31 * result + Float.floatToIntBits(getBoost());
return result;
}
@Override
public String toString(String field) {
StringBuilder sb = new StringBuilder();
sb.append("ParentQuery[").append(parentType).append("](")
.append(originalParentQuery.toString(field)).append(')')
.append(ToStringUtils.boost(getBoost()));
return sb.toString();
}
@Override
// See TopChildrenQuery#rewrite
public Query rewrite(IndexReader reader) throws IOException {
if (rewrittenParentQuery == null) {
rewriteIndexReader = reader;
rewrittenParentQuery = originalParentQuery.rewrite(reader);
}
return this;
}
@Override
public void extractTerms(Set<Term> terms) {
rewrittenParentQuery.extractTerms(terms);
}
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
SearchContext searchContext = SearchContext.current();
searchContext.idCache().refresh(searchContext.searcher().getTopReaderContext().leaves());
Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore = searchContext.cacheRecycler().objectFloatMap(-1);
ParentUidCollector collector = new ParentUidCollector(uidToScore.v(), searchContext, parentType);
final Query parentQuery;
if (rewrittenParentQuery == null) {
parentQuery = rewrittenParentQuery = searcher.rewrite(originalParentQuery);
} else {
assert rewriteIndexReader == searcher.getIndexReader();
parentQuery = rewrittenParentQuery;
}
IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader());
indexSearcher.setSimilarity(searcher.getSimilarity());
indexSearcher.search(parentQuery, collector);
if (uidToScore.v().isEmpty()) {
uidToScore.release();
return Queries.newMatchNoDocsQuery().createWeight(searcher);
}
ChildWeight childWeight = new ChildWeight(parentQuery.createWeight(searcher), childrenFilter, searchContext, uidToScore);
searchContext.addReleasable(childWeight);
return childWeight;
}
private static class ParentUidCollector extends NoopCollector {
private final ObjectFloatOpenHashMap<HashedBytesArray> uidToScore;
private final SearchContext searchContext;
private final String parentType;
private Scorer scorer;
private IdReaderTypeCache typeCache;
ParentUidCollector(ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, SearchContext searchContext, String parentType) {
this.uidToScore = uidToScore;
this.searchContext = searchContext;
this.parentType = parentType;
}
@Override
public void collect(int doc) throws IOException {
if (typeCache == null) {
return;
}
HashedBytesArray parentUid = typeCache.idByDoc(doc);
uidToScore.put(parentUid, scorer.score());
}
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
typeCache = searchContext.idCache().reader(context.reader()).type(parentType);
}
}
private class ChildWeight extends Weight implements Releasable {
private final Weight parentWeight;
private final Filter childrenFilter;
private final SearchContext searchContext;
private final Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore;
private ChildWeight(Weight parentWeight, Filter childrenFilter, SearchContext searchContext, Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore) {
this.parentWeight = parentWeight;
this.childrenFilter = new ApplyAcceptedDocsFilter(childrenFilter);
this.searchContext = searchContext;
this.uidToScore = uidToScore;
}
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
return new Explanation(getBoost(), "not implemented yet...");
}
@Override
public Query getQuery() {
return ParentQuery.this;
}
@Override
public float getValueForNormalization() throws IOException {
float sum = parentWeight.getValueForNormalization();
sum *= getBoost() * getBoost();
return sum;
}
@Override
public void normalize(float norm, float topLevelBoost) {
}
@Override
public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
DocIdSet childrenDocSet = childrenFilter.getDocIdSet(context, acceptDocs);
if (DocIdSets.isEmpty(childrenDocSet)) {
return null;
}
IdReaderTypeCache idTypeCache = searchContext.idCache().reader(context.reader()).type(parentType);
if (idTypeCache == null) {
return null;
}
return new ChildScorer(this, uidToScore.v(), childrenDocSet.iterator(), idTypeCache);
}
@Override
public boolean release() throws ElasticsearchException {
Releasables.release(uidToScore);
return true;
}
}
private static class ChildScorer extends Scorer {
private final ObjectFloatOpenHashMap<HashedBytesArray> uidToScore;
private final DocIdSetIterator childrenIterator;
private final IdReaderTypeCache typeCache;
private int currentChildDoc = -1;
private float currentScore;
ChildScorer(Weight weight, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, DocIdSetIterator childrenIterator, IdReaderTypeCache typeCache) {
super(weight);
this.uidToScore = uidToScore;
this.childrenIterator = childrenIterator;
this.typeCache = typeCache;
}
@Override
public float score() throws IOException {
return currentScore;
}
@Override
public int freq() throws IOException {
// We don't have the original child query hit info here...
// But the freq of the children could be collected and returned here, though that makes this Scorer more expensive.
return 1;
}
@Override
public int docID() {
return currentChildDoc;
}
@Override
public int nextDoc() throws IOException {
while (true) {
currentChildDoc = childrenIterator.nextDoc();
if (currentChildDoc == DocIdSetIterator.NO_MORE_DOCS) {
return currentChildDoc;
}
HashedBytesArray uid = typeCache.parentIdByDoc(currentChildDoc);
if (uid == null) {
continue;
}
if (uidToScore.containsKey(uid)) {
// Can use lget b/c uidToScore is only used by one thread at a time (via CacheRecycler)
currentScore = uidToScore.lget();
return currentChildDoc;
}
}
}
@Override
public int advance(int target) throws IOException {
currentChildDoc = childrenIterator.advance(target);
if (currentChildDoc == DocIdSetIterator.NO_MORE_DOCS) {
return currentChildDoc;
}
HashedBytesArray uid = typeCache.parentIdByDoc(currentChildDoc);
if (uid == null) {
return nextDoc();
}
if (uidToScore.containsKey(uid)) {
// Can use lget b/c uidToScore is only used by one thread at a time (via CacheRecycler)
currentScore = uidToScore.lget();
return currentChildDoc;
} else {
return nextDoc();
}
}
@Override
public long cost() {
return childrenIterator.cost();
}
}
}
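// --- Illustrative sketch (not part of the original file) ---
// ParentQuery is a two-phase join: ParentUidCollector fills a parent-uid -> score
// map, then ChildScorer scores each child doc by its parent's score. A plain
// HashMap analogue of that lookup:
class ParentChildJoinSketch {
Float childScore(java.util.Map<String, Float> parentUidToScore, String parentUid) {
// null means this child's parent did not match; the real ChildScorer simply skips such docs
return parentUidToScore.get(parentUid);
}
}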
| 1no label
|
src_main_java_org_elasticsearch_index_search_child_ParentQuery.java
|
989 |
public abstract class TransportShardReplicationOperationAction<Request extends ShardReplicationOperationRequest, ReplicaRequest extends ActionRequest, Response extends ActionResponse> extends TransportAction<Request, Response> {
protected final TransportService transportService;
protected final ClusterService clusterService;
protected final IndicesService indicesService;
protected final ShardStateAction shardStateAction;
protected final ReplicationType defaultReplicationType;
protected final WriteConsistencyLevel defaultWriteConsistencyLevel;
protected final TransportRequestOptions transportOptions;
final String transportAction;
final String transportReplicaAction;
final String executor;
final boolean checkWriteConsistency;
protected TransportShardReplicationOperationAction(Settings settings, TransportService transportService,
ClusterService clusterService, IndicesService indicesService,
ThreadPool threadPool, ShardStateAction shardStateAction) {
super(settings, threadPool);
this.transportService = transportService;
this.clusterService = clusterService;
this.indicesService = indicesService;
this.shardStateAction = shardStateAction;
this.transportAction = transportAction();
this.transportReplicaAction = transportReplicaAction();
this.executor = executor();
this.checkWriteConsistency = checkWriteConsistency();
transportService.registerHandler(transportAction, new OperationTransportHandler());
transportService.registerHandler(transportReplicaAction, new ReplicaOperationTransportHandler());
this.transportOptions = transportOptions();
this.defaultReplicationType = ReplicationType.fromString(settings.get("action.replication_type", "sync"));
this.defaultWriteConsistencyLevel = WriteConsistencyLevel.fromString(settings.get("action.write_consistency", "quorum"));
}
@Override
protected void doExecute(Request request, ActionListener<Response> listener) {
new AsyncShardOperationAction(request, listener).start();
}
protected abstract Request newRequestInstance();
protected abstract ReplicaRequest newReplicaRequestInstance();
protected abstract Response newResponseInstance();
protected abstract String transportAction();
protected abstract String executor();
protected abstract PrimaryResponse<Response, ReplicaRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest);
protected abstract void shardOperationOnReplica(ReplicaOperationRequest shardRequest);
/**
* Called once replica operations have been dispatched on the replicas.
*/
protected void postPrimaryOperation(Request request, PrimaryResponse<Response, ReplicaRequest> response) {
}
protected abstract ShardIterator shards(ClusterState clusterState, Request request) throws ElasticsearchException;
protected abstract boolean checkWriteConsistency();
protected abstract ClusterBlockException checkGlobalBlock(ClusterState state, Request request);
protected abstract ClusterBlockException checkRequestBlock(ClusterState state, Request request);
/**
* Resolves the request, by default simply setting the concrete index (if it's an aliased one). If resolving
* means a different execution, then return false here to indicate not to continue and execute this request.
*/
protected boolean resolveRequest(ClusterState state, Request request, ActionListener<Response> listener) {
request.index(state.metaData().concreteIndex(request.index()));
return true;
}
protected TransportRequestOptions transportOptions() {
return TransportRequestOptions.EMPTY;
}
/**
* Should replica operations be ignored. Defaults to <tt>false</tt>, meaning operations
* will also be executed on the replicas.
*/
protected boolean ignoreReplicas() {
return false;
}
private String transportReplicaAction() {
return transportAction() + "/replica";
}
protected boolean retryPrimaryException(Throwable e) {
return TransportActions.isShardNotAvailableException(e);
}
/**
* Should an exception be ignored when the operation is performed on the replica.
*/
boolean ignoreReplicaException(Throwable e) {
if (TransportActions.isShardNotAvailableException(e)) {
return true;
}
Throwable cause = ExceptionsHelper.unwrapCause(e);
if (cause instanceof ConnectTransportException) {
return true;
}
// on version conflict or document missing, it means
// that a newer change has crept into the replica, and it's fine
if (cause instanceof VersionConflictEngineException) {
return true;
}
// same here
if (cause instanceof DocumentAlreadyExistsException) {
return true;
}
return false;
}
class OperationTransportHandler extends BaseTransportRequestHandler<Request> {
@Override
public Request newInstance() {
return newRequestInstance();
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
@Override
public void messageReceived(final Request request, final TransportChannel channel) throws Exception {
// no need to have a threaded listener since we just send back a response
request.listenerThreaded(false);
// if we have a local operation, execute it on a thread since we don't spawn
request.operationThreaded(true);
execute(request, new ActionListener<Response>() {
@Override
public void onResponse(Response result) {
try {
channel.sendResponse(result);
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(e);
} catch (Throwable e1) {
logger.warn("Failed to send response for " + transportAction, e1);
}
}
});
}
}
class ReplicaOperationTransportHandler extends BaseTransportRequestHandler<ReplicaOperationRequest> {
@Override
public ReplicaOperationRequest newInstance() {
return new ReplicaOperationRequest();
}
@Override
public String executor() {
return executor;
}
// we must never reject because of thread pool capacity on replicas
@Override
public boolean isForceExecution() {
return true;
}
@Override
public void messageReceived(final ReplicaOperationRequest request, final TransportChannel channel) throws Exception {
shardOperationOnReplica(request);
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
protected class PrimaryOperationRequest implements Streamable {
public int shardId;
public Request request;
public PrimaryOperationRequest() {
}
public PrimaryOperationRequest(int shardId, Request request) {
this.shardId = shardId;
this.request = request;
}
@Override
public void readFrom(StreamInput in) throws IOException {
shardId = in.readVInt();
request = newRequestInstance();
request.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(shardId);
request.writeTo(out);
}
}
protected class ReplicaOperationRequest extends TransportRequest {
public int shardId;
public ReplicaRequest request;
public ReplicaOperationRequest() {
}
public ReplicaOperationRequest(int shardId, ReplicaRequest request) {
super(request);
this.shardId = shardId;
this.request = request;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardId = in.readVInt();
request = newReplicaRequestInstance();
request.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(shardId);
request.writeTo(out);
}
}
protected class AsyncShardOperationAction {
private final ActionListener<Response> listener;
private final Request request;
private volatile ClusterState clusterState;
private volatile ShardIterator shardIt;
private final AtomicBoolean primaryOperationStarted = new AtomicBoolean();
private final ReplicationType replicationType;
protected final long startTime = System.currentTimeMillis();
AsyncShardOperationAction(Request request, ActionListener<Response> listener) {
this.request = request;
this.listener = listener;
if (request.replicationType() != ReplicationType.DEFAULT) {
replicationType = request.replicationType();
} else {
replicationType = defaultReplicationType;
}
}
public void start() {
start(false);
}
/**
* Returns <tt>true</tt> if the action started to be performed on the primary (or is done).
*/
public boolean start(final boolean fromClusterEvent) throws ElasticsearchException {
this.clusterState = clusterService.state();
try {
ClusterBlockException blockException = checkGlobalBlock(clusterState, request);
if (blockException != null) {
if (blockException.retryable()) {
logger.trace("cluster is blocked ({}), scheduling a retry", blockException.getMessage());
retry(fromClusterEvent, blockException);
return false;
} else {
throw blockException;
}
}
// check if we need to execute, and if not, return
if (!resolveRequest(clusterState, request, listener)) {
return true;
}
blockException = checkRequestBlock(clusterState, request);
if (blockException != null) {
if (blockException.retryable()) {
logger.trace("cluster is blocked ({}), scheduling a retry", blockException.getMessage());
retry(fromClusterEvent, blockException);
return false;
} else {
throw blockException;
}
}
shardIt = shards(clusterState, request);
} catch (Throwable e) {
listener.onFailure(e);
return true;
}
// no shards known yet; might be in the window between index gateway recovery and shardIt initialization
if (shardIt.size() == 0) {
logger.trace("no shard instances known for shard [{}], scheduling a retry", shardIt.shardId());
retry(fromClusterEvent, null);
return false;
}
boolean foundPrimary = false;
ShardRouting shardX;
while ((shardX = shardIt.nextOrNull()) != null) {
final ShardRouting shard = shardX;
// we only deal with the primary shard here...
if (!shard.primary()) {
continue;
}
if (!shard.active() || !clusterState.nodes().nodeExists(shard.currentNodeId())) {
logger.trace("primary shard [{}] is not yet active or we do not know the node it is assigned to [{}], scheduling a retry.", shard.shardId(), shard.currentNodeId());
retry(fromClusterEvent, null);
return false;
}
// check here for consistency
if (checkWriteConsistency) {
WriteConsistencyLevel consistencyLevel = defaultWriteConsistencyLevel;
if (request.consistencyLevel() != WriteConsistencyLevel.DEFAULT) {
consistencyLevel = request.consistencyLevel();
}
int requiredNumber = 1;
if (consistencyLevel == WriteConsistencyLevel.QUORUM && shardIt.size() > 2) {
// quorum only makes sense with more than 2 copies, otherwise it's 1 shard with 1 replica, and quorum is 1 (which is what requiredNumber is initialized to)
requiredNumber = (shardIt.size() / 2) + 1;
} else if (consistencyLevel == WriteConsistencyLevel.ALL) {
requiredNumber = shardIt.size();
}
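// e.g. 1 primary + 2 replicas -> size 3, quorum = 3 / 2 + 1 = 2 active copies;
// 1 primary + 1 replica -> size 2, so the requirement stays at 1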
if (shardIt.sizeActive() < requiredNumber) {
logger.trace("not enough active copies of shard [{}] to meet write consistency of [{}] (have {}, needed {}), scheduling a retry.",
shard.shardId(), consistencyLevel, shardIt.sizeActive(), requiredNumber);
retry(fromClusterEvent, null);
return false;
}
}
if (!primaryOperationStarted.compareAndSet(false, true)) {
return true;
}
foundPrimary = true;
if (shard.currentNodeId().equals(clusterState.nodes().localNodeId())) {
try {
if (request.operationThreaded()) {
request.beforeLocalFork();
threadPool.executor(executor).execute(new Runnable() {
@Override
public void run() {
try {
performOnPrimary(shard.id(), shard, clusterState);
} catch (Throwable t) {
listener.onFailure(t);
}
}
});
} else {
performOnPrimary(shard.id(), shard, clusterState);
}
} catch (Throwable t) {
listener.onFailure(t);
}
} else {
DiscoveryNode node = clusterState.nodes().get(shard.currentNodeId());
transportService.sendRequest(node, transportAction, request, transportOptions, new BaseTransportResponseHandler<Response>() {
@Override
public Response newInstance() {
return newResponseInstance();
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
@Override
public void handleResponse(Response response) {
listener.onResponse(response);
}
@Override
public void handleException(TransportException exp) {
// if we got disconnected from the node, or the node / shard is not in the right state (being closed)
if (exp.unwrapCause() instanceof ConnectTransportException || exp.unwrapCause() instanceof NodeClosedException ||
retryPrimaryException(exp)) {
primaryOperationStarted.set(false);
// we already marked it as started when we executed it (removed the listener) so pass false
// to re-add to the cluster listener
logger.trace("received an error from node the primary was assigned to ({}), scheduling a retry", exp.getMessage());
retry(false, null);
} else {
listener.onFailure(exp);
}
}
});
}
break;
}
// we won't find a primary if there are no shards in the shard iterator, retry...
if (!foundPrimary) {
logger.trace("couldn't find a eligible primary shard, scheduling for retry.");
retry(fromClusterEvent, null);
return false;
}
return true;
}
void retry(boolean fromClusterEvent, @Nullable final Throwable failure) {
if (fromClusterEvent) {
logger.trace("retry scheduling ignored as it as we already have a listener in place");
return;
}
// make it threaded operation so we fork on the discovery listener thread
request.beforeLocalFork();
request.operationThreaded(true);
TimeValue timeout = new TimeValue(request.timeout().millis() - (System.currentTimeMillis() - startTime));
if (timeout.millis() <= 0) {
raiseTimeoutFailure(timeout, failure);
return;
}
clusterService.add(timeout, new TimeoutClusterStateListener() {
@Override
public void postAdded() {
// check if state version changed while we were adding this listener
long sampledVersion = clusterState.version();
long currentVersion = clusterService.state().version();
if (sampledVersion != currentVersion) {
logger.trace("state change while we were trying to add listener, trying to start again, sampled_version [{}], current_version [{}]", sampledVersion, currentVersion);
if (start(true)) {
// if we managed to start and perform the operation on the primary, we can remove this listener
clusterService.remove(this);
}
}
}
@Override
public void onClose() {
clusterService.remove(this);
listener.onFailure(new NodeClosedException(clusterService.localNode()));
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
logger.trace("cluster changed (version {}), trying to start again", event.state().version());
if (start(true)) {
// if we managed to start and perform the operation on the primary, we can remove this listener
clusterService.remove(this);
}
}
@Override
public void onTimeout(TimeValue timeValue) {
// just to be on the safe side, see if we can start it now?
if (start(true)) {
clusterService.remove(this);
return;
}
clusterService.remove(this);
raiseTimeoutFailure(timeValue, failure);
}
});
}
void raiseTimeoutFailure(TimeValue timeout, @Nullable Throwable failure) {
if (failure == null) {
if (shardIt == null) {
failure = new UnavailableShardsException(null, "no available shards: Timeout waiting for [" + timeout + "], request: " + request.toString());
} else {
failure = new UnavailableShardsException(shardIt.shardId(), "[" + shardIt.size() + "] shards, [" + shardIt.sizeActive() + "] active : Timeout waiting for [" + timeout + "], request: " + request.toString());
}
}
listener.onFailure(failure);
}
void performOnPrimary(int primaryShardId, final ShardRouting shard, ClusterState clusterState) {
try {
PrimaryResponse<Response, ReplicaRequest> response = shardOperationOnPrimary(clusterState, new PrimaryOperationRequest(primaryShardId, request));
performReplicas(response);
} catch (Throwable e) {
// shard has not been allocated yet, retry it here
if (retryPrimaryException(e)) {
primaryOperationStarted.set(false);
logger.trace("had an error while performing operation on primary ({}), scheduling a retry.", e.getMessage());
retry(false, e);
return;
}
if (e instanceof ElasticsearchException && ((ElasticsearchException) e).status() == RestStatus.CONFLICT) {
if (logger.isTraceEnabled()) {
logger.trace(shard.shortSummary() + ": Failed to execute [" + request + "]", e);
}
} else {
if (logger.isDebugEnabled()) {
logger.debug(shard.shortSummary() + ": Failed to execute [" + request + "]", e);
}
}
listener.onFailure(e);
}
}
void performReplicas(final PrimaryResponse<Response, ReplicaRequest> response) {
if (ignoreReplicas()) {
postPrimaryOperation(request, response);
listener.onResponse(response.response());
return;
}
ShardRouting shard;
// we double check on the state, if it got changed we need to make sure we take the latest one cause
// maybe a replica shard started its recovery process and we need to apply it there...
// we also need to make sure if the new state has a new primary shard (that we indexed to before) started
// and assigned to another node (while the indexing happened). In that case, we want to apply it on the
// new primary shard as well...
ClusterState newState = clusterService.state();
ShardRouting newPrimaryShard = null;
if (clusterState != newState) {
shardIt.reset();
ShardRouting originalPrimaryShard = null;
while ((shard = shardIt.nextOrNull()) != null) {
if (shard.primary()) {
originalPrimaryShard = shard;
break;
}
}
if (originalPrimaryShard == null || !originalPrimaryShard.active()) {
throw new ElasticsearchIllegalStateException("unexpected state, failed to find primary shard on an index operation that succeeded");
}
clusterState = newState;
shardIt = shards(newState, request);
while ((shard = shardIt.nextOrNull()) != null) {
if (shard.primary()) {
if (originalPrimaryShard.currentNodeId().equals(shard.currentNodeId())) {
newPrimaryShard = null;
} else {
newPrimaryShard = shard;
}
break;
}
}
}
// initialize the counter
int replicaCounter = shardIt.assignedReplicasIncludingRelocating();
if (newPrimaryShard != null) {
replicaCounter++;
}
if (replicaCounter == 0) {
postPrimaryOperation(request, response);
listener.onResponse(response.response());
return;
}
if (replicationType == ReplicationType.ASYNC) {
postPrimaryOperation(request, response);
// async replication, notify the listener
listener.onResponse(response.response());
// now, trick the counter so it won't decrease to 0 and notify the listeners
replicaCounter = Integer.MIN_VALUE;
}
// we add one to the replica count to do the postPrimaryOperation
replicaCounter++;
AtomicInteger counter = new AtomicInteger(replicaCounter);
IndexMetaData indexMetaData = clusterState.metaData().index(request.index());
if (newPrimaryShard != null) {
performOnReplica(response, counter, newPrimaryShard, newPrimaryShard.currentNodeId(), indexMetaData);
}
shardIt.reset(); // reset the iterator
while ((shard = shardIt.nextOrNull()) != null) {
// if its unassigned, nothing to do here...
if (shard.unassigned()) {
continue;
}
// if the shard is primary and relocating, add one to the counter since we perform it on the replica as well
// (and we already did it on the primary)
boolean doOnlyOnRelocating = false;
if (shard.primary()) {
if (shard.relocating()) {
doOnlyOnRelocating = true;
} else {
continue;
}
}
// we index on a replica that is initializing as well since we might not have got the event
// yet that it was started. We will get an exception IllegalShardState exception if its not started
// and that's fine, we will ignore it
if (!doOnlyOnRelocating) {
performOnReplica(response, counter, shard, shard.currentNodeId(), indexMetaData);
}
if (shard.relocating()) {
performOnReplica(response, counter, shard, shard.relocatingNodeId(), indexMetaData);
}
}
// now do the postPrimary operation, and check if the listener needs to be invoked
postPrimaryOperation(request, response);
// we also invoke here in case replicas finish before postPrimaryAction does
if (counter.decrementAndGet() == 0) {
listener.onResponse(response.response());
}
}
void performOnReplica(final PrimaryResponse<Response, ReplicaRequest> response, final AtomicInteger counter, final ShardRouting shard, String nodeId, final IndexMetaData indexMetaData) {
// if we don't have that node, it means that it might have failed and will be created again, in
// this case, we don't have to do the operation, and just let it failover
if (!clusterState.nodes().nodeExists(nodeId)) {
if (counter.decrementAndGet() == 0) {
listener.onResponse(response.response());
}
return;
}
final ReplicaOperationRequest shardRequest = new ReplicaOperationRequest(shardIt.shardId().id(), response.replicaRequest());
if (!nodeId.equals(clusterState.nodes().localNodeId())) {
DiscoveryNode node = clusterState.nodes().get(nodeId);
transportService.sendRequest(node, transportReplicaAction, shardRequest, transportOptions, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
public void handleResponse(TransportResponse.Empty vResponse) {
finishIfPossible();
}
@Override
public void handleException(TransportException exp) {
if (!ignoreReplicaException(exp.unwrapCause())) {
logger.warn("Failed to perform " + transportAction + " on replica " + shardIt.shardId(), exp);
shardStateAction.shardFailed(shard, indexMetaData.getUUID(),
"Failed to perform [" + transportAction + "] on replica, message [" + detailedMessage(exp) + "]");
}
finishIfPossible();
}
private void finishIfPossible() {
if (counter.decrementAndGet() == 0) {
listener.onResponse(response.response());
}
}
});
} else {
if (request.operationThreaded()) {
request.beforeLocalFork();
try {
threadPool.executor(executor).execute(new AbstractRunnable() {
@Override
public void run() {
try {
shardOperationOnReplica(shardRequest);
} catch (Throwable e) {
if (!ignoreReplicaException(e)) {
logger.warn("Failed to perform " + transportAction + " on replica " + shardIt.shardId(), e);
shardStateAction.shardFailed(shard, indexMetaData.getUUID(),
"Failed to perform [" + transportAction + "] on replica, message [" + detailedMessage(e) + "]");
}
}
if (counter.decrementAndGet() == 0) {
listener.onResponse(response.response());
}
}
// we must never reject because of thread pool capacity on replicas
@Override
public boolean isForceExecution() {
return true;
}
});
} catch (Throwable e) {
if (!ignoreReplicaException(e)) {
logger.warn("Failed to perform " + transportAction + " on replica " + shardIt.shardId(), e);
shardStateAction.shardFailed(shard, indexMetaData.getUUID(),
"Failed to perform [" + transportAction + "] on replica, message [" + detailedMessage(e) + "]");
}
// we want to decrement the counter here, in the failure handling, because we got rejected
// from executing on the thread pool
if (counter.decrementAndGet() == 0) {
listener.onResponse(response.response());
}
}
} else {
try {
shardOperationOnReplica(shardRequest);
} catch (Throwable e) {
if (!ignoreReplicaException(e)) {
logger.warn("Failed to perform " + transportAction + " on replica" + shardIt.shardId(), e);
shardStateAction.shardFailed(shard, indexMetaData.getUUID(),
"Failed to perform [" + transportAction + "] on replica, message [" + detailedMessage(e) + "]");
}
}
if (counter.decrementAndGet() == 0) {
listener.onResponse(response.response());
}
}
}
}
}
public static class PrimaryResponse<Response, ReplicaRequest> {
private final ReplicaRequest replicaRequest;
private final Response response;
private final Object payload;
public PrimaryResponse(ReplicaRequest replicaRequest, Response response, Object payload) {
this.replicaRequest = replicaRequest;
this.response = response;
this.payload = payload;
}
public ReplicaRequest replicaRequest() {
return this.replicaRequest;
}
public Response response() {
return response;
}
public Object payload() {
return payload;
}
}
}
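// --- Illustrative sketch (not part of the original file) ---
// The replica fan-out above completes through an AtomicInteger: each replica ack
// (or ignorable failure) decrements it, and whichever path reaches zero sends the
// response exactly once.
class ReplicaCountDownSketch {
final java.util.concurrent.atomic.AtomicInteger counter;
ReplicaCountDownSketch(int replicasPlusPostOp) {
counter = new java.util.concurrent.atomic.AtomicInteger(replicasPlusPostOp);
}
boolean onReplicaFinished() {
return counter.decrementAndGet() == 0; // true exactly once -> safe to notify the listener
}
}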
| 1no label
|
src_main_java_org_elasticsearch_action_support_replication_TransportShardReplicationOperationAction.java
|
199 |
public static class Name {
public static final String Audit = "Auditable_Tab";
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_audit_Auditable.java
|
1,074 |
public class OSQLHelper {
public static final String NAME = "sql";
public static final String VALUE_NOT_PARSED = "_NOT_PARSED_";
public static final String NOT_NULL = "_NOT_NULL_";
public static final String DEFINED = "_DEFINED_";
private static ClassLoader orientClassLoader = OSQLFilterItemAbstract.class.getClassLoader();
/**
* Convert fields from text to real values. Supports: String, RID, Boolean, numbers, collections, maps, sub-commands and NULL.
*
* @param iValue
*          Value to convert.
* @param iContext
*          Command context, used as the parent context of embedded sub-commands.
* @return The value converted if recognized, otherwise VALUE_NOT_PARSED
*/
public static Object parseValue(String iValue, final OCommandContext iContext) {
if (iValue == null)
return null;
iValue = iValue.trim();
Object fieldValue = VALUE_NOT_PARSED;
if (iValue.startsWith("'") && iValue.endsWith("'") || iValue.startsWith("\"") && iValue.endsWith("\""))
// STRING
fieldValue = OStringSerializerHelper.getStringContent(iValue);
else if (iValue.charAt(0) == OStringSerializerHelper.LIST_BEGIN
&& iValue.charAt(iValue.length() - 1) == OStringSerializerHelper.LIST_END) {
// COLLECTION/ARRAY
final List<String> items = OStringSerializerHelper.smartSplit(iValue.substring(1, iValue.length() - 1),
OStringSerializerHelper.RECORD_SEPARATOR);
final List<Object> coll = new ArrayList<Object>();
for (String item : items) {
coll.add(parseValue(item, iContext));
}
fieldValue = coll;
} else if (iValue.charAt(0) == OStringSerializerHelper.MAP_BEGIN
&& iValue.charAt(iValue.length() - 1) == OStringSerializerHelper.MAP_END) {
// MAP
final List<String> items = OStringSerializerHelper.smartSplit(iValue.substring(1, iValue.length() - 1),
OStringSerializerHelper.RECORD_SEPARATOR);
final Map<Object, Object> map = new HashMap<Object, Object>();
for (String item : items) {
final List<String> parts = OStringSerializerHelper.smartSplit(item, OStringSerializerHelper.ENTRY_SEPARATOR);
if (parts == null || parts.size() != 2)
throw new OCommandSQLParsingException("Map found but entries are not defined as <key>:<value>");
map.put(parseValue(parts.get(0), iContext), parseValue(parts.get(1), iContext));
}
if (map.containsKey(ODocumentHelper.ATTRIBUTE_TYPE))
// IT'S A DOCUMENT
fieldValue = new ODocument(map);
else
fieldValue = map;
} else if (iValue.charAt(0) == OStringSerializerHelper.EMBEDDED_BEGIN
&& iValue.charAt(iValue.length() - 1) == OStringSerializerHelper.EMBEDDED_END) {
// SUB-COMMAND
fieldValue = new OCommandSQL(iValue.substring(1, iValue.length() - 1));
((OCommandSQL) fieldValue).getContext().setParent(iContext);
} else if (iValue.charAt(0) == ORID.PREFIX)
// RID
fieldValue = new ORecordId(iValue.trim());
else {
final String upperCase = iValue.toUpperCase(Locale.ENGLISH);
if (upperCase.equals("NULL"))
// NULL
fieldValue = null;
else if (upperCase.equals("NOT NULL"))
// NULL
fieldValue = NOT_NULL;
else if (upperCase.equals("DEFINED"))
// NULL
fieldValue = DEFINED;
else if (upperCase.equals("TRUE"))
// BOOLEAN, TRUE
fieldValue = Boolean.TRUE;
else if (upperCase.equals("FALSE"))
// BOOLEAN, FALSE
fieldValue = Boolean.FALSE;
else {
final Object v = parseStringNumber(iValue);
if (v != null)
fieldValue = v;
}
}
return fieldValue;
}
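// Illustrative input -> output pairs for parseValue, derived from the branches above:
//   "'abc'"   -> String "abc"
//   "#10:3"   -> ORecordId #10:3
//   "[1, 2]"  -> List<Object> containing 1 and 2
//   "null"    -> null, "true" -> Boolean.TRUE
//   "foo"     -> VALUE_NOT_PARSED (left for field/function parsing by the callers)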
public static Object parseStringNumber(final String iValue) {
final OType t = ORecordSerializerCSVAbstract.getType(iValue);
if (t == OType.INTEGER)
return Integer.parseInt(iValue);
else if (t == OType.LONG)
return Long.parseLong(iValue);
else if (t == OType.FLOAT)
return Float.parseFloat(iValue);
else if (t == OType.SHORT)
return Short.parseShort(iValue);
else if (t == OType.BYTE)
return Byte.parseByte(iValue);
else if (t == OType.DOUBLE)
return Double.parseDouble(iValue);
else if (t == OType.DATE || t == OType.DATETIME)
return new Date(Long.parseLong(iValue));
return null;
}
public static Object parseValue(final OSQLPredicate iSQLFilter, final OBaseParser iCommand, final String iWord,
final OCommandContext iContext) {
if (iWord.charAt(0) == OStringSerializerHelper.PARAMETER_POSITIONAL
|| iWord.charAt(0) == OStringSerializerHelper.PARAMETER_NAMED) {
if (iSQLFilter != null)
return iSQLFilter.addParameter(iWord);
else
return new OSQLFilterItemParameter(iWord);
} else
return parseValue(iCommand, iWord, iContext);
}
public static Object parseValue(final OBaseParser iCommand, final String iWord, final OCommandContext iContext) {
if (iWord.equals("*"))
return "*";
// TRY TO PARSE AS RAW VALUE
final Object v = parseValue(iWord, iContext);
if (v != VALUE_NOT_PARSED)
return v;
if (!iWord.equalsIgnoreCase("any()") && !iWord.equalsIgnoreCase("all()")) {
// TRY TO PARSE AS FUNCTION
final Object func = OSQLHelper.getFunction(iCommand, iWord);
if (func != null)
return func;
}
if (iWord.startsWith("$"))
// CONTEXT VARIABLE
return new OSQLFilterItemVariable(iCommand, iWord);
// PARSE AS FIELD
return new OSQLFilterItemField(iCommand, iWord);
}
public static OSQLFunctionRuntime getFunction(final OBaseParser iCommand, final String iWord) {
final int separator = iWord.indexOf('.');
final int beginParenthesis = iWord.indexOf(OStringSerializerHelper.EMBEDDED_BEGIN);
if (beginParenthesis > -1 && (separator == -1 || separator > beginParenthesis)) {
final int endParenthesis = iWord.indexOf(OStringSerializerHelper.EMBEDDED_END, beginParenthesis);
if (endParenthesis > -1 && Character.isLetter(iWord.charAt(0)))
// FUNCTION: CREATE A RUN-TIME CONTAINER FOR IT TO SAVE THE PARAMETERS
return new OSQLFunctionRuntime(iCommand, iWord);
}
return null;
}
public static Object getValue(final Object iObject) {
if (iObject == null)
return null;
if (iObject instanceof OSQLFilterItem)
return ((OSQLFilterItem) iObject).getValue(null, null);
return iObject;
}
public static Object getValue(final Object iObject, final ORecordInternal<?> iRecord) {
if (iObject == null)
return null;
if (iObject instanceof OSQLFilterItem)
return ((OSQLFilterItem) iObject).getValue(iRecord, null);
return iObject;
}
public static Object getValue(final Object iObject, final ORecordInternal<?> iRecord, final OCommandContext iContext) {
if (iObject == null)
return null;
if (iObject instanceof OSQLFilterItem)
return ((OSQLFilterItem) iObject).getValue(iRecord, iContext);
else if (iObject instanceof String) {
final String s = ((String) iObject).trim();
if (!s.isEmpty() && !OIOUtils.isStringContent(iObject) && !Character.isDigit(s.charAt(0)))
// INTERPRETS IT
return ODocumentHelper.getFieldValue(iRecord, s, iContext);
}
return iObject;
}
public static Object resolveFieldValue(final ODocument iDocument, final String iFieldName, final Object iFieldValue,
final OCommandParameters iArguments) {
if (iFieldValue instanceof OSQLFilterItemField) {
final OSQLFilterItemField f = (OSQLFilterItemField) iFieldValue;
if (f.getRoot().equals("?"))
// POSITIONAL PARAMETER
return iArguments.getNext();
else if (f.getRoot().startsWith(":"))
// NAMED PARAMETER
return iArguments.getByName(f.getRoot().substring(1));
}
if (iFieldValue instanceof ODocument && !((ODocument) iFieldValue).getIdentity().isValid())
// EMBEDDED DOCUMENT
((ODocument) iFieldValue).addOwner(iDocument);
return OSQLHelper.getValue(iFieldValue, iDocument);
}
public static Set<ODocument> bindParameters(final ODocument iDocument, final Map<String, Object> iFields,
final OCommandParameters iArguments, final OCommandContext iContext) {
if (iFields == null)
return null;
Set<ODocument> changedDocuments = null;
// BIND VALUES
for (Entry<String, Object> field : iFields.entrySet()) {
final String fieldName = field.getKey();
Object fieldValue = field.getValue();
if (fieldValue != null) {
if (fieldValue instanceof OCommandSQL) {
final OCommandSQL cmd = (OCommandSQL) fieldValue;
cmd.getContext().setParent(iContext);
fieldValue = ODatabaseRecordThreadLocal.INSTANCE.get().command(cmd).execute();
// CHECK FOR CONVERSIONS
if (iDocument.getSchemaClass() != null) {
final OProperty prop = iDocument.getSchemaClass().getProperty(fieldName);
if (prop != null) {
if (prop.getType() == OType.LINK) {
if (OMultiValue.isMultiValue(fieldValue) && OMultiValue.getSize(fieldValue) == 1)
// GET THE FIRST ITEM AS UNIQUE LINK
fieldValue = OMultiValue.getFirstValue(fieldValue);
}
}
}
if (OMultiValue.isMultiValue(fieldValue)) {
final List<Object> tempColl = new ArrayList<Object>(OMultiValue.getSize(fieldValue));
String singleFieldName = null;
for (Object o : OMultiValue.getMultiValueIterable(fieldValue)) {
if (o instanceof OIdentifiable && !((OIdentifiable) o).getIdentity().isPersistent()) {
// TEMPORARY / EMBEDDED
final ORecord<?> rec = ((OIdentifiable) o).getRecord();
if (rec != null && rec instanceof ODocument) {
// CHECK FOR ONE FIELD ONLY
final ODocument doc = (ODocument) rec;
if (doc.fields() == 1) {
singleFieldName = doc.fieldNames()[0];
tempColl.add(doc.field(singleFieldName));
} else {
// TRANSFORM IT IN EMBEDDED
doc.getIdentity().reset();
doc.addOwner(iDocument);
tempColl.add(doc);
}
}
}
}
fieldValue = tempColl;
}
}
}
final ODocument doc = iDocument.field(fieldName, resolveFieldValue(iDocument, fieldName, fieldValue, iArguments));
if (doc != null) {
if (changedDocuments == null)
changedDocuments = new HashSet<ODocument>();
changedDocuments.add(doc);
}
}
return changedDocuments;
}
public static String[] getAllMethodNames() {
final List<String> methods = new ArrayList<String>();
final Iterator<OSQLMethodFactory> ite = lookupProviderWithOrientClassLoader(OSQLMethodFactory.class, orientClassLoader);
while (ite.hasNext()) {
final OSQLMethodFactory factory = ite.next();
methods.addAll(factory.getMethodNames());
}
return methods.toArray(new String[methods.size()]);
}
public static OSQLMethod getMethodByName(String name) {
name = name.toLowerCase(Locale.ENGLISH); // locale-insensitive lowercasing, matching parseValue() above
final Iterator<OSQLMethodFactory> ite = lookupProviderWithOrientClassLoader(OSQLMethodFactory.class, orientClassLoader);
while (ite.hasNext()) {
final OSQLMethodFactory factory = ite.next();
if (factory.hasMethod(name)) {
return factory.createMethod(name);
}
}
return null;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_OSQLHelper.java
|
1,368 |
public abstract class OTransactionAbstract implements OTransaction {
protected final ODatabaseRecordTx database;
protected TXSTATUS status = TXSTATUS.INVALID;
protected OTransactionAbstract(final ODatabaseRecordTx iDatabase) {
database = iDatabase;
}
public boolean isActive() {
return status != TXSTATUS.INVALID;
}
public TXSTATUS getStatus() {
return status;
}
public ODatabaseRecordTx getDatabase() {
return database;
}
public static void updateCacheFromEntries(final OTransaction tx, final Iterable<? extends ORecordOperation> entries,
final boolean updateStrategy) {
final OLevel1RecordCache dbCache = tx.getDatabase().getLevel1Cache();
for (ORecordOperation txEntry : entries) {
if (!updateStrategy)
// ALWAYS REMOVE THE RECORD FROM CACHE
dbCache.deleteRecord(txEntry.getRecord().getIdentity());
else if (txEntry.type == ORecordOperation.DELETED)
// DELETION
dbCache.deleteRecord(txEntry.getRecord().getIdentity());
else if (txEntry.type == ORecordOperation.UPDATED || txEntry.type == ORecordOperation.CREATED)
// UPDATE OR CREATE
dbCache.updateRecord(txEntry.getRecord());
}
}
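// Behaviour summary of updateCacheFromEntries, derived from the branches above:
//   updateStrategy = false                 -> every tx record is evicted from the level-1 cache
//   updateStrategy = true, type DELETED    -> record evicted
//   updateStrategy = true, CREATED/UPDATED -> cache entry refreshed with the tx copy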
protected void invokeCommitAgainstListeners() {
// WAKE UP LISTENERS
for (ODatabaseListener listener : ((ODatabaseRaw) database.getUnderlying()).browseListeners())
try {
listener.onBeforeTxCommit(database.getUnderlying());
} catch (Throwable t) {
OLogManager.instance().error(this, "Error on commit callback against listener: " + listener, t);
}
}
protected void invokeRollbackAgainstListeners() {
// WAKE UP LISTENERS
for (ODatabaseListener listener : ((ODatabaseRaw) database.getUnderlying()).browseListeners())
try {
listener.onBeforeTxRollback(database.getUnderlying());
} catch (Throwable t) {
OLogManager.instance().error(this, "Error on rollback callback against listener: " + listener, t);
}
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_tx_OTransactionAbstract.java
|
4,700 |
public class PluginsService extends AbstractComponent {
private static final String ES_PLUGIN_PROPERTIES = "es-plugin.properties";
private final Environment environment;
/**
* We keep around a list of jvm plugins
*/
private final ImmutableList<Tuple<PluginInfo, Plugin>> plugins;
private final ImmutableMap<Plugin, List<OnModuleReference>> onModuleReferences;
private PluginsInfo cachedPluginsInfo;
private final TimeValue refreshInterval;
private long lastRefresh;
static class OnModuleReference {
public final Class<? extends Module> moduleClass;
public final Method onModuleMethod;
OnModuleReference(Class<? extends Module> moduleClass, Method onModuleMethod) {
this.moduleClass = moduleClass;
this.onModuleMethod = onModuleMethod;
}
}
/**
* Constructs a new PluginsService
* @param settings The settings of the system
* @param environment The environment of the system
*/
public PluginsService(Settings settings, Environment environment) {
super(settings);
this.environment = environment;
ImmutableList.Builder<Tuple<PluginInfo, Plugin>> tupleBuilder = ImmutableList.builder();
// first we load all the default plugins from the settings
String[] defaultPluginsClasses = settings.getAsArray("plugin.types");
for (String pluginClass : defaultPluginsClasses) {
Plugin plugin = loadPlugin(pluginClass, settings);
PluginInfo pluginInfo = new PluginInfo(plugin.name(), plugin.description(), hasSite(plugin.name()), true, PluginInfo.VERSION_NOT_AVAILABLE);
if (logger.isTraceEnabled()) {
logger.trace("plugin loaded from settings [{}]", pluginInfo);
}
tupleBuilder.add(new Tuple<PluginInfo, Plugin>(pluginInfo, plugin));
}
// now, find all the ones that are in the classpath
loadPluginsIntoClassLoader();
tupleBuilder.addAll(loadPluginsFromClasspath(settings));
this.plugins = tupleBuilder.build();
// We need to build a List of jvm and site plugins for checking mandatory plugins
Map<String, Plugin> jvmPlugins = Maps.newHashMap();
List<String> sitePlugins = Lists.newArrayList();
for (Tuple<PluginInfo, Plugin> tuple : this.plugins) {
jvmPlugins.put(tuple.v2().name(), tuple.v2());
if (tuple.v1().isSite()) {
sitePlugins.add(tuple.v1().getName());
}
}
// we load site plugins
ImmutableList<Tuple<PluginInfo, Plugin>> tuples = loadSitePlugins();
for (Tuple<PluginInfo, Plugin> tuple : tuples) {
sitePlugins.add(tuple.v1().getName());
}
// Checking expected plugins
String[] mandatoryPlugins = settings.getAsArray("plugin.mandatory", null);
if (mandatoryPlugins != null) {
Set<String> missingPlugins = Sets.newHashSet();
for (String mandatoryPlugin : mandatoryPlugins) {
if (!jvmPlugins.containsKey(mandatoryPlugin) && !sitePlugins.contains(mandatoryPlugin) && !missingPlugins.contains(mandatoryPlugin)) {
missingPlugins.add(mandatoryPlugin);
}
}
if (!missingPlugins.isEmpty()) {
throw new ElasticsearchException("Missing mandatory plugins [" + Strings.collectionToDelimitedString(missingPlugins, ", ") + "]");
}
}
logger.info("loaded {}, sites {}", jvmPlugins.keySet(), sitePlugins);
MapBuilder<Plugin, List<OnModuleReference>> onModuleReferences = MapBuilder.newMapBuilder();
for (Plugin plugin : jvmPlugins.values()) {
List<OnModuleReference> list = Lists.newArrayList();
for (Method method : plugin.getClass().getDeclaredMethods()) {
if (!method.getName().equals("onModule")) {
continue;
}
if (method.getParameterTypes().length == 0 || method.getParameterTypes().length > 1) {
logger.warn("Plugin: {} implementing onModule with no parameters or more than one parameter", plugin.name());
continue;
}
Class moduleClass = method.getParameterTypes()[0];
if (!Module.class.isAssignableFrom(moduleClass)) {
logger.warn("Plugin: {} implementing onModule by the type is not of Module type {}", plugin.name(), moduleClass);
continue;
}
method.setAccessible(true);
list.add(new OnModuleReference(moduleClass, method));
}
if (!list.isEmpty()) {
onModuleReferences.put(plugin, list);
}
}
this.onModuleReferences = onModuleReferences.immutableMap();
this.refreshInterval = componentSettings.getAsTime("info_refresh_interval", TimeValue.timeValueSeconds(10));
}
public ImmutableList<Tuple<PluginInfo, Plugin>> plugins() {
return plugins;
}
public void processModules(Iterable<Module> modules) {
for (Module module : modules) {
processModule(module);
}
}
public void processModule(Module module) {
for (Tuple<PluginInfo, Plugin> plugin : plugins()) {
plugin.v2().processModule(module);
// see if there are onModule references
List<OnModuleReference> references = onModuleReferences.get(plugin.v2());
if (references != null) {
for (OnModuleReference reference : references) {
if (reference.moduleClass.isAssignableFrom(module.getClass())) {
try {
reference.onModuleMethod.invoke(plugin.v2(), module);
} catch (Exception e) {
logger.warn("plugin {}, failed to invoke custom onModule method", e, plugin.v2().name());
}
}
}
}
}
}
public Settings updatedSettings() {
ImmutableSettings.Builder builder = ImmutableSettings.settingsBuilder()
.put(this.settings);
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
builder.put(plugin.v2().additionalSettings());
}
return builder.build();
}
public Collection<Class<? extends Module>> modules() {
List<Class<? extends Module>> modules = Lists.newArrayList();
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
modules.addAll(plugin.v2().modules());
}
return modules;
}
public Collection<Module> modules(Settings settings) {
List<Module> modules = Lists.newArrayList();
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
modules.addAll(plugin.v2().modules(settings));
}
return modules;
}
public Collection<Class<? extends LifecycleComponent>> services() {
List<Class<? extends LifecycleComponent>> services = Lists.newArrayList();
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
services.addAll(plugin.v2().services());
}
return services;
}
public Collection<Class<? extends Module>> indexModules() {
List<Class<? extends Module>> modules = Lists.newArrayList();
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
modules.addAll(plugin.v2().indexModules());
}
return modules;
}
public Collection<Module> indexModules(Settings settings) {
List<Module> modules = Lists.newArrayList();
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
modules.addAll(plugin.v2().indexModules(settings));
}
return modules;
}
public Collection<Class<? extends CloseableIndexComponent>> indexServices() {
List<Class<? extends CloseableIndexComponent>> services = Lists.newArrayList();
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
services.addAll(plugin.v2().indexServices());
}
return services;
}
public Collection<Class<? extends Module>> shardModules() {
List<Class<? extends Module>> modules = Lists.newArrayList();
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
modules.addAll(plugin.v2().shardModules());
}
return modules;
}
public Collection<Module> shardModules(Settings settings) {
List<Module> modules = Lists.newArrayList();
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
modules.addAll(plugin.v2().shardModules(settings));
}
return modules;
}
public Collection<Class<? extends CloseableIndexComponent>> shardServices() {
List<Class<? extends CloseableIndexComponent>> services = Lists.newArrayList();
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
services.addAll(plugin.v2().shardServices());
}
return services;
}
/**
* Get information about plugins (jvm and site plugins).
* Information is cached for 10 seconds by default. Modify the `plugins.info_refresh_interval` property if needed.
* Setting `plugins.info_refresh_interval` to `-1` will cause infinite caching.
* Setting `plugins.info_refresh_interval` to `0` will disable caching.
* @return List of plugins information
*/
synchronized public PluginsInfo info() {
if (refreshInterval.millis() != 0) {
if (cachedPluginsInfo != null &&
(refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) {
if (logger.isTraceEnabled()) {
logger.trace("using cache to retrieve plugins info");
}
return cachedPluginsInfo;
}
lastRefresh = System.currentTimeMillis();
}
if (logger.isTraceEnabled()) {
logger.trace("starting to fetch info on plugins");
}
cachedPluginsInfo = new PluginsInfo();
// We first add all JvmPlugins
for (Tuple<PluginInfo, Plugin> plugin : this.plugins) {
if (logger.isTraceEnabled()) {
logger.trace("adding jvm plugin [{}]", plugin.v1());
}
cachedPluginsInfo.add(plugin.v1());
}
// We reload site plugins (in case of some changes)
for (Tuple<PluginInfo, Plugin> plugin : loadSitePlugins()) {
if (logger.isTraceEnabled()) {
logger.trace("adding site plugin [{}]", plugin.v1());
}
cachedPluginsInfo.add(plugin.v1());
}
return cachedPluginsInfo;
}
private void loadPluginsIntoClassLoader() {
File pluginsFile = environment.pluginsFile();
if (!pluginsFile.exists()) {
return;
}
if (!pluginsFile.isDirectory()) {
return;
}
ClassLoader classLoader = settings.getClassLoader();
Class classLoaderClass = classLoader.getClass();
Method addURL = null;
while (!classLoaderClass.equals(Object.class)) {
try {
addURL = classLoaderClass.getDeclaredMethod("addURL", URL.class);
addURL.setAccessible(true);
break;
} catch (NoSuchMethodException e) {
// no method, try the parent
classLoaderClass = classLoaderClass.getSuperclass();
}
}
if (addURL == null) {
logger.debug("failed to find addURL method on classLoader [" + classLoader + "] to add methods");
return;
}
File[] pluginsFiles = pluginsFile.listFiles();
if (pluginsFiles != null) {
for (File pluginFile : pluginsFiles) {
if (pluginFile.isDirectory()) {
if (logger.isTraceEnabled()) {
logger.trace("--- adding plugin [" + pluginFile.getAbsolutePath() + "]");
}
try {
// add the root
addURL.invoke(classLoader, pluginFile.toURI().toURL());
// gather files to add
List<File> libFiles = Lists.newArrayList();
if (pluginFile.listFiles() != null) {
libFiles.addAll(Arrays.asList(pluginFile.listFiles()));
}
File libLocation = new File(pluginFile, "lib");
if (libLocation.exists() && libLocation.isDirectory() && libLocation.listFiles() != null) {
libFiles.addAll(Arrays.asList(libLocation.listFiles()));
}
// if there are jars in it, add it as well
for (File libFile : libFiles) {
if (!(libFile.getName().endsWith(".jar") || libFile.getName().endsWith(".zip"))) {
continue;
}
addURL.invoke(classLoader, libFile.toURI().toURL());
}
} catch (Throwable e) {
logger.warn("failed to add plugin [" + pluginFile + "]", e);
}
}
}
} else {
logger.debug("failed to list plugins from {}. Check your right access.", pluginsFile.getAbsolutePath());
}
}
private ImmutableList<Tuple<PluginInfo,Plugin>> loadPluginsFromClasspath(Settings settings) {
ImmutableList.Builder<Tuple<PluginInfo, Plugin>> plugins = ImmutableList.builder();
// Trying JVM plugins: looking for es-plugin.properties files
try {
Enumeration<URL> pluginUrls = settings.getClassLoader().getResources(ES_PLUGIN_PROPERTIES);
while (pluginUrls.hasMoreElements()) {
URL pluginUrl = pluginUrls.nextElement();
Properties pluginProps = new Properties();
InputStream is = null;
try {
is = pluginUrl.openStream();
pluginProps.load(is);
String pluginClassName = pluginProps.getProperty("plugin");
String pluginVersion = pluginProps.getProperty("version", PluginInfo.VERSION_NOT_AVAILABLE);
Plugin plugin = loadPlugin(pluginClassName, settings);
// Is it a site plugin as well? Does it also have an embedded _site structure?
File siteFile = new File(new File(environment.pluginsFile(), plugin.name()), "_site");
boolean isSite = siteFile.exists() && siteFile.isDirectory();
if (logger.isTraceEnabled()) {
logger.trace("found a jvm plugin [{}], [{}]{}",
plugin.name(), plugin.description(), isSite ? ": with _site structure" : "");
}
PluginInfo pluginInfo = new PluginInfo(plugin.name(), plugin.description(), isSite, true, pluginVersion);
plugins.add(new Tuple<PluginInfo, Plugin>(pluginInfo, plugin));
} catch (Throwable e) {
logger.warn("failed to load plugin from [" + pluginUrl + "]", e);
} finally {
IOUtils.closeWhileHandlingException(is);
}
}
} catch (IOException e) {
logger.warn("failed to find jvm plugins from classpath", e);
}
return plugins.build();
}
private ImmutableList<Tuple<PluginInfo,Plugin>> loadSitePlugins() {
ImmutableList.Builder<Tuple<PluginInfo, Plugin>> sitePlugins = ImmutableList.builder();
List<String> loadedJvmPlugins = new ArrayList<String>();
// Already known jvm plugins are ignored
for(Tuple<PluginInfo, Plugin> tuple : plugins) {
if (tuple.v1().isSite()) {
loadedJvmPlugins.add(tuple.v1().getName());
}
}
// Let's try to find all _site plugins we have not already found
File pluginsFile = environment.pluginsFile();
if (!pluginsFile.exists() || !pluginsFile.isDirectory()) {
return sitePlugins.build();
}
for (File pluginFile : pluginsFile.listFiles()) {
if (!loadedJvmPlugins.contains(pluginFile.getName())) {
File sitePluginDir = new File(pluginFile, "_site");
if (sitePluginDir.exists()) {
// We have a _site plugin. Let's try to get more information on it
String name = pluginFile.getName();
String version = PluginInfo.VERSION_NOT_AVAILABLE;
String description = PluginInfo.DESCRIPTION_NOT_AVAILABLE;
// We check if es-plugin.properties exists in plugin/_site dir
File pluginPropFile = new File(sitePluginDir, ES_PLUGIN_PROPERTIES);
if (pluginPropFile.exists()) {
Properties pluginProps = new Properties();
InputStream is = null;
try {
is = new FileInputStream(pluginPropFile);
pluginProps.load(is);
description = pluginProps.getProperty("description", PluginInfo.DESCRIPTION_NOT_AVAILABLE);
version = pluginProps.getProperty("version", PluginInfo.VERSION_NOT_AVAILABLE);
} catch (Exception e) {
// Cannot load properties for this site plugin. Ignoring.
logger.debug("cannot load {} file.", e, ES_PLUGIN_PROPERTIES);
} finally {
IOUtils.closeWhileHandlingException(is);
}
}
if (logger.isTraceEnabled()) {
logger.trace("found a site plugin name [{}], version [{}], description [{}]",
name, version, description);
}
sitePlugins.add(new Tuple<PluginInfo, Plugin>(new PluginInfo(name, description, true, false, version), null));
}
}
}
return sitePlugins.build();
}
/**
* @param name plugin name
* @return if this jvm plugin has also a _site structure
*/
private boolean hasSite(String name) {
// Check whether the plugins directory contains a _site folder for this plugin
File pluginsFile = environment.pluginsFile();
if (!pluginsFile.exists() || !pluginsFile.isDirectory()) {
return false;
}
File sitePluginDir = new File(pluginsFile, name + "/_site");
return sitePluginDir.exists();
}
private Plugin loadPlugin(String className, Settings settings) {
try {
Class<? extends Plugin> pluginClass = (Class<? extends Plugin>) settings.getClassLoader().loadClass(className);
Plugin plugin;
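// prefer the single-argument constructor taking Settings; fall back to the no-arg constructor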
try {
plugin = pluginClass.getConstructor(Settings.class).newInstance(settings);
} catch (NoSuchMethodException e) {
try {
plugin = pluginClass.getConstructor().newInstance();
} catch (NoSuchMethodException e1) {
throw new ElasticsearchException("No constructor for [" + pluginClass + "]. A plugin class must " +
"have either an empty default constructor or a single argument constructor accepting a " +
"Settings instance");
}
}
return plugin;
} catch (Throwable e) {
throw new ElasticsearchException("Failed to load plugin class [" + className + "]", e);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_plugins_PluginsService.java
|
1,472 |
public class PlainShardsIterator implements ShardsIterator {
private final List<ShardRouting> shards;
private final int size;
private final int index;
private final int limit;
private volatile int counter;
public PlainShardsIterator(List<ShardRouting> shards) {
this(shards, 0);
}
public PlainShardsIterator(List<ShardRouting> shards, int index) {
this.shards = shards;
this.size = shards.size();
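// normalize the requested start position into [0, size) so iteration can begin at any shard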
if (size == 0) {
this.index = 0;
} else {
this.index = Math.abs(index % size);
}
this.counter = this.index;
this.limit = this.index + size;
}
@Override
public void reset() {
this.counter = this.index;
}
@Override
public int remaining() {
return limit - counter;
}
@Override
public ShardRouting firstOrNull() {
if (size == 0) {
return null;
}
return shards.get(index);
}
@Override
public ShardRouting nextOrNull() {
if (size == 0) {
return null;
}
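// counter runs from index to limit (= index + size); positions at or past size wrap
// around to the start of the list, so each shard is returned exactly once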
int counter = this.counter;
if (counter >= size) {
if (counter >= limit) {
return null;
}
this.counter = counter + 1;
return shards.get(counter - size);
} else {
this.counter = counter + 1;
return shards.get(counter);
}
}
@Override
public int size() {
return size;
}
@Override
public int sizeActive() {
int count = 0;
for (int i = 0; i < size; i++) {
if (shards.get(i).active()) {
count++;
}
}
return count;
}
@Override
public int assignedReplicasIncludingRelocating() {
int count = 0;
for (int i = 0; i < size; i++) {
ShardRouting shard = shards.get(i);
if (shard.unassigned()) {
continue;
}
// if the shard is primary and relocating, add one to the counter since we perform it on the replica as well
// (and we already did it on the primary)
if (shard.primary()) {
if (shard.relocating()) {
count++;
}
} else {
count++;
// if we are relocating the replica, we want to perform the index operation on both the relocating
// shard and the target shard. This means that we won't lose index operations between end of recovery
// and reassignment of the shard by the master node
if (shard.relocating()) {
count++;
}
}
}
return count;
}
@Override
public Iterable<ShardRouting> asUnordered() {
return shards;
}
}
| 1no label
|
src_main_java_org_elasticsearch_cluster_routing_PlainShardsIterator.java
|
2,562 |
clusterService.submitStateUpdateTask("local-disco(detected_master)", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
// make sure we have the local node id set, we might need it as a result of the new metadata
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentState.nodes()).put(localNode).localNodeId(localNode.id());
return ClusterState.builder(currentState).metaData(masterState.metaData()).nodes(nodesBuilder).build();
}
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);
}
});
| 1no label
|
src_main_java_org_elasticsearch_discovery_local_LocalDiscovery.java
|
5,835 |
public class SourceScoreOrderFragmentsBuilder extends ScoreOrderFragmentsBuilder {
private final FieldMapper<?> mapper;
private final SearchContext searchContext;
public SourceScoreOrderFragmentsBuilder(FieldMapper<?> mapper, SearchContext searchContext,
String[] preTags, String[] postTags, BoundaryScanner boundaryScanner) {
super(preTags, postTags, boundaryScanner);
this.mapper = mapper;
this.searchContext = searchContext;
}
@Override
protected Field[] getFields(IndexReader reader, int docId, String fieldName) throws IOException {
// we know it's a low-level reader with a matching docId, since that's how we invoke the highlighter
SearchLookup lookup = searchContext.lookup();
lookup.setNextReader((AtomicReaderContext) reader.getContext());
lookup.setNextDocId(docId);
List<Object> values = lookup.source().extractRawValues(mapper.names().sourcePath());
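// rebuild unstored in-memory fields from the _source values so the highlighter can re-analyze them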
Field[] fields = new Field[values.size()];
for (int i = 0; i < values.size(); i++) {
fields[i] = new Field(mapper.names().indexName(), values.get(i).toString(), TextField.TYPE_NOT_STORED);
}
return fields;
}
@Override
protected String makeFragment(StringBuilder buffer, int[] index, Field[] values, WeightedFragInfo fragInfo,
String[] preTags, String[] postTags, Encoder encoder) {
return super.makeFragment(buffer, index, values, FragmentBuilderHelper.fixWeightedFragInfo(mapper, values, fragInfo), preTags, postTags, encoder);
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_highlight_vectorhighlight_SourceScoreOrderFragmentsBuilder.java
|
16 |
static class A {
private int c = 0;
private final Object o;
A(final Object o) {
this.o = o;
}
public void inc() {
c++;
}
}
| 0true
|
titan-test_src_main_java_com_thinkaurelius_titan_TestBed.java
|
62 |
public class OModificationOperationProhibitedException extends OException {
private static final long serialVersionUID = 1L;
public OModificationOperationProhibitedException() {
}
public OModificationOperationProhibitedException(String message) {
super(message);
}
public OModificationOperationProhibitedException(Throwable cause) {
super(cause);
}
public OModificationOperationProhibitedException(String message, Throwable cause) {
super(message, cause);
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_concur_lock_OModificationOperationProhibitedException.java
|
158 |
@Service("blContentDefaultRuleProcessor")
public class StructuredContentDefaultRuleProcessor extends AbstractStructuredContentRuleProcessor {
private static final Log LOG = LogFactory.getLog(StructuredContentDefaultRuleProcessor.class);
/**
* Returns true if all of the rules associated with the passed-in <code>StructuredContent</code>
* item match based on the passed-in vars.
*
* Also returns true if no rules are present for the passed-in item.
*
* @param sc - a structured content item to test
* @param vars - a map of objects used by the MVEL rule expressions
* @return the result of the rule checks
*/
public boolean checkForMatch(StructuredContentDTO sc, Map<String, Object> vars) {
String ruleExpression = sc.getRuleExpression();
if (ruleExpression != null) {
if (LOG.isTraceEnabled()) {
LOG.trace("Processing content rule for StructuredContent with id " + sc.getId() +". Value = " + ruleExpression);
}
boolean result = executeExpression(ruleExpression, vars);
if (!result) {
if (LOG.isDebugEnabled()) {
LOG.debug("Content failed to pass rule and will not be included for StructuredContent with id " + sc.getId() + ". Value = " + ruleExpression);
}
}
return result;
} else {
// If no rule found, then consider this a match.
return true;
}
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_service_StructuredContentDefaultRuleProcessor.java
|