Unnamed: 0 (int64, 0–6.45k) | func (string, 37–143k chars) | target (class label, 2 classes) | project (string, 33–157 chars)
---|---|---|---|
2,094 |
public class BytesStreamOutput extends StreamOutput implements BytesStream {
public static final int DEFAULT_SIZE = 2 * 1024;
public static final int OVERSIZE_LIMIT = 256 * 1024;
/**
* The buffer where data is stored.
*/
protected byte buf[];
/**
* The number of valid bytes in the buffer.
*/
protected int count;
public BytesStreamOutput() {
this(DEFAULT_SIZE);
}
public BytesStreamOutput(int size) {
this.buf = new byte[size];
}
@Override
public boolean seekPositionSupported() {
return true;
}
@Override
public long position() throws IOException {
return count;
}
@Override
public void seek(long position) throws IOException {
if (position > Integer.MAX_VALUE) {
throw new UnsupportedOperationException();
}
count = (int) position;
}
@Override
public void writeByte(byte b) throws IOException {
int newcount = count + 1;
if (newcount > buf.length) {
buf = grow(newcount);
}
buf[count] = b;
count = newcount;
}
public void skip(int length) {
int newcount = count + length;
if (newcount > buf.length) {
buf = grow(newcount);
}
count = newcount;
}
@Override
public void writeBytes(byte[] b, int offset, int length) throws IOException {
if (length == 0) {
return;
}
int newcount = count + length;
if (newcount > buf.length) {
buf = grow(newcount);
}
System.arraycopy(b, offset, buf, count, length);
count = newcount;
}
private byte[] grow(int newCount) {
// try and grow faster while we are small...
if (newCount < OVERSIZE_LIMIT) {
newCount = Math.max(buf.length << 1, newCount);
}
return ArrayUtil.grow(buf, newCount);
}
public void seek(int seekTo) {
count = seekTo;
}
public void reset() {
count = 0;
}
public int bufferSize() {
return buf.length;
}
@Override
public void flush() throws IOException {
// nothing to do there
}
@Override
public void close() throws IOException {
// nothing to do here
}
@Override
public BytesReference bytes() {
return new BytesArray(buf, 0, count);
}
/**
* Returns the current size of the buffer.
*
* @return the value of the <code>count</code> field, which is the number
* of valid bytes in this output stream.
* @see java.io.ByteArrayOutputStream#count
*/
public int size() {
return count;
}
}
| 1 (no label) | src_main_java_org_elasticsearch_common_io_stream_BytesStreamOutput.java |
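The BytesStreamOutput sample above grows its backing array on demand: below OVERSIZE_LIMIT (256 KiB) the grow step at least doubles the capacity, which keeps writeByte amortized O(1); past the limit it grows only as far as requested. A minimal standalone sketch of that policy (class and method names here are illustrative, not the Elasticsearch API, and Arrays.copyOf stands in for Lucene's ArrayUtil.grow):

```java
import java.util.Arrays;

/** Illustrative growable byte sink mirroring the doubling-then-linear policy above. */
public class GrowableBuffer {
    private static final int OVERSIZE_LIMIT = 256 * 1024;
    private byte[] buf = new byte[2 * 1024];
    private int count;

    public void writeByte(byte b) {
        int newCount = count + 1;
        if (newCount > buf.length) {
            buf = grow(newCount);
        }
        buf[count] = b;
        count = newCount;
    }

    private byte[] grow(int minCapacity) {
        int newCapacity = minCapacity;
        // Double while small; beyond the limit grow only to what is asked for,
        // trading a few extra copies for tighter memory use.
        if (minCapacity < OVERSIZE_LIMIT) {
            newCapacity = Math.max(buf.length << 1, minCapacity);
        }
        return Arrays.copyOf(buf, newCapacity);
    }

    public int size() { return count; }

    public static void main(String[] args) {
        GrowableBuffer out = new GrowableBuffer();
        for (int i = 0; i < 10_000; i++) out.writeByte((byte) i);
        System.out.println(out.size()); // 10000
    }
}
```
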
202 |
public class ONetworkConnectionPool<CH extends OChannel> implements OResourcePoolListener<String, CH> {
private static final int DEF_WAIT_TIMEOUT = 5000;
private final Map<String, OResourcePool<String, CH>> pools = new HashMap<String, OResourcePool<String, CH>>();
private int maxSize;
private int timeout = DEF_WAIT_TIMEOUT;
public ONetworkConnectionPool(final int iMinSize, final int iMaxSize) {
this(iMinSize, iMaxSize, DEF_WAIT_TIMEOUT);
}
public ONetworkConnectionPool(final int iMinSize, final int iMaxSize, final int iTimeout) {
maxSize = iMaxSize;
timeout = iTimeout;
}
public CH createNewResource(String iKey, Object... iAdditionalArgs) {
return null;
}
public CH acquire(final String iRemoteAddress) throws OLockException {
OResourcePool<String, CH> pool = pools.get(iRemoteAddress);
if (pool == null) {
synchronized (pools) {
pool = pools.get(iRemoteAddress);
if (pool == null) {
pool = new OResourcePool<String, CH>(maxSize, this);
pools.put(iRemoteAddress, pool);
}
}
}
return pool.getResource(iRemoteAddress, timeout);
}
public void release(final CH iChannel) {
final String address = iChannel.socket.getInetAddress().toString();
final OResourcePool<String, CH> pool = pools.get(address);
if (pool == null)
throw new OLockException("Cannot release a network channel not acquired before. Remote address: " + address);
pool.returnResource(iChannel);
}
public boolean reuseResource(final String iKey, final Object[] iAdditionalArgs, final CH iValue) {
return true;
}
public Map<String, OResourcePool<String, CH>> getPools() {
return pools;
}
/**
* Closes all the channels.
*/
public void close() {
for (Entry<String, OResourcePool<String, CH>> pool : pools.entrySet()) {
for (CH channel : pool.getValue().getResources()) {
channel.close();
}
}
}
}
| 0 (true) | client_src_main_java_com_orientechnologies_orient_client_remote_ONetworkConnectionPool.java |
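acquire() above lazily creates one pool per remote address with a check, lock, re-check sequence, but the first pools.get runs unsynchronized against a plain HashMap, so a reader can observe a concurrent rehash mid-update. A hedged sketch of the same per-key lazy initialization using ConcurrentHashMap.computeIfAbsent (the Pool type is a stand-in, not OrientDB's OResourcePool):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class KeyedPools {
    /** Stand-in for OResourcePool: only the create-once semantics matter here. */
    static class Pool {
        final String key;
        Pool(String key) { this.key = key; }
    }

    private final ConcurrentMap<String, Pool> pools = new ConcurrentHashMap<>();

    /** computeIfAbsent gives atomic create-once-per-key without explicit locking. */
    public Pool acquire(String remoteAddress) {
        return pools.computeIfAbsent(remoteAddress, Pool::new);
    }

    public static void main(String[] args) {
        KeyedPools p = new KeyedPools();
        System.out.println(p.acquire("10.0.0.1:2424") == p.acquire("10.0.0.1:2424")); // true
    }
}
```
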
660 |
public class OSBTreeIndexEngine<V> extends OSharedResourceAdaptiveExternal implements OIndexEngine<V> {
public static final String DATA_FILE_EXTENSION = ".sbt";
private ORID identity;
private OSBTree<Object, V> sbTree;
public OSBTreeIndexEngine() {
super(OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean(), OGlobalConfiguration.MVRBTREE_TIMEOUT
.getValueAsInteger(), true);
}
@Override
public void init() {
}
@Override
public void flush() {
acquireSharedLock();
try {
sbTree.flush();
} finally {
releaseSharedLock();
}
}
@Override
public void create(String indexName, OIndexDefinition indexDefinition, String clusterIndexName,
OStreamSerializer valueSerializer, boolean isAutomatic) {
acquireExclusiveLock();
try {
final OBinarySerializer keySerializer;
if (indexDefinition != null) {
if (indexDefinition instanceof ORuntimeKeyIndexDefinition) {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = ((ORuntimeKeyIndexDefinition) indexDefinition).getSerializer();
} else {
if (indexDefinition.getTypes().length > 1) {
keySerializer = OCompositeKeySerializer.INSTANCE;
} else {
keySerializer = OBinarySerializerFactory.INSTANCE.getObjectSerializer(indexDefinition.getTypes()[0]);
}
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, indexDefinition.getTypes().length,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
}
} else {
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
keySerializer = new OSimpleKeySerializer();
}
final ORecordBytes identityRecord = new ORecordBytes();
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage().getUnderlying();
database.save(identityRecord, clusterIndexName);
identity = identityRecord.getIdentity();
sbTree.create(indexName, keySerializer, (OBinarySerializer<V>) valueSerializer,
indexDefinition != null ? indexDefinition.getTypes() : null, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public void delete() {
acquireSharedLock();
try {
sbTree.delete();
} finally {
releaseSharedLock();
}
}
@Override
public void deleteWithoutLoad(String indexName) {
acquireExclusiveLock();
try {
final ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage().getUnderlying();
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, 1, OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
sbTree.deleteWithoutLoad(indexName, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public void load(ORID indexRid, String indexName, OIndexDefinition indexDefinition, boolean isAutomatic) {
acquireExclusiveLock();
try {
final int keySize;
if (indexDefinition == null || indexDefinition instanceof ORuntimeKeyIndexDefinition)
keySize = 1;
else
keySize = indexDefinition.getTypes().length;
sbTree = new OSBTree<Object, V>(DATA_FILE_EXTENSION, keySize,
OGlobalConfiguration.INDEX_DURABLE_IN_NON_TX_MODE.getValueAsBoolean());
ODatabaseRecord database = getDatabase();
final OStorageLocalAbstract storageLocalAbstract = (OStorageLocalAbstract) database.getStorage().getUnderlying();
sbTree.load(indexName, indexDefinition != null ? indexDefinition.getTypes() : null, storageLocalAbstract);
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean contains(Object key) {
acquireSharedLock();
try {
return sbTree.get(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public boolean remove(Object key) {
acquireSharedLock();
try {
return sbTree.remove(key) != null;
} finally {
releaseSharedLock();
}
}
@Override
public ORID getIdentity() {
acquireSharedLock();
try {
return identity;
} finally {
releaseSharedLock();
}
}
@Override
public void clear() {
acquireSharedLock();
try {
sbTree.clear();
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> iterator() {
acquireSharedLock();
try {
return new OSBTreeMapEntryIterator<Object, V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> inverseIterator() {
acquireSharedLock();
try {
return new OSBTreeInverseMapEntryIterator<Object, V>(sbTree);
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> valuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final OSBTreeMapEntryIterator<Object, V> entryIterator = new OSBTreeMapEntryIterator<Object, V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterator<V> inverseValuesIterator() {
acquireSharedLock();
try {
return new Iterator<V>() {
private final OSBTreeInverseMapEntryIterator<Object, V> entryIterator = new OSBTreeInverseMapEntryIterator<Object, V>(
sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public V next() {
return entryIterator.next().getValue();
}
@Override
public void remove() {
entryIterator.remove();
}
};
} finally {
releaseSharedLock();
}
}
@Override
public Iterable<Object> keys() {
acquireSharedLock();
try {
return new Iterable<Object>() {
@Override
public Iterator<Object> iterator() {
return new Iterator<Object>() {
final OSBTreeMapEntryIterator<Object, V> entryIterator = new OSBTreeMapEntryIterator<Object, V>(sbTree);
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public Object next() {
return entryIterator.next().getKey();
}
@Override
public void remove() {
entryIterator.remove();
}
};
}
};
} finally {
releaseSharedLock();
}
}
@Override
public void unload() {
}
@Override
public void startTransaction() {
}
@Override
public void stopTransaction() {
}
@Override
public void afterTxRollback() {
}
@Override
public void afterTxCommit() {
}
@Override
public void closeDb() {
}
@Override
public void close() {
acquireSharedLock();
try {
sbTree.close();
} finally {
releaseSharedLock();
}
}
@Override
public void beforeTxBegin() {
}
@Override
public V get(Object key) {
acquireSharedLock();
try {
return sbTree.get(key);
} finally {
releaseSharedLock();
}
}
@Override
public void put(Object key, V value) {
acquireSharedLock();
try {
sbTree.put(key, value);
} finally {
releaseSharedLock();
}
}
@Override
public void getValuesBetween(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive,
final ValuesTransformer<V> transformer, final ValuesResultListener valuesResultListener) {
acquireSharedLock();
try {
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
return addToResult(transformer, valuesResultListener, entry.getValue());
}
});
} finally {
releaseSharedLock();
}
}
@Override
public void getValuesMajor(Object fromKey, boolean isInclusive, final ValuesTransformer<V> transformer,
final ValuesResultListener valuesResultListener) {
acquireSharedLock();
try {
sbTree.loadEntriesMajor(fromKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
return addToResult(transformer, valuesResultListener, entry.getValue());
}
});
} finally {
releaseSharedLock();
}
}
@Override
public void getValuesMinor(Object toKey, boolean isInclusive, final ValuesTransformer<V> transformer,
final ValuesResultListener valuesResultListener) {
acquireSharedLock();
try {
sbTree.loadEntriesMinor(toKey, isInclusive, new OSBTree.RangeResultListener<Object, V>() {
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
return addToResult(transformer, valuesResultListener, entry.getValue());
}
});
} finally {
releaseSharedLock();
}
}
@Override
public void getEntriesMajor(Object fromKey, boolean isInclusive, final ValuesTransformer<V> transformer,
final EntriesResultListener entriesResultListener) {
acquireSharedLock();
try {
sbTree.loadEntriesMajor(fromKey, isInclusive, new OTreeInternal.RangeResultListener<Object, V>() {
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
final Object key = entry.getKey();
final V value = entry.getValue();
return addToEntriesResult(transformer, entriesResultListener, key, value);
}
});
} finally {
releaseSharedLock();
}
}
@Override
public void getEntriesMinor(Object toKey, boolean isInclusive, final ValuesTransformer<V> transformer,
final EntriesResultListener entriesResultListener) {
acquireSharedLock();
try {
sbTree.loadEntriesMinor(toKey, isInclusive, new OTreeInternal.RangeResultListener<Object, V>() {
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
final Object key = entry.getKey();
final V value = entry.getValue();
return addToEntriesResult(transformer, entriesResultListener, key, value);
}
});
} finally {
releaseSharedLock();
}
}
@Override
public void getEntriesBetween(Object rangeFrom, Object rangeTo, boolean inclusive, final ValuesTransformer<V> transformer,
final EntriesResultListener entriesResultListener) {
acquireSharedLock();
try {
final Set<ODocument> result = new ODocumentFieldsHashSet();
sbTree.loadEntriesBetween(rangeFrom, inclusive, rangeTo, inclusive, new OTreeInternal.RangeResultListener<Object, V>() {
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
final Object key = entry.getKey();
final V value = entry.getValue();
return addToEntriesResult(transformer, entriesResultListener, key, value);
}
});
} finally {
releaseSharedLock();
}
}
@Override
public long size(final ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
if (transformer == null)
return sbTree.size();
else {
final ItemsCounter<V> counter = new ItemsCounter<V>(transformer, -1);
final Object firstKey = sbTree.firstKey();
final Object lastKey = sbTree.lastKey();
if (firstKey != null && lastKey != null) {
sbTree.loadEntriesBetween(firstKey, true, lastKey, true, counter);
return counter.count;
}
return 0;
}
} finally {
releaseSharedLock();
}
}
@Override
public long count(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive, int maxValuesToFetch,
ValuesTransformer<V> transformer) {
acquireSharedLock();
try {
final ItemsCounter<V> itemsCounter = new ItemsCounter<V>(transformer, maxValuesToFetch);
if (rangeTo != null)
sbTree.loadEntriesBetween(rangeFrom, fromInclusive, rangeTo, toInclusive, itemsCounter);
else
sbTree.loadEntriesMajor(rangeFrom, fromInclusive, itemsCounter);
return itemsCounter.count;
} finally {
releaseSharedLock();
}
}
@Override
public boolean hasRangeQuerySupport() {
return true;
}
private ODatabaseRecord getDatabase() {
return ODatabaseRecordThreadLocal.INSTANCE.get();
}
private boolean addToResult(ValuesTransformer<V> transformer, ValuesResultListener resultListener, V value) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
boolean cont = resultListener.addResult(transformedValue);
if (!cont)
return false;
}
return true;
} else
return resultListener.addResult((OIdentifiable) value);
}
private boolean addToEntriesResult(ValuesTransformer<V> transformer, EntriesResultListener entriesResultListener, Object key,
V value) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", transformedValue.getIdentity());
document.unsetDirty();
boolean cont = entriesResultListener.addResult(document);
if (!cont)
return false;
}
return true;
} else {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", ((OIdentifiable) value).getIdentity());
document.unsetDirty();
return entriesResultListener.addResult(document);
}
}
private static final class ItemsCounter<V> implements OSBTree.RangeResultListener<Object, V> {
private final ValuesTransformer<V> valuesTransformer;
private final int maxValuesToFetch;
private ItemsCounter(ValuesTransformer<V> valuesTransformer, int maxValuesToFetch) {
this.valuesTransformer = valuesTransformer;
this.maxValuesToFetch = maxValuesToFetch;
}
private int count;
@Override
public boolean addResult(Map.Entry<Object, V> entry) {
if (valuesTransformer != null)
count += valuesTransformer.transformFromValue(entry.getValue()).size();
else
count++;
if (maxValuesToFetch > 0 && count >= maxValuesToFetch)
return false;
return true;
}
}
}
| 1 (no label) | core_src_main_java_com_orientechnologies_orient_core_index_engine_OSBTreeIndexEngine.java |
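Every method of the index engine above brackets its tree call with acquireSharedLock()/releaseSharedLock(), releasing in a finally block; structural operations (create, load, deleteWithoutLoad) take the exclusive lock instead, while even put and remove use the shared one, presumably deferring finer synchronization to the tree itself. The same discipline with the JDK's ReentrantReadWriteLock, as a sketch rather than the OSharedResourceAdaptive implementation:

```java
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LockedValue<V> {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private V value;

    /** Readers take the shared (read) lock; many may hold it at once. */
    public V get() {
        lock.readLock().lock();
        try {
            return value;
        } finally {
            lock.readLock().unlock(); // always release, even if the body throws
        }
    }

    /** Structural changes take the exclusive (write) lock. */
    public void set(V v) {
        lock.writeLock().lock();
        try {
            value = v;
        } finally {
            lock.writeLock().unlock();
        }
    }

    public static void main(String[] args) {
        LockedValue<String> s = new LockedValue<>();
        s.set("sbtree");
        System.out.println(s.get());
    }
}
```
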
80 |
NOT_EQUAL {
@Override
public boolean isValidValueType(Class<?> clazz) {
return true;
}
@Override
public boolean isValidCondition(Object condition) {
return true;
}
@Override
public boolean evaluate(Object value, Object condition) {
if (condition==null) {
return value!=null;
} else {
return !condition.equals(value);
}
}
@Override
public String toString() {
return "<>";
}
@Override
public TitanPredicate negate() {
return EQUAL;
}
},
| 0 (true) | titan-core_src_main_java_com_thinkaurelius_titan_core_attribute_Cmp.java |
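NOT_EQUAL above hand-rolls null-safe inequality: a null condition matches any non-null value, otherwise !condition.equals(value). Since Java 7 the same truth table is a negated Objects.equals, which a quick check confirms:

```java
import java.util.Objects;

public class NotEqualDemo {
    /** Same truth table as the hand-rolled branch in Cmp.NOT_EQUAL above. */
    static boolean notEqual(Object value, Object condition) {
        return !Objects.equals(condition, value);
    }

    public static void main(String[] args) {
        System.out.println(notEqual("a", null));  // true: null condition, non-null value
        System.out.println(notEqual(null, null)); // false
        System.out.println(notEqual("a", "b"));   // true
        System.out.println(notEqual("a", "a"));   // false
    }
}
```
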
4,295 |
public class IndicesModule extends AbstractModule implements SpawnModules {
private final Settings settings;
public IndicesModule(Settings settings) {
this.settings = settings;
}
@Override
public Iterable<? extends Module> spawnModules() {
return ImmutableList.of(new IndicesQueriesModule(), new IndicesAnalysisModule());
}
@Override
protected void configure() {
bind(IndicesLifecycle.class).to(InternalIndicesLifecycle.class).asEagerSingleton();
bind(IndicesService.class).to(InternalIndicesService.class).asEagerSingleton();
bind(RecoverySettings.class).asEagerSingleton();
bind(RecoveryTarget.class).asEagerSingleton();
bind(RecoverySource.class).asEagerSingleton();
bind(IndicesStore.class).asEagerSingleton();
bind(IndicesClusterStateService.class).asEagerSingleton();
bind(IndexingMemoryController.class).asEagerSingleton();
bind(IndicesFilterCache.class).asEagerSingleton();
bind(IndicesFieldDataCache.class).asEagerSingleton();
bind(IndicesTermsFilterCache.class).asEagerSingleton();
bind(TransportNodesListShardStoreMetaData.class).asEagerSingleton();
bind(IndicesTTLService.class).asEagerSingleton();
bind(IndicesWarmer.class).to(InternalIndicesWarmer.class).asEagerSingleton();
bind(UpdateHelper.class).asEagerSingleton();
bind(CircuitBreakerService.class).to(InternalCircuitBreakerService.class).asEagerSingleton();
}
}
| 1 (no label) | src_main_java_org_elasticsearch_indices_IndicesModule.java |
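IndicesModule above is a Guice module: each bind(...).asEagerSingleton() registers a service constructed exactly once, at injector creation rather than on first request. A minimal sketch of the idiom, assuming Guice is on the classpath (the Lifecycle names below are invented for illustration):

```java
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;

public class EagerModuleDemo {
    interface Lifecycle {}
    static class InternalLifecycle implements Lifecycle {
        InternalLifecycle() { System.out.println("constructed eagerly"); }
    }

    static class DemoModule extends AbstractModule {
        @Override
        protected void configure() {
            // Interface-to-implementation binding, instantiated at injector creation.
            bind(Lifecycle.class).to(InternalLifecycle.class).asEagerSingleton();
        }
    }

    public static void main(String[] args) {
        Injector injector = Guice.createInjector(new DemoModule()); // prints immediately
        Lifecycle a = injector.getInstance(Lifecycle.class);
        Lifecycle b = injector.getInstance(Lifecycle.class);
        System.out.println(a == b); // true: one shared instance
    }
}
```
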
72 |
@SuppressWarnings("serial")
static final class MapReduceEntriesToLongTask<K,V>
extends BulkTask<K,V,Long> {
final ObjectToLong<Map.Entry<K,V>> transformer;
final LongByLongToLong reducer;
final long basis;
long result;
MapReduceEntriesToLongTask<K,V> rights, nextRight;
MapReduceEntriesToLongTask
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
MapReduceEntriesToLongTask<K,V> nextRight,
ObjectToLong<Map.Entry<K,V>> transformer,
long basis,
LongByLongToLong reducer) {
super(p, b, i, f, t); this.nextRight = nextRight;
this.transformer = transformer;
this.basis = basis; this.reducer = reducer;
}
public final Long getRawResult() { return result; }
public final void compute() {
final ObjectToLong<Map.Entry<K,V>> transformer;
final LongByLongToLong reducer;
if ((transformer = this.transformer) != null &&
(reducer = this.reducer) != null) {
long r = this.basis;
for (int i = baseIndex, f, h; batch > 0 &&
(h = ((f = baseLimit) + i) >>> 1) > i;) {
addToPendingCount(1);
(rights = new MapReduceEntriesToLongTask<K,V>
(this, batch >>>= 1, baseLimit = h, f, tab,
rights, transformer, r, reducer)).fork();
}
for (Node<K,V> p; (p = advance()) != null; )
r = reducer.apply(r, transformer.apply(p));
result = r;
CountedCompleter<?> c;
for (c = firstComplete(); c != null; c = c.nextComplete()) {
@SuppressWarnings("unchecked") MapReduceEntriesToLongTask<K,V>
t = (MapReduceEntriesToLongTask<K,V>)c,
s = t.rights;
while (s != null) {
t.result = reducer.apply(t.result, s.result);
s = t.rights = s.nextRight;
}
}
}
}
}
| 0 (true) | src_main_java_jsr166e_ConcurrentHashMapV8.java |
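The jsr166e task above is a fork/join reduction: it halves its range, forks the right half, transforms entries in the left half, then folds sibling results back together through the completer chain. The textbook shape of that computation is a RecursiveTask; a compact sketch reducing an array to a long:

```java
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.RecursiveTask;

public class SumTask extends RecursiveTask<Long> {
    private static final int THRESHOLD = 1_000;
    private final long[] data;
    private final int lo, hi;

    SumTask(long[] data, int lo, int hi) {
        this.data = data; this.lo = lo; this.hi = hi;
    }

    @Override
    protected Long compute() {
        if (hi - lo <= THRESHOLD) {              // small range: reduce sequentially
            long r = 0;
            for (int i = lo; i < hi; i++) r += data[i];
            return r;
        }
        int mid = (lo + hi) >>> 1;
        SumTask right = new SumTask(data, mid, hi);
        right.fork();                            // run the right half asynchronously
        long leftResult = new SumTask(data, lo, mid).compute();
        return leftResult + right.join();        // fold sibling results together
    }

    public static void main(String[] args) {
        long[] data = new long[100_000];
        for (int i = 0; i < data.length; i++) data[i] = i;
        System.out.println(ForkJoinPool.commonPool().invoke(new SumTask(data, 0, data.length)));
    }
}
```
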
45 |
@Component("blPendingSandBoxItemCustomPersistenceHandler")
public class PendingSandBoxItemCustomPersistenceHandler extends SandBoxItemCustomPersistenceHandler {
private final Log LOG = LogFactory.getLog(PendingSandBoxItemCustomPersistenceHandler.class);
@Override
public Boolean canHandleFetch(PersistencePackage persistencePackage) {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
boolean isSandboxItem = SandBoxItem.class.getName().equals(ceilingEntityFullyQualifiedClassname);
if (isSandboxItem) {
return persistencePackage.getCustomCriteria()[4].equals("pending");
}
return false;
}
@Override
public DynamicResultSet fetch(PersistencePackage persistencePackage, CriteriaTransferObject cto, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
String[] customCriteria = persistencePackage.getCustomCriteria();
if (ArrayUtils.isEmpty(customCriteria) || customCriteria.length != 5) {
ServiceException e = new ServiceException("Invalid request for entity: " + ceilingEntityFullyQualifiedClassname);
LOG.error("Invalid request for entity: " + ceilingEntityFullyQualifiedClassname, e);
throw e;
}
AdminUser adminUser = adminRemoteSecurityService.getPersistentAdminUser();
if (adminUser == null) {
ServiceException e = new ServiceException("Unable to determine current user logged in status");
throw e;
}
try {
String operation = customCriteria[1];
List<Long> targets = new ArrayList<Long>();
if (!StringUtils.isEmpty(customCriteria[2])) {
String[] parts = customCriteria[2].split(",");
for (String part : parts) {
try {
targets.add(Long.valueOf(part));
} catch (NumberFormatException e) {
//do nothing
}
}
}
String requiredPermission = "PERMISSION_ALL_USER_SANDBOX";
boolean allowOperation = false;
for (AdminRole role : adminUser.getAllRoles()) {
for (AdminPermission permission : role.getAllPermissions()) {
if (permission.getName().equals(requiredPermission)) {
allowOperation = true;
break;
}
}
}
if (!allowOperation) {
ServiceException e = new ServiceException("Current user does not have permission to perform operation");
LOG.error("Current user does not have permission to perform operation", e);
throw e;
}
SandBox mySandBox = sandBoxService.retrieveUserSandBox(null, adminUser);
SandBox approvalSandBox = sandBoxService.retrieveApprovalSandBox(mySandBox);
if (operation.equals("releaseAll")) {
sandBoxService.revertAllSandBoxItems(mySandBox, approvalSandBox);
} else if (operation.equals("releaseSelected")) {
List<SandBoxItem> items = retrieveSandBoxItems(targets, dynamicEntityDao, mySandBox);
sandBoxService.revertSelectedSandBoxItems(approvalSandBox, items);
} else if (operation.equals("reclaimAll")) {
sandBoxService.rejectAllSandBoxItems(mySandBox, approvalSandBox, "reclaiming sandbox items");
} else if (operation.equals("reclaimSelected")) {
List<SandBoxItem> items = retrieveSandBoxItems(targets, dynamicEntityDao, mySandBox);
sandBoxService.rejectSelectedSandBoxItems(approvalSandBox, "reclaiming sandbox item", items);
}
PersistencePerspective persistencePerspective = persistencePackage.getPersistencePerspective();
Map<String, FieldMetadata> originalProps = helper.getSimpleMergedProperties(SandBoxItem.class.getName(), persistencePerspective);
cto.get("originalSandBoxId").setFilterValue(mySandBox.getId().toString());
cto.get("archivedFlag").setFilterValue(Boolean.FALSE.toString());
List<FilterMapping> filterMappings = helper.getFilterMappings(persistencePerspective, cto, SandBoxItem.class.getName(), originalProps);
List<Serializable> records = helper.getPersistentRecords(SandBoxItem.class.getName(), filterMappings, cto.getFirstResult(), cto.getMaxResults());
Entity[] results = helper.getRecords(originalProps, records);
int totalRecords = helper.getTotalRecords(StringUtils.isEmpty(persistencePackage.getFetchTypeFullyQualifiedClassname())?
persistencePackage.getCeilingEntityFullyQualifiedClassname():persistencePackage.getFetchTypeFullyQualifiedClassname(),
filterMappings);
DynamicResultSet response = new DynamicResultSet(results, totalRecords);
return response;
} catch (Exception e) {
throw new ServiceException("Unable to execute persistence activity for entity: "+ceilingEntityFullyQualifiedClassname, e);
}
}
}
| 0 (true) | admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_admin_server_handler_PendingSandBoxItemCustomPersistenceHandler.java |
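fetch() above splits customCriteria[2] on commas and silently skips tokens that fail Long.valueOf; note also that canHandleFetch indexes customCriteria[4] before fetch's length guard runs, so a short criteria array would fail there first. The lenient parse, extracted as a helper (a sketch; the handler inlines it, and the trim() is an addition):

```java
import java.util.ArrayList;
import java.util.List;

public class IdParsing {
    /** Parse "1,2,junk,3" -> [1, 2, 3], ignoring malformed tokens as the handler does. */
    static List<Long> parseIds(String csv) {
        List<Long> targets = new ArrayList<>();
        if (csv != null && !csv.isEmpty()) {
            for (String part : csv.split(",")) {
                try {
                    targets.add(Long.valueOf(part.trim()));
                } catch (NumberFormatException e) {
                    // malformed id: skip, matching the handler's empty catch block
                }
            }
        }
        return targets;
    }

    public static void main(String[] args) {
        System.out.println(parseIds("1,2,junk,3")); // [1, 2, 3]
    }
}
```
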
1,343 |
public class OWALPage {
public static final int PAGE_SIZE = 65536;
public static final int MIN_RECORD_SIZE = OIntegerSerializer.INT_SIZE + 3;
public static final int CRC_OFFSET = 0;
public static final int FLUSH_ID_OFFSET = CRC_OFFSET + OIntegerSerializer.INT_SIZE;
public static final int FLUSH_INDEX_OFFSET = FLUSH_ID_OFFSET + OLongSerializer.LONG_SIZE;
private static final int FREE_SPACE_OFFSET = FLUSH_INDEX_OFFSET + OIntegerSerializer.INT_SIZE;
public static final int RECORDS_OFFSET = FREE_SPACE_OFFSET + OIntegerSerializer.INT_SIZE;
private static final int MAX_ENTRY_SIZE = PAGE_SIZE - RECORDS_OFFSET;
private final ODirectMemoryPointer pagePointer;
public OWALPage(ODirectMemoryPointer pagePointer, boolean isNew) {
this.pagePointer = pagePointer;
if (isNew)
OIntegerSerializer.INSTANCE.serializeInDirectMemory(MAX_ENTRY_SIZE, pagePointer, FREE_SPACE_OFFSET);
}
public ODirectMemoryPointer getPagePointer() {
return pagePointer;
}
public int appendRecord(byte[] content, boolean mergeWithNextPage, boolean recordTail) {
int freeSpace = getFreeSpace();
int freePosition = PAGE_SIZE - freeSpace;
int position = freePosition;
pagePointer.setByte(position, mergeWithNextPage ? (byte) 1 : 0);
position++;
pagePointer.setByte(position, recordTail ? (byte) 1 : 0);
position++;
OIntegerSerializer.INSTANCE.serializeInDirectMemory(content.length, pagePointer, position);
position += OIntegerSerializer.INT_SIZE;
pagePointer.set(position, content, 0, content.length);
position += content.length;
OIntegerSerializer.INSTANCE.serializeInDirectMemory(freeSpace - 2 - OIntegerSerializer.INT_SIZE - content.length, pagePointer,
FREE_SPACE_OFFSET);
return freePosition;
}
public byte[] getRecord(int position) {
position += 2;
int recordSize = OIntegerSerializer.INSTANCE.deserializeFromDirectMemory(pagePointer, position);
position += OIntegerSerializer.INT_SIZE;
return pagePointer.get(position, recordSize);
}
public int getSerializedRecordSize(int position) {
int recordSize = OIntegerSerializer.INSTANCE.deserializeFromDirectMemory(pagePointer, position + 2);
return recordSize + OIntegerSerializer.INT_SIZE + 2;
}
public boolean mergeWithNextPage(int position) {
return pagePointer.getByte(position) > 0;
}
public boolean recordTail(int position) {
return pagePointer.getByte(position + 1) > 0;
}
public boolean isEmpty() {
return getFreeSpace() == MAX_ENTRY_SIZE;
}
public int getFreeSpace() {
return OIntegerSerializer.INSTANCE.deserializeFromDirectMemory(pagePointer, FREE_SPACE_OFFSET);
}
public int getFilledUpTo() {
return OWALPage.PAGE_SIZE - getFreeSpace();
}
public static int calculateSerializedSize(int recordSize) {
return recordSize + OIntegerSerializer.INT_SIZE + 2;
}
public static int calculateRecordSize(int serializedSize) {
return serializedSize - OIntegerSerializer.INT_SIZE - 2;
}
public void truncateTill(int pageOffset) {
OIntegerSerializer.INSTANCE.serializeInDirectMemory(OWALPage.PAGE_SIZE - pageOffset, pagePointer, FREE_SPACE_OFFSET);
}
}
| 1 (no label) | core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_paginated_wal_OWALPage.java |
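Each WAL record above is laid out as two one-byte flags (merge-with-next-page and record-tail), a four-byte length, then the payload, which is why appendRecord subtracts content.length + OIntegerSerializer.INT_SIZE + 2 from the free space. A standalone ByteBuffer sketch of the same layout:

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class WalRecordLayout {
    /** [mergeWithNextPage:1][recordTail:1][length:4][payload] — as in OWALPage above. */
    static int append(ByteBuffer page, byte[] content, boolean merge, boolean tail) {
        int start = page.position();
        page.put(merge ? (byte) 1 : (byte) 0);
        page.put(tail ? (byte) 1 : (byte) 0);
        page.putInt(content.length);
        page.put(content);
        return start; // the caller keeps this offset to read the record back
    }

    static byte[] read(ByteBuffer page, int position) {
        int length = page.getInt(position + 2);        // skip the two flag bytes
        byte[] out = new byte[length];
        page.position(position + 2 + Integer.BYTES);
        page.get(out);
        return out;
    }

    public static void main(String[] args) {
        ByteBuffer page = ByteBuffer.allocate(65536);
        int pos = append(page, "hello".getBytes(StandardCharsets.UTF_8), true, false);
        System.out.println(new String(read(page, pos), StandardCharsets.UTF_8)); // hello
    }
}
```
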
381 |
public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilder<ClusterRerouteRequest, ClusterRerouteResponse, ClusterRerouteRequestBuilder> {
public ClusterRerouteRequestBuilder(ClusterAdminClient clusterClient) {
super((InternalClusterAdminClient) clusterClient, new ClusterRerouteRequest());
}
/**
* Adds allocation commands to be applied to the cluster. Note, can be empty, in which case
* will simply run a simple "reroute".
*/
public ClusterRerouteRequestBuilder add(AllocationCommand... commands) {
request.add(commands);
return this;
}
/**
* Sets a dry run flag (defaults to <tt>false</tt>) allowing to run the commands without
* actually applying them to the cluster state, and getting the resulting cluster state back.
*/
public ClusterRerouteRequestBuilder setDryRun(boolean dryRun) {
request.dryRun(dryRun);
return this;
}
/**
* Sets the source for the request
*/
public ClusterRerouteRequestBuilder setSource(BytesReference source) throws Exception {
request.source(source);
return this;
}
@Override
protected void doExecute(ActionListener<ClusterRerouteResponse> listener) {
((ClusterAdminClient) client).reroute(request, listener);
}
}
| 0 (true) | src_main_java_org_elasticsearch_action_admin_cluster_reroute_ClusterRerouteRequestBuilder.java |
1,699 |
public class ChannelBufferBytesReference implements BytesReference {
private final ChannelBuffer buffer;
public ChannelBufferBytesReference(ChannelBuffer buffer) {
this.buffer = buffer;
}
@Override
public byte get(int index) {
return buffer.getByte(buffer.readerIndex() + index);
}
@Override
public int length() {
return buffer.readableBytes();
}
@Override
public BytesReference slice(int from, int length) {
return new ChannelBufferBytesReference(buffer.slice(from, length));
}
@Override
public StreamInput streamInput() {
return ChannelBufferStreamInputFactory.create(buffer.duplicate());
}
@Override
public void writeTo(OutputStream os) throws IOException {
buffer.getBytes(buffer.readerIndex(), os, length());
}
@Override
public byte[] toBytes() {
return copyBytesArray().toBytes();
}
@Override
public BytesArray toBytesArray() {
if (buffer.hasArray()) {
return new BytesArray(buffer.array(), buffer.arrayOffset() + buffer.readerIndex(), buffer.readableBytes());
}
return copyBytesArray();
}
@Override
public BytesArray copyBytesArray() {
byte[] copy = new byte[buffer.readableBytes()];
buffer.getBytes(buffer.readerIndex(), copy);
return new BytesArray(copy);
}
@Override
public ChannelBuffer toChannelBuffer() {
return buffer.duplicate();
}
@Override
public boolean hasArray() {
return buffer.hasArray();
}
@Override
public byte[] array() {
return buffer.array();
}
@Override
public int arrayOffset() {
return buffer.arrayOffset() + buffer.readerIndex();
}
@Override
public String toUtf8() {
return buffer.toString(Charsets.UTF_8);
}
@Override
public BytesRef toBytesRef() {
if (buffer.hasArray()) {
return new BytesRef(buffer.array(), buffer.arrayOffset() + buffer.readerIndex(), buffer.readableBytes());
}
byte[] copy = new byte[buffer.readableBytes()];
buffer.getBytes(buffer.readerIndex(), copy);
return new BytesRef(copy);
}
@Override
public BytesRef copyBytesRef() {
byte[] copy = new byte[buffer.readableBytes()];
buffer.getBytes(buffer.readerIndex(), copy);
return new BytesRef(copy);
}
@Override
public int hashCode() {
return Helper.bytesHashCode(this);
}
@Override
public boolean equals(Object obj) {
return Helper.bytesEqual(this, (BytesReference) obj);
}
}
| 1 (no label) | src_main_java_org_elasticsearch_common_bytes_ChannelBufferBytesReference.java |
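toBytesArray() above skips a copy when the Netty buffer exposes a backing array and falls back to copyBytesArray() otherwise; slice() and duplicate() likewise share storage rather than copy it. java.nio.ByteBuffer has the same heap-versus-direct split, which this sketch makes explicit (ByteView is an invented stand-in for BytesArray):

```java
import java.nio.ByteBuffer;

public class ZeroCopyOrCopy {
    /** View over a byte range: the zero-copy analogue of BytesArray above. */
    static final class ByteView {
        final byte[] array; final int offset; final int length;
        ByteView(byte[] array, int offset, int length) {
            this.array = array; this.offset = offset; this.length = length;
        }
    }

    static ByteView toView(ByteBuffer buf) {
        if (buf.hasArray()) {
            // Heap buffer: wrap the existing storage, no copying.
            return new ByteView(buf.array(), buf.arrayOffset() + buf.position(), buf.remaining());
        }
        byte[] copy = new byte[buf.remaining()];  // direct buffer: must copy out
        buf.duplicate().get(copy);                // duplicate() leaves buf's indices intact
        return new ByteView(copy, 0, copy.length);
    }

    public static void main(String[] args) {
        ByteBuffer heap = ByteBuffer.wrap("abc".getBytes());
        ByteView v = toView(heap);
        System.out.println(new String(v.array, v.offset, v.length)); // abc
    }
}
```
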
77 |
public class OSharedResourceExternalTimeout extends OSharedResourceTimeout {
public OSharedResourceExternalTimeout(final int timeout) {
super(timeout);
}
@Override
public void acquireExclusiveLock() throws OTimeoutException {
super.acquireExclusiveLock();
}
@Override
public void acquireSharedLock() throws OTimeoutException {
super.acquireSharedLock();
}
@Override
public void releaseExclusiveLock() {
super.releaseExclusiveLock();
}
@Override
public void releaseSharedLock() {
super.releaseSharedLock();
}
}
| 0 (true) | commons_src_main_java_com_orientechnologies_common_concur_resource_OSharedResourceExternalTimeout.java |
260 |
public interface EmailTrackingOpens extends Serializable {
/**
* @return the id
*/
public abstract Long getId();
/**
* @param id the id to set
*/
public abstract void setId(Long id);
/**
* @return the dateOpened
*/
public abstract Date getDateOpened();
/**
* @param dateOpened the dateOpened to set
*/
public abstract void setDateOpened(Date dateOpened);
/**
* @return the userAgent
*/
public abstract String getUserAgent();
/**
* @param userAgent the userAgent to set
*/
public abstract void setUserAgent(String userAgent);
/**
* @return the emailTracking
*/
public abstract EmailTracking getEmailTracking();
/**
* @param emailTracking the emailTracking to set
*/
public abstract void setEmailTracking(EmailTracking emailTracking);
}
| 0 (true) | common_src_main_java_org_broadleafcommerce_common_email_domain_EmailTrackingOpens.java |
6,270 |
public class IsFalseAssertion extends Assertion {
private static final ESLogger logger = Loggers.getLogger(IsFalseAssertion.class);
public IsFalseAssertion(String field) {
super(field, false);
}
@Override
@SuppressWarnings("unchecked")
protected void doAssert(Object actualValue, Object expectedValue) {
logger.trace("assert that [{}] doesn't have a true value", actualValue);
if (actualValue == null) {
return;
}
String actualString = actualValue.toString();
assertThat(errorMessage(), actualString, anyOf(
equalTo(""),
equalToIgnoringCase(Boolean.FALSE.toString()),
equalTo("0")
));
}
private String errorMessage() {
return "field [" + getField() + "] has a true value but it shouldn't";
}
}
| 1 (no label) | src_test_java_org_elasticsearch_test_rest_section_IsFalseAssertion.java |
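The assertion above accepts null, the empty string, "false" in any case, and "0" as renderings of a false value. The same predicate without the Hamcrest machinery:

```java
public class Falsiness {
    /** True when the rendered value counts as "false" under the assertion above. */
    static boolean isFalsy(Object actual) {
        if (actual == null) {
            return true;
        }
        String s = actual.toString();
        return s.isEmpty() || s.equalsIgnoreCase("false") || s.equals("0");
    }

    public static void main(String[] args) {
        System.out.println(isFalsy(null));    // true
        System.out.println(isFalsy(""));      // true
        System.out.println(isFalsy("FALSE")); // true
        System.out.println(isFalsy(0));       // true  (toString() is "0")
        System.out.println(isFalsy("yes"));   // false
    }
}
```
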
3,708 |
public final class PoolExecutorThreadFactory extends AbstractExecutorThreadFactory {
private final String threadNamePrefix;
private final AtomicInteger idGen = new AtomicInteger(0);
// to reuse previous thread IDs
private final Queue<Integer> idQ = new LinkedBlockingQueue<Integer>(1000);
public PoolExecutorThreadFactory(ThreadGroup threadGroup, String threadNamePrefix, ClassLoader classLoader) {
super(threadGroup, classLoader);
this.threadNamePrefix = threadNamePrefix;
}
@Override
protected Thread createThread(Runnable r) {
Integer id = idQ.poll();
if (id == null) {
id = idGen.incrementAndGet();
}
String name = threadNamePrefix + id;
return new ManagedThread(r, name, id);
}
private class ManagedThread extends Thread {
protected final int id;
public ManagedThread(Runnable target, String name, int id) {
super(threadGroup, target, name);
this.id = id;
}
public void run() {
try {
super.run();
} catch (OutOfMemoryError e) {
OutOfMemoryErrorDispatcher.onOutOfMemory(e);
} finally {
try {
idQ.offer(id);
} catch (Throwable ignored) {
}
}
}
}
}
| 1 (no label) | hazelcast_src_main_java_com_hazelcast_util_executor_PoolExecutorThreadFactory.java |
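The factory above numbers threads from an AtomicInteger but returns each id to a bounded queue when its thread finishes, so long-lived pools reuse low ids instead of counting upward forever. A standalone sketch of the recycling scheme (here a wrapper Runnable returns the id, where the original overrides Thread.run):

```java
import java.util.Queue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

public class RecyclingThreadFactory implements ThreadFactory {
    private final String prefix;
    private final AtomicInteger idGen = new AtomicInteger(0);
    private final Queue<Integer> freeIds = new LinkedBlockingQueue<>(1000); // bounded: extras dropped

    public RecyclingThreadFactory(String prefix) { this.prefix = prefix; }

    @Override
    public Thread newThread(Runnable task) {
        Integer polled = freeIds.poll();               // reuse a freed id if one exists
        final int id = (polled != null) ? polled : idGen.incrementAndGet();
        return new Thread(() -> {
            try {
                task.run();
            } finally {
                freeIds.offer(id);                     // return the id once the thread finishes
            }
        }, prefix + id);
    }

    public static void main(String[] args) throws InterruptedException {
        RecyclingThreadFactory f = new RecyclingThreadFactory("pool-thread-");
        Thread t1 = f.newThread(() -> System.out.println(Thread.currentThread().getName()));
        t1.start();
        t1.join();
        f.newThread(() -> System.out.println(Thread.currentThread().getName())).start(); // reuses id 1
    }
}
```
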
323 |
public class NodeInfo extends NodeOperationResponse {
@Nullable
private ImmutableMap<String, String> serviceAttributes;
private Version version;
private Build build;
@Nullable
private Settings settings;
@Nullable
private OsInfo os;
@Nullable
private ProcessInfo process;
@Nullable
private JvmInfo jvm;
@Nullable
private ThreadPoolInfo threadPool;
@Nullable
private NetworkInfo network;
@Nullable
private TransportInfo transport;
@Nullable
private HttpInfo http;
@Nullable
private PluginsInfo plugins;
NodeInfo() {
}
public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable ImmutableMap<String, String> serviceAttributes, @Nullable Settings settings,
@Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool, @Nullable NetworkInfo network,
@Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsInfo plugins) {
super(node);
this.version = version;
this.build = build;
this.serviceAttributes = serviceAttributes;
this.settings = settings;
this.os = os;
this.process = process;
this.jvm = jvm;
this.threadPool = threadPool;
this.network = network;
this.transport = transport;
this.http = http;
this.plugins = plugins;
}
/**
* System's hostname. <code>null</code> in case of UnknownHostException
*/
@Nullable
public String getHostname() {
return getNode().getHostName();
}
/**
* The current ES version
*/
public Version getVersion() {
return version;
}
/**
* The build version of the node.
*/
public Build getBuild() {
return this.build;
}
/**
* The service attributes of the node.
*/
@Nullable
public ImmutableMap<String, String> getServiceAttributes() {
return this.serviceAttributes;
}
/**
* The settings of the node.
*/
@Nullable
public Settings getSettings() {
return this.settings;
}
/**
* Operating System level information.
*/
@Nullable
public OsInfo getOs() {
return this.os;
}
/**
* Process level information.
*/
@Nullable
public ProcessInfo getProcess() {
return process;
}
/**
* JVM level information.
*/
@Nullable
public JvmInfo getJvm() {
return jvm;
}
@Nullable
public ThreadPoolInfo getThreadPool() {
return this.threadPool;
}
/**
* Network level information.
*/
@Nullable
public NetworkInfo getNetwork() {
return network;
}
@Nullable
public TransportInfo getTransport() {
return transport;
}
@Nullable
public HttpInfo getHttp() {
return http;
}
@Nullable
public PluginsInfo getPlugins() {
return this.plugins;
}
public static NodeInfo readNodeInfo(StreamInput in) throws IOException {
NodeInfo nodeInfo = new NodeInfo();
nodeInfo.readFrom(in);
return nodeInfo;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
version = Version.readVersion(in);
build = Build.readBuild(in);
if (in.readBoolean()) {
ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
int size = in.readVInt();
for (int i = 0; i < size; i++) {
builder.put(in.readString(), in.readString());
}
serviceAttributes = builder.build();
}
if (in.readBoolean()) {
settings = ImmutableSettings.readSettingsFromStream(in);
}
if (in.readBoolean()) {
os = OsInfo.readOsInfo(in);
}
if (in.readBoolean()) {
process = ProcessInfo.readProcessInfo(in);
}
if (in.readBoolean()) {
jvm = JvmInfo.readJvmInfo(in);
}
if (in.readBoolean()) {
threadPool = ThreadPoolInfo.readThreadPoolInfo(in);
}
if (in.readBoolean()) {
network = NetworkInfo.readNetworkInfo(in);
}
if (in.readBoolean()) {
transport = TransportInfo.readTransportInfo(in);
}
if (in.readBoolean()) {
http = HttpInfo.readHttpInfo(in);
}
if (in.readBoolean()) {
plugins = PluginsInfo.readPluginsInfo(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(version.id);
Build.writeBuild(build, out);
if (getServiceAttributes() == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeVInt(serviceAttributes.size());
for (Map.Entry<String, String> entry : serviceAttributes.entrySet()) {
out.writeString(entry.getKey());
out.writeString(entry.getValue());
}
}
if (settings == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
ImmutableSettings.writeSettingsToStream(settings, out);
}
if (os == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
os.writeTo(out);
}
if (process == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
process.writeTo(out);
}
if (jvm == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
jvm.writeTo(out);
}
if (threadPool == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
threadPool.writeTo(out);
}
if (network == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
network.writeTo(out);
}
if (transport == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
transport.writeTo(out);
}
if (http == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
http.writeTo(out);
}
if (plugins == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
plugins.writeTo(out);
}
}
}
| 0 (true) | src_main_java_org_elasticsearch_action_admin_cluster_node_info_NodeInfo.java |
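readFrom/writeTo above implement an optional-field wire format: every nullable section is preceded by a boolean presence flag, and the reader consumes sections in exactly the order the writer emitted them. The same pattern with plain java.io Data streams (field content invented for illustration):

```java
import java.io.*;

public class OptionalFieldWire {
    static void writeOptionalString(DataOutput out, String value) throws IOException {
        if (value == null) {
            out.writeBoolean(false);        // absent: flag only
        } else {
            out.writeBoolean(true);         // present: flag, then payload
            out.writeUTF(value);
        }
    }

    static String readOptionalString(DataInput in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        writeOptionalString(out, "node-1");  // present
        writeOptionalString(out, null);      // absent
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(readOptionalString(in)); // node-1
        System.out.println(readOptionalString(in)); // null
    }
}
```
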
670 |
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name="BLC_CATEGORY")
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@AdminPresentationClass(friendlyName = "CategoryImpl_baseCategory")
@SQLDelete(sql="UPDATE BLC_CATEGORY SET ARCHIVED = 'Y' WHERE CATEGORY_ID = ?")
public class CategoryImpl implements Category, Status, AdminMainEntity {
private static final long serialVersionUID = 1L;
private static final Log LOG = LogFactory.getLog(CategoryImpl.class);
private static String buildLink(Category category, boolean ignoreTopLevel) {
Category myCategory = category;
StringBuilder linkBuffer = new StringBuilder(50);
while (myCategory != null) {
if (!ignoreTopLevel || myCategory.getDefaultParentCategory() != null) {
if (linkBuffer.length() == 0) {
linkBuffer.append(myCategory.getUrlKey());
} else if(myCategory.getUrlKey() != null && !"/".equals(myCategory.getUrlKey())){
linkBuffer.insert(0, myCategory.getUrlKey() + '/');
}
}
myCategory = myCategory.getDefaultParentCategory();
}
return linkBuffer.toString();
}
private static void fillInURLMapForCategory(Map<String, List<Long>> categoryUrlMap, Category category, String startingPath, List<Long> startingCategoryList) throws CacheFactoryException {
String urlKey = category.getUrlKey();
if (urlKey == null) {
throw new CacheFactoryException("Cannot create childCategoryURLMap - the urlKey for a category("+category.getId()+") was null");
}
String currentPath = "";
if (! "/".equals(category.getUrlKey())) {
currentPath = startingPath + "/" + category.getUrlKey();
}
List<Long> newCategoryList = new ArrayList<Long>(startingCategoryList);
newCategoryList.add(category.getId());
categoryUrlMap.put(currentPath, newCategoryList);
for (CategoryXref currentCategory : category.getChildCategoryXrefs()) {
fillInURLMapForCategory(categoryUrlMap, currentCategory.getSubCategory(), currentPath, newCategoryList);
}
}
@Id
@GeneratedValue(generator= "CategoryId")
@GenericGenerator(
name="CategoryId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="CategoryImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.core.catalog.domain.CategoryImpl")
}
)
@Column(name = "CATEGORY_ID")
@AdminPresentation(friendlyName = "CategoryImpl_Category_ID", visibility = VisibilityEnum.HIDDEN_ALL)
protected Long id;
@Column(name = "NAME", nullable=false)
@Index(name="CATEGORY_NAME_INDEX", columnNames={"NAME"})
@AdminPresentation(friendlyName = "CategoryImpl_Category_Name", order = 1000,
group = Presentation.Group.Name.General, groupOrder = Presentation.Group.Order.General,
prominent = true, gridOrder = 1, columnWidth = "300px",
translatable = true)
protected String name;
@Column(name = "URL")
@AdminPresentation(friendlyName = "CategoryImpl_Category_Url", order = 2000,
group = Presentation.Group.Name.General, groupOrder = Presentation.Group.Order.General,
prominent = true, gridOrder = 2, columnWidth = "300px",
validationConfigurations = { @ValidationConfiguration(validationImplementation = "blUriPropertyValidator") })
@Index(name="CATEGORY_URL_INDEX", columnNames={"URL"})
protected String url;
@Column(name = "URL_KEY")
@Index(name="CATEGORY_URLKEY_INDEX", columnNames={"URL_KEY"})
@AdminPresentation(friendlyName = "CategoryImpl_Category_Url_Key",
tab = Presentation.Tab.Name.Advanced, tabOrder = Presentation.Tab.Order.Advanced,
group = Presentation.Group.Name.Advanced, groupOrder = Presentation.Group.Order.Advanced,
excluded = true)
protected String urlKey;
@Column(name = "DESCRIPTION")
@AdminPresentation(friendlyName = "CategoryImpl_Category_Description",
group = Presentation.Group.Name.General, groupOrder = Presentation.Group.Order.General,
largeEntry = true,
excluded = true,
translatable = true)
protected String description;
@Column(name = "TAX_CODE")
protected String taxCode;
@Column(name = "ACTIVE_START_DATE")
@AdminPresentation(friendlyName = "CategoryImpl_Category_Active_Start_Date", order = 1000,
group = Presentation.Group.Name.ActiveDateRange, groupOrder = Presentation.Group.Order.ActiveDateRange)
protected Date activeStartDate;
@Column(name = "ACTIVE_END_DATE")
@AdminPresentation(friendlyName = "CategoryImpl_Category_Active_End_Date", order = 2000,
group = Presentation.Group.Name.ActiveDateRange, groupOrder = Presentation.Group.Order.ActiveDateRange)
protected Date activeEndDate;
@Column(name = "DISPLAY_TEMPLATE")
@AdminPresentation(friendlyName = "CategoryImpl_Category_Display_Template", order = 1000,
tab = Presentation.Tab.Name.Advanced, tabOrder = Presentation.Tab.Order.Advanced,
group = Presentation.Group.Name.Advanced, groupOrder = Presentation.Group.Order.Advanced)
protected String displayTemplate;
@Lob
@Type(type = "org.hibernate.type.StringClobType")
@Column(name = "LONG_DESCRIPTION", length = Integer.MAX_VALUE - 1)
@AdminPresentation(friendlyName = "CategoryImpl_Category_Long_Description", order = 3000,
group = Presentation.Group.Name.General, groupOrder = Presentation.Group.Order.General,
largeEntry = true,
fieldType = SupportedFieldType.HTML_BASIC,
translatable = true)
protected String longDescription;
@ManyToOne(targetEntity = CategoryImpl.class)
@JoinColumn(name = "DEFAULT_PARENT_CATEGORY_ID")
@Index(name="CATEGORY_PARENT_INDEX", columnNames={"DEFAULT_PARENT_CATEGORY_ID"})
@AdminPresentation(friendlyName = "CategoryImpl_defaultParentCategory", order = 4000,
group = Presentation.Group.Name.General, groupOrder = Presentation.Group.Order.General)
@AdminPresentationToOneLookup()
protected Category defaultParentCategory;
@OneToMany(targetEntity = CategoryXrefImpl.class, mappedBy = "categoryXrefPK.category")
@Cascade(value={org.hibernate.annotations.CascadeType.MERGE, org.hibernate.annotations.CascadeType.PERSIST})
@OrderBy(value="displayOrder")
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@BatchSize(size = 50)
@AdminPresentationAdornedTargetCollection(
targetObjectProperty = "categoryXrefPK.subCategory",
parentObjectProperty = "categoryXrefPK.category",
friendlyName = "allChildCategoriesTitle",
sortProperty = "displayOrder",
tab = Presentation.Tab.Name.Advanced, tabOrder = Presentation.Tab.Order.Advanced,
gridVisibleFields = { "name" })
protected List<CategoryXref> allChildCategoryXrefs = new ArrayList<CategoryXref>(10);
@OneToMany(targetEntity = CategoryXrefImpl.class, mappedBy = "categoryXrefPK.subCategory")
@Cascade(value={org.hibernate.annotations.CascadeType.MERGE, org.hibernate.annotations.CascadeType.PERSIST})
@OrderBy(value="displayOrder")
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@BatchSize(size = 50)
@AdminPresentationAdornedTargetCollection(
targetObjectProperty = "categoryXrefPK.category",
parentObjectProperty = "categoryXrefPK.subCategory",
friendlyName = "allParentCategoriesTitle",
sortProperty = "displayOrder",
tab = Presentation.Tab.Name.Advanced, tabOrder = Presentation.Tab.Order.Advanced,
gridVisibleFields = { "name" })
protected List<CategoryXref> allParentCategoryXrefs = new ArrayList<CategoryXref>(10);
@OneToMany(targetEntity = CategoryProductXrefImpl.class, mappedBy = "categoryProductXref.category")
@Cascade(value={org.hibernate.annotations.CascadeType.MERGE, org.hibernate.annotations.CascadeType.PERSIST})
@OrderBy(value="displayOrder")
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@BatchSize(size = 50)
@AdminPresentationAdornedTargetCollection(
targetObjectProperty = "categoryProductXref.product",
parentObjectProperty = "categoryProductXref.category",
friendlyName = "allProductsTitle",
sortProperty = "displayOrder",
tab = Presentation.Tab.Name.Products, tabOrder = Presentation.Tab.Order.Products,
gridVisibleFields = { "defaultSku.name" })
protected List<CategoryProductXref> allProductXrefs = new ArrayList<CategoryProductXref>(10);
@ElementCollection
@MapKeyColumn(name="NAME")
@Column(name="URL")
@CollectionTable(name="BLC_CATEGORY_IMAGE", joinColumns=@JoinColumn(name="CATEGORY_ID"))
@BatchSize(size = 50)
@Deprecated
protected Map<String, String> categoryImages = new HashMap<String, String>(10);
@ManyToMany(targetEntity = MediaImpl.class)
@JoinTable(name = "BLC_CATEGORY_MEDIA_MAP", inverseJoinColumns = @JoinColumn(name = "MEDIA_ID", referencedColumnName = "MEDIA_ID"))
@MapKeyColumn(name = "MAP_KEY")
@Cascade(value={org.hibernate.annotations.CascadeType.ALL, org.hibernate.annotations.CascadeType.DELETE_ORPHAN})
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@BatchSize(size = 50)
@AdminPresentationMap(
friendlyName = "SkuImpl_Sku_Media",
tab = Presentation.Tab.Name.Media, tabOrder = Presentation.Tab.Order.Media,
keyPropertyFriendlyName = "SkuImpl_Sku_Media_Key",
deleteEntityUponRemove = true,
mediaField = "url",
keys = {
@AdminPresentationMapKey(keyName = "primary", friendlyKeyName = "mediaPrimary"),
@AdminPresentationMapKey(keyName = "alt1", friendlyKeyName = "mediaAlternate1"),
@AdminPresentationMapKey(keyName = "alt2", friendlyKeyName = "mediaAlternate2"),
@AdminPresentationMapKey(keyName = "alt3", friendlyKeyName = "mediaAlternate3"),
@AdminPresentationMapKey(keyName = "alt4", friendlyKeyName = "mediaAlternate4"),
@AdminPresentationMapKey(keyName = "alt5", friendlyKeyName = "mediaAlternate5"),
@AdminPresentationMapKey(keyName = "alt6", friendlyKeyName = "mediaAlternate6")
}
)
protected Map<String, Media> categoryMedia = new HashMap<String , Media>(10);
@OneToMany(mappedBy = "category", targetEntity = FeaturedProductImpl.class, cascade = {CascadeType.ALL})
@Cascade(value={org.hibernate.annotations.CascadeType.ALL, org.hibernate.annotations.CascadeType.DELETE_ORPHAN})
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@OrderBy(value="sequence")
@BatchSize(size = 50)
@AdminPresentationAdornedTargetCollection(friendlyName = "featuredProductsTitle", order = 1000,
tab = Presentation.Tab.Name.Marketing, tabOrder = Presentation.Tab.Order.Marketing,
targetObjectProperty = "product",
sortProperty = "sequence",
maintainedAdornedTargetFields = { "promotionMessage" },
gridVisibleFields = { "defaultSku.name", "promotionMessage" })
protected List<FeaturedProduct> featuredProducts = new ArrayList<FeaturedProduct>(10);
@OneToMany(mappedBy = "category", targetEntity = CrossSaleProductImpl.class, cascade = {CascadeType.ALL})
@Cascade(value={org.hibernate.annotations.CascadeType.ALL, org.hibernate.annotations.CascadeType.DELETE_ORPHAN})
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@OrderBy(value="sequence")
@AdminPresentationAdornedTargetCollection(friendlyName = "crossSaleProductsTitle", order = 2000,
tab = Presentation.Tab.Name.Marketing, tabOrder = Presentation.Tab.Order.Marketing,
targetObjectProperty = "relatedSaleProduct",
sortProperty = "sequence",
maintainedAdornedTargetFields = { "promotionMessage" },
gridVisibleFields = { "defaultSku.name", "promotionMessage" })
protected List<RelatedProduct> crossSaleProducts = new ArrayList<RelatedProduct>();
@OneToMany(mappedBy = "category", targetEntity = UpSaleProductImpl.class, cascade = {CascadeType.ALL})
@Cascade(value={org.hibernate.annotations.CascadeType.ALL, org.hibernate.annotations.CascadeType.DELETE_ORPHAN})
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@OrderBy(value="sequence")
@AdminPresentationAdornedTargetCollection(friendlyName = "upsaleProductsTitle", order = 3000,
tab = Presentation.Tab.Name.Marketing, tabOrder = Presentation.Tab.Order.Marketing,
targetObjectProperty = "relatedSaleProduct",
sortProperty = "sequence",
maintainedAdornedTargetFields = { "promotionMessage" },
gridVisibleFields = { "defaultSku.name", "promotionMessage" })
protected List<RelatedProduct> upSaleProducts = new ArrayList<RelatedProduct>();
@OneToMany(mappedBy = "category", targetEntity = CategorySearchFacetImpl.class, cascade = {CascadeType.ALL})
@Cascade(value={org.hibernate.annotations.CascadeType.ALL, org.hibernate.annotations.CascadeType.DELETE_ORPHAN})
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@OrderBy(value="sequence")
@AdminPresentationAdornedTargetCollection(friendlyName = "categoryFacetsTitle", order = 1000,
tab = Presentation.Tab.Name.SearchFacets, tabOrder = Presentation.Tab.Order.SearchFacets,
targetObjectProperty = "searchFacet",
sortProperty = "sequence",
gridVisibleFields = { "field", "label", "searchDisplayPriority" })
protected List<CategorySearchFacet> searchFacets = new ArrayList<CategorySearchFacet>();
@ManyToMany(targetEntity = SearchFacetImpl.class)
@JoinTable(name = "BLC_CAT_SEARCH_FACET_EXCL_XREF", joinColumns = @JoinColumn(name = "CATEGORY_ID"),
inverseJoinColumns = @JoinColumn(name = "SEARCH_FACET_ID", nullable = true))
@Cascade(value={org.hibernate.annotations.CascadeType.MERGE, org.hibernate.annotations.CascadeType.PERSIST})
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@BatchSize(size = 50)
@AdminPresentationAdornedTargetCollection(
order = 2000,
joinEntityClass = "org.broadleafcommerce.core.search.domain.CategoryExcludedSearchFacetImpl",
targetObjectProperty = "searchFacet",
parentObjectProperty = "category",
friendlyName = "excludedFacetsTitle",
tab = Presentation.Tab.Name.SearchFacets, tabOrder = Presentation.Tab.Order.SearchFacets,
gridVisibleFields = {"field", "label", "searchDisplayPriority"})
protected List<SearchFacet> excludedSearchFacets = new ArrayList<SearchFacet>(10);
@OneToMany(mappedBy = "category", targetEntity = CategoryAttributeImpl.class, cascade = {CascadeType.ALL}, orphanRemoval = true)
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@MapKey(name="name")
@BatchSize(size = 50)
@AdminPresentationMap(friendlyName = "categoryAttributesTitle",
tab = Presentation.Tab.Name.Advanced, tabOrder = Presentation.Tab.Order.Advanced,
deleteEntityUponRemove = true, forceFreeFormKeys = true, keyPropertyFriendlyName = "ProductAttributeImpl_Attribute_Name"
)
protected Map<String, CategoryAttribute> categoryAttributes = new HashMap<String, CategoryAttribute>();
@Column(name = "INVENTORY_TYPE")
@AdminPresentation(friendlyName = "CategoryImpl_Category_InventoryType", order = 2000,
tab = Presentation.Tab.Name.Advanced, tabOrder = Presentation.Tab.Order.Advanced,
group = Presentation.Group.Name.Advanced, groupOrder = Presentation.Group.Order.Advanced,
fieldType = SupportedFieldType.BROADLEAF_ENUMERATION,
broadleafEnumeration = "org.broadleafcommerce.core.inventory.service.type.InventoryType")
protected String inventoryType;
@Column(name = "FULFILLMENT_TYPE")
@AdminPresentation(friendlyName = "CategoryImpl_Category_FulfillmentType", order = 3000,
tab = Presentation.Tab.Name.Advanced, tabOrder = Presentation.Tab.Order.Advanced,
group = Presentation.Group.Name.Advanced, groupOrder = Presentation.Group.Order.Advanced,
fieldType = SupportedFieldType.BROADLEAF_ENUMERATION,
broadleafEnumeration = "org.broadleafcommerce.core.order.service.type.FulfillmentType")
protected String fulfillmentType;
@Embedded
protected ArchiveStatus archiveStatus = new ArchiveStatus();
@Transient
@Hydrated(factoryMethod = "createChildCategoryURLMap")
@Deprecated
protected Map<String, List<Long>> childCategoryURLMap;
@Transient
@Hydrated(factoryMethod = "createChildCategoryIds")
protected List<Long> childCategoryIds;
@Transient
protected List<CategoryXref> childCategoryXrefs = new ArrayList<CategoryXref>(50);
@Transient
protected List<Category> legacyChildCategories = new ArrayList<Category>(50);
@Transient
protected List<Category> allLegacyChildCategories = new ArrayList<Category>(50);
@Transient
protected List<FeaturedProduct> filteredFeaturedProducts = null;
@Transient
protected List<RelatedProduct> filteredCrossSales = null;
@Transient
protected List<RelatedProduct> filteredUpSales = null;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public String getName() {
return DynamicTranslationProvider.getValue(this, "name", name);
}
@Override
public void setName(String name) {
this.name = name;
}
@Override
public String getUrl() {
// TODO: if null return
// if blank return
// if startswith "/" return
// if contains a ":" and no "?" or (contains a ":" before a "?") return
// else "add a /" at the beginning
if(url == null || url.equals("") || url.startsWith("/")) {
return url;
} else if ((url.contains(":") && !url.contains("?")) || url.indexOf('?', url.indexOf(':')) != -1) {
return url;
} else {
return "/" + url;
}
}
@Override
public void setUrl(String url) {
this.url = url;
}
@Override
public String getUrlKey() {
if ((urlKey == null || "".equals(urlKey.trim())) && name != null) {
return UrlUtil.generateUrlKey(name);
}
return urlKey;
}
@Override
public String getGeneratedUrl() {
return buildLink(this, false);
}
@Override
public void setUrlKey(String urlKey) {
this.urlKey = urlKey;
}
@Override
public String getDescription() {
return DynamicTranslationProvider.getValue(this, "description", description);
}
@Override
public void setDescription(String description) {
this.description = description;
}
@Override
public Date getActiveStartDate() {
if ('Y'==getArchived()) {
return null;
}
return activeStartDate;
}
@Override
public void setActiveStartDate(Date activeStartDate) {
this.activeStartDate = (activeStartDate == null) ? null : new Date(activeStartDate.getTime());
}
@Override
public Date getActiveEndDate() {
return activeEndDate;
}
@Override
public void setActiveEndDate(Date activeEndDate) {
this.activeEndDate = (activeEndDate == null) ? null : new Date(activeEndDate.getTime());
}
@Override
public boolean isActive() {
if (LOG.isDebugEnabled()) {
if (!DateUtil.isActive(activeStartDate, activeEndDate, true)) {
LOG.debug("category, " + id + ", inactive due to date");
}
if ('Y'==getArchived()) {
LOG.debug("category, " + id + ", inactive due to archived status");
}
}
return DateUtil.isActive(activeStartDate, activeEndDate, true) && 'Y'!=getArchived();
}
@Override
public String getDisplayTemplate() {
return displayTemplate;
}
@Override
public void setDisplayTemplate(String displayTemplate) {
this.displayTemplate = displayTemplate;
}
@Override
public String getLongDescription() {
return DynamicTranslationProvider.getValue(this, "longDescription", longDescription);
}
@Override
public void setLongDescription(String longDescription) {
this.longDescription = longDescription;
}
@Override
public Category getDefaultParentCategory() {
return defaultParentCategory;
}
@Override
public void setDefaultParentCategory(Category defaultParentCategory) {
this.defaultParentCategory = defaultParentCategory;
}
@Override
public List<CategoryXref> getAllChildCategoryXrefs(){
return allChildCategoryXrefs;
}
@Override
public List<CategoryXref> getChildCategoryXrefs() {
if (childCategoryXrefs.isEmpty()) {
for (CategoryXref category : allChildCategoryXrefs) {
if (category.getSubCategory().isActive()) {
childCategoryXrefs.add(category);
}
}
}
return Collections.unmodifiableList(childCategoryXrefs);
}
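    // Note (added for clarity, not in the original source): childCategoryXrefs is a
    // @Transient cache; the active subset of allChildCategoryXrefs is computed once
    // per loaded instance and then always returned as an unmodifiable list.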
@Override
public void setChildCategoryXrefs(List<CategoryXref> childCategories) {
this.childCategoryXrefs.clear();
for(CategoryXref category : childCategories){
this.childCategoryXrefs.add(category);
}
}
@Override
public void setAllChildCategoryXrefs(List<CategoryXref> childCategories){
allChildCategoryXrefs.clear();
for(CategoryXref category : childCategories){
allChildCategoryXrefs.add(category);
}
}
@Override
@Deprecated
public List<Category> getAllChildCategories(){
if (allLegacyChildCategories.isEmpty()) {
for (CategoryXref category : allChildCategoryXrefs) {
allLegacyChildCategories.add(category.getSubCategory());
}
}
return Collections.unmodifiableList(allLegacyChildCategories);
}
@Override
public boolean hasAllChildCategories(){
return !allChildCategoryXrefs.isEmpty();
}
@Override
@Deprecated
public void setAllChildCategories(List<Category> childCategories){
throw new UnsupportedOperationException("Not Supported - Use setAllChildCategoryXrefs()");
}
@Override
@Deprecated
public List<Category> getChildCategories() {
if (legacyChildCategories.isEmpty()) {
for (CategoryXref category : allChildCategoryXrefs) {
if (category.getSubCategory().isActive()) {
legacyChildCategories.add(category.getSubCategory());
}
}
}
return Collections.unmodifiableList(legacyChildCategories);
}
@Override
public boolean hasChildCategories() {
return !getChildCategoryXrefs().isEmpty();
}
@Override
@Deprecated
public void setChildCategories(List<Category> childCategories) {
throw new UnsupportedOperationException("Not Supported - Use setChildCategoryXrefs()");
}
@Override
public List<Long> getChildCategoryIds() {
if (childCategoryIds == null) {
HydratedSetup.populateFromCache(this, "childCategoryIds");
}
return childCategoryIds;
}
@Override
public void setChildCategoryIds(List<Long> childCategoryIds) {
this.childCategoryIds = childCategoryIds;
}
public List<Long> createChildCategoryIds() {
childCategoryIds = new ArrayList<Long>();
for (CategoryXref category : allChildCategoryXrefs) {
if (category.getSubCategory().isActive()) {
childCategoryIds.add(category.getSubCategory().getId());
}
}
return childCategoryIds;
}
@Override
@Deprecated
public Map<String, String> getCategoryImages() {
return categoryImages;
}
@Override
@Deprecated
public String getCategoryImage(String imageKey) {
return categoryImages.get(imageKey);
}
@Override
@Deprecated
public void setCategoryImages(Map<String, String> categoryImages) {
this.categoryImages.clear();
for(Map.Entry<String, String> me : categoryImages.entrySet()) {
this.categoryImages.put(me.getKey(), me.getValue());
}
}
@Override
@Deprecated
public Map<String, List<Long>> getChildCategoryURLMap() {
if (childCategoryURLMap == null) {
HydratedSetup.populateFromCache(this, "childCategoryURLMap");
}
return childCategoryURLMap;
}
public Map<String, List<Long>> createChildCategoryURLMap() {
try {
Map<String, List<Long>> newMap = new HashMap<String, List<Long>>(50);
fillInURLMapForCategory(newMap, this, "", new ArrayList<Long>(10));
return newMap;
} catch (CacheFactoryException e) {
throw new RuntimeException(e);
}
}
@Override
@Deprecated
public void setChildCategoryURLMap(Map<String, List<Long>> childCategoryURLMap) {
this.childCategoryURLMap = childCategoryURLMap;
}
@Override
public List<Category> buildFullCategoryHierarchy(List<Category> currentHierarchy) {
if (currentHierarchy == null) {
currentHierarchy = new ArrayList<Category>();
currentHierarchy.add(this);
}
List<Category> myParentCategories = new ArrayList<Category>();
if (defaultParentCategory != null) {
myParentCategories.add(defaultParentCategory);
}
if (allParentCategoryXrefs != null && allParentCategoryXrefs.size() > 0) {
for (CategoryXref parent : allParentCategoryXrefs) {
myParentCategories.add(parent.getCategory());
}
}
for (Category category : myParentCategories) {
if (!currentHierarchy.contains(category)) {
currentHierarchy.add(category);
category.buildFullCategoryHierarchy(currentHierarchy);
}
}
return currentHierarchy;
}
@Override
public List<Category> buildCategoryHierarchy(List<Category> currentHierarchy) {
if (currentHierarchy == null) {
currentHierarchy = new ArrayList<Category>();
currentHierarchy.add(this);
}
if (defaultParentCategory != null && ! currentHierarchy.contains(defaultParentCategory)) {
currentHierarchy.add(defaultParentCategory);
defaultParentCategory.buildCategoryHierarchy(currentHierarchy);
}
return currentHierarchy;
}
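    // Illustrative sketch (not part of the original class): for a hypothetical
    // default-parent chain Shoes -> Footwear -> Root,
    //
    //   shoes.buildCategoryHierarchy(null)     -> [Shoes, Footwear, Root]
    //   shoes.buildFullCategoryHierarchy(null) -> the same chain plus every category
    //       reachable through allParentCategoryXrefs, deduplicated by the
    //       contains() checks above.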
@Override
public List<CategoryXref> getAllParentCategoryXrefs() {
return allParentCategoryXrefs;
}
@Override
public void setAllParentCategoryXrefs(List<CategoryXref> allParentCategories) {
this.allParentCategoryXrefs.clear();
allParentCategoryXrefs.addAll(allParentCategories);
}
@Override
@Deprecated
public List<Category> getAllParentCategories() {
List<Category> parents = new ArrayList<Category>(allParentCategoryXrefs.size());
for (CategoryXref xref : allParentCategoryXrefs) {
parents.add(xref.getCategory());
}
return Collections.unmodifiableList(parents);
}
@Override
@Deprecated
public void setAllParentCategories(List<Category> allParentCategories) {
throw new UnsupportedOperationException("Not Supported - Use setAllParentCategoryXrefs()");
}
@Override
public List<FeaturedProduct> getFeaturedProducts() {
if (filteredFeaturedProducts == null && featuredProducts != null) {
filteredFeaturedProducts = new ArrayList<FeaturedProduct>(featuredProducts.size());
filteredFeaturedProducts.addAll(featuredProducts);
CollectionUtils.filter(filteredFeaturedProducts, new Predicate() {
@Override
public boolean evaluate(Object arg) {
return 'Y' != ((Status) ((FeaturedProduct) arg).getProduct()).getArchived();
}
});
}
return filteredFeaturedProducts;
}
@Override
public void setFeaturedProducts(List<FeaturedProduct> featuredProducts) {
this.featuredProducts.clear();
for(FeaturedProduct featuredProduct : featuredProducts){
this.featuredProducts.add(featuredProduct);
}
}
@Override
public List<RelatedProduct> getCrossSaleProducts() {
if (filteredCrossSales == null && crossSaleProducts != null) {
filteredCrossSales = new ArrayList<RelatedProduct>(crossSaleProducts.size());
filteredCrossSales.addAll(crossSaleProducts);
CollectionUtils.filter(filteredCrossSales, new Predicate() {
@Override
public boolean evaluate(Object arg) {
return 'Y'!=((Status)((CrossSaleProductImpl) arg).getRelatedProduct()).getArchived();
}
});
}
return filteredCrossSales;
}
@Override
public void setCrossSaleProducts(List<RelatedProduct> crossSaleProducts) {
this.crossSaleProducts.clear();
for(RelatedProduct relatedProduct : crossSaleProducts){
this.crossSaleProducts.add(relatedProduct);
}
}
@Override
public List<RelatedProduct> getUpSaleProducts() {
if (filteredUpSales == null && upSaleProducts != null) {
filteredUpSales = new ArrayList<RelatedProduct>(upSaleProducts.size());
filteredUpSales.addAll(upSaleProducts);
CollectionUtils.filter(filteredUpSales, new Predicate() {
@Override
public boolean evaluate(Object arg) {
return 'Y'!=((Status)((UpSaleProductImpl) arg).getRelatedProduct()).getArchived();
}
});
}
return filteredUpSales;
}
@Override
public List<RelatedProduct> getCumulativeCrossSaleProducts() {
Set<RelatedProduct> returnProductsSet = new LinkedHashSet<RelatedProduct>();
List<Category> categoryHierarchy = buildCategoryHierarchy(null);
for (Category category : categoryHierarchy) {
returnProductsSet.addAll(category.getCrossSaleProducts());
}
return new ArrayList<RelatedProduct>(returnProductsSet);
}
@Override
public List<RelatedProduct> getCumulativeUpSaleProducts() {
Set<RelatedProduct> returnProductsSet = new LinkedHashSet<RelatedProduct>();
List<Category> categoryHierarchy = buildCategoryHierarchy(null);
for (Category category : categoryHierarchy) {
returnProductsSet.addAll(category.getUpSaleProducts());
}
return new ArrayList<RelatedProduct>(returnProductsSet);
}
@Override
public List<FeaturedProduct> getCumulativeFeaturedProducts() {
Set<FeaturedProduct> returnProductsSet = new LinkedHashSet<FeaturedProduct>();
List<Category> categoryHierarchy = buildCategoryHierarchy(null);
for (Category category : categoryHierarchy) {
returnProductsSet.addAll(category.getFeaturedProducts());
}
return new ArrayList<FeaturedProduct>(returnProductsSet);
}
@Override
public void setUpSaleProducts(List<RelatedProduct> upSaleProducts) {
this.upSaleProducts.clear();
for(RelatedProduct relatedProduct : upSaleProducts){
this.upSaleProducts.add(relatedProduct);
}
}
@Override
public List<CategoryProductXref> getActiveProductXrefs() {
List<CategoryProductXref> result = new ArrayList<CategoryProductXref>();
for (CategoryProductXref product : allProductXrefs) {
if (product.getProduct().isActive()) {
result.add(product);
}
}
return Collections.unmodifiableList(result);
}
@Override
public List<CategoryProductXref> getAllProductXrefs() {
return allProductXrefs;
}
@Override
public void setAllProductXrefs(List<CategoryProductXref> allProducts) {
this.allProductXrefs.clear();
allProductXrefs.addAll(allProducts);
}
@Override
@Deprecated
public List<Product> getActiveProducts() {
List<Product> result = new ArrayList<Product>();
for (CategoryProductXref product : allProductXrefs) {
if (product.getProduct().isActive()) {
result.add(product.getProduct());
}
}
return Collections.unmodifiableList(result);
}
@Override
@Deprecated
public List<Product> getAllProducts() {
List<Product> result = new ArrayList<Product>();
for (CategoryProductXref product : allProductXrefs) {
result.add(product.getProduct());
}
return Collections.unmodifiableList(result);
}
@Override
@Deprecated
public void setAllProducts(List<Product> allProducts) {
throw new UnsupportedOperationException("Not Supported - Use setAllProductXrefs()");
}
@Override
public List<CategorySearchFacet> getSearchFacets() {
return searchFacets;
}
@Override
public void setSearchFacets(List<CategorySearchFacet> searchFacets) {
this.searchFacets = searchFacets;
}
@Override
public List<SearchFacet> getExcludedSearchFacets() {
return excludedSearchFacets;
}
@Override
public void setExcludedSearchFacets(List<SearchFacet> excludedSearchFacets) {
this.excludedSearchFacets = excludedSearchFacets;
}
@Override
public InventoryType getInventoryType() {
return InventoryType.getInstance(this.inventoryType);
}
@Override
public void setInventoryType(InventoryType inventoryType) {
this.inventoryType = inventoryType.getType();
}
@Override
public FulfillmentType getFulfillmentType() {
return FulfillmentType.getInstance(this.fulfillmentType);
}
@Override
public void setFulfillmentType(FulfillmentType fulfillmentType) {
this.fulfillmentType = fulfillmentType.getType();
}
@Override
public List<CategorySearchFacet> getCumulativeSearchFacets() {
final List<CategorySearchFacet> returnFacets = new ArrayList<CategorySearchFacet>();
returnFacets.addAll(getSearchFacets());
Collections.sort(returnFacets, facetPositionComparator);
// Add in parent facets unless they are excluded
List<CategorySearchFacet> parentFacets = null;
if (defaultParentCategory != null) {
parentFacets = defaultParentCategory.getCumulativeSearchFacets();
            CollectionUtils.filter(parentFacets, new Predicate() {
                @Override
                public boolean evaluate(Object arg) {
                    CategorySearchFacet csf = (CategorySearchFacet) arg;
                    boolean duplicate = false;
                    // returnFacets holds CategorySearchFacet instances, so compare by the
                    // wrapped SearchFacet; contains(csf.getSearchFacet()) could never match
                    for (CategorySearchFacet returnFacet : returnFacets) {
                        duplicate = duplicate || returnFacet.getSearchFacet().equals(csf.getSearchFacet());
                    }
                    return !getExcludedSearchFacets().contains(csf.getSearchFacet()) && !duplicate;
                }
            });
}
if (parentFacets != null) {
returnFacets.addAll(parentFacets);
}
return returnFacets;
}
@Override
public Map<String, Media> getCategoryMedia() {
return categoryMedia;
}
@Override
public void setCategoryMedia(Map<String, Media> categoryMedia) {
this.categoryMedia.clear();
for(Map.Entry<String, Media> me : categoryMedia.entrySet()) {
this.categoryMedia.put(me.getKey(), me.getValue());
}
}
@Override
public Map<String, CategoryAttribute> getCategoryAttributesMap() {
return categoryAttributes;
}
@Override
public void setCategoryAttributesMap(Map<String, CategoryAttribute> categoryAttributes) {
this.categoryAttributes = categoryAttributes;
}
@Override
public List<CategoryAttribute> getCategoryAttributes() {
List<CategoryAttribute> ca = new ArrayList<CategoryAttribute>(categoryAttributes.values());
return Collections.unmodifiableList(ca);
}
@Override
public void setCategoryAttributes(List<CategoryAttribute> categoryAttributes) {
this.categoryAttributes = new HashMap<String, CategoryAttribute>();
for (CategoryAttribute categoryAttribute : categoryAttributes) {
this.categoryAttributes.put(categoryAttribute.getName(), categoryAttribute);
}
}
@Override
public CategoryAttribute getCategoryAttributeByName(String name) {
for (CategoryAttribute attribute : getCategoryAttributes()) {
if (attribute.getName().equals(name)) {
return attribute;
}
}
return null;
}
@Override
public Map<String, CategoryAttribute> getMappedCategoryAttributes() {
Map<String, CategoryAttribute> map = new HashMap<String, CategoryAttribute>();
for (CategoryAttribute attr : getCategoryAttributes()) {
map.put(attr.getName(), attr);
}
return map;
}
@Override
public Character getArchived() {
if (archiveStatus == null) {
archiveStatus = new ArchiveStatus();
}
return archiveStatus.getArchived();
}
@Override
public void setArchived(Character archived) {
if (archiveStatus == null) {
archiveStatus = new ArchiveStatus();
}
archiveStatus.setArchived(archived);
}
@Override
public int hashCode() {
int prime = 31;
int result = 1;
result = prime * result + (name == null ? 0 : name.hashCode());
result = prime * result + (url == null ? 0 : url.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
CategoryImpl other = (CategoryImpl) obj;
if (id != null && other.id != null) {
return id.equals(other.id);
}
if (name == null) {
if (other.name != null) {
return false;
}
} else if (!name.equals(other.name)) {
return false;
}
if (url == null) {
if (other.url != null) {
return false;
}
} else if (!url.equals(other.url)) {
return false;
}
return true;
}
protected static Comparator<CategorySearchFacet> facetPositionComparator = new Comparator<CategorySearchFacet>() {
@Override
public int compare(CategorySearchFacet o1, CategorySearchFacet o2) {
return o1.getSequence().compareTo(o2.getSequence());
}
};
public static class Presentation {
public static class Tab {
public static class Name {
public static final String Marketing = "CategoryImpl_Marketing_Tab";
public static final String Media = "CategoryImpl_Media_Tab";
public static final String Advanced = "CategoryImpl_Advanced_Tab";
public static final String Products = "CategoryImpl_Products_Tab";
public static final String SearchFacets = "CategoryImpl_categoryFacetsTab";
}
public static class Order {
public static final int Marketing = 2000;
public static final int Media = 3000;
public static final int Advanced = 4000;
public static final int Products = 5000;
public static final int SearchFacets = 3500;
}
}
public static class Group {
public static class Name {
public static final String General = "CategoryImpl_Category_Description";
public static final String ActiveDateRange = "CategoryImpl_Active_Date_Range";
public static final String Advanced = "CategoryImpl_Advanced";
}
public static class Order {
public static final int General = 1000;
public static final int ActiveDateRange = 2000;
public static final int Advanced = 1000;
}
}
}
@Override
public String getMainEntityName() {
return getName();
}
@Override
public String getTaxCode() {
return this.taxCode;
}
@Override
public void setTaxCode(String taxCode) {
this.taxCode = taxCode;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_catalog_domain_CategoryImpl.java
|
30 |
final class NestedCompletionProposal implements ICompletionProposal,
ICompletionProposalExtension2 {
private final String op;
private final int loc;
private final int index;
private final boolean basic;
private final Declaration dec;
NestedCompletionProposal(Declaration dec, int loc,
int index, boolean basic, String op) {
this.op = op;
this.loc = loc;
this.index = index;
this.basic = basic;
this.dec = dec;
}
public String getAdditionalProposalInfo() {
return null;
}
@Override
public void apply(IDocument document) {
//the following awfulness is necessary because the
//insertion point may have changed (and even its
//text may have changed, since the proposal was
//instantiated).
try {
IRegion li = document.getLineInformationOfOffset(loc);
int endOfLine = li.getOffset() + li.getLength();
int startOfArgs = getFirstPosition();
int offset = findCharCount(index, document,
loc+startOfArgs, endOfLine,
",;", "", true)+1;
if (offset>0 && document.getChar(offset)==' ') {
offset++;
}
int nextOffset = findCharCount(index+1, document,
loc+startOfArgs, endOfLine,
",;", "", true);
int middleOffset = findCharCount(1, document,
offset, nextOffset,
"=", "", true)+1;
if (middleOffset>0 &&
document.getChar(middleOffset)=='>') {
middleOffset++;
}
while (middleOffset>0 &&
document.getChar(middleOffset)==' ') {
middleOffset++;
}
if (middleOffset>offset &&
middleOffset<nextOffset) {
offset = middleOffset;
}
String str = getText(false);
if (nextOffset==-1) {
nextOffset = offset;
}
if (document.getChar(nextOffset)=='}') {
str += " ";
}
document.replace(offset, nextOffset-offset, str);
}
catch (BadLocationException e) {
e.printStackTrace();
}
//adding imports drops us out of linked mode :(
/*try {
DocumentChange tc = new DocumentChange("imports", document);
tc.setEdit(new MultiTextEdit());
HashSet<Declaration> decs = new HashSet<Declaration>();
CompilationUnit cu = cpc.getRootNode();
importDeclaration(decs, d, cu);
if (d instanceof Functional) {
List<ParameterList> pls = ((Functional) d).getParameterLists();
if (!pls.isEmpty()) {
for (Parameter p: pls.get(0).getParameters()) {
MethodOrValue pm = p.getModel();
if (pm instanceof Method) {
for (ParameterList ppl: ((Method) pm).getParameterLists()) {
for (Parameter pp: ppl.getParameters()) {
importSignatureTypes(pp.getModel(), cu, decs);
}
}
}
}
}
}
applyImports(tc, decs, cu, document);
tc.perform(new NullProgressMonitor());
}
catch (Exception e) {
e.printStackTrace();
}*/
}
private String getText(boolean description) {
StringBuilder sb = new StringBuilder()
.append(op).append(dec.getName(getUnit()));
if (dec instanceof Functional && !basic) {
appendPositionalArgs(dec, getUnit(), sb,
false, description);
}
return sb.toString();
}
@Override
public Point getSelection(IDocument document) {
return null;
}
@Override
public String getDisplayString() {
return getText(true);
}
@Override
public Image getImage() {
return getImageForDeclaration(dec);
}
@Override
public IContextInformation getContextInformation() {
return null;
}
@Override
public void apply(ITextViewer viewer, char trigger,
int stateMask, int offset) {
apply(viewer.getDocument());
}
@Override
public void selected(ITextViewer viewer, boolean smartToggle) {}
@Override
public void unselected(ITextViewer viewer) {}
@Override
public boolean validate(IDocument document, int currentOffset,
DocumentEvent event) {
if (event==null) {
return true;
}
else {
try {
IRegion li = document.getLineInformationOfOffset(loc);
int endOfLine = li.getOffset() + li.getLength();
int startOfArgs = getFirstPosition();
int offset = findCharCount(index, document,
loc+startOfArgs, endOfLine,
",;", "", true)+1;
String content = document.get(offset, currentOffset - offset);
int eq = content.indexOf("=");
if (eq>0) {
content = content.substring(eq+1);
}
String filter = content.trim().toLowerCase();
String decName = dec.getName(getUnit());
if ((op+decName).toLowerCase().startsWith(filter) ||
decName.toLowerCase().startsWith(filter)) {
return true;
}
}
catch (BadLocationException e) {
// ignore concurrently modified document
}
return false;
}
}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_InvocationCompletionProposal.java
|
1,466 |
public class OSQLFunctionBothE extends OSQLFunctionMove {
public static final String NAME = "bothE";
public OSQLFunctionBothE() {
super(NAME, 0, 1);
}
@Override
protected Object move(final OrientBaseGraph graph, final OIdentifiable iRecord, final String[] iLabels) {
return v2e(graph, iRecord, Direction.BOTH, iLabels);
}
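  // Illustrative sketch (not part of the original class), assuming standard
  // OrientDB graph SQL, where bothE() projects the incoming and outgoing edges
  // of each vertex, optionally filtered by edge label:
  //
  //   SELECT bothE() FROM V
  //   SELECT bothE('knows') FROM V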
}
| 1no label
|
graphdb_src_main_java_com_orientechnologies_orient_graph_sql_functions_OSQLFunctionBothE.java
|
189 |
public class ClientConfig {
/**
     * Properties for configuring the client.
*/
private Properties properties = new Properties();
/**
     * The Group Configuration properties, such as the
     * name and password used to connect to the cluster.
*/
private GroupConfig groupConfig = new GroupConfig();
/**
     * The Security Configuration for custom Credentials:
     * the name and password used to connect to the cluster.
*/
private ClientSecurityConfig securityConfig = new ClientSecurityConfig();
/**
     * The Network Configuration properties, such as the
     * addresses to connect to, smart routing, and socket options.
*/
private ClientNetworkConfig networkConfig = new ClientNetworkConfig();
/**
     * Used to distribute operations across multiple endpoints.
*/
private LoadBalancer loadBalancer;
/**
     * List of listeners that Hazelcast will automatically add as part of the initialization process.
* Currently only supports {@link com.hazelcast.core.LifecycleListener}.
*/
private List<ListenerConfig> listenerConfigs = new LinkedList<ListenerConfig>();
/**
     * Pool size for the internal ExecutorService that handles responses etc.
*/
private int executorPoolSize = -1;
private SerializationConfig serializationConfig = new SerializationConfig();
private List<ProxyFactoryConfig> proxyFactoryConfigs = new LinkedList<ProxyFactoryConfig>();
private ManagedContext managedContext;
private ClassLoader classLoader;
public String getProperty(String name) {
String value = properties.getProperty(name);
return value != null ? value : System.getProperty(name);
}
public ClientConfig setProperty(String name, String value) {
properties.put(name, value);
return this;
}
    private Map<String, NearCacheConfig> nearCacheConfigMap = new HashMap<String, NearCacheConfig>();
    public Properties getProperties() {
        return properties;
    }
    public ClientConfig setProperties(final Properties properties) {
        this.properties = properties;
        return this;
    }
public ClientSecurityConfig getSecurityConfig() {
return securityConfig;
}
public void setSecurityConfig(ClientSecurityConfig securityConfig) {
this.securityConfig = securityConfig;
}
public ClientNetworkConfig getNetworkConfig() {
return networkConfig;
}
public void setNetworkConfig(ClientNetworkConfig networkConfig) {
this.networkConfig = networkConfig;
}
/**
     * Please use {@link ClientConfig#addNearCacheConfig(NearCacheConfig)} instead.
*
* @param mapName
* @param nearCacheConfig
* @return
*/
@Deprecated
public ClientConfig addNearCacheConfig(String mapName, NearCacheConfig nearCacheConfig) {
nearCacheConfig.setName(mapName);
return addNearCacheConfig(nearCacheConfig);
}
public ClientConfig addNearCacheConfig(NearCacheConfig nearCacheConfig) {
nearCacheConfigMap.put(nearCacheConfig.getName(), nearCacheConfig);
return this;
}
public ClientConfig addListenerConfig(ListenerConfig listenerConfig) {
getListenerConfigs().add(listenerConfig);
return this;
}
public ClientConfig addProxyFactoryConfig(ProxyFactoryConfig proxyFactoryConfig) {
this.proxyFactoryConfigs.add(proxyFactoryConfig);
return this;
}
public NearCacheConfig getNearCacheConfig(String mapName) {
return lookupByPattern(nearCacheConfigMap, mapName);
}
public Map<String, NearCacheConfig> getNearCacheConfigMap() {
return nearCacheConfigMap;
}
public ClientConfig setNearCacheConfigMap(Map<String, NearCacheConfig> nearCacheConfigMap) {
this.nearCacheConfigMap = nearCacheConfigMap;
return this;
}
/**
* Use {@link ClientNetworkConfig#isSmartRouting} instead
*/
@Deprecated
public boolean isSmartRouting() {
return networkConfig.isSmartRouting();
}
/**
* Use {@link ClientNetworkConfig#setSmartRouting} instead
*/
@Deprecated
public ClientConfig setSmartRouting(boolean smartRouting) {
networkConfig.setSmartRouting(smartRouting);
return this;
}
/**
* Use {@link ClientNetworkConfig#getSocketInterceptorConfig} instead
*/
@Deprecated
public SocketInterceptorConfig getSocketInterceptorConfig() {
return networkConfig.getSocketInterceptorConfig();
}
/**
* Use {@link ClientNetworkConfig#setSocketInterceptorConfig} instead
*/
@Deprecated
public ClientConfig setSocketInterceptorConfig(SocketInterceptorConfig socketInterceptorConfig) {
networkConfig.setSocketInterceptorConfig(socketInterceptorConfig);
return this;
}
/**
* Use {@link ClientNetworkConfig#getConnectionAttemptPeriod} instead
*/
@Deprecated
public int getConnectionAttemptPeriod() {
return networkConfig.getConnectionAttemptPeriod();
}
/**
* Use {@link ClientNetworkConfig#setConnectionAttemptPeriod} instead
*/
@Deprecated
public ClientConfig setConnectionAttemptPeriod(int connectionAttemptPeriod) {
networkConfig.setConnectionAttemptPeriod(connectionAttemptPeriod);
return this;
}
/**
* Use {@link ClientNetworkConfig#getConnectionAttemptLimit} instead
*/
@Deprecated
public int getConnectionAttemptLimit() {
return networkConfig.getConnectionAttemptLimit();
}
/**
* Use {@link ClientNetworkConfig#setConnectionAttemptLimit} instead
*/
@Deprecated
public ClientConfig setConnectionAttemptLimit(int connectionAttemptLimit) {
networkConfig.setConnectionAttemptLimit(connectionAttemptLimit);
return this;
}
/**
* Use {@link ClientNetworkConfig#getConnectionTimeout} instead
*/
@Deprecated
public int getConnectionTimeout() {
return networkConfig.getConnectionTimeout();
}
/**
* Use {@link ClientNetworkConfig#setConnectionTimeout} instead
*/
@Deprecated
public ClientConfig setConnectionTimeout(int connectionTimeout) {
networkConfig.setConnectionTimeout(connectionTimeout);
return this;
}
public Credentials getCredentials() {
return securityConfig.getCredentials();
}
public ClientConfig setCredentials(Credentials credentials) {
securityConfig.setCredentials(credentials);
return this;
}
/**
* Use {@link ClientNetworkConfig#addAddress} instead
*/
@Deprecated
public ClientConfig addAddress(String... addresses) {
networkConfig.addAddress(addresses);
return this;
}
/**
* Use {@link ClientNetworkConfig#setAddresses} instead
*/
@Deprecated
public ClientConfig setAddresses(List<String> addresses) {
networkConfig.setAddresses(addresses);
return this;
}
/**
* Use {@link ClientNetworkConfig#getAddresses} instead
*/
@Deprecated
public List<String> getAddresses() {
return networkConfig.getAddresses();
}
public GroupConfig getGroupConfig() {
return groupConfig;
}
public ClientConfig setGroupConfig(GroupConfig groupConfig) {
this.groupConfig = groupConfig;
return this;
}
public List<ListenerConfig> getListenerConfigs() {
return listenerConfigs;
}
public ClientConfig setListenerConfigs(List<ListenerConfig> listenerConfigs) {
this.listenerConfigs = listenerConfigs;
return this;
}
public LoadBalancer getLoadBalancer() {
return loadBalancer;
}
public ClientConfig setLoadBalancer(LoadBalancer loadBalancer) {
this.loadBalancer = loadBalancer;
return this;
}
/**
* Use {@link ClientNetworkConfig#isRedoOperation} instead
*/
@Deprecated
public boolean isRedoOperation() {
return networkConfig.isRedoOperation();
}
/**
* Use {@link ClientNetworkConfig#setRedoOperation} instead
*/
@Deprecated
public ClientConfig setRedoOperation(boolean redoOperation) {
networkConfig.setRedoOperation(redoOperation);
return this;
}
/**
* Use {@link ClientNetworkConfig#getSocketOptions} instead
*/
@Deprecated
public SocketOptions getSocketOptions() {
return networkConfig.getSocketOptions();
}
/**
* Use {@link ClientNetworkConfig#setSocketOptions} instead
*/
@Deprecated
public ClientConfig setSocketOptions(SocketOptions socketOptions) {
networkConfig.setSocketOptions(socketOptions);
return this;
}
public ClassLoader getClassLoader() {
return classLoader;
}
public ClientConfig setClassLoader(ClassLoader classLoader) {
this.classLoader = classLoader;
return this;
}
public ManagedContext getManagedContext() {
return managedContext;
}
public ClientConfig setManagedContext(ManagedContext managedContext) {
this.managedContext = managedContext;
return this;
}
public int getExecutorPoolSize() {
return executorPoolSize;
}
public ClientConfig setExecutorPoolSize(int executorPoolSize) {
this.executorPoolSize = executorPoolSize;
return this;
}
public List<ProxyFactoryConfig> getProxyFactoryConfigs() {
return proxyFactoryConfigs;
}
public ClientConfig setProxyFactoryConfigs(List<ProxyFactoryConfig> proxyFactoryConfigs) {
this.proxyFactoryConfigs = proxyFactoryConfigs;
return this;
}
public SerializationConfig getSerializationConfig() {
return serializationConfig;
}
public ClientConfig setSerializationConfig(SerializationConfig serializationConfig) {
this.serializationConfig = serializationConfig;
return this;
}
private static <T> T lookupByPattern(Map<String, T> map, String name) {
T t = map.get(name);
if (t == null) {
int lastMatchingPoint = -1;
for (Map.Entry<String, T> entry : map.entrySet()) {
String pattern = entry.getKey();
T value = entry.getValue();
final int matchingPoint = getMatchingPoint(name, pattern);
if (matchingPoint > lastMatchingPoint) {
lastMatchingPoint = matchingPoint;
t = value;
}
}
}
return t;
}
/**
     * Higher values mean more specific matching.
*
* @param name
* @param pattern
* @return -1 if name does not match at all, zero or positive otherwise
*/
private static int getMatchingPoint(final String name, final String pattern) {
final int index = pattern.indexOf('*');
if (index == -1) {
return -1;
}
final String firstPart = pattern.substring(0, index);
final int indexFirstPart = name.indexOf(firstPart, 0);
if (indexFirstPart == -1) {
return -1;
}
final String secondPart = pattern.substring(index + 1);
final int indexSecondPart = name.indexOf(secondPart, index + 1);
if (indexSecondPart == -1) {
return -1;
}
return firstPart.length() + secondPart.length();
}
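    // Illustrative sketch (not part of the original class): with configs keyed by
    // the hypothetical patterns "orders-*" and "*", looking up "orders-eu" scores
    // 7 for "orders-*" (firstPart "orders-" matches, secondPart is empty) and 0
    // for "*", so lookupByPattern() returns the "orders-*" config.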
}
| 0true
|
hazelcast-client_src_main_java_com_hazelcast_client_config_ClientConfig.java
|
330 |
public class PluginsInfo implements Streamable, Serializable, ToXContent {
static final class Fields {
static final XContentBuilderString PLUGINS = new XContentBuilderString("plugins");
}
private List<PluginInfo> infos;
public PluginsInfo() {
infos = new ArrayList<PluginInfo>();
}
public PluginsInfo(int size) {
infos = new ArrayList<PluginInfo>(size);
}
public List<PluginInfo> getInfos() {
return infos;
}
public void add(PluginInfo info) {
infos.add(info);
}
public static PluginsInfo readPluginsInfo(StreamInput in) throws IOException {
PluginsInfo infos = new PluginsInfo();
infos.readFrom(in);
return infos;
}
@Override
public void readFrom(StreamInput in) throws IOException {
        int pluginsSize = in.readInt();
        for (int i = 0; i < pluginsSize; i++) {
infos.add(PluginInfo.readPluginInfo(in));
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeInt(infos.size());
for (PluginInfo plugin : infos) {
plugin.writeTo(out);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startArray(Fields.PLUGINS);
for (PluginInfo pluginInfo : infos) {
pluginInfo.toXContent(builder, params);
}
builder.endArray();
return builder;
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_info_PluginsInfo.java
|
94 |
class ReadOnlyTransactionImpl implements Transaction
{
private static final int RS_ENLISTED = 0;
private static final int RS_SUSPENDED = 1;
private static final int RS_DELISTED = 2;
private static final int RS_READONLY = 3; // set in prepare
private final byte globalId[];
private int status = Status.STATUS_ACTIVE;
private boolean active = true;
private final LinkedList<ResourceElement> resourceList =
new LinkedList<>();
private List<Synchronization> syncHooks =
new ArrayList<>();
private final int eventIdentifier;
private final ReadOnlyTxManager txManager;
private final StringLogger logger;
ReadOnlyTransactionImpl( byte[] xidGlobalId, ReadOnlyTxManager txManager, StringLogger logger )
{
this.txManager = txManager;
this.logger = logger;
globalId = xidGlobalId;
eventIdentifier = txManager.getNextEventIdentifier();
}
@Override
public synchronized String toString()
{
StringBuilder txString = new StringBuilder( "Transaction[Status="
+ txManager.getTxStatusAsString( status ) + ",ResourceList=" );
Iterator<ResourceElement> itr = resourceList.iterator();
while ( itr.hasNext() )
{
txString.append( itr.next().toString() );
if ( itr.hasNext() )
{
txString.append( "," );
}
}
return txString.toString();
}
@Override
public synchronized void commit() throws RollbackException,
HeuristicMixedException, IllegalStateException
{
// make sure tx not suspended
txManager.commit();
}
@Override
public synchronized void rollback() throws IllegalStateException,
SystemException
{
// make sure tx not suspended
txManager.rollback();
}
@Override
public synchronized boolean enlistResource( XAResource xaRes )
throws RollbackException, IllegalStateException
{
if ( xaRes == null )
{
throw new IllegalArgumentException( "Null xa resource" );
}
if ( status == Status.STATUS_ACTIVE ||
status == Status.STATUS_PREPARING )
{
try
{
if ( resourceList.size() == 0 )
{
//
byte branchId[] = txManager.getBranchId( xaRes );
Xid xid = new XidImpl( globalId, branchId );
resourceList.add( new ResourceElement( xid, xaRes ) );
xaRes.start( xid, XAResource.TMNOFLAGS );
return true;
}
Xid sameRmXid = null;
for ( ResourceElement re : resourceList )
{
if ( sameRmXid == null && re.getResource().isSameRM( xaRes ) )
{
sameRmXid = re.getXid();
}
if ( xaRes == re.getResource() )
{
if ( re.getStatus() == RS_SUSPENDED )
{
xaRes.start( re.getXid(), XAResource.TMRESUME );
}
else
{
// either enlisted or delisted
// is TMJOIN correct then?
xaRes.start( re.getXid(), XAResource.TMJOIN );
}
re.setStatus( RS_ENLISTED );
return true;
}
}
if ( sameRmXid != null ) // should we join?
{
resourceList.add( new ResourceElement( sameRmXid, xaRes ) );
xaRes.start( sameRmXid, XAResource.TMJOIN );
}
else
// new branch
{
// ResourceElement re = resourceList.getFirst();
byte branchId[] = txManager.getBranchId( xaRes );
Xid xid = new XidImpl( globalId, branchId );
resourceList.add( new ResourceElement( xid, xaRes ) );
xaRes.start( xid, XAResource.TMNOFLAGS );
}
return true;
}
catch ( XAException e )
{
logger.error( "Unable to enlist resource[" + xaRes + "]", e );
status = Status.STATUS_MARKED_ROLLBACK;
return false;
}
}
else if ( status == Status.STATUS_ROLLING_BACK ||
status == Status.STATUS_ROLLEDBACK ||
status == Status.STATUS_MARKED_ROLLBACK )
{
throw new RollbackException( "Tx status is: "
+ txManager.getTxStatusAsString( status ) );
}
throw new IllegalStateException( "Tx status is: "
+ txManager.getTxStatusAsString( status ) );
}
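    // Note (added for clarity, not in the original source): enlistResource() takes one
    // of three paths: resume or join an XAResource already present in resourceList,
    // join an existing branch when isSameRM() reports the same resource manager, or
    // start a brand new branch with a fresh Xid.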
@Override
public synchronized boolean delistResource( XAResource xaRes, int flag )
throws IllegalStateException
{
if ( xaRes == null )
{
throw new IllegalArgumentException( "Null xa resource" );
}
if ( flag != XAResource.TMSUCCESS && flag != XAResource.TMSUSPEND &&
flag != XAResource.TMFAIL )
{
throw new IllegalArgumentException( "Illegal flag: " + flag );
}
ResourceElement re = null;
for ( ResourceElement reMatch : resourceList )
{
if ( reMatch.getResource() == xaRes )
{
re = reMatch;
break;
}
}
if ( re == null )
{
return false;
}
if ( status == Status.STATUS_ACTIVE ||
status == Status.STATUS_MARKED_ROLLBACK )
{
try
{
xaRes.end( re.getXid(), flag );
if ( flag == XAResource.TMSUSPEND || flag == XAResource.TMFAIL )
{
re.setStatus( RS_SUSPENDED );
}
else
{
re.setStatus( RS_DELISTED );
}
return true;
}
catch ( XAException e )
{
logger.error("Unable to delist resource[" + xaRes + "]", e );
status = Status.STATUS_MARKED_ROLLBACK;
return false;
}
}
throw new IllegalStateException( "Tx status is: "
+ txManager.getTxStatusAsString( status ) );
}
// TODO: figure out if this needs synchronization or make status volatile
public int getStatus()
{
return status;
}
void setStatus( int status )
{
this.status = status;
}
private boolean beforeCompletionRunning = false;
private List<Synchronization> syncHooksAdded = new ArrayList<>();
@Override
public synchronized void registerSynchronization( Synchronization s )
throws RollbackException, IllegalStateException
{
if ( s == null )
{
throw new IllegalArgumentException( "Null parameter" );
}
if ( status == Status.STATUS_ACTIVE ||
status == Status.STATUS_PREPARING ||
status == Status.STATUS_MARKED_ROLLBACK )
{
if ( !beforeCompletionRunning )
{
syncHooks.add( s );
}
else
{
// avoid CME if synchronization is added in before completion
syncHooksAdded.add( s );
}
}
else if ( status == Status.STATUS_ROLLING_BACK ||
status == Status.STATUS_ROLLEDBACK )
{
throw new RollbackException( "Tx status is: "
+ txManager.getTxStatusAsString( status ) );
}
else
{
throw new IllegalStateException( "Tx status is: "
+ txManager.getTxStatusAsString( status ) );
}
}
synchronized void doBeforeCompletion()
{
beforeCompletionRunning = true;
try
{
for ( Synchronization s : syncHooks )
{
try
{
s.beforeCompletion();
}
catch ( Throwable t )
{
logger.warn( "Caught exception from tx syncronization[" + s
+ "] beforeCompletion()", t );
}
}
// execute any hooks added since we entered doBeforeCompletion
while ( !syncHooksAdded.isEmpty() )
{
List<Synchronization> addedHooks = syncHooksAdded;
syncHooksAdded = new ArrayList<>();
for ( Synchronization s : addedHooks )
{
s.beforeCompletion();
syncHooks.add( s );
}
}
}
finally
{
beforeCompletionRunning = false;
}
}
synchronized void doAfterCompletion()
{
for ( Synchronization s : syncHooks )
{
try
{
s.afterCompletion( status );
}
catch ( Throwable t )
{
logger.warn( "Caught exception from tx syncronization[" + s
+ "] afterCompletion()", t );
}
}
syncHooks = null; // help gc
}
@Override
public void setRollbackOnly() throws IllegalStateException
{
if ( status == Status.STATUS_ACTIVE ||
status == Status.STATUS_PREPARING ||
status == Status.STATUS_PREPARED ||
status == Status.STATUS_MARKED_ROLLBACK ||
status == Status.STATUS_ROLLING_BACK )
{
status = Status.STATUS_MARKED_ROLLBACK;
}
else
{
throw new IllegalStateException( "Tx status is: "
+ txManager.getTxStatusAsString( status ) );
}
}
@Override
public boolean equals( Object o )
{
if ( !(o instanceof ReadOnlyTransactionImpl) )
{
return false;
}
ReadOnlyTransactionImpl other = (ReadOnlyTransactionImpl) o;
return this.eventIdentifier == other.eventIdentifier;
}
private volatile int hashCode = 0;
@Override
public int hashCode()
{
if ( hashCode == 0 )
{
hashCode = 3217 * eventIdentifier;
}
return hashCode;
}
int getResourceCount()
{
return resourceList.size();
}
void doRollback() throws XAException
{
status = Status.STATUS_ROLLING_BACK;
LinkedList<Xid> rolledBackXids = new LinkedList<>();
for ( ResourceElement re : resourceList )
{
if ( !rolledBackXids.contains( re.getXid() ) )
{
rolledBackXids.add( re.getXid() );
re.getResource().rollback( re.getXid() );
}
}
status = Status.STATUS_ROLLEDBACK;
}
private static class ResourceElement
{
private Xid xid = null;
private XAResource resource = null;
private int status;
ResourceElement( Xid xid, XAResource resource )
{
this.xid = xid;
this.resource = resource;
status = RS_ENLISTED;
}
Xid getXid()
{
return xid;
}
XAResource getResource()
{
return resource;
}
int getStatus()
{
return status;
}
void setStatus( int status )
{
this.status = status;
}
@Override
public String toString()
{
String statusString;
switch ( status )
{
case RS_ENLISTED:
statusString = "ENLISTED";
break;
case RS_DELISTED:
statusString = "DELISTED";
break;
case RS_SUSPENDED:
statusString = "SUSPENDED";
break;
case RS_READONLY:
statusString = "READONLY";
break;
default:
statusString = "UNKNOWN";
}
return "Xid[" + xid + "] XAResource[" + resource + "] Status["
+ statusString + "]";
}
}
synchronized void markAsActive()
{
if ( active )
{
throw new IllegalStateException( "Transaction[" + this
+ "] already active" );
}
active = true;
}
synchronized void markAsSuspended()
{
if ( !active )
{
throw new IllegalStateException( "Transaction[" + this
+ "] already suspended" );
}
active = false;
}
}
| 0true
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_ReadOnlyTransactionImpl.java
|
3,853 |
public class GeohashCellFilter {
public static final String NAME = "geohash_cell";
public static final String NEIGHBORS = "neighbors";
public static final String PRECISION = "precision";
/**
* Create a new geohash filter for a given set of geohashes. In general this method
* returns a boolean filter combining the geohashes OR-wise.
*
* @param context Context of the filter
* @param fieldMapper field mapper for geopoints
* @param geohash mandatory geohash
* @param geohashes optional array of additional geohashes
     * @return a new geohash cell filter combining the given geohashes
*/
public static Filter create(QueryParseContext context, GeoPointFieldMapper fieldMapper, String geohash, @Nullable List<String> geohashes) {
if (fieldMapper.geoHashStringMapper() == null) {
throw new ElasticsearchIllegalArgumentException("geohash filter needs geohash_prefix to be enabled");
}
StringFieldMapper geoHashMapper = fieldMapper.geoHashStringMapper();
if (geohashes == null || geohashes.size() == 0) {
return geoHashMapper.termFilter(geohash, context);
} else {
geohashes.add(geohash);
return geoHashMapper.termsFilter(geohashes, context);
}
}
/**
     * Builder for a geohash filter. It needs the fields <code>field</code> and
     * <code>geohash</code> to be set. The default for neighbor filtering is
     * <code>false</code>.
*/
public static class Builder extends BaseFilterBuilder {
        // we need to store the geohash rather than the corresponding point,
        // because a transformation from a geohash to a point and back to the
        // geohash will extend the accuracy of the hash to max precision,
        // i.e. by filling it up with z's.
private String field;
private String geohash;
private int levels = -1;
private boolean neighbors;
public Builder(String field) {
this(field, null, false);
}
public Builder(String field, GeoPoint point) {
this(field, point.geohash(), false);
}
public Builder(String field, String geohash) {
this(field, geohash, false);
}
public Builder(String field, String geohash, boolean neighbors) {
super();
this.field = field;
this.geohash = geohash;
this.neighbors = neighbors;
}
public Builder point(GeoPoint point) {
this.geohash = point.getGeohash();
return this;
}
public Builder point(double lat, double lon) {
this.geohash = GeoHashUtils.encode(lat, lon);
return this;
}
public Builder geohash(String geohash) {
this.geohash = geohash;
return this;
}
public Builder precision(int levels) {
this.levels = levels;
return this;
}
public Builder precision(String precision) {
double meters = DistanceUnit.parse(precision, DistanceUnit.DEFAULT, DistanceUnit.METERS);
return precision(GeoUtils.geoHashLevelsForPrecision(meters));
}
public Builder neighbors(boolean neighbors) {
this.neighbors = neighbors;
return this;
}
public Builder field(String field) {
this.field = field;
return this;
}
@Override
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(NAME);
if (neighbors) {
builder.field(NEIGHBORS, neighbors);
}
if(levels > 0) {
builder.field(PRECISION, levels);
}
builder.field(field, geohash);
builder.endObject();
}
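        // Illustrative sketch (not part of the original class): for
        // new Builder("pin.location", "u30", true).precision(3), doXContent()
        // emits a filter shaped roughly like
        //
        //   { "geohash_cell" : { "neighbors" : true, "precision" : 3,
        //                        "pin.location" : "u30" } }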
}
public static class Parser implements FilterParser {
@Inject
public Parser() {
}
@Override
public String[] names() {
return new String[]{NAME, Strings.toCamelCase(NAME)};
}
@Override
public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
String fieldName = null;
String geohash = null;
int levels = -1;
boolean neighbors = false;
XContentParser.Token token;
if ((token = parser.currentToken()) != Token.START_OBJECT) {
throw new ElasticsearchParseException(NAME + " must be an object");
}
while ((token = parser.nextToken()) != Token.END_OBJECT) {
if (token == Token.FIELD_NAME) {
String field = parser.text();
if (PRECISION.equals(field)) {
token = parser.nextToken();
if(token == Token.VALUE_NUMBER) {
levels = parser.intValue();
} else if(token == Token.VALUE_STRING) {
double meters = DistanceUnit.parse(parser.text(), DistanceUnit.DEFAULT, DistanceUnit.METERS);
levels = GeoUtils.geoHashLevelsForPrecision(meters);
}
} else if (NEIGHBORS.equals(field)) {
parser.nextToken();
neighbors = parser.booleanValue();
} else {
fieldName = field;
token = parser.nextToken();
if(token == Token.VALUE_STRING) {
                        // A string indicates either a geohash or a lat/lon string
String location = parser.text();
if(location.indexOf(",")>0) {
geohash = GeoPoint.parse(parser).geohash();
} else {
geohash = location;
}
} else {
geohash = GeoPoint.parse(parser).geohash();
}
}
} else {
throw new ElasticsearchParseException("unexpected token [" + token + "]");
}
}
if (geohash == null) {
throw new QueryParsingException(parseContext.index(), "no geohash value provided to geohash_cell filter");
}
MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName);
if (smartMappers == null || !smartMappers.hasMapper()) {
throw new QueryParsingException(parseContext.index(), "failed to find geo_point field [" + fieldName + "]");
}
FieldMapper<?> mapper = smartMappers.mapper();
if (!(mapper instanceof GeoPointFieldMapper)) {
throw new QueryParsingException(parseContext.index(), "field [" + fieldName + "] is not a geo_point field");
}
GeoPointFieldMapper geoMapper = ((GeoPointFieldMapper) mapper);
if (!geoMapper.isEnableGeohashPrefix()) {
throw new QueryParsingException(parseContext.index(), "can't execute geohash_cell on field [" + fieldName + "], geohash_prefix is not enabled");
}
if(levels > 0) {
int len = Math.min(levels, geohash.length());
geohash = geohash.substring(0, len);
}
if (neighbors) {
return create(parseContext, geoMapper, geohash, GeoHashUtils.neighbors(geohash));
} else {
return create(parseContext, geoMapper, geohash, null);
}
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_query_GeohashCellFilter.java
|
269 |
public class ElasticsearchIllegalStateException extends ElasticsearchException {
public ElasticsearchIllegalStateException() {
super(null);
}
public ElasticsearchIllegalStateException(String msg) {
super(msg);
}
public ElasticsearchIllegalStateException(String msg, Throwable cause) {
super(msg, cause);
}
}
| 0true
|
src_main_java_org_elasticsearch_ElasticsearchIllegalStateException.java
|
729 |
private static class FindInvocationsVisitor extends Visitor {
private Declaration declaration;
private final Set<Tree.PositionalArgumentList> posResults =
new HashSet<Tree.PositionalArgumentList>();
private final Set<Tree.NamedArgumentList> namedResults =
new HashSet<Tree.NamedArgumentList>();
Set<Tree.PositionalArgumentList> getPositionalArgLists() {
return posResults;
}
Set<Tree.NamedArgumentList> getNamedArgLists() {
return namedResults;
}
private FindInvocationsVisitor(Declaration declaration) {
this.declaration=declaration;
}
@Override
public void visit(Tree.InvocationExpression that) {
super.visit(that);
Tree.Primary primary = that.getPrimary();
if (primary instanceof Tree.MemberOrTypeExpression) {
if (((Tree.MemberOrTypeExpression) primary).getDeclaration()
.refines(declaration)) {
Tree.PositionalArgumentList pal = that.getPositionalArgumentList();
if (pal!=null) {
posResults.add(pal);
}
Tree.NamedArgumentList nal = that.getNamedArgumentList();
if (nal!=null) {
namedResults.add(nal);
}
}
}
}
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_refactor_ChangeParametersRefactoring.java
|
1,901 |
public class PartitionContainer {
private final MapService mapService;
private final int partitionId;
private final ConcurrentMap<String, RecordStore> maps = new ConcurrentHashMap<String, RecordStore>(1000);
public PartitionContainer(final MapService mapService, final int partitionId) {
this.mapService = mapService;
this.partitionId = partitionId;
}
private final ConstructorFunction<String, RecordStore> recordStoreConstructor
= new ConstructorFunction<String, RecordStore>() {
public RecordStore createNew(String name) {
return new DefaultRecordStore(name, mapService, partitionId);
}
};
public ConcurrentMap<String, RecordStore> getMaps() {
return maps;
}
public int getPartitionId() {
return partitionId;
}
public MapService getMapService() {
return mapService;
}
public RecordStore getRecordStore(String name) {
        return ConcurrencyUtil.getOrPutSynchronized(maps, name, this, recordStoreConstructor);
}
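    // Note (added for clarity, not in the original source): getOrPutSynchronized is
    // assumed to return the existing RecordStore when present and otherwise create
    // one via recordStoreConstructor under a lock keyed on this container, so each
    // map name gets exactly one RecordStore per partition.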
public RecordStore getExistingRecordStore(String mapName) {
return maps.get(mapName);
}
    void destroyMap(String name) {
        RecordStore recordStore = maps.remove(name);
        if (recordStore != null) {
            recordStore.clearPartition();
        }
    }
void clear() {
for (RecordStore recordStore : maps.values()) {
recordStore.clearPartition();
}
maps.clear();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_PartitionContainer.java
|
4,127 |
static class StatsHolder {
public final MeanMetric queryMetric = new MeanMetric();
public final MeanMetric fetchMetric = new MeanMetric();
public final CounterMetric queryCurrent = new CounterMetric();
public final CounterMetric fetchCurrent = new CounterMetric();
public SearchStats.Stats stats() {
return new SearchStats.Stats(
queryMetric.count(), TimeUnit.NANOSECONDS.toMillis(queryMetric.sum()), queryCurrent.count(),
fetchMetric.count(), TimeUnit.NANOSECONDS.toMillis(fetchMetric.sum()), fetchCurrent.count());
}
public long totalCurrent() {
return queryCurrent.count() + fetchCurrent.count();
}
public void clear() {
queryMetric.clear();
fetchMetric.clear();
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_search_stats_ShardSearchService.java
|
256 |
public static interface Provider {
StoreRateLimiting rateLimiting();
}
| 0true
|
src_main_java_org_apache_lucene_store_StoreRateLimiting.java
|
187 |
public abstract class RecursiveAction extends ForkJoinTask<Void> {
private static final long serialVersionUID = 5232453952276485070L;
/**
* The main computation performed by this task.
*/
protected abstract void compute();
/**
* Always returns {@code null}.
*
* @return {@code null} always
*/
public final Void getRawResult() { return null; }
/**
* Requires null completion value.
*/
protected final void setRawResult(Void mustBeNull) { }
/**
* Implements execution conventions for RecursiveActions.
*/
protected final boolean exec() {
compute();
return true;
}
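    // Illustrative sketch (not part of the original class): a minimal resultless
    // task that squares each slot of an array, forking below a hypothetical
    // threshold of 1000 elements.
    //
    //   class SquareTask extends RecursiveAction {
    //       final long[] a; final int lo, hi;
    //       SquareTask(long[] a, int lo, int hi) { this.a = a; this.lo = lo; this.hi = hi; }
    //       protected void compute() {
    //           if (hi - lo < 1000) {
    //               for (int i = lo; i < hi; i++) a[i] *= a[i];
    //           } else {
    //               int mid = (lo + hi) >>> 1;
    //               invokeAll(new SquareTask(a, lo, mid), new SquareTask(a, mid, hi));
    //           }
    //       }
    //   }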
}
| 0true
|
src_main_java_jsr166y_RecursiveAction.java
|
1,885 |
public class EntityForm {
protected static final Log LOG = LogFactory.getLog(EntityForm.class);
public static final String HIDDEN_GROUP = "hiddenGroup";
public static final String MAP_KEY_GROUP = "keyGroup";
public static final String DEFAULT_GROUP_NAME = "Default";
public static final Integer DEFAULT_GROUP_ORDER = 99999;
public static final String DEFAULT_TAB_NAME = "General";
public static final Integer DEFAULT_TAB_ORDER = 100;
protected String id;
protected String idProperty = "id";
protected String ceilingEntityClassname;
protected String entityType;
protected String mainEntityName;
protected String sectionKey;
protected Set<Tab> tabs = new TreeSet<Tab>(new Comparator<Tab>() {
@Override
public int compare(Tab o1, Tab o2) {
return new CompareToBuilder()
.append(o1.getOrder(), o2.getOrder())
.append(o1.getTitle(), o2.getTitle())
.toComparison();
}
});
// This is used to data-bind when this entity form is submitted
protected Map<String, Field> fields = null;
// This is used in cases where there is a sub-form on this page that is dynamically
// rendered based on other values on this entity form. It is keyed by the name of the
// property that drives the dynamic form.
protected Map<String, EntityForm> dynamicForms = new HashMap<String, EntityForm>();
// These values are used when dynamic forms are in play. They are not rendered to the client,
// but they can be used when performing actions on the submit event
protected Map<String, DynamicEntityFormInfo> dynamicFormInfos = new HashMap<String, DynamicEntityFormInfo>();
protected List<EntityFormAction> actions = new ArrayList<EntityFormAction>();
/**
     * @return a flattened, field-name-keyed representation of all of
     * the fields in all of the groups for this form. This map also includes all of the dynamic form
     * fields.
     *
     * Note that if there are collisions between the dynamic form fields and the fields on this form (meaning that they
     * have the same name), then the dynamic form field will be excluded from the map and preference will be given
     * to the first-level entity's field.
*
* @see {@link #getFields(boolean)}
*/
public Map<String, Field> getFields() {
if (fields == null) {
Map<String, Field> map = new LinkedHashMap<String, Field>();
for (Tab tab : tabs) {
for (FieldGroup group : tab.getFieldGroups()) {
for (Field field : group.getFields()) {
map.put(field.getName(), field);
}
}
}
fields = map;
}
for (Entry<String, EntityForm> entry : dynamicForms.entrySet()) {
Map<String, Field> dynamicFormFields = entry.getValue().getFields();
for (Entry<String, Field> dynamicField : dynamicFormFields.entrySet()) {
if (fields.containsKey(dynamicField.getKey())) {
LOG.info("Excluding dynamic field " + dynamicField.getKey() + " as there is already an occurrance in" +
" this entityForm");
} else {
fields.put(dynamicField.getKey(), dynamicField.getValue());
}
}
}
return fields;
}
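    // Illustrative sketch (not part of the original class): if this form has a
    // top-level field named "name" and a dynamic form keyed by a hypothetical
    // "fulfillmentType" property also exposes "name", getFields() keeps the
    // top-level entry, logs the collision, and merges every non-colliding
    // dynamic field into the returned map.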
/**
* Clears out the cached 'fields' variable which is used to render the form on the frontend. Use this method
* if you want to force the entityForm to rebuild itself based on the tabs and groups that have been assigned and
* populated
*/
public void clearFieldsMap() {
fields = null;
}
public List<ListGrid> getAllListGrids() {
List<ListGrid> list = new ArrayList<ListGrid>();
for (Tab tab : tabs) {
for (ListGrid lg : tab.getListGrids()) {
list.add(lg);
}
}
return list;
}
/**
* Convenience method for grabbing a grid by its collection field name. This is very similar to {@link #findField(String)}
* but differs in that this only searches through the sub collections for the current entity
*
* @param collectionFieldName the field name of the collection on the top-level entity
* @return
*/
public ListGrid findListGrid(String collectionFieldName) {
for (ListGrid grid : getAllListGrids()) {
if (grid.getSubCollectionFieldName().equals(collectionFieldName)) {
return grid;
}
}
return null;
}
public Tab findTab(String tabTitle) {
for (Tab tab : tabs) {
if (tab.getTitle() != null && tab.getTitle().equals(tabTitle)) {
return tab;
}
}
return null;
}
public Tab findTabForField(String fieldName) {
fieldName = sanitizeFieldName(fieldName);
for (Tab tab : tabs) {
for (FieldGroup fieldGroup : tab.getFieldGroups()) {
for (Field field : fieldGroup.getFields()) {
if (field.getName().equals(fieldName)) {
return tab;
}
}
}
}
return null;
}
public Field findField(String fieldName) {
fieldName = sanitizeFieldName(fieldName);
for (Tab tab : tabs) {
for (FieldGroup fieldGroup : tab.getFieldGroups()) {
for (Field field : fieldGroup.getFields()) {
if (field.getName().equals(fieldName)) {
return field;
}
}
}
}
return null;
}
/**
 * Since this field name could come from the frontend (where all fields are referenced like fields[name].value),
 * we need to strip that part out to look up the real field name in this entity
* @param fieldName
* @return
*/
public String sanitizeFieldName(String fieldName) {
if (fieldName.contains("[")) {
fieldName = fieldName.substring(fieldName.indexOf('[') + 1, fieldName.indexOf(']'));
}
return fieldName;
}
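    // Example (hypothetical input): a frontend-submitted reference such as
    //   sanitizeFieldName("fields[defaultSku.name].value")
    // strips the wrapper and returns "defaultSku.name", while a plain name like
    //   sanitizeFieldName("defaultSku.name")
    // is returned unchanged.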
public Field removeField(String fieldName) {
Field fieldToRemove = null;
FieldGroup containingGroup = null;
findField: {
for (Tab tab : tabs) {
for (FieldGroup fieldGroup : tab.getFieldGroups()) {
for (Field field : fieldGroup.getFields()) {
if (field.getName().equals(fieldName)) {
fieldToRemove = field;
containingGroup = fieldGroup;
break findField;
}
}
}
}
}
if (fieldToRemove != null) {
containingGroup.removeField(fieldToRemove);
}
if (fields != null) {
fields.remove(fieldName);
}
return fieldToRemove;
}
public void removeTab(Tab tab) {
tabs.remove(tab);
}
public ListGrid removeListGrid(String subCollectionFieldName) {
ListGrid lgToRemove = null;
Tab containingTab = null;
findLg: {
for (Tab tab : tabs) {
for (ListGrid lg : tab.getListGrids()) {
if (subCollectionFieldName.equals(lg.getSubCollectionFieldName())) {
lgToRemove = lg;
containingTab = tab;
break findLg;
}
}
}
}
        if (lgToRemove != null) {
            containingTab.removeListGrid(lgToRemove);
            if (containingTab.getListGrids().size() == 0 && containingTab.getFields().size() == 0) {
                removeTab(containingTab);
            }
        }
        return lgToRemove;
}
public void addHiddenField(Field field) {
if (StringUtils.isBlank(field.getFieldType())) {
field.setFieldType(SupportedFieldType.HIDDEN.toString());
}
addField(field, HIDDEN_GROUP, DEFAULT_GROUP_ORDER, DEFAULT_TAB_NAME, DEFAULT_TAB_ORDER);
}
public void addField(Field field) {
addField(field, DEFAULT_GROUP_NAME, DEFAULT_GROUP_ORDER, DEFAULT_TAB_NAME, DEFAULT_TAB_ORDER);
}
public void addMapKeyField(Field field) {
addField(field, MAP_KEY_GROUP, 0, DEFAULT_TAB_NAME, DEFAULT_TAB_ORDER);
}
public void addField(Field field, String groupName, Integer groupOrder, String tabName, Integer tabOrder) {
// System.out.println(String.format("Adding field [%s] to group [%s] to tab [%s]", field.getName(), groupName, tabName));
groupName = groupName == null ? DEFAULT_GROUP_NAME : groupName;
groupOrder = groupOrder == null ? DEFAULT_GROUP_ORDER : groupOrder;
tabName = tabName == null ? DEFAULT_TAB_NAME : tabName;
tabOrder = tabOrder == null ? DEFAULT_TAB_ORDER : tabOrder;
Tab tab = findTab(tabName);
if (tab == null) {
tab = new Tab();
tab.setTitle(tabName);
tab.setOrder(tabOrder);
tabs.add(tab);
}
FieldGroup fieldGroup = tab.findGroup(groupName);
if (fieldGroup == null) {
fieldGroup = new FieldGroup();
fieldGroup.setTitle(groupName);
fieldGroup.setOrder(groupOrder);
tab.getFieldGroups().add(fieldGroup);
}
fieldGroup.addField(field);
}
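    // Illustrative sketch (hypothetical names and orders): addField lazily
    // creates the tab and the group on first use, so callers never need to
    // pre-register either.
    //
    //   form.addField(field, "Pricing", 2000, "Advanced", 3000);
    //   // -> creates the "Advanced" tab (order 3000) if absent,
    //   //    creates the "Pricing" group (order 2000) inside it if absent,
    //   //    then appends the field to that group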
public void addListGrid(ListGrid listGrid, String tabName, Integer tabOrder) {
Tab tab = findTab(tabName);
if (tab == null) {
tab = new Tab();
tab.setTitle(tabName);
tab.setOrder(tabOrder);
tabs.add(tab);
}
tab.getListGrids().add(listGrid);
}
public void addAction(EntityFormAction action) {
actions.add(action);
}
public void removeAction(EntityFormAction action) {
actions.remove(action);
}
public void removeAllActions() {
actions.clear();
}
public EntityForm getDynamicForm(String name) {
return getDynamicForms().get(name);
}
public void putDynamicForm(String name, EntityForm ef) {
getDynamicForms().put(name, ef);
}
public DynamicEntityFormInfo getDynamicFormInfo(String name) {
return getDynamicFormInfos().get(name);
}
public void putDynamicFormInfo(String name, DynamicEntityFormInfo info) {
getDynamicFormInfos().put(name, info);
}
public void setReadOnly() {
if (getFields() != null) {
for (Entry<String, Field> entry : getFields().entrySet()) {
entry.getValue().setReadOnly(true);
}
}
if (getAllListGrids() != null) {
for (ListGrid lg : getAllListGrids()) {
lg.setReadOnly(true);
}
}
if (getDynamicForms() != null) {
for (Entry<String, EntityForm> entry : getDynamicForms().entrySet()) {
entry.getValue().setReadOnly();
}
}
actions.clear();
}
public List<EntityFormAction> getActions() {
List<EntityFormAction> clonedActions = new ArrayList<EntityFormAction>(actions);
Collections.reverse(clonedActions);
return Collections.unmodifiableList(clonedActions);
}
/* *********************** */
/* GENERIC GETTERS/SETTERS */
/* *********************** */
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getIdProperty() {
return idProperty;
}
public void setIdProperty(String idProperty) {
this.idProperty = idProperty;
}
public String getCeilingEntityClassname() {
return ceilingEntityClassname;
}
public void setCeilingEntityClassname(String ceilingEntityClassname) {
this.ceilingEntityClassname = ceilingEntityClassname;
}
public String getEntityType() {
return entityType;
}
public void setEntityType(String entityType) {
this.entityType = entityType;
}
public String getMainEntityName() {
return StringUtils.isBlank(mainEntityName) ? "" : mainEntityName;
}
public void setMainEntityName(String mainEntityName) {
this.mainEntityName = mainEntityName;
}
public String getSectionKey() {
return sectionKey.charAt(0) == '/' ? sectionKey : '/' + sectionKey;
}
public void setSectionKey(String sectionKey) {
this.sectionKey = sectionKey;
}
public Set<Tab> getTabs() {
return tabs;
}
public void setTabs(Set<Tab> tabs) {
this.tabs = tabs;
}
public Map<String, EntityForm> getDynamicForms() {
return dynamicForms;
}
public void setDynamicForms(Map<String, EntityForm> dynamicForms) {
this.dynamicForms = dynamicForms;
}
public Map<String, DynamicEntityFormInfo> getDynamicFormInfos() {
return dynamicFormInfos;
}
public void setDynamicFormInfos(Map<String, DynamicEntityFormInfo> dynamicFormInfos) {
this.dynamicFormInfos = dynamicFormInfos;
}
public void setActions(List<EntityFormAction> actions) {
this.actions = actions;
}
}
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_web_form_entity_EntityForm.java
|
1,598 |
public class OperationTypes implements Serializable {
private static final long serialVersionUID = 1L;
private OperationType fetchType = OperationType.BASIC;
private OperationType removeType = OperationType.BASIC;
private OperationType addType = OperationType.BASIC;
private OperationType updateType = OperationType.BASIC;
private OperationType inspectType = OperationType.BASIC;
public OperationTypes() {
//do nothing
}
public OperationTypes(OperationType fetchType, OperationType removeType, OperationType addType, OperationType updateType, OperationType inspectType) {
this.removeType = removeType;
this.addType = addType;
this.updateType = updateType;
this.fetchType = fetchType;
this.inspectType = inspectType;
}
/**
* How should the system execute a removal of this item.
* <p/>
* OperationType BASIC will result in the item being removed based on its primary key
* OperationType NONDESTRUCTIVEREMOVE will result in the item being removed from the containing list in the containing entity. This
* is useful when you don't want the item to actually be deleted, but simply removed from the parent collection.
* OperationType ADORNEDTARGETLIST will result in a join structure being deleted (not either of the associated entities).
* org.broadleafcommerce.core.catalog.domain.CategoryProductXrefImpl is an example of a join structure entity.
* OperationType MAP will result in the item being removed from the requisite map in the containing entity.
*
* @return the type of remove operation
*/
public OperationType getRemoveType() {
return removeType;
}
/**
* How should the system execute a removal of this item.
* <p/>
* OperationType BASIC will result in the item being removed based on its primary key
* OperationType NONDESTRUCTIVEREMOVE will result in the item being removed from the containing list in the containing entity. This
 * is useful when you don't want the item to actually be deleted, but simply removed from the parent collection.
* OperationType ADORNEDTARGETLIST will result in a join structure being deleted (not either of the associated entities).
* org.broadleafcommerce.core.catalog.domain.CategoryProductXrefImpl is an example of a join structure entity.
* OperationType MAP will result in the item being removed from the requisite map in the containing entity.
*
* @param removeType
*/
public void setRemoveType(OperationType removeType) {
this.removeType = removeType;
}
/**
* How should the system execute an addition for this item
* <p/>
* OperationType BASIC will result in the item being inserted
* OperationType NONDESTRUCTIVEREMOVE is not supported and will result in the same behavior as BASIC. Note, any foreign key associations in the
* persistence perspective (@see PersistencePerspective) will be honored during the BASIC based add.
* OperationType ADORNEDTARGETLIST will result in a join structure entity being added (not either of the associated entities).
* org.broadleafcommerce.core.catalog.domain.CategoryProductXrefImpl is an example of a join structure entity.
* OperationType MAP will result in the item being added to the requisite map in the containing entity.
*
* @return the type of the add operation
*/
public OperationType getAddType() {
return addType;
}
/**
* How should the system execute an addition for this item
* <p/>
* OperationType BASIC will result in the item being inserted
* OperationType NONDESTRUCTIVEREMOVE is not supported and will result in the same behavior as BASIC. Note, any foreign key associations in the
* persistence perspective (@see PersistencePerspective) will be honored during the BASIC based add.
* OperationType ADORNEDTARGETLIST will result in a join structure entity being added (not either of the associated entities).
* org.broadleafcommerce.core.catalog.domain.CategoryProductXrefImpl is an example of a join structure entity.
* OperationType MAP will result in the item being added to the requisite map in the containing entity.
*
* @param addType
*/
public void setAddType(OperationType addType) {
this.addType = addType;
}
/**
* How should the system execute an update for this item
* <p/>
 * OperationType BASIC will result in the item being updated based on its primary key
* OperationType NONDESTRUCTIVEREMOVE is not supported and will result in the same behavior as BASIC. Note, any foreign key associations in the
* persistence perspective (@see PersistencePerspective) will be honored during the BASIC based update.
* OperationType ADORNEDTARGETLIST will result in a join structure entity being updated (not either of the associated entities).
* org.broadleafcommerce.core.catalog.domain.CategoryProductXrefImpl is an example of a join structure entity.
* OperationType MAP will result in the item being updated to the requisite map in the containing entity.
*
* @return the type of the update operation
*/
public OperationType getUpdateType() {
return updateType;
}
/**
* How should the system execute an update for this item
* <p/>
 * OperationType BASIC will result in the item being updated based on its primary key
* OperationType NONDESTRUCTIVEREMOVE is not supported and will result in the same behavior as BASIC. Note, any foreign key associations in the
* persistence perspective (@see PersistencePerspective) will be honored during the BASIC based update.
* OperationType ADORNEDTARGETLIST will result in a join structure entity being updated (not either of the associated entities).
* org.broadleafcommerce.core.catalog.domain.CategoryProductXrefImpl is an example of a join structure entity.
* OperationType MAP will result in the item being updated to the requisite map in the containing entity.
*
* @param updateType
*/
public void setUpdateType(OperationType updateType) {
this.updateType = updateType;
}
/**
* How should the system execute a fetch
* <p/>
 * OperationType BASIC will result in a search for items having one or more basic property matches
 * OperationType FOREIGNKEY is not supported and will result in the same behavior as BASIC. Note, any foreign key associations will be included
 * as part of the query.
 * OperationType ADORNEDTARGETLIST will result in a search for items that match one of the associations in a join structure. For example, CategoryProductXrefImpl
 * is used in an AdornedTargetList fetch to retrieve all products for a particular category.
 * OperationType MAP will result in retrieval of all map entries for the requisite map in the containing entity.
*
* @return the type of the fetch operation
*/
public OperationType getFetchType() {
return fetchType;
}
/**
* How should the system execute a fetch
* <p/>
 * OperationType BASIC will result in a search for items having one or more basic property matches
 * OperationType FOREIGNKEY is not supported and will result in the same behavior as BASIC. Note, any foreign key associations will be included
 * as part of the query.
 * OperationType ADORNEDTARGETLIST will result in a search for items that match one of the associations in a join structure. For example, CategoryProductXrefImpl
 * is used in an AdornedTargetList fetch to retrieve all products for a particular category.
 * OperationType MAP will result in retrieval of all map entries for the requisite map in the containing entity.
*
* @param fetchType
*/
public void setFetchType(OperationType fetchType) {
this.fetchType = fetchType;
}
/**
* OperationType values are generally ignored for inspect and should be defined as BASIC for consistency in most circumstances.
* This API is meant to support future persistence modules where specialized inspect phase management may be required.
*
* @return the type of the inspect operation
*/
public OperationType getInspectType() {
return inspectType;
}
/**
* OperationType values are generally ignored for inspect and should be defined as BASIC for consistency in most circumstances.
* This API is meant to support future persistence modules where specialized inspect phase management may be required.
*
* @param inspectType
*/
public void setInspectType(OperationType inspectType) {
this.inspectType = inspectType;
}
public OperationTypes cloneOperationTypes() {
OperationTypes operationTypes = new OperationTypes();
operationTypes.setAddType(addType);
operationTypes.setFetchType(fetchType);
operationTypes.setInspectType(inspectType);
operationTypes.setRemoveType(removeType);
operationTypes.setUpdateType(updateType);
return operationTypes;
}
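    // Illustrative sketch (not part of the original class): configuring all CRUD
    // phases of an adorned-target (join structure) collection. The variable name
    // is hypothetical; the enum values come from the javadocs above.
    //
    //   OperationTypes xrefOps = new OperationTypes(
    //           OperationType.ADORNEDTARGETLIST,   // fetch
    //           OperationType.ADORNEDTARGETLIST,   // remove
    //           OperationType.ADORNEDTARGETLIST,   // add
    //           OperationType.ADORNEDTARGETLIST,   // update
    //           OperationType.BASIC);              // inspect is generally BASIC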
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof OperationTypes)) return false;
OperationTypes that = (OperationTypes) o;
if (addType != that.addType) return false;
if (fetchType != that.fetchType) return false;
if (inspectType != that.inspectType) return false;
if (removeType != that.removeType) return false;
if (updateType != that.updateType) return false;
return true;
}
@Override
public int hashCode() {
int result = fetchType != null ? fetchType.hashCode() : 0;
result = 31 * result + (removeType != null ? removeType.hashCode() : 0);
result = 31 * result + (addType != null ? addType.hashCode() : 0);
result = 31 * result + (updateType != null ? updateType.hashCode() : 0);
result = 31 * result + (inspectType != null ? inspectType.hashCode() : 0);
return result;
}
}
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_dto_OperationTypes.java
|
1,761 |
@Component("blPersistenceManager")
@Scope("prototype")
public class PersistenceManagerImpl implements InspectHelper, PersistenceManager, ApplicationContextAware {
private static final Log LOG = LogFactory.getLog(PersistenceManagerImpl.class);
@Resource(name="blDynamicEntityDao")
protected DynamicEntityDao dynamicEntityDao;
@Resource(name="blCustomPersistenceHandlers")
protected List<CustomPersistenceHandler> customPersistenceHandlers = new ArrayList<CustomPersistenceHandler>();
@Resource(name="blCustomPersistenceHandlerFilters")
protected List<CustomPersistenceHandlerFilter> customPersistenceHandlerFilters = new ArrayList<CustomPersistenceHandlerFilter>();
@Resource(name="blTargetEntityManagers")
protected Map<String, String> targetEntityManagers = new HashMap<String, String>();
@Resource(name="blAdminSecurityRemoteService")
protected SecurityVerifier adminRemoteSecurityService;
@Resource(name="blPersistenceModules")
protected PersistenceModule[] modules;
protected TargetModeType targetMode;
protected ApplicationContext applicationContext;
@PostConstruct
public void postConstruct() {
for (PersistenceModule module : modules) {
module.setPersistenceManager(this);
}
}
// public void close() throws Exception {
// //do nothing
// }
@Override
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
this.applicationContext = applicationContext;
}
@Override
public Class<?>[] getAllPolymorphicEntitiesFromCeiling(Class<?> ceilingClass) {
return dynamicEntityDao.getAllPolymorphicEntitiesFromCeiling(ceilingClass);
}
@Override
public Class<?>[] getUpDownInheritance(String testClassname) throws ClassNotFoundException {
return getUpDownInheritance(Class.forName(testClassname));
}
@Override
public Class<?>[] getUpDownInheritance(Class<?> testClass) {
Class<?>[] pEntities = dynamicEntityDao.getAllPolymorphicEntitiesFromCeiling(testClass);
Class<?> topConcreteClass = pEntities[pEntities.length - 1];
List<Class<?>> temp = new ArrayList<Class<?>>(pEntities.length);
temp.addAll(Arrays.asList(pEntities));
Collections.reverse(temp);
boolean eof = false;
while (!eof) {
Class<?> superClass = topConcreteClass.getSuperclass();
PersistentClass persistentClass = dynamicEntityDao.getPersistentClass(superClass.getName());
if (persistentClass == null) {
eof = true;
} else {
temp.add(0, superClass);
topConcreteClass = superClass;
}
}
return temp.toArray(new Class<?>[temp.size()]);
}
@Override
public Class<?>[] getPolymorphicEntities(String ceilingEntityFullyQualifiedClassname) throws ClassNotFoundException {
Class<?>[] entities = getAllPolymorphicEntitiesFromCeiling(Class.forName(ceilingEntityFullyQualifiedClassname));
return entities;
}
@Override
public Map<String, FieldMetadata> getSimpleMergedProperties(String entityName, PersistencePerspective persistencePerspective) {
return dynamicEntityDao.getSimpleMergedProperties(entityName, persistencePerspective);
}
@Override
public ClassMetadata getMergedClassMetadata(final Class<?>[] entities, Map<MergedPropertyType, Map<String, FieldMetadata>> mergedProperties) {
ClassMetadata classMetadata = new ClassMetadata();
classMetadata.setPolymorphicEntities(dynamicEntityDao.getClassTree(entities));
List<Property> propertiesList = new ArrayList<Property>();
for (PersistenceModule module : modules) {
module.extractProperties(entities, mergedProperties, propertiesList);
}
/*
* Insert inherited fields whose order has been specified
*/
for (int i = 0; i < entities.length - 1; i++) {
for (Property myProperty : propertiesList) {
if (myProperty.getMetadata().getInheritedFromType().equals(entities[i].getName()) && myProperty.getMetadata().getOrder() != null) {
for (Property property : propertiesList) {
if (!property.getMetadata().getInheritedFromType().equals(entities[i].getName()) && property.getMetadata().getOrder() != null && property.getMetadata().getOrder() >= myProperty.getMetadata().getOrder()) {
property.getMetadata().setOrder(property.getMetadata().getOrder() + 1);
}
}
}
}
}
Property[] properties = new Property[propertiesList.size()];
properties = propertiesList.toArray(properties);
Arrays.sort(properties, new Comparator<Property>() {
@Override
public int compare(Property o1, Property o2) {
Integer tabOrder1 = o1.getMetadata().getTabOrder() == null ? 99999 : o1.getMetadata().getTabOrder();
Integer tabOrder2 = o2.getMetadata().getTabOrder() == null ? 99999 : o2.getMetadata().getTabOrder();
Integer groupOrder1 = null;
Integer groupOrder2 = null;
if (o1.getMetadata() instanceof BasicFieldMetadata) {
BasicFieldMetadata b1 = (BasicFieldMetadata) o1.getMetadata();
groupOrder1 = b1.getGroupOrder();
}
groupOrder1 = groupOrder1 == null ? 99999 : groupOrder1;
if (o2.getMetadata() instanceof BasicFieldMetadata) {
BasicFieldMetadata b2 = (BasicFieldMetadata) o2.getMetadata();
groupOrder2 = b2.getGroupOrder();
}
groupOrder2 = groupOrder2 == null ? 99999 : groupOrder2;
Integer fieldOrder1 = o1.getMetadata().getOrder() == null ? 99999 : o1.getMetadata().getOrder();
Integer fieldOrder2 = o2.getMetadata().getOrder() == null ? 99999 : o2.getMetadata().getOrder();
String friendlyName1 = o1.getMetadata().getFriendlyName() == null ? "zzzz" : o1.getMetadata().getFriendlyName();
String friendlyName2 = o2.getMetadata().getFriendlyName() == null ? "zzzz" : o2.getMetadata().getFriendlyName();
String name1 = o1.getName() == null ? "zzzzz" : o1.getName();
String name2 = o2.getName() == null ? "zzzzz" : o2.getName();
return new CompareToBuilder()
.append(tabOrder1, tabOrder2)
.append(groupOrder1, groupOrder2)
.append(fieldOrder1, fieldOrder2)
.append(friendlyName1, friendlyName2)
.append(name1, name2)
.toComparison();
}
});
classMetadata.setProperties(properties);
classMetadata.setCurrencyCode(Money.defaultCurrency().getCurrencyCode());
return classMetadata;
}
@Override
public DynamicResultSet inspect(PersistencePackage persistencePackage) throws ServiceException, ClassNotFoundException {
// check to see if there is a custom handler registered
for (CustomPersistenceHandler handler : getCustomPersistenceHandlers()) {
if (handler.canHandleInspect(persistencePackage)) {
if (!handler.willHandleSecurity(persistencePackage)) {
adminRemoteSecurityService.securityCheck(persistencePackage.getCeilingEntityFullyQualifiedClassname(), EntityOperationType.INSPECT);
}
DynamicResultSet results = handler.inspect(persistencePackage, dynamicEntityDao, this);
return results;
}
}
adminRemoteSecurityService.securityCheck(persistencePackage.getCeilingEntityFullyQualifiedClassname(), EntityOperationType.INSPECT);
Class<?>[] entities = getPolymorphicEntities(persistencePackage.getCeilingEntityFullyQualifiedClassname());
Map<MergedPropertyType, Map<String, FieldMetadata>> allMergedProperties = new HashMap<MergedPropertyType, Map<String, FieldMetadata>>();
for (PersistenceModule module : modules) {
module.updateMergedProperties(persistencePackage, allMergedProperties);
}
ClassMetadata mergedMetadata = getMergedClassMetadata(entities, allMergedProperties);
DynamicResultSet results = new DynamicResultSet(mergedMetadata);
return results;
}
@Override
public DynamicResultSet fetch(PersistencePackage persistencePackage, CriteriaTransferObject cto) throws ServiceException {
//check to see if there is a custom handler registered
for (CustomPersistenceHandler handler : getCustomPersistenceHandlers()) {
if (handler.canHandleFetch(persistencePackage)) {
if (!handler.willHandleSecurity(persistencePackage)) {
adminRemoteSecurityService.securityCheck(persistencePackage.getCeilingEntityFullyQualifiedClassname(), EntityOperationType.FETCH);
}
DynamicResultSet results = handler.fetch(persistencePackage, cto, dynamicEntityDao, (RecordHelper) getCompatibleModule(OperationType.BASIC));
return postFetch(results, persistencePackage, cto);
}
}
adminRemoteSecurityService.securityCheck(persistencePackage.getCeilingEntityFullyQualifiedClassname(), EntityOperationType.FETCH);
PersistenceModule myModule = getCompatibleModule(persistencePackage.getPersistencePerspective().getOperationTypes().getFetchType());
try {
return postFetch(myModule.fetch(persistencePackage, cto), persistencePackage, cto);
} catch (ServiceException e) {
if (e.getCause() instanceof NoPossibleResultsException) {
DynamicResultSet drs = new DynamicResultSet(null, new Entity[] {}, 0);
return postFetch(drs, persistencePackage, cto);
}
throw e;
}
}
protected DynamicResultSet postFetch(DynamicResultSet resultSet, PersistencePackage persistencePackage,
CriteriaTransferObject cto)
throws ServiceException {
// Expose the start index so that we can utilize when building the UI
resultSet.setStartIndex(cto.getFirstResult());
resultSet.setPageSize(cto.getMaxResults());
return resultSet;
}
@Override
public Entity add(PersistencePackage persistencePackage) throws ServiceException {
//check to see if there is a custom handler registered
//execute the root PersistencePackage
Entity response;
checkRoot: {
//if there is a validation exception in the root check, let it bubble, as we need a valid, persisted
//entity to execute the subPackage code later
for (CustomPersistenceHandler handler : getCustomPersistenceHandlers()) {
if (handler.canHandleAdd(persistencePackage)) {
if (!handler.willHandleSecurity(persistencePackage)) {
adminRemoteSecurityService.securityCheck(persistencePackage.getCeilingEntityFullyQualifiedClassname(), EntityOperationType.ADD);
}
response = handler.add(persistencePackage, dynamicEntityDao, (RecordHelper) getCompatibleModule(OperationType.BASIC));
break checkRoot;
}
}
adminRemoteSecurityService.securityCheck(persistencePackage.getCeilingEntityFullyQualifiedClassname(), EntityOperationType.ADD);
PersistenceModule myModule = getCompatibleModule(persistencePackage.getPersistencePerspective().getOperationTypes().getAddType());
response = myModule.add(persistencePackage);
}
if (!MapUtils.isEmpty(persistencePackage.getSubPackages())) {
// Once the entity has been saved, we can utilize its id for the subsequent dynamic forms
Class<?> entityClass;
try {
entityClass = Class.forName(response.getType()[0]);
} catch (ClassNotFoundException e) {
throw new ServiceException(e);
}
Map<String, Object> idMetadata = getDynamicEntityDao().getIdMetadata(entityClass);
String idProperty = (String) idMetadata.get("name");
String idVal = response.findProperty(idProperty).getValue();
Map<String, List<String>> subPackageValidationErrors = new HashMap<String, List<String>>();
for (Map.Entry<String,PersistencePackage> subPackage : persistencePackage.getSubPackages().entrySet()) {
Entity subResponse;
try {
subPackage.getValue().setCustomCriteria(new String[]{subPackage.getValue().getCustomCriteria()[0], idVal});
//Run through any subPackages -- add up any validation errors
checkHandler: {
for (CustomPersistenceHandler handler : getCustomPersistenceHandlers()) {
if (handler.canHandleAdd(subPackage.getValue())) {
if (!handler.willHandleSecurity(subPackage.getValue())) {
adminRemoteSecurityService.securityCheck(subPackage.getValue().getCeilingEntityFullyQualifiedClassname(), EntityOperationType.ADD);
}
subResponse = handler.add(subPackage.getValue(), dynamicEntityDao, (RecordHelper) getCompatibleModule(OperationType.BASIC));
subPackage.getValue().setEntity(subResponse);
break checkHandler;
}
}
adminRemoteSecurityService.securityCheck(subPackage.getValue().getCeilingEntityFullyQualifiedClassname(), EntityOperationType.ADD);
PersistenceModule subModule = getCompatibleModule(subPackage.getValue().getPersistencePerspective().getOperationTypes().getAddType());
                        subResponse = subModule.add(subPackage.getValue());
subPackage.getValue().setEntity(subResponse);
}
} catch (ValidationException e) {
subPackage.getValue().setEntity(e.getEntity());
}
}
//Build up validation errors in all of the subpackages, even those that might not have thrown ValidationExceptions
for (Map.Entry<String, PersistencePackage> subPackage : persistencePackage.getSubPackages().entrySet()) {
for (Map.Entry<String, List<String>> error : subPackage.getValue().getEntity().getValidationErrors().entrySet()) {
subPackageValidationErrors.put(subPackage.getKey() + DynamicEntityFormInfo.FIELD_SEPARATOR + error.getKey(), error.getValue());
}
}
response.getValidationErrors().putAll(subPackageValidationErrors);
}
if (response.isValidationFailure()) {
throw new ValidationException(response, "The entity has failed validation");
}
return postAdd(response, persistencePackage);
}
protected Entity postAdd(Entity entity, PersistencePackage persistencePackage) throws ServiceException {
//do nothing
return entity;
}
@Override
public Entity update(PersistencePackage persistencePackage) throws ServiceException {
//check to see if there is a custom handler registered
//execute the root PersistencePackage
Entity response;
try {
checkRoot: {
for (CustomPersistenceHandler handler : getCustomPersistenceHandlers()) {
if (handler.canHandleUpdate(persistencePackage)) {
if (!handler.willHandleSecurity(persistencePackage)) {
adminRemoteSecurityService.securityCheck(persistencePackage.getCeilingEntityFullyQualifiedClassname(), EntityOperationType.UPDATE);
}
response = handler.update(persistencePackage, dynamicEntityDao, (RecordHelper) getCompatibleModule(OperationType.BASIC));
break checkRoot;
}
}
adminRemoteSecurityService.securityCheck(persistencePackage.getCeilingEntityFullyQualifiedClassname(), EntityOperationType.UPDATE);
PersistenceModule myModule = getCompatibleModule(persistencePackage.getPersistencePerspective().getOperationTypes().getUpdateType());
response = myModule.update(persistencePackage);
}
} catch (ValidationException e) {
response = e.getEntity();
}
Map<String, List<String>> subPackageValidationErrors = new HashMap<String, List<String>>();
for (Map.Entry<String,PersistencePackage> subPackage : persistencePackage.getSubPackages().entrySet()) {
try {
//Run through any subPackages -- add up any validation errors
checkHandler: {
for (CustomPersistenceHandler handler : getCustomPersistenceHandlers()) {
if (handler.canHandleUpdate(subPackage.getValue())) {
if (!handler.willHandleSecurity(subPackage.getValue())) {
adminRemoteSecurityService.securityCheck(subPackage.getValue().getCeilingEntityFullyQualifiedClassname(), EntityOperationType.UPDATE);
}
Entity subResponse = handler.update(subPackage.getValue(), dynamicEntityDao, (RecordHelper) getCompatibleModule(OperationType.BASIC));
subPackage.getValue().setEntity(subResponse);
break checkHandler;
}
}
adminRemoteSecurityService.securityCheck(subPackage.getValue().getCeilingEntityFullyQualifiedClassname(), EntityOperationType.UPDATE);
PersistenceModule subModule = getCompatibleModule(subPackage.getValue().getPersistencePerspective().getOperationTypes().getUpdateType());
                    Entity subResponse = subModule.update(subPackage.getValue());
subPackage.getValue().setEntity(subResponse);
}
} catch (ValidationException e) {
subPackage.getValue().setEntity(e.getEntity());
}
}
//Build up validation errors in all of the subpackages, even those that might not have thrown ValidationExceptions
for (Map.Entry<String, PersistencePackage> subPackage : persistencePackage.getSubPackages().entrySet()) {
for (Map.Entry<String, List<String>> error : subPackage.getValue().getEntity().getValidationErrors().entrySet()) {
subPackageValidationErrors.put(subPackage.getKey() + DynamicEntityFormInfo.FIELD_SEPARATOR + error.getKey(), error.getValue());
}
}
response.getValidationErrors().putAll(subPackageValidationErrors);
if (response.isValidationFailure()) {
throw new ValidationException(response, "The entity has failed validation");
}
return postUpdate(response, persistencePackage);
}
protected Entity postUpdate(Entity entity, PersistencePackage persistencePackage) throws ServiceException {
//do nothing
return entity;
}
@Override
public void remove(PersistencePackage persistencePackage) throws ServiceException {
//check to see if there is a custom handler registered
for (CustomPersistenceHandler handler : getCustomPersistenceHandlers()) {
if (handler.canHandleRemove(persistencePackage)) {
if (!handler.willHandleSecurity(persistencePackage)) {
adminRemoteSecurityService.securityCheck(persistencePackage.getCeilingEntityFullyQualifiedClassname(), EntityOperationType.REMOVE);
}
handler.remove(persistencePackage, dynamicEntityDao, (RecordHelper) getCompatibleModule(OperationType.BASIC));
return;
}
}
adminRemoteSecurityService.securityCheck(persistencePackage.getCeilingEntityFullyQualifiedClassname(), EntityOperationType.REMOVE);
PersistenceModule myModule = getCompatibleModule(persistencePackage.getPersistencePerspective().getOperationTypes().getRemoveType());
myModule.remove(persistencePackage);
}
@Override
public PersistenceModule getCompatibleModule(OperationType operationType) {
PersistenceModule myModule = null;
for (PersistenceModule module : modules) {
if (module.isCompatible(operationType)) {
myModule = module;
break;
}
}
if (myModule == null) {
LOG.error("Unable to find a compatible remote service module for the operation type: " + operationType);
throw new RuntimeException("Unable to find a compatible remote service module for the operation type: " + operationType);
}
return myModule;
}
@Override
public DynamicEntityDao getDynamicEntityDao() {
return dynamicEntityDao;
}
@Override
public void setDynamicEntityDao(DynamicEntityDao dynamicEntityDao) {
this.dynamicEntityDao = dynamicEntityDao;
}
@Override
public Map<String, String> getTargetEntityManagers() {
return targetEntityManagers;
}
@Override
public void setTargetEntityManagers(Map<String, String> targetEntityManagers) {
this.targetEntityManagers = targetEntityManagers;
}
@Override
public TargetModeType getTargetMode() {
return targetMode;
}
@Override
public void setTargetMode(TargetModeType targetMode) {
String targetManagerRef = targetEntityManagers.get(targetMode.getType());
EntityManager targetManager = (EntityManager) applicationContext.getBean(targetManagerRef);
if (targetManager == null) {
throw new RuntimeException("Unable to find a target entity manager registered with the key: " + targetMode + ". Did you add an entity manager with this key to the targetEntityManagers property?");
}
dynamicEntityDao.setStandardEntityManager(targetManager);
this.targetMode = targetMode;
}
@Override
public List<CustomPersistenceHandler> getCustomPersistenceHandlers() {
List<CustomPersistenceHandler> cloned = new ArrayList<CustomPersistenceHandler>();
cloned.addAll(customPersistenceHandlers);
if (getCustomPersistenceHandlerFilters() != null) {
for (CustomPersistenceHandlerFilter filter : getCustomPersistenceHandlerFilters()) {
Iterator<CustomPersistenceHandler> itr = cloned.iterator();
while (itr.hasNext()) {
CustomPersistenceHandler handler = itr.next();
if (!filter.shouldUseHandler(handler.getClass().getName())) {
itr.remove();
}
}
}
}
Collections.sort(cloned, new Comparator<CustomPersistenceHandler>() {
@Override
public int compare(CustomPersistenceHandler o1, CustomPersistenceHandler o2) {
                return Integer.compare(o1.getOrder(), o2.getOrder());
}
});
return cloned;
}
@Override
public void setCustomPersistenceHandlers(List<CustomPersistenceHandler> customPersistenceHandlers) {
this.customPersistenceHandlers = customPersistenceHandlers;
}
public SecurityVerifier getAdminRemoteSecurityService() {
return adminRemoteSecurityService;
}
public void setAdminRemoteSecurityService(AdminSecurityServiceRemote adminRemoteSecurityService) {
this.adminRemoteSecurityService = adminRemoteSecurityService;
}
public List<CustomPersistenceHandlerFilter> getCustomPersistenceHandlerFilters() {
return customPersistenceHandlerFilters;
}
public void setCustomPersistenceHandlerFilters(List<CustomPersistenceHandlerFilter> customPersistenceHandlerFilters) {
this.customPersistenceHandlerFilters = customPersistenceHandlerFilters;
}
public PersistenceModule[] getModules() {
return modules;
}
public void setModules(PersistenceModule[] modules) {
this.modules = modules;
}
}
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_service_persistence_PersistenceManagerImpl.java
|
919 |
final class LockResourceImpl implements DataSerializable, LockResource {
private Data key;
private String owner;
private long threadId;
private int lockCount;
private long expirationTime = -1;
private long acquireTime = -1L;
private boolean transactional;
private Map<String, ConditionInfo> conditions;
private List<ConditionKey> signalKeys;
private List<AwaitOperation> expiredAwaitOps;
private LockStoreImpl lockStore;
public LockResourceImpl() {
}
public LockResourceImpl(Data key, LockStoreImpl lockStore) {
this.key = key;
this.lockStore = lockStore;
}
@Override
public Data getKey() {
return key;
}
@Override
public boolean isLocked() {
return lockCount > 0;
}
@Override
public boolean isLockedBy(String owner, long threadId) {
return (this.threadId == threadId && owner != null && owner.equals(this.owner));
}
boolean lock(String owner, long threadId, long leaseTime) {
return lock(owner, threadId, leaseTime, false);
}
boolean lock(String owner, long threadId, long leaseTime, boolean transactional) {
if (lockCount == 0) {
this.owner = owner;
this.threadId = threadId;
lockCount++;
acquireTime = Clock.currentTimeMillis();
setExpirationTime(leaseTime);
this.transactional = transactional;
return true;
} else if (isLockedBy(owner, threadId)) {
lockCount++;
setExpirationTime(leaseTime);
this.transactional = transactional;
return true;
}
this.transactional = false;
return false;
}
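    // Reentrancy sketch (hypothetical owner/thread values): the same owner and
    // thread may lock repeatedly, bumping lockCount; a different caller is refused.
    //
    //   lock("member-A", 1L, -1);   // true,  lockCount == 1
    //   lock("member-A", 1L, -1);   // true,  lockCount == 2 (reentrant)
    //   lock("member-B", 7L, -1);   // false, held by member-A / thread 1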
boolean extendLeaseTime(String caller, long threadId, long leaseTime) {
if (!isLockedBy(caller, threadId)) {
return false;
}
if (expirationTime < Long.MAX_VALUE) {
setExpirationTime(expirationTime - Clock.currentTimeMillis() + leaseTime);
lockStore.scheduleEviction(key, leaseTime);
}
return true;
}
private void setExpirationTime(long leaseTime) {
if (leaseTime < 0) {
expirationTime = Long.MAX_VALUE;
} else {
expirationTime = Clock.currentTimeMillis() + leaseTime;
if (expirationTime < 0) {
expirationTime = Long.MAX_VALUE;
} else {
lockStore.scheduleEviction(key, leaseTime);
}
}
}
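    // Note on the arithmetic above (author intent inferred from the code): a
    // negative leaseTime means "no expiry", and a positive leaseTime whose sum
    // with the current time overflows Long is clamped to Long.MAX_VALUE, in
    // which case no eviction is scheduled.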
boolean unlock(String owner, long threadId) {
if (lockCount == 0) {
return false;
}
if (!isLockedBy(owner, threadId)) {
return false;
}
lockCount--;
if (lockCount == 0) {
clear();
}
return true;
}
boolean canAcquireLock(String caller, long threadId) {
        return lockCount == 0 || (getThreadId() == threadId && getOwner().equals(caller));
}
boolean addAwait(String conditionId, String caller, long threadId) {
if (conditions == null) {
conditions = new HashMap<String, ConditionInfo>(2);
}
ConditionInfo condition = conditions.get(conditionId);
if (condition == null) {
condition = new ConditionInfo(conditionId);
conditions.put(conditionId, condition);
}
return condition.addWaiter(caller, threadId);
}
boolean removeAwait(String conditionId, String caller, long threadId) {
if (conditions == null) {
return false;
}
ConditionInfo condition = conditions.get(conditionId);
if (condition == null) {
return false;
}
boolean ok = condition.removeWaiter(caller, threadId);
if (condition.getAwaitCount() == 0) {
conditions.remove(conditionId);
}
return ok;
}
boolean startAwaiting(String conditionId, String caller, long threadId) {
if (conditions == null) {
return false;
}
ConditionInfo condition = conditions.get(conditionId);
if (condition == null) {
return false;
}
return condition.startWaiter(caller, threadId);
}
int getAwaitCount(String conditionId) {
if (conditions == null) {
return 0;
}
ConditionInfo condition = conditions.get(conditionId);
if (condition == null) {
return 0;
} else {
return condition.getAwaitCount();
}
}
void registerSignalKey(ConditionKey conditionKey) {
if (signalKeys == null) {
signalKeys = new LinkedList<ConditionKey>();
}
signalKeys.add(conditionKey);
}
ConditionKey getSignalKey() {
List<ConditionKey> keys = signalKeys;
if (isNullOrEmpty(keys)) {
return null;
}
return keys.iterator().next();
}
void removeSignalKey(ConditionKey conditionKey) {
if (signalKeys != null) {
signalKeys.remove(conditionKey);
}
}
void registerExpiredAwaitOp(AwaitOperation awaitResponse) {
if (expiredAwaitOps == null) {
expiredAwaitOps = new LinkedList<AwaitOperation>();
}
expiredAwaitOps.add(awaitResponse);
}
AwaitOperation pollExpiredAwaitOp() {
List<AwaitOperation> ops = expiredAwaitOps;
if (isNullOrEmpty(ops)) {
return null;
}
Iterator<AwaitOperation> iterator = ops.iterator();
AwaitOperation awaitResponse = iterator.next();
iterator.remove();
return awaitResponse;
}
void clear() {
threadId = 0;
lockCount = 0;
owner = null;
expirationTime = 0;
acquireTime = -1L;
cancelEviction();
}
void cancelEviction() {
lockStore.cancelEviction(key);
}
boolean isRemovable() {
return !isLocked()
&& isNullOrEmpty(conditions)
&& isNullOrEmpty(expiredAwaitOps);
}
@Override
public String getOwner() {
return owner;
}
@Override
public boolean isTransactional() {
return transactional;
}
@Override
public long getThreadId() {
return threadId;
}
@Override
public int getLockCount() {
return lockCount;
}
@Override
public long getAcquireTime() {
return acquireTime;
}
@Override
public long getRemainingLeaseTime() {
long now = Clock.currentTimeMillis();
if (now >= expirationTime) {
return 0;
}
return expirationTime - now;
}
void setLockStore(LockStoreImpl lockStore) {
this.lockStore = lockStore;
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
key.writeData(out);
out.writeUTF(owner);
out.writeLong(threadId);
out.writeInt(lockCount);
out.writeLong(expirationTime);
out.writeLong(acquireTime);
out.writeBoolean(transactional);
int conditionCount = getConditionCount();
out.writeInt(conditionCount);
if (conditionCount > 0) {
for (ConditionInfo condition : conditions.values()) {
condition.writeData(out);
}
}
int signalCount = getSignalCount();
out.writeInt(signalCount);
if (signalCount > 0) {
for (ConditionKey signalKey : signalKeys) {
out.writeUTF(signalKey.getObjectName());
out.writeUTF(signalKey.getConditionId());
}
}
int expiredAwaitOpsCount = getExpiredAwaitsOpsCount();
out.writeInt(expiredAwaitOpsCount);
if (expiredAwaitOpsCount > 0) {
for (AwaitOperation op : expiredAwaitOps) {
op.writeData(out);
}
}
}
private int getExpiredAwaitsOpsCount() {
return expiredAwaitOps == null ? 0 : expiredAwaitOps.size();
}
private int getSignalCount() {
return signalKeys == null ? 0 : signalKeys.size();
}
private int getConditionCount() {
return conditions == null ? 0 : conditions.size();
}
@Override
public void readData(ObjectDataInput in) throws IOException {
key = new Data();
key.readData(in);
owner = in.readUTF();
threadId = in.readLong();
lockCount = in.readInt();
expirationTime = in.readLong();
acquireTime = in.readLong();
transactional = in.readBoolean();
int len = in.readInt();
if (len > 0) {
conditions = new HashMap<String, ConditionInfo>(len);
for (int i = 0; i < len; i++) {
ConditionInfo condition = new ConditionInfo();
condition.readData(in);
conditions.put(condition.getConditionId(), condition);
}
}
len = in.readInt();
if (len > 0) {
signalKeys = new ArrayList<ConditionKey>(len);
for (int i = 0; i < len; i++) {
signalKeys.add(new ConditionKey(in.readUTF(), key, in.readUTF()));
}
}
len = in.readInt();
if (len > 0) {
expiredAwaitOps = new ArrayList<AwaitOperation>(len);
for (int i = 0; i < len; i++) {
AwaitOperation op = new AwaitOperation();
op.readData(in);
expiredAwaitOps.add(op);
}
}
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
LockResourceImpl that = (LockResourceImpl) o;
if (threadId != that.threadId) {
return false;
}
if (owner != null ? !owner.equals(that.owner) : that.owner != null) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = owner != null ? owner.hashCode() : 0;
result = 31 * result + (int) (threadId ^ (threadId >>> 32));
return result;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("LockResource");
sb.append("{owner='").append(owner).append('\'');
sb.append(", threadId=").append(threadId);
sb.append(", lockCount=").append(lockCount);
sb.append(", acquireTime=").append(acquireTime);
sb.append(", expirationTime=").append(expirationTime);
sb.append('}');
return sb.toString();
}
private static boolean isNullOrEmpty(Collection c) {
return c == null || c.isEmpty();
}
private static boolean isNullOrEmpty(Map m) {
return m == null || m.isEmpty();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_concurrent_lock_LockResourceImpl.java
|
873 |
public class FulfillmentGroupOfferPotential {
protected Offer offer;
protected Money totalSavings = new Money(BankersRounding.zeroAmount());
protected int priority;
public Offer getOffer() {
return offer;
}
public void setOffer(Offer offer) {
this.offer = offer;
}
public Money getTotalSavings() {
return totalSavings;
}
public void setTotalSavings(Money totalSavings) {
this.totalSavings = totalSavings;
}
public int getPriority() {
return priority;
}
public void setPriority(int priority) {
this.priority = priority;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((offer == null) ? 0 : offer.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
FulfillmentGroupOfferPotential other = (FulfillmentGroupOfferPotential) obj;
if (offer == null) {
if (other.offer != null) {
return false;
}
} else if (!offer.equals(other.offer)) {
return false;
}
return true;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_service_discount_FulfillmentGroupOfferPotential.java
|
485 |
static final class Fields {
static final XContentBuilderString TOKENS = new XContentBuilderString("tokens");
static final XContentBuilderString TOKEN = new XContentBuilderString("token");
static final XContentBuilderString START_OFFSET = new XContentBuilderString("start_offset");
static final XContentBuilderString END_OFFSET = new XContentBuilderString("end_offset");
static final XContentBuilderString TYPE = new XContentBuilderString("type");
static final XContentBuilderString POSITION = new XContentBuilderString("position");
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_analyze_AnalyzeResponse.java
|
219 |
public abstract class OrientConsole extends OConsoleApplication {
public OrientConsole(String[] args) {
super(args);
}
@Override
protected void onException(Throwable e) {
Throwable current = e;
while (current != null) {
err.print("\nError: " + current.toString());
current = current.getCause();
}
}
@Override
protected void onBefore() {
printApplicationInfo();
}
protected void printApplicationInfo() {
}
@Override
protected void onAfter() {
out.println();
}
@Override
public void help() {
super.help();
}
protected String format(final String iValue, final int iMaxSize) {
if (iValue == null)
return null;
if (iValue.length() > iMaxSize)
return iValue.substring(0, iMaxSize - 3) + "...";
return iValue;
}
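    // Truncation example (hypothetical input): with iMaxSize = 5,
    //   format("abcdefgh", 5) -> "ab..."   (2 chars kept + 3-char ellipsis)
    // so the result never exceeds iMaxSize characters.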
}
| 0true
|
tools_src_main_java_com_orientechnologies_orient_console_OrientConsole.java
|
3,463 |
public class ShardGetService extends AbstractIndexShardComponent {
private final ScriptService scriptService;
private final MapperService mapperService;
private final IndexFieldDataService fieldDataService;
private IndexShard indexShard;
private final MeanMetric existsMetric = new MeanMetric();
private final MeanMetric missingMetric = new MeanMetric();
private final CounterMetric currentMetric = new CounterMetric();
@Inject
public ShardGetService(ShardId shardId, @IndexSettings Settings indexSettings, ScriptService scriptService,
MapperService mapperService, IndexFieldDataService fieldDataService) {
super(shardId, indexSettings);
this.scriptService = scriptService;
this.mapperService = mapperService;
this.fieldDataService = fieldDataService;
}
public GetStats stats() {
return new GetStats(existsMetric.count(), TimeUnit.NANOSECONDS.toMillis(existsMetric.sum()), missingMetric.count(), TimeUnit.NANOSECONDS.toMillis(missingMetric.sum()), currentMetric.count());
}
// sadly, to overcome cyclic dep, we need to do this and inject it ourselves...
public ShardGetService setIndexShard(IndexShard indexShard) {
this.indexShard = indexShard;
return this;
}
public GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext)
throws ElasticsearchException {
currentMetric.inc();
try {
long now = System.nanoTime();
GetResult getResult = innerGet(type, id, gFields, realtime, version, versionType, fetchSourceContext);
if (getResult.isExists()) {
existsMetric.inc(System.nanoTime() - now);
} else {
missingMetric.inc(System.nanoTime() - now);
}
return getResult;
} finally {
currentMetric.dec();
}
}
/**
* Returns {@link GetResult} based on the specified {@link Engine.GetResult} argument.
* This method basically loads specified fields for the associated document in the engineGetResult.
 * This method loads the fields from the Lucene index and not from the transaction log, and therefore isn't realtime.
* <p/>
* Note: Call <b>must</b> release engine searcher associated with engineGetResult!
*/
public GetResult get(Engine.GetResult engineGetResult, String id, String type, String[] fields, FetchSourceContext fetchSourceContext) {
if (!engineGetResult.exists()) {
return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
}
currentMetric.inc();
try {
long now = System.nanoTime();
DocumentMapper docMapper = mapperService.documentMapper(type);
if (docMapper == null) {
missingMetric.inc(System.nanoTime() - now);
return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
}
fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, fields);
GetResult getResult = innerGetLoadFromStoredFields(type, id, fields, fetchSourceContext, engineGetResult, docMapper);
if (getResult.isExists()) {
existsMetric.inc(System.nanoTime() - now);
} else {
missingMetric.inc(System.nanoTime() - now); // This shouldn't happen...
}
return getResult;
} finally {
currentMetric.dec();
}
}
/**
* decides what needs to be done based on the request input and always returns a valid non-null FetchSourceContext
*/
protected FetchSourceContext normalizeFetchSourceContent(@Nullable FetchSourceContext context, @Nullable String[] gFields) {
if (context != null) {
return context;
}
if (gFields == null) {
return FetchSourceContext.FETCH_SOURCE;
}
for (String field : gFields) {
if (SourceFieldMapper.NAME.equals(field)) {
return FetchSourceContext.FETCH_SOURCE;
}
}
return FetchSourceContext.DO_NOT_FETCH_SOURCE;
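    // Decision sketch for the normalization above (inferred from the code):
    //   explicit context                      -> returned as-is
    //   no context, no fields                 -> FETCH_SOURCE (fetch _source by default)
    //   no context, fields include "_source"  -> FETCH_SOURCE
    //   no context, other fields only         -> DO_NOT_FETCH_SOURCE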
}
public GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext) throws ElasticsearchException {
fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, gFields);
boolean loadSource = (gFields != null && gFields.length > 0) || fetchSourceContext.fetchSource();
Engine.GetResult get = null;
if (type == null || type.equals("_all")) {
for (String typeX : mapperService.types()) {
get = indexShard.get(new Engine.Get(realtime, new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(typeX, id)))
.loadSource(loadSource).version(version).versionType(versionType));
if (get.exists()) {
type = typeX;
break;
} else {
get.release();
}
}
if (get == null) {
return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
}
if (!get.exists()) {
                // no need to release here; non-existent results were already released in the loop above
return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
}
} else {
get = indexShard.get(new Engine.Get(realtime, new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(type, id)))
.loadSource(loadSource).version(version).versionType(versionType));
if (!get.exists()) {
get.release();
return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
}
}
DocumentMapper docMapper = mapperService.documentMapper(type);
if (docMapper == null) {
get.release();
return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
}
try {
// break between having loaded it from translog (so we only have _source), and having a document to load
if (get.docIdAndVersion() != null) {
return innerGetLoadFromStoredFields(type, id, gFields, fetchSourceContext, get, docMapper);
} else {
Translog.Source source = get.source();
Map<String, GetField> fields = null;
SearchLookup searchLookup = null;
// we can only load scripts that can run against the source
if (gFields != null && gFields.length > 0) {
Map<String, Object> sourceAsMap = null;
for (String field : gFields) {
if (SourceFieldMapper.NAME.equals(field)) {
// dealt with when normalizing fetchSourceContext.
continue;
}
Object value = null;
if (field.equals(RoutingFieldMapper.NAME) && docMapper.routingFieldMapper().fieldType().stored()) {
value = source.routing;
} else if (field.equals(ParentFieldMapper.NAME) && docMapper.parentFieldMapper().active() && docMapper.parentFieldMapper().fieldType().stored()) {
value = source.parent;
} else if (field.equals(TimestampFieldMapper.NAME) && docMapper.timestampFieldMapper().fieldType().stored()) {
value = source.timestamp;
} else if (field.equals(TTLFieldMapper.NAME) && docMapper.TTLFieldMapper().fieldType().stored()) {
// Call value for search with timestamp + ttl here to display the live remaining ttl value and be consistent with the search result display
if (source.ttl > 0) {
value = docMapper.TTLFieldMapper().valueForSearch(source.timestamp + source.ttl);
}
} else if (field.equals(SizeFieldMapper.NAME) && docMapper.rootMapper(SizeFieldMapper.class).fieldType().stored()) {
value = source.source.length();
} else {
if (searchLookup == null) {
searchLookup = new SearchLookup(mapperService, fieldDataService, new String[]{type});
searchLookup.source().setNextSource(source.source);
}
FieldMapper<?> x = docMapper.mappers().smartNameFieldMapper(field);
if (x == null) {
if (docMapper.objectMappers().get(field) != null) {
// Only fail if we know it is a object field, missing paths / fields shouldn't fail.
throw new ElasticsearchIllegalArgumentException("field [" + field + "] isn't a leaf field");
}
} else if (docMapper.sourceMapper().enabled() || x.fieldType().stored()) {
List<Object> values = searchLookup.source().extractRawValues(field);
if (!values.isEmpty()) {
for (int i = 0; i < values.size(); i++) {
values.set(i, x.valueForSearch(values.get(i)));
}
value = values;
}
}
}
if (value != null) {
if (fields == null) {
fields = newHashMapWithExpectedSize(2);
}
if (value instanceof List) {
fields.put(field, new GetField(field, (List) value));
} else {
fields.put(field, new GetField(field, ImmutableList.of(value)));
}
}
}
}
// deal with source, but only if it's enabled (we always have it from the translog)
BytesReference sourceToBeReturned = null;
SourceFieldMapper sourceFieldMapper = docMapper.sourceMapper();
if (fetchSourceContext.fetchSource() && sourceFieldMapper.enabled()) {
sourceToBeReturned = source.source;
// Cater for source excludes/includes at the cost of performance
// We must first apply the field mapper filtering to make sure we get correct results
// in the case that the fetchSourceContext white lists something that's not included by the field mapper
Map<String, Object> filteredSource = null;
XContentType sourceContentType = null;
if (sourceFieldMapper.includes().length > 0 || sourceFieldMapper.excludes().length > 0) {
                    // TODO: The source might be parsed and available in the sourceLookup, but that one uses unordered maps, so it differs. Do we care?
Tuple<XContentType, Map<String, Object>> typeMapTuple = XContentHelper.convertToMap(source.source, true);
sourceContentType = typeMapTuple.v1();
filteredSource = XContentMapValues.filter(typeMapTuple.v2(), sourceFieldMapper.includes(), sourceFieldMapper.excludes());
}
if (fetchSourceContext.includes().length > 0 || fetchSourceContext.excludes().length > 0) {
if (filteredSource == null) {
Tuple<XContentType, Map<String, Object>> typeMapTuple = XContentHelper.convertToMap(source.source, true);
sourceContentType = typeMapTuple.v1();
filteredSource = typeMapTuple.v2();
}
filteredSource = XContentMapValues.filter(filteredSource, fetchSourceContext.includes(), fetchSourceContext.excludes());
}
if (filteredSource != null) {
try {
sourceToBeReturned = XContentFactory.contentBuilder(sourceContentType).map(filteredSource).bytes();
} catch (IOException e) {
throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id + "] with includes/excludes set", e);
}
}
}
return new GetResult(shardId.index().name(), type, id, get.version(), get.exists(), sourceToBeReturned, fields);
}
} finally {
get.release();
}
}
private GetResult innerGetLoadFromStoredFields(String type, String id, String[] gFields, FetchSourceContext fetchSourceContext, Engine.GetResult get, DocumentMapper docMapper) {
Map<String, GetField> fields = null;
BytesReference source = null;
Versions.DocIdAndVersion docIdAndVersion = get.docIdAndVersion();
FieldsVisitor fieldVisitor = buildFieldsVisitors(gFields, fetchSourceContext);
if (fieldVisitor != null) {
try {
docIdAndVersion.context.reader().document(docIdAndVersion.docId, fieldVisitor);
} catch (IOException e) {
throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id + "]", e);
}
source = fieldVisitor.source();
if (!fieldVisitor.fields().isEmpty()) {
fieldVisitor.postProcess(docMapper);
fields = new HashMap<String, GetField>(fieldVisitor.fields().size());
for (Map.Entry<String, List<Object>> entry : fieldVisitor.fields().entrySet()) {
fields.put(entry.getKey(), new GetField(entry.getKey(), entry.getValue()));
}
}
}
// now, extract the requested fields from the source if they weren't stored
if (gFields != null && gFields.length > 0) {
SearchLookup searchLookup = null;
for (String field : gFields) {
Object value = null;
FieldMappers x = docMapper.mappers().smartName(field);
if (x == null) {
if (docMapper.objectMappers().get(field) != null) {
// Only fail if we know it is an object field; missing paths / fields shouldn't fail.
throw new ElasticsearchIllegalArgumentException("field [" + field + "] isn't a leaf field");
}
} else if (!x.mapper().fieldType().stored()) {
if (searchLookup == null) {
searchLookup = new SearchLookup(mapperService, fieldDataService, new String[]{type});
searchLookup.setNextReader(docIdAndVersion.context);
searchLookup.source().setNextSource(source);
searchLookup.setNextDocId(docIdAndVersion.docId);
}
List<Object> values = searchLookup.source().extractRawValues(field);
if (!values.isEmpty()) {
for (int i = 0; i < values.size(); i++) {
values.set(i, x.mapper().valueForSearch(values.get(i)));
}
value = values;
}
}
if (value != null) {
if (fields == null) {
fields = newHashMapWithExpectedSize(2);
}
if (value instanceof List) {
fields.put(field, new GetField(field, (List) value));
} else {
fields.put(field, new GetField(field, ImmutableList.of(value)));
}
}
}
}
if (!fetchSourceContext.fetchSource()) {
source = null;
} else if (fetchSourceContext.includes().length > 0 || fetchSourceContext.excludes().length > 0) {
Map<String, Object> filteredSource;
XContentType sourceContentType = null;
// TODO: The source might already be parsed and available in the sourceLookup, but that one uses unordered maps, so results could differ. Do we care?
Tuple<XContentType, Map<String, Object>> typeMapTuple = XContentHelper.convertToMap(source, true);
sourceContentType = typeMapTuple.v1();
filteredSource = XContentMapValues.filter(typeMapTuple.v2(), fetchSourceContext.includes(), fetchSourceContext.excludes());
try {
source = XContentFactory.contentBuilder(sourceContentType).map(filteredSource).bytes();
} catch (IOException e) {
throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id + "] with includes/excludes set", e);
}
}
return new GetResult(shardId.index().name(), type, id, get.version(), get.exists(), source, fields);
}
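// When no explicit fields are requested we only need the _source (if fetching
// it at all), so a lightweight JustSourceFieldsVisitor suffices; otherwise a
// CustomFieldsVisitor loads the requested stored fields and, optionally, the
// source in a single pass.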
private static FieldsVisitor buildFieldsVisitors(String[] fields, FetchSourceContext fetchSourceContext) {
if (fields == null || fields.length == 0) {
return fetchSourceContext.fetchSource() ? new JustSourceFieldsVisitor() : null;
}
return new CustomFieldsVisitor(Sets.newHashSet(fields), fetchSourceContext.fetchSource());
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_get_ShardGetService.java
|
3,618 |
public class TokenCountFieldMapper extends IntegerFieldMapper {
public static final String CONTENT_TYPE = "token_count";
public static class Defaults extends IntegerFieldMapper.Defaults {
}
public static class Builder extends NumberFieldMapper.Builder<Builder, TokenCountFieldMapper> {
private Integer nullValue = Defaults.NULL_VALUE;
private NamedAnalyzer analyzer;
public Builder(String name) {
super(name, new FieldType(Defaults.FIELD_TYPE));
builder = this;
}
public Builder nullValue(int nullValue) {
this.nullValue = nullValue;
return this;
}
public Builder analyzer(NamedAnalyzer analyzer) {
this.analyzer = analyzer;
return this;
}
public NamedAnalyzer analyzer() {
return analyzer;
}
@Override
public TokenCountFieldMapper build(BuilderContext context) {
fieldType.setOmitNorms(fieldType.omitNorms() && boost == 1.0f);
TokenCountFieldMapper fieldMapper = new TokenCountFieldMapper(buildNames(context), precisionStep, boost, fieldType, docValues, nullValue,
ignoreMalformed(context), coerce(context), postingsProvider, docValuesProvider, similarity, normsLoading, fieldDataSettings, context.indexSettings(),
analyzer, multiFieldsBuilder.build(this, context), copyTo);
fieldMapper.includeInAll(includeInAll);
return fieldMapper;
}
}
public static class TypeParser implements Mapper.TypeParser {
@Override
@SuppressWarnings("unchecked")
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
TokenCountFieldMapper.Builder builder = tokenCountField(name);
parseNumberField(builder, name, node, parserContext);
for (Map.Entry<String, Object> entry : node.entrySet()) {
String propName = Strings.toUnderscoreCase(entry.getKey());
Object propNode = entry.getValue();
if (propName.equals("null_value")) {
builder.nullValue(nodeIntegerValue(propNode));
} else if (propName.equals("analyzer")) {
NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
if (analyzer == null) {
throw new MapperParsingException("Analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
}
builder.analyzer(analyzer);
}
}
if (builder.analyzer() == null) {
throw new MapperParsingException("Analyzer must be set for field [" + name + "] but wasn't.");
}
return builder;
}
}
private NamedAnalyzer analyzer;
protected TokenCountFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType, Boolean docValues, Integer nullValue,
Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce, PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,
SimilarityProvider similarity, Loading normsLoading, Settings fieldDataSettings, Settings indexSettings, NamedAnalyzer analyzer,
MultiFields multiFields, CopyTo copyTo) {
super(names, precisionStep, boost, fieldType, docValues, nullValue, ignoreMalformed, coerce, postingsProvider, docValuesProvider,
similarity, normsLoading, fieldDataSettings, indexSettings, multiFields, copyTo);
this.analyzer = analyzer;
}
@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
ValueAndBoost valueAndBoost = StringFieldMapper.parseCreateFieldForString(context, null /* Our null value is an int so we convert */, boost);
if (valueAndBoost.value() == null && nullValue() == null) {
return;
}
if (fieldType.indexed() || fieldType.stored() || hasDocValues()) {
int count;
if (valueAndBoost.value() == null) {
count = nullValue();
} else {
count = countPositions(analyzer.analyzer().tokenStream(name(), valueAndBoost.value()));
}
addIntegerFields(context, fields, count, valueAndBoost.boost());
}
if (fields.isEmpty()) {
context.ignoredValue(names.indexName(), valueAndBoost.value());
}
}
/**
* Count position increments in a token stream. Package private for testing.
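* For example, with a whitespace analyzer the stream for "a b c" yields three
* tokens with position increment 1 each, so this returns 3; a stop filter that
* removes a token still contributes its position increment, so the count
* reflects positions rather than surviving tokens.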
* @param tokenStream token stream to count
* @return number of position increments in a token stream
* @throws IOException if tokenStream throws it
*/
static int countPositions(TokenStream tokenStream) throws IOException {
try {
int count = 0;
PositionIncrementAttribute position = tokenStream.addAttribute(PositionIncrementAttribute.class);
tokenStream.reset();
while (tokenStream.incrementToken()) {
count += position.getPositionIncrement();
}
tokenStream.end();
count += position.getPositionIncrement();
return count;
} finally {
tokenStream.close();
}
}
/**
* Name of analyzer.
* @return name of analyzer
*/
public String analyzer() {
return analyzer.name();
}
@Override
protected String contentType() {
return CONTENT_TYPE;
}
@Override
public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
super.merge(mergeWith, mergeContext);
if (!this.getClass().equals(mergeWith.getClass())) {
return;
}
if (!mergeContext.mergeFlags().simulate()) {
this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer;
}
}
@Override
protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
super.doXContentBody(builder, includeDefaults, params);
builder.field("analyzer", analyzer());
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_mapper_core_TokenCountFieldMapper.java
|
3,765 |
public class LogDocMergePolicyProvider extends AbstractMergePolicyProvider<LogDocMergePolicy> {
private final IndexSettingsService indexSettingsService;
private volatile int minMergeDocs;
private volatile int maxMergeDocs;
private volatile int mergeFactor;
private final boolean calibrateSizeByDeletes;
private boolean asyncMerge;
private final Set<CustomLogDocMergePolicy> policies = new CopyOnWriteArraySet<CustomLogDocMergePolicy>();
private final ApplySettings applySettings = new ApplySettings();
@Inject
public LogDocMergePolicyProvider(Store store, IndexSettingsService indexSettingsService) {
super(store);
Preconditions.checkNotNull(store, "Store must be provided to merge policy");
this.indexSettingsService = indexSettingsService;
this.minMergeDocs = componentSettings.getAsInt("min_merge_docs", LogDocMergePolicy.DEFAULT_MIN_MERGE_DOCS);
this.maxMergeDocs = componentSettings.getAsInt("max_merge_docs", LogDocMergePolicy.DEFAULT_MAX_MERGE_DOCS);
this.mergeFactor = componentSettings.getAsInt("merge_factor", LogDocMergePolicy.DEFAULT_MERGE_FACTOR);
this.calibrateSizeByDeletes = componentSettings.getAsBoolean("calibrate_size_by_deletes", true);
this.asyncMerge = indexSettings.getAsBoolean("index.merge.async", true);
logger.debug("using [log_doc] merge policy with merge_factor[{}], min_merge_docs[{}], max_merge_docs[{}], calibrate_size_by_deletes[{}], async_merge[{}]",
mergeFactor, minMergeDocs, maxMergeDocs, calibrateSizeByDeletes, asyncMerge);
indexSettingsService.addListener(applySettings);
}
@Override
public void close() throws ElasticsearchException {
indexSettingsService.removeListener(applySettings);
}
@Override
public LogDocMergePolicy newMergePolicy() {
CustomLogDocMergePolicy mergePolicy;
if (asyncMerge) {
mergePolicy = new EnableMergeLogDocMergePolicy(this);
} else {
mergePolicy = new CustomLogDocMergePolicy(this);
}
mergePolicy.setMinMergeDocs(minMergeDocs);
mergePolicy.setMaxMergeDocs(maxMergeDocs);
mergePolicy.setMergeFactor(mergeFactor);
mergePolicy.setCalibrateSizeByDeletes(calibrateSizeByDeletes);
mergePolicy.setNoCFSRatio(noCFSRatio);
policies.add(mergePolicy);
return mergePolicy;
}
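// The merge settings below can be updated dynamically; the ApplySettings
// listener propagates any change to every live merge policy created by this
// provider.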
public static final String INDEX_MERGE_POLICY_MIN_MERGE_DOCS = "index.merge.policy.min_merge_docs";
public static final String INDEX_MERGE_POLICY_MAX_MERGE_DOCS = "index.merge.policy.max_merge_docs";
public static final String INDEX_MERGE_POLICY_MERGE_FACTOR = "index.merge.policy.merge_factor";
class ApplySettings implements IndexSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
int minMergeDocs = settings.getAsInt(INDEX_MERGE_POLICY_MIN_MERGE_DOCS, LogDocMergePolicyProvider.this.minMergeDocs);
if (minMergeDocs != LogDocMergePolicyProvider.this.minMergeDocs) {
logger.info("updating min_merge_docs from [{}] to [{}]", LogDocMergePolicyProvider.this.minMergeDocs, minMergeDocs);
LogDocMergePolicyProvider.this.minMergeDocs = minMergeDocs;
for (CustomLogDocMergePolicy policy : policies) {
policy.setMinMergeDocs(minMergeDocs);
}
}
int maxMergeDocs = settings.getAsInt(INDEX_MERGE_POLICY_MAX_MERGE_DOCS, LogDocMergePolicyProvider.this.maxMergeDocs);
if (maxMergeDocs != LogDocMergePolicyProvider.this.maxMergeDocs) {
logger.info("updating max_merge_docs from [{}] to [{}]", LogDocMergePolicyProvider.this.maxMergeDocs, maxMergeDocs);
LogDocMergePolicyProvider.this.maxMergeDocs = maxMergeDocs;
for (CustomLogDocMergePolicy policy : policies) {
policy.setMaxMergeDocs(maxMergeDocs);
}
}
int mergeFactor = settings.getAsInt(INDEX_MERGE_POLICY_MERGE_FACTOR, LogDocMergePolicyProvider.this.mergeFactor);
if (mergeFactor != LogDocMergePolicyProvider.this.mergeFactor) {
logger.info("updating merge_factor from [{}] to [{}]", LogDocMergePolicyProvider.this.mergeFactor, mergeFactor);
LogDocMergePolicyProvider.this.mergeFactor = mergeFactor;
for (CustomLogDocMergePolicy policy : policies) {
policy.setMergeFactor(mergeFactor);
}
}
final double noCFSRatio = parseNoCFSRatio(settings.get(INDEX_COMPOUND_FORMAT, Double.toString(LogDocMergePolicyProvider.this.noCFSRatio)));
final boolean compoundFormat = noCFSRatio != 0.0;
if (noCFSRatio != LogDocMergePolicyProvider.this.noCFSRatio) {
logger.info("updating index.compound_format from [{}] to [{}]", formatNoCFSRatio(LogDocMergePolicyProvider.this.noCFSRatio), formatNoCFSRatio(noCFSRatio));
LogDocMergePolicyProvider.this.noCFSRatio = noCFSRatio;
for (CustomLogDocMergePolicy policy : policies) {
policy.setNoCFSRatio(noCFSRatio);
}
}
}
}
public static class CustomLogDocMergePolicy extends LogDocMergePolicy {
private final LogDocMergePolicyProvider provider;
public CustomLogDocMergePolicy(LogDocMergePolicyProvider provider) {
super();
this.provider = provider;
}
@Override
public void close() {
super.close();
provider.policies.remove(this);
}
}
public static class EnableMergeLogDocMergePolicy extends CustomLogDocMergePolicy {
public EnableMergeLogDocMergePolicy(LogDocMergePolicyProvider provider) {
super(provider);
}
@Override
public MergeSpecification findMerges(MergeTrigger trigger, SegmentInfos infos) throws IOException {
// we don't enable merges while indexing documents, we do them in the background
if (trigger == MergeTrigger.SEGMENT_FLUSH) {
return null;
}
return super.findMerges(trigger, infos);
}
@Override
public MergePolicy clone() {
// Lucene IW makes a clone internally but since we hold on to this instance
// the clone will just be the identity.
return this;
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_merge_policy_LogDocMergePolicyProvider.java
|
189 |
public interface RequestDTO {
/**
* @return returns the request not including the protocol, domain, or query string
*/
public String getRequestURI();
/**
* @return Returns the URL and parameters.
*/
public String getFullUrLWithQueryString();
/**
* @return true if this request came in through HTTPS
*/
public Boolean isSecure();
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_RequestDTO.java
|
242 |
public class XAnalyzingSuggester extends Lookup {
/**
* FST<Weight,Surface>:
* input is the analyzed form, with a null byte between terms
* weights are encoded as costs: (Integer.MAX_VALUE-weight)
* surface is the original, unanalyzed form.
*/
private FST<Pair<Long,BytesRef>> fst = null;
/**
* Analyzer that will be used for analyzing suggestions at
* index time.
*/
private final Analyzer indexAnalyzer;
/**
* Analyzer that will be used for analyzing suggestions at
* query time.
*/
private final Analyzer queryAnalyzer;
/**
* True if exact match suggestions should always be returned first.
*/
private final boolean exactFirst;
/**
* True if separator between tokens should be preserved.
*/
private final boolean preserveSep;
/** Include this flag in the options parameter to {@link
* #XAnalyzingSuggester(Analyzer,Analyzer,int,int,int,boolean,FST,boolean,int,int,int,int,int)} to always
* return the exact match first, regardless of score. This
* has no performance impact but could result in
* low-quality suggestions. */
public static final int EXACT_FIRST = 1;
/** Include this flag in the options parameter to {@link
* #XAnalyzingSuggester(Analyzer,Analyzer,int,int,int,boolean,FST,boolean,int,int,int,int,int)} to preserve
* token separators when matching. */
public static final int PRESERVE_SEP = 2;
/** Represents the separation between tokens, if
* PRESERVE_SEP was specified */
public static final int SEP_LABEL = '\u001F';
/** Marks end of the analyzed input and start of dedup
* byte. */
public static final int END_BYTE = 0x0;
/** Maximum number of dup surface forms (different surface
* forms for the same analyzed form). */
private final int maxSurfaceFormsPerAnalyzedForm;
/** Maximum graph paths to index for a single analyzed
* surface form. This only matters if your analyzer
* makes lots of alternate paths (e.g. contains
* SynonymFilter). */
private final int maxGraphExpansions;
/** Highest number of analyzed paths we saw for any single
* input surface form. For analyzers that never create
* graphs this will always be 1. */
private int maxAnalyzedPathsForOneInput;
private boolean hasPayloads;
private final int sepLabel;
private final int payloadSep;
private final int endByte;
private final int holeCharacter;
public static final int PAYLOAD_SEP = '\u001F';
public static final int HOLE_CHARACTER = '\u001E';
/** Whether position holes should appear in the automaton. */
private boolean preservePositionIncrements;
/**
* Calls {@link #XAnalyzingSuggester(Analyzer,Analyzer,int,int,int,boolean,FST,boolean,int,int,int,int,int)
* AnalyzingSuggester(analyzer, analyzer, EXACT_FIRST |
* PRESERVE_SEP, 256, -1)}
*/
public XAnalyzingSuggester(Analyzer analyzer) {
this(analyzer, analyzer, EXACT_FIRST | PRESERVE_SEP, 256, -1, true, null, false, 0, SEP_LABEL, PAYLOAD_SEP, END_BYTE, HOLE_CHARACTER);
}
/**
* Calls {@link #XAnalyzingSuggester(Analyzer,Analyzer,int,int,int,boolean,FST,boolean,int,int,int,int,int)
* AnalyzingSuggester(indexAnalyzer, queryAnalyzer, EXACT_FIRST |
* PRESERVE_SEP, 256, -1)}
*/
public XAnalyzingSuggester(Analyzer indexAnalyzer, Analyzer queryAnalyzer) {
this(indexAnalyzer, queryAnalyzer, EXACT_FIRST | PRESERVE_SEP, 256, -1, true, null, false, 0, SEP_LABEL, PAYLOAD_SEP, END_BYTE, HOLE_CHARACTER);
}
/**
* Creates a new suggester.
*
* @param indexAnalyzer Analyzer that will be used for
* analyzing suggestions while building the index.
* @param queryAnalyzer Analyzer that will be used for
* analyzing query text during lookup
* @param options see {@link #EXACT_FIRST}, {@link #PRESERVE_SEP}
* @param maxSurfaceFormsPerAnalyzedForm Maximum number of
* surface forms to keep for a single analyzed form.
* When there are too many surface forms we discard the
* lowest weighted ones.
* @param maxGraphExpansions Maximum number of graph paths
* to expand from the analyzed form. Set this to -1 for
* no limit.
*/
public XAnalyzingSuggester(Analyzer indexAnalyzer, Analyzer queryAnalyzer, int options, int maxSurfaceFormsPerAnalyzedForm, int maxGraphExpansions,
boolean preservePositionIncrements, FST<Pair<Long, BytesRef>> fst, boolean hasPayloads, int maxAnalyzedPathsForOneInput,
int sepLabel, int payloadSep, int endByte, int holeCharacter) {
// SIMON EDIT: I added fst, hasPayloads and maxAnalyzedPathsForOneInput
this.indexAnalyzer = indexAnalyzer;
this.queryAnalyzer = queryAnalyzer;
this.fst = fst;
this.hasPayloads = hasPayloads;
if ((options & ~(EXACT_FIRST | PRESERVE_SEP)) != 0) {
throw new IllegalArgumentException("options should only contain EXACT_FIRST and PRESERVE_SEP; got " + options);
}
this.exactFirst = (options & EXACT_FIRST) != 0;
this.preserveSep = (options & PRESERVE_SEP) != 0;
// NOTE: this is just an implementation limitation; if
// somehow this is a problem we could fix it by using
// more than one byte to disambiguate ... but 256 seems
// like it should be way more than enough.
if (maxSurfaceFormsPerAnalyzedForm <= 0 || maxSurfaceFormsPerAnalyzedForm > 256) {
throw new IllegalArgumentException("maxSurfaceFormsPerAnalyzedForm must be > 0 and < 256 (got: " + maxSurfaceFormsPerAnalyzedForm + ")");
}
this.maxSurfaceFormsPerAnalyzedForm = maxSurfaceFormsPerAnalyzedForm;
if (maxGraphExpansions < 1 && maxGraphExpansions != -1) {
throw new IllegalArgumentException("maxGraphExpansions must -1 (no limit) or > 0 (got: " + maxGraphExpansions + ")");
}
this.maxGraphExpansions = maxGraphExpansions;
this.maxAnalyzedPathsForOneInput = maxAnalyzedPathsForOneInput;
this.preservePositionIncrements = preservePositionIncrements;
this.sepLabel = sepLabel;
this.payloadSep = payloadSep;
this.endByte = endByte;
this.holeCharacter = holeCharacter;
}
/** Returns byte size of the underlying FST. */
public long sizeInBytes() {
return fst == null ? 0 : fst.sizeInBytes();
}
private static void copyDestTransitions(State from, State to, List<Transition> transitions) {
if (to.isAccept()) {
from.setAccept(true);
}
for(Transition t : to.getTransitions()) {
transitions.add(t);
}
}
// Replaces SEP with epsilon or remaps them if
// we were asked to preserve them:
private static void replaceSep(Automaton a, boolean preserveSep, int replaceSep) {
State[] states = a.getNumberedStates();
// Go in reverse topo sort so we know we only have to
// make one pass:
for(int stateNumber=states.length-1;stateNumber >=0;stateNumber--) {
final State state = states[stateNumber];
List<Transition> newTransitions = new ArrayList<Transition>();
for(Transition t : state.getTransitions()) {
assert t.getMin() == t.getMax();
if (t.getMin() == TokenStreamToAutomaton.POS_SEP) {
if (preserveSep) {
// Remap to SEP_LABEL:
newTransitions.add(new Transition(replaceSep, t.getDest()));
} else {
copyDestTransitions(state, t.getDest(), newTransitions);
a.setDeterministic(false);
}
} else if (t.getMin() == TokenStreamToAutomaton.HOLE) {
// Just remove the hole: there will then be two
// SEP tokens next to each other, which will only
// match another hole at search time. Note that
// it will also match an empty-string token ... if
// that's somehow a problem we can always map HOLE
// to a dedicated byte (and escape it in the
// input).
copyDestTransitions(state, t.getDest(), newTransitions);
a.setDeterministic(false);
} else {
newTransitions.add(t);
}
}
state.setTransitions(newTransitions.toArray(new Transition[newTransitions.size()]));
}
}
protected Automaton convertAutomaton(Automaton a) {
return a;
}
/** Just escapes the 0xff byte (which we still use for SEP). */
private static final class EscapingTokenStreamToAutomaton extends TokenStreamToAutomaton {
final BytesRef spare = new BytesRef();
private char sepLabel;
public EscapingTokenStreamToAutomaton(char sepLabel) {
this.sepLabel = sepLabel;
}
@Override
protected BytesRef changeToken(BytesRef in) {
int upto = 0;
for(int i=0;i<in.length;i++) {
byte b = in.bytes[in.offset+i];
if (b == (byte) sepLabel) {
if (spare.bytes.length == upto) {
spare.grow(upto+2);
}
spare.bytes[upto++] = (byte) sepLabel;
spare.bytes[upto++] = b;
} else {
if (spare.bytes.length == upto) {
spare.grow(upto+1);
}
spare.bytes[upto++] = b;
}
}
spare.offset = 0;
spare.length = upto;
return spare;
}
}
public TokenStreamToAutomaton getTokenStreamToAutomaton() {
final TokenStreamToAutomaton tsta;
if (preserveSep) {
tsta = new EscapingTokenStreamToAutomaton((char) sepLabel);
} else {
// When we're not preserving sep, we don't steal the 0xff
// byte, so we don't need to do any escaping:
tsta = new TokenStreamToAutomaton();
}
tsta.setPreservePositionIncrements(preservePositionIncrements);
return tsta;
}
private static class AnalyzingComparator implements Comparator<BytesRef> {
private final boolean hasPayloads;
public AnalyzingComparator(boolean hasPayloads) {
this.hasPayloads = hasPayloads;
}
private final ByteArrayDataInput readerA = new ByteArrayDataInput();
private final ByteArrayDataInput readerB = new ByteArrayDataInput();
private final BytesRef scratchA = new BytesRef();
private final BytesRef scratchB = new BytesRef();
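// Each sort entry is laid out as: analyzedLength (short), analyzed bytes,
// cost (int), then the surface form (length-prefixed with a short only when
// payloads are present, followed by the payload bytes). Entries therefore
// sort by analyzed form first, then cost, then surface form.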
@Override
public int compare(BytesRef a, BytesRef b) {
// First by analyzed form:
readerA.reset(a.bytes, a.offset, a.length);
scratchA.length = readerA.readShort();
scratchA.bytes = a.bytes;
scratchA.offset = readerA.getPosition();
readerB.reset(b.bytes, b.offset, b.length);
scratchB.bytes = b.bytes;
scratchB.length = readerB.readShort();
scratchB.offset = readerB.getPosition();
int cmp = scratchA.compareTo(scratchB);
if (cmp != 0) {
return cmp;
}
readerA.skipBytes(scratchA.length);
readerB.skipBytes(scratchB.length);
// Next by cost:
long aCost = readerA.readInt();
long bCost = readerB.readInt();
if (aCost < bCost) {
return -1;
} else if (aCost > bCost) {
return 1;
}
// Finally by surface form:
if (hasPayloads) {
scratchA.length = readerA.readShort();
scratchA.offset = readerA.getPosition();
scratchB.length = readerB.readShort();
scratchB.offset = readerB.getPosition();
} else {
scratchA.offset = readerA.getPosition();
scratchA.length = a.length - scratchA.offset;
scratchB.offset = readerB.getPosition();
scratchB.length = b.length - scratchB.offset;
}
return scratchA.compareTo(scratchB);
}
}
@Override
public void build(InputIterator iterator) throws IOException {
String prefix = getClass().getSimpleName();
File directory = Sort.defaultTempDir();
File tempInput = File.createTempFile(prefix, ".input", directory);
File tempSorted = File.createTempFile(prefix, ".sorted", directory);
hasPayloads = iterator.hasPayloads();
Sort.ByteSequencesWriter writer = new Sort.ByteSequencesWriter(tempInput);
Sort.ByteSequencesReader reader = null;
BytesRef scratch = new BytesRef();
TokenStreamToAutomaton ts2a = getTokenStreamToAutomaton();
boolean success = false;
byte buffer[] = new byte[8];
try {
ByteArrayDataOutput output = new ByteArrayDataOutput(buffer);
BytesRef surfaceForm;
while ((surfaceForm = iterator.next()) != null) {
Set<IntsRef> paths = toFiniteStrings(surfaceForm, ts2a);
maxAnalyzedPathsForOneInput = Math.max(maxAnalyzedPathsForOneInput, paths.size());
for (IntsRef path : paths) {
Util.toBytesRef(path, scratch);
// length of the analyzed text (FST input)
if (scratch.length > Short.MAX_VALUE-2) {
throw new IllegalArgumentException("cannot handle analyzed forms > " + (Short.MAX_VALUE-2) + " in length (got " + scratch.length + ")");
}
short analyzedLength = (short) scratch.length;
// compute the required length:
// analyzed sequence + weight (4) + surface + analyzedLength (short)
int requiredLength = analyzedLength + 4 + surfaceForm.length + 2;
BytesRef payload;
if (hasPayloads) {
if (surfaceForm.length > (Short.MAX_VALUE-2)) {
throw new IllegalArgumentException("cannot handle surface form > " + (Short.MAX_VALUE-2) + " in length (got " + surfaceForm.length + ")");
}
payload = iterator.payload();
// payload + surfaceLength (short)
requiredLength += payload.length + 2;
} else {
payload = null;
}
buffer = ArrayUtil.grow(buffer, requiredLength);
output.reset(buffer);
output.writeShort(analyzedLength);
output.writeBytes(scratch.bytes, scratch.offset, scratch.length);
output.writeInt(encodeWeight(iterator.weight()));
if (hasPayloads) {
for(int i=0;i<surfaceForm.length;i++) {
if (surfaceForm.bytes[i] == payloadSep) {
throw new IllegalArgumentException("surface form cannot contain unit separator character U+001F; this character is reserved");
}
}
output.writeShort((short) surfaceForm.length);
output.writeBytes(surfaceForm.bytes, surfaceForm.offset, surfaceForm.length);
output.writeBytes(payload.bytes, payload.offset, payload.length);
} else {
output.writeBytes(surfaceForm.bytes, surfaceForm.offset, surfaceForm.length);
}
assert output.getPosition() == requiredLength: output.getPosition() + " vs " + requiredLength;
writer.write(buffer, 0, output.getPosition());
}
}
writer.close();
// Sort all input/output pairs (required by FST.Builder):
new Sort(new AnalyzingComparator(hasPayloads)).sort(tempInput, tempSorted);
// Free disk space:
tempInput.delete();
reader = new Sort.ByteSequencesReader(tempSorted);
PairOutputs<Long,BytesRef> outputs = new PairOutputs<Long,BytesRef>(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton());
Builder<Pair<Long,BytesRef>> builder = new Builder<Pair<Long,BytesRef>>(FST.INPUT_TYPE.BYTE1, outputs);
// Build FST:
BytesRef previousAnalyzed = null;
BytesRef analyzed = new BytesRef();
BytesRef surface = new BytesRef();
IntsRef scratchInts = new IntsRef();
ByteArrayDataInput input = new ByteArrayDataInput();
// Used to remove duplicate surface forms (but we
// still index the highest-weight one). We clear
// this when we see a new analyzed form, so it cannot
// grow unbounded (at most 256 entries):
Set<BytesRef> seenSurfaceForms = new HashSet<BytesRef>();
int dedup = 0;
while (reader.read(scratch)) {
input.reset(scratch.bytes, scratch.offset, scratch.length);
short analyzedLength = input.readShort();
analyzed.grow(analyzedLength+2);
input.readBytes(analyzed.bytes, 0, analyzedLength);
analyzed.length = analyzedLength;
long cost = input.readInt();
surface.bytes = scratch.bytes;
if (hasPayloads) {
surface.length = input.readShort();
surface.offset = input.getPosition();
} else {
surface.offset = input.getPosition();
surface.length = scratch.length - surface.offset;
}
if (previousAnalyzed == null) {
previousAnalyzed = new BytesRef();
previousAnalyzed.copyBytes(analyzed);
seenSurfaceForms.add(BytesRef.deepCopyOf(surface));
} else if (analyzed.equals(previousAnalyzed)) {
dedup++;
if (dedup >= maxSurfaceFormsPerAnalyzedForm) {
// More than maxSurfaceFormsPerAnalyzedForm
// dups: skip the rest:
continue;
}
if (seenSurfaceForms.contains(surface)) {
continue;
}
seenSurfaceForms.add(BytesRef.deepCopyOf(surface));
} else {
dedup = 0;
previousAnalyzed.copyBytes(analyzed);
seenSurfaceForms.clear();
seenSurfaceForms.add(BytesRef.deepCopyOf(surface));
}
// TODO: I think we can avoid the extra 2 bytes when
// there is no dup (dedup==0), but we'd have to fix
// the exactFirst logic ... which would be sort of
// hairy because we'd need to special case the two
// (dup/not dup)...
// NOTE: must be byte 0 so we sort before whatever
// is next
analyzed.bytes[analyzed.offset+analyzed.length] = 0;
analyzed.bytes[analyzed.offset+analyzed.length+1] = (byte) dedup;
analyzed.length += 2;
Util.toIntsRef(analyzed, scratchInts);
//System.out.println("ADD: " + scratchInts + " -> " + cost + ": " + surface.utf8ToString());
if (!hasPayloads) {
builder.add(scratchInts, outputs.newPair(cost, BytesRef.deepCopyOf(surface)));
} else {
int payloadOffset = input.getPosition() + surface.length;
int payloadLength = scratch.length - payloadOffset;
BytesRef br = new BytesRef(surface.length + 1 + payloadLength);
System.arraycopy(surface.bytes, surface.offset, br.bytes, 0, surface.length);
br.bytes[surface.length] = (byte) payloadSep;
System.arraycopy(scratch.bytes, payloadOffset, br.bytes, surface.length+1, payloadLength);
br.length = br.bytes.length;
builder.add(scratchInts, outputs.newPair(cost, br));
}
}
fst = builder.finish();
//PrintWriter pw = new PrintWriter("/tmp/out.dot");
//Util.toDot(fst, pw, true, true);
//pw.close();
success = true;
} finally {
if (success) {
IOUtils.close(reader, writer);
} else {
IOUtils.closeWhileHandlingException(reader, writer);
}
tempInput.delete();
tempSorted.delete();
}
}
@Override
public boolean store(OutputStream output) throws IOException {
DataOutput dataOut = new OutputStreamDataOutput(output);
try {
if (fst == null) {
return false;
}
fst.save(dataOut);
dataOut.writeVInt(maxAnalyzedPathsForOneInput);
dataOut.writeByte((byte) (hasPayloads ? 1 : 0));
} finally {
IOUtils.close(output);
}
return true;
}
@Override
public boolean load(InputStream input) throws IOException {
DataInput dataIn = new InputStreamDataInput(input);
try {
this.fst = new FST<Pair<Long,BytesRef>>(dataIn, new PairOutputs<Long,BytesRef>(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton()));
maxAnalyzedPathsForOneInput = dataIn.readVInt();
hasPayloads = dataIn.readByte() == 1;
} finally {
IOUtils.close(input);
}
return true;
}
private LookupResult getLookupResult(Long output1, BytesRef output2, CharsRef spare) {
LookupResult result;
if (hasPayloads) {
int sepIndex = -1;
for(int i=0;i<output2.length;i++) {
if (output2.bytes[output2.offset+i] == payloadSep) {
sepIndex = i;
break;
}
}
assert sepIndex != -1;
spare.grow(sepIndex);
final int payloadLen = output2.length - sepIndex - 1;
UnicodeUtil.UTF8toUTF16(output2.bytes, output2.offset, sepIndex, spare);
BytesRef payload = new BytesRef(payloadLen);
System.arraycopy(output2.bytes, sepIndex+1, payload.bytes, 0, payloadLen);
payload.length = payloadLen;
result = new LookupResult(spare.toString(), decodeWeight(output1), payload);
} else {
spare.grow(output2.length);
UnicodeUtil.UTF8toUTF16(output2, spare);
result = new LookupResult(spare.toString(), decodeWeight(output1));
}
return result;
}
private boolean sameSurfaceForm(BytesRef key, BytesRef output2) {
if (hasPayloads) {
// output2 has at least PAYLOAD_SEP byte:
if (key.length >= output2.length) {
return false;
}
for(int i=0;i<key.length;i++) {
if (key.bytes[key.offset+i] != output2.bytes[output2.offset+i]) {
return false;
}
}
return output2.bytes[output2.offset + key.length] == payloadSep;
} else {
return key.bytesEquals(output2);
}
}
@Override
public List<LookupResult> lookup(final CharSequence key, boolean onlyMorePopular, int num) {
assert num > 0;
if (onlyMorePopular) {
throw new IllegalArgumentException("this suggester only works with onlyMorePopular=false");
}
if (fst == null) {
return Collections.emptyList();
}
//System.out.println("lookup key=" + key + " num=" + num);
for (int i = 0; i < key.length(); i++) {
if (key.charAt(i) == holeCharacter) {
throw new IllegalArgumentException("lookup key cannot contain HOLE character U+001E; this character is reserved");
}
if (key.charAt(i) == sepLabel) {
throw new IllegalArgumentException("lookup key cannot contain unit separator character U+001F; this character is reserved");
}
}
final BytesRef utf8Key = new BytesRef(key);
try {
Automaton lookupAutomaton = toLookupAutomaton(key);
final CharsRef spare = new CharsRef();
//System.out.println(" now intersect exactFirst=" + exactFirst);
// Intersect automaton w/ suggest wFST and get all
// prefix starting nodes & their outputs:
//final PathIntersector intersector = getPathIntersector(lookupAutomaton, fst);
//System.out.println(" prefixPaths: " + prefixPaths.size());
BytesReader bytesReader = fst.getBytesReader();
FST.Arc<Pair<Long,BytesRef>> scratchArc = new FST.Arc<Pair<Long,BytesRef>>();
final List<LookupResult> results = new ArrayList<LookupResult>();
List<FSTUtil.Path<Pair<Long,BytesRef>>> prefixPaths = FSTUtil.intersectPrefixPaths(convertAutomaton(lookupAutomaton), fst);
if (exactFirst) {
int count = 0;
for (FSTUtil.Path<Pair<Long,BytesRef>> path : prefixPaths) {
if (fst.findTargetArc(endByte, path.fstNode, scratchArc, bytesReader) != null) {
// This node has END_BYTE arc leaving, meaning it's an
// "exact" match:
count++;
}
}
// Searcher just to find the single exact only
// match, if present:
Util.TopNSearcher<Pair<Long,BytesRef>> searcher;
searcher = new Util.TopNSearcher<Pair<Long,BytesRef>>(fst, count * maxSurfaceFormsPerAnalyzedForm, count * maxSurfaceFormsPerAnalyzedForm, weightComparator);
// NOTE: we could almost get away with only using
// the first start node. The only catch is if
// maxSurfaceFormsPerAnalyzedForm had kicked in and
// pruned our exact match from one of these nodes
// ...:
for (FSTUtil.Path<Pair<Long,BytesRef>> path : prefixPaths) {
if (fst.findTargetArc(endByte, path.fstNode, scratchArc, bytesReader) != null) {
// This node has END_BYTE arc leaving, meaning it's an
// "exact" match:
searcher.addStartPaths(scratchArc, fst.outputs.add(path.output, scratchArc.output), false, path.input);
}
}
MinResult<Pair<Long,BytesRef>> completions[] = searcher.search();
// NOTE: this is rather inefficient: we enumerate
// every matching "exactly the same analyzed form"
// path, and then do linear scan to see if one of
// these exactly matches the input. It should be
// possible (though hairy) to do something similar
// to getByOutput, since the surface form is encoded
// into the FST output, so we more efficiently hone
// in on the exact surface-form match. Still, I
// suspect very little time is spent in this linear
// search: it's bounded by how many prefix start
// nodes we have and the
// maxSurfaceFormsPerAnalyzedForm:
for(MinResult<Pair<Long,BytesRef>> completion : completions) {
BytesRef output2 = completion.output.output2;
if (sameSurfaceForm(utf8Key, output2)) {
results.add(getLookupResult(completion.output.output1, output2, spare));
break;
}
}
if (results.size() == num) {
// That was quick:
return results;
}
}
Util.TopNSearcher<Pair<Long,BytesRef>> searcher;
searcher = new Util.TopNSearcher<Pair<Long,BytesRef>>(fst,
num - results.size(),
num * maxAnalyzedPathsForOneInput,
weightComparator) {
private final Set<BytesRef> seen = new HashSet<BytesRef>();
@Override
protected boolean acceptResult(IntsRef input, Pair<Long,BytesRef> output) {
// Dedup: when the input analyzes to a graph we
// can get duplicate surface forms:
if (seen.contains(output.output2)) {
return false;
}
seen.add(output.output2);
if (!exactFirst) {
return true;
} else {
// In exactFirst mode, don't accept any paths
// matching the surface form since that will
// create duplicate results:
if (sameSurfaceForm(utf8Key, output.output2)) {
// We found exact match, which means we should
// have already found it in the first search:
assert results.size() == 1;
return false;
} else {
return true;
}
}
}
};
prefixPaths = getFullPrefixPaths(prefixPaths, lookupAutomaton, fst);
for (FSTUtil.Path<Pair<Long,BytesRef>> path : prefixPaths) {
searcher.addStartPaths(path.fstNode, path.output, true, path.input);
}
MinResult<Pair<Long,BytesRef>> completions[] = searcher.search();
for(MinResult<Pair<Long,BytesRef>> completion : completions) {
LookupResult result = getLookupResult(completion.output.output1, completion.output.output2, spare);
// TODO: for fuzzy case would be nice to return
// how many edits were required
//System.out.println(" result=" + result);
results.add(result);
if (results.size() == num) {
// In the exactFirst=true case the search may
// produce one extra path
break;
}
}
return results;
} catch (IOException bogus) {
throw new RuntimeException(bogus);
}
}
/** Returns all completion paths to initialize the search. */
protected List<FSTUtil.Path<Pair<Long,BytesRef>>> getFullPrefixPaths(List<FSTUtil.Path<Pair<Long,BytesRef>>> prefixPaths,
Automaton lookupAutomaton,
FST<Pair<Long,BytesRef>> fst)
throws IOException {
return prefixPaths;
}
public final Set<IntsRef> toFiniteStrings(final BytesRef surfaceForm, final TokenStreamToAutomaton ts2a) throws IOException {
// Analyze surface form:
TokenStream ts = indexAnalyzer.tokenStream("", surfaceForm.utf8ToString());
return toFiniteStrings(ts2a, ts);
}
public final Set<IntsRef> toFiniteStrings(final TokenStreamToAutomaton ts2a, TokenStream ts) throws IOException {
// Analyze surface form:
// Create corresponding automaton: labels are bytes
// from each analyzed token, with byte 0 used as
// separator between tokens:
Automaton automaton = ts2a.toAutomaton(ts);
ts.close();
replaceSep(automaton, preserveSep, sepLabel);
assert SpecialOperations.isFinite(automaton);
// Get all paths from the automaton (there can be
// more than one path, eg if the analyzer created a
// graph using SynonymFilter or WordDelimiterFilter):
// TODO: we could walk & add simultaneously, so we
// don't have to alloc [possibly biggish]
// intermediate HashSet in RAM:
return SpecialOperations.getFiniteStrings(automaton, maxGraphExpansions);
}
final Automaton toLookupAutomaton(final CharSequence key) throws IOException {
// Turn tokenstream into automaton:
TokenStream ts = queryAnalyzer.tokenStream("", key.toString());
Automaton automaton = (getTokenStreamToAutomaton()).toAutomaton(ts);
ts.close();
// TODO: we could use the end offset to "guess"
// whether the final token was a partial token; this
// would only be a heuristic ... but maybe an OK one.
// This way we could eg differentiate "net" from "net ",
// which we can't today...
replaceSep(automaton, preserveSep, sepLabel);
// TODO: we can optimize this somewhat by determinizing
// while we convert
BasicOperations.determinize(automaton);
return automaton;
}
/**
* Returns the weight associated with an input string,
* or null if it does not exist.
*/
public Object get(CharSequence key) {
throw new UnsupportedOperationException();
}
/** cost -> weight */
public static int decodeWeight(long encoded) {
return (int)(Integer.MAX_VALUE - encoded);
}
/** weight -> cost */
public static int encodeWeight(long value) {
if (value < 0 || value > Integer.MAX_VALUE) {
throw new UnsupportedOperationException("cannot encode value: " + value);
}
return Integer.MAX_VALUE - (int)value;
}
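// Example: a weight of 5 is stored as cost Integer.MAX_VALUE - 5, so the
// min-cost TopNSearcher traversal surfaces the highest weights first;
// decodeWeight inverts the mapping exactly.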
static final Comparator<Pair<Long,BytesRef>> weightComparator = new Comparator<Pair<Long,BytesRef>> () {
@Override
public int compare(Pair<Long,BytesRef> left, Pair<Long,BytesRef> right) {
return left.output1.compareTo(right.output1);
}
};
public static class XBuilder {
private Builder<Pair<Long, BytesRef>> builder;
private int maxSurfaceFormsPerAnalyzedForm;
private IntsRef scratchInts = new IntsRef();
private final PairOutputs<Long, BytesRef> outputs;
private boolean hasPayloads;
private BytesRef analyzed = new BytesRef();
private final SurfaceFormAndPayload[] surfaceFormsAndPayload;
private int count;
private ObjectIntOpenHashMap<BytesRef> seenSurfaceForms = HppcMaps.Object.Integer.ensureNoNullKeys(256, 0.75f);
private int payloadSep;
public XBuilder(int maxSurfaceFormsPerAnalyzedForm, boolean hasPayloads, int payloadSep) {
this.payloadSep = payloadSep;
this.outputs = new PairOutputs<Long, BytesRef>(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton());
this.builder = new Builder<Pair<Long, BytesRef>>(FST.INPUT_TYPE.BYTE1, outputs);
this.maxSurfaceFormsPerAnalyzedForm = maxSurfaceFormsPerAnalyzedForm;
this.hasPayloads = hasPayloads;
surfaceFormsAndPayload = new SurfaceFormAndPayload[maxSurfaceFormsPerAnalyzedForm];
}
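// startTerm copies the analyzed form and reserves two extra bytes: finishTerm
// appends the END_BYTE (0) plus a per-surface-form dedup byte so duplicate
// analyzed forms map to distinct FST inputs.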
public void startTerm(BytesRef analyzed) {
this.analyzed.copyBytes(analyzed);
this.analyzed.grow(analyzed.length+2);
}
private final static class SurfaceFormAndPayload implements Comparable<SurfaceFormAndPayload> {
BytesRef payload;
long weight;
public SurfaceFormAndPayload(BytesRef payload, long cost) {
super();
this.payload = payload;
this.weight = cost;
}
@Override
public int compareTo(SurfaceFormAndPayload o) {
int res = compare(weight, o.weight);
if (res == 0) {
return payload.compareTo(o.payload);
}
return res;
}
public static int compare(long x, long y) {
return (x < y) ? -1 : ((x == y) ? 0 : 1);
}
}
public void addSurface(BytesRef surface, BytesRef payload, long cost) throws IOException {
int surfaceIndex = -1;
long encodedWeight = cost == -1 ? cost : encodeWeight(cost);
/*
* we need to check if we have seen this surface form before; if so, only keep
* the surface form with the highest weight and drop the rest, no matter if
* the payload differs.
*/
if (count >= maxSurfaceFormsPerAnalyzedForm) {
// More than maxSurfaceFormsPerAnalyzedForm
// dups: skip the rest:
return;
}
BytesRef surfaceCopy;
if (count > 0 && seenSurfaceForms.containsKey(surface)) {
surfaceIndex = seenSurfaceForms.lget();
SurfaceFormAndPayload surfaceFormAndPayload = surfaceFormsAndPayload[surfaceIndex];
if (encodedWeight >= surfaceFormAndPayload.weight) {
return;
}
surfaceCopy = BytesRef.deepCopyOf(surface);
} else {
surfaceIndex = count++;
surfaceCopy = BytesRef.deepCopyOf(surface);
seenSurfaceForms.put(surfaceCopy, surfaceIndex);
}
BytesRef payloadRef;
if (!hasPayloads) {
payloadRef = surfaceCopy;
} else {
int len = surface.length + 1 + payload.length;
final BytesRef br = new BytesRef(len);
System.arraycopy(surface.bytes, surface.offset, br.bytes, 0, surface.length);
br.bytes[surface.length] = (byte) payloadSep;
System.arraycopy(payload.bytes, payload.offset, br.bytes, surface.length + 1, payload.length);
br.length = len;
payloadRef = br;
}
if (surfaceFormsAndPayload[surfaceIndex] == null) {
surfaceFormsAndPayload[surfaceIndex] = new SurfaceFormAndPayload(payloadRef, encodedWeight);
} else {
surfaceFormsAndPayload[surfaceIndex].payload = payloadRef;
surfaceFormsAndPayload[surfaceIndex].weight = encodedWeight;
}
}
public void finishTerm(long defaultWeight) throws IOException {
ArrayUtil.timSort(surfaceFormsAndPayload, 0, count);
int deduplicator = 0;
analyzed.bytes[analyzed.offset + analyzed.length] = 0;
analyzed.length += 2;
for (int i = 0; i < count; i++) {
analyzed.bytes[analyzed.offset + analyzed.length - 1 ] = (byte) deduplicator++;
Util.toIntsRef(analyzed, scratchInts);
SurfaceFormAndPayload candidate = surfaceFormsAndPayload[i];
long cost = candidate.weight == -1 ? encodeWeight(Math.min(Integer.MAX_VALUE, defaultWeight)) : candidate.weight;
builder.add(scratchInts, outputs.newPair(cost, candidate.payload));
}
seenSurfaceForms.clear();
count = 0;
}
public FST<Pair<Long, BytesRef>> build() throws IOException {
return builder.finish();
}
public boolean hasPayloads() {
return hasPayloads;
}
public int maxSurfaceFormsPerAnalyzedForm() {
return maxSurfaceFormsPerAnalyzedForm;
}
}
}
| 1no label
|
src_main_java_org_apache_lucene_search_suggest_analyzing_XAnalyzingSuggester.java
|
1,049 |
public class ItemListenerConfig extends ListenerConfig {
private boolean includeValue = true;
private ItemListenerConfigReadOnly readOnly;
public ItemListenerConfig() {
super();
}
public ItemListenerConfig(String className, boolean includeValue) {
super(className);
this.includeValue = includeValue;
}
public ItemListenerConfig(ItemListener implementation, boolean includeValue) {
super(implementation);
this.includeValue = includeValue;
}
public ItemListenerConfig(ItemListenerConfig config) {
includeValue = config.isIncludeValue();
implementation = config.getImplementation();
className = config.getClassName();
}
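// Lazily creates and caches an immutable view; subsequent calls return the
// same read-only instance.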
public ItemListenerConfigReadOnly getAsReadOnly() {
if (readOnly == null) {
readOnly = new ItemListenerConfigReadOnly(this);
}
return readOnly;
}
public ItemListener getImplementation() {
return (ItemListener) implementation;
}
public ItemListenerConfig setImplementation(final ItemListener implementation) {
super.setImplementation(implementation);
return this;
}
public boolean isIncludeValue() {
return includeValue;
}
public ItemListenerConfig setIncludeValue(boolean includeValue) {
this.includeValue = includeValue;
return this;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("ItemListenerConfig");
sb.append("{includeValue=").append(includeValue);
sb.append('}');
return sb.toString();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_config_ItemListenerConfig.java
|
58 |
public class TitanException extends RuntimeException {
private static final long serialVersionUID = 4056436257763972423L;
/**
* @param msg Exception message
*/
public TitanException(String msg) {
super(msg);
}
/**
* @param msg Exception message
* @param cause Cause of the exception
*/
public TitanException(String msg, Throwable cause) {
super(msg, cause);
}
/**
* Constructs an exception with a generic message
*
* @param cause Cause of the exception
*/
public TitanException(Throwable cause) {
this("Exception in Titan", cause);
}
/**
* Checks whether this exception is caused by an exception of the given type.
*
* @param causeExceptionType exception type
* @return true, if this exception is caused by the given type
*/
public boolean isCausedBy(Class<?> causeExceptionType) {
return ExceptionUtil.isCausedBy(this, causeExceptionType);
}
}
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_TitanException.java
|
2,601 |
private class SymmetricCipherPacketWriter implements PacketWriter {
final Cipher cipher;
ByteBuffer packetBuffer = ByteBuffer.allocate(ioService.getSocketSendBufferSize() * IOService.KILO_BYTE);
boolean packetWritten;
SymmetricCipherPacketWriter() {
cipher = init();
}
private Cipher init() {
Cipher c;
try {
c = CipherHelper.createSymmetricWriterCipher(ioService.getSymmetricEncryptionConfig());
} catch (Exception e) {
logger.severe("Symmetric Cipher for WriteHandler cannot be initialized.", e);
CipherHelper.handleCipherException(e, connection);
throw ExceptionUtil.rethrow(e);
}
return c;
}
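// writePacket streams one encrypted packet incrementally: the total
// ciphertext size is written first, then cipher.update() consumes as much of
// the buffered packet as the socket buffer allows (in the tight case only
// about half of the smaller remaining window per pass, apparently to leave
// headroom for cipher output expansion), and doFinal() is appended once the
// whole packet has been consumed, after which the writer resets.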
public boolean writePacket(Packet packet, ByteBuffer socketBuffer) throws Exception {
if (!packetWritten) {
if (socketBuffer.remaining() < CONST_BUFFER_NO) {
return false;
}
int size = cipher.getOutputSize(packet.size());
socketBuffer.putInt(size);
if (packetBuffer.capacity() < packet.size()) {
packetBuffer = ByteBuffer.allocate(packet.size());
}
if (!packet.writeTo(packetBuffer)) {
throw new HazelcastException("Packet didn't fit into the buffer!");
}
packetBuffer.flip();
packetWritten = true;
}
if (socketBuffer.hasRemaining()) {
int outputSize = cipher.getOutputSize(packetBuffer.remaining());
if (outputSize <= socketBuffer.remaining()) {
cipher.update(packetBuffer, socketBuffer);
} else {
int min = Math.min(packetBuffer.remaining(), socketBuffer.remaining());
int len = min / 2;
if (len > 0) {
int limitOld = packetBuffer.limit();
packetBuffer.limit(packetBuffer.position() + len);
cipher.update(packetBuffer, socketBuffer);
packetBuffer.limit(limitOld);
}
}
if (!packetBuffer.hasRemaining()) {
if (socketBuffer.remaining() >= cipher.getOutputSize(0)) {
socketBuffer.put(cipher.doFinal());
packetWritten = false;
packetBuffer.clear();
return true;
}
}
}
return false;
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_nio_SocketPacketWriter.java
|
363 |
public class DeleteRepositoryRequest extends AcknowledgedRequest<DeleteRepositoryRequest> {
private String name;
DeleteRepositoryRequest() {
}
/**
* Constructs a new unregister repository request with the provided name.
*
* @param name name of the repository
*/
public DeleteRepositoryRequest(String name) {
this.name = name;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (name == null) {
validationException = addValidationError("name is missing", validationException);
}
return validationException;
}
/**
* Sets the name of the repository to unregister.
*
* @param name name of the repository
*/
public DeleteRepositoryRequest name(String name) {
this.name = name;
return this;
}
/**
* The name of the repository.
*
* @return the name of the repository
*/
public String name() {
return this.name;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
name = in.readString();
readTimeout(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(name);
writeTimeout(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_repositories_delete_DeleteRepositoryRequest.java
|
1,639 |
new Thread(new Runnable() {
@Override
public void run() {
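// Response dispatcher loop: blocks on the queue, routes each distributed
// response to the thread waiting on it, and exits cleanly on interrupt.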
while (!Thread.interrupted()) {
String senderNode = null;
ODistributedResponse message = null;
try {
message = nodeResponseQueue.take();
if (message != null) {
senderNode = message.getSenderNodeName();
dispatchResponseToThread(message);
}
} catch (InterruptedException e) {
// EXIT CURRENT THREAD
Thread.interrupted();
break;
} catch (Throwable e) {
ODistributedServerLog.error(this, manager.getLocalNodeName(), senderNode, DIRECTION.IN,
"error on reading distributed response", e, message != null ? message.getPayload() : "-");
}
}
}
}).start();
| 1no label
|
distributed_src_main_java_com_orientechnologies_orient_server_hazelcast_OHazelcastDistributedMessageService.java
|
276 |
@RunWith(HazelcastSerialClassRunner.class)
@Category(QuickTest.class)
public class ClientIdGeneratorTest {
static final String name = "test";
static HazelcastInstance hz;
static IdGenerator i;
@BeforeClass
public static void init() {
Hazelcast.newHazelcastInstance();
hz = HazelcastClient.newHazelcastClient(null);
i = hz.getIdGenerator(name);
}
@AfterClass
public static void destroy() {
hz.shutdown();
Hazelcast.shutdownAll();
}
@Test
public void testGenerator() throws Exception {
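// init() succeeds only once per generator: the first call seeds the sequence,
// a second call is rejected, and newId() continues from the seed.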
assertTrue(i.init(3569));
assertFalse(i.init(4569));
assertEquals(3570, i.newId());
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_idgenerator_ClientIdGeneratorTest.java
|
1,189 |
public class OQueryOperatorMinor extends OQueryOperatorEqualityNotNulls {
public OQueryOperatorMinor() {
super("<", 5, false);
}
@Override
@SuppressWarnings("unchecked")
protected boolean evaluateExpression(final OIdentifiable iRecord, final OSQLFilterCondition iCondition, final Object iLeft,
final Object iRight, OCommandContext iContext) {
final Object right = OType.convert(iRight, iLeft.getClass());
if (right == null)
return false;
return ((Comparable<Object>) iLeft).compareTo(right) < 0;
}
@Override
public OIndexReuseType getIndexReuseType(final Object iLeft, final Object iRight) {
if (iRight == null || iLeft == null)
return OIndexReuseType.NO_INDEX;
return OIndexReuseType.INDEX_METHOD;
}
@Override
public Object executeIndexQuery(OCommandContext iContext, OIndex<?> index, INDEX_OPERATION_TYPE iOperationType,
List<Object> keyParams, IndexResultListener resultListener, int fetchLimit) {
final OIndexDefinition indexDefinition = index.getDefinition();
final OIndexInternal<?> internalIndex = index.getInternal();
if (!internalIndex.canBeUsedInEqualityOperators() || !internalIndex.hasRangeQuerySupport())
return null;
final Object result;
if (indexDefinition.getParamCount() == 1) {
final Object key;
if (indexDefinition instanceof OIndexDefinitionMultiValue)
key = ((OIndexDefinitionMultiValue) indexDefinition).createSingleValue(keyParams.get(0));
else
key = indexDefinition.createValue(keyParams);
if (key == null)
return null;
if (INDEX_OPERATION_TYPE.COUNT.equals(iOperationType))
result = index.count(null, false, key, false, fetchLimit);
else if (resultListener != null) {
index.getValuesMinor(key, false, resultListener);
result = resultListener.getResult();
} else
result = index.getValuesMinor(key, false);
} else {
// if we have a situation like "field1 = 1 AND field2 < 2"
// then we fetch the collection whose left (included) boundary is the smallest composite key in the
// index that contains a key with value field1=1, and whose right (excluded) boundary
// is the biggest composite key in the index that contains a key with values field1=1 and field2=2.
final OCompositeIndexDefinition compositeIndexDefinition = (OCompositeIndexDefinition) indexDefinition;
final Object keyOne = compositeIndexDefinition.createSingleValue(keyParams.subList(0, keyParams.size() - 1));
if (keyOne == null)
return null;
final Object keyTwo = compositeIndexDefinition.createSingleValue(keyParams);
if (keyTwo == null)
return null;
if (INDEX_OPERATION_TYPE.COUNT.equals(iOperationType))
result = index.count(keyOne, true, keyTwo, false, fetchLimit);
else if (resultListener != null) {
index.getValuesBetween(keyOne, true, keyTwo, false, resultListener);
result = resultListener.getResult();
} else
result = index.getValuesBetween(keyOne, true, keyTwo, false);
}
updateProfiler(iContext, index, keyParams, indexDefinition);
return result;
}
@Override
public ORID getBeginRidRange(Object iLeft, Object iRight) {
return null;
}
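// For "@rid < X" conditions the scan can stop at X, so the right operand (or
// its bound parameter value) is returned as the end of the RID range.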
@Override
public ORID getEndRidRange(final Object iLeft, final Object iRight) {
if (iLeft instanceof OSQLFilterItemField && ODocumentHelper.ATTRIBUTE_RID.equals(((OSQLFilterItemField) iLeft).getRoot()))
if (iRight instanceof ORID)
return (ORID) iRight;
else {
if (iRight instanceof OSQLFilterItemParameter && ((OSQLFilterItemParameter) iRight).getValue(null, null) instanceof ORID)
return (ORID) ((OSQLFilterItemParameter) iRight).getValue(null, null);
}
return null;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_operator_OQueryOperatorMinor.java
|
174 |
@SuppressWarnings("serial")
public class OModifiableInteger extends Number implements Comparable<OModifiableInteger> {
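// A mutable int wrapper: unlike java.lang.Integer it can be incremented in
// place, avoiding reboxing when used as a counter in hot paths.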
public int value;
public OModifiableInteger() {
value = 0;
}
public OModifiableInteger(final int iValue) {
value = iValue;
}
public void setValue(final int iValue) {
value = iValue;
}
public int getValue() {
return value;
}
public void increment() {
value++;
}
public void increment(final int iValue) {
value += iValue;
}
public void decrement() {
value--;
}
public void decrement(final int iValue) {
value -= iValue;
}
public int compareTo(final OModifiableInteger anotherInteger) {
int thisVal = value;
int anotherVal = anotherInteger.value;
return (thisVal < anotherVal) ? -1 : ((thisVal == anotherVal) ? 0 : 1);
}
@Override
public byte byteValue() {
return (byte) value;
}
@Override
public short shortValue() {
return (short) value;
}
@Override
public float floatValue() {
return value;
}
@Override
public double doubleValue() {
return value;
}
@Override
public int intValue() {
return value;
}
@Override
public long longValue() {
return value;
}
public Integer toInteger() {
return Integer.valueOf(this.value);
}
@Override
public boolean equals(final Object o) {
if (o instanceof OModifiableInteger) {
return value == ((OModifiableInteger) o).value;
}
return false;
}
@Override
public int hashCode() {
return value;
}
@Override
public String toString() {
return String.valueOf(this.value);
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_types_OModifiableInteger.java
|
352 |
@RunWith(HazelcastSerialClassRunner.class)
@Category(QuickTest.class)
public class SimpleClientMapInterceptorTest {
static HazelcastInstance server1;
static HazelcastInstance server2;
static HazelcastInstance client;
static SimpleClientInterceptor interceptor;
@BeforeClass
public static void init() {
Config config = new Config();
config.getSerializationConfig().addPortableFactory(PortableHelpersFactory.ID, new PortableHelpersFactory());
server1 = Hazelcast.newHazelcastInstance(config);
server2 = Hazelcast.newHazelcastInstance(config);
ClientConfig clientConfig = new ClientConfig();
clientConfig.getSerializationConfig().addPortableFactory(PortableHelpersFactory.ID, new PortableHelpersFactory());
client = HazelcastClient.newHazelcastClient(clientConfig);
interceptor = new SimpleClientInterceptor();
}
@AfterClass
public static void destroy() {
client.shutdown();
Hazelcast.shutdownAll();
}
@Test
public void clientMapInterceptorTestIssue1238() throws InterruptedException {
final IMap<Object, Object> map = client.getMap("clientMapInterceptorTest");
String id = map.addInterceptor(interceptor);
map.put(1, "New York");
map.put(2, "Istanbul");
map.put(3, "Tokyo");
map.put(4, "London");
map.put(5, "Paris");
map.put(6, "Cairo");
map.put(7, "Hong Kong");
map.remove(1);
try {
map.remove(2);
fail("remove(2) should have been rejected by the interceptor");
} catch (Exception ignore) {
}
assertEquals(map.size(), 6);
assertEquals(map.get(1), null);
assertEquals(map.get(2), "ISTANBUL:");
assertEquals(map.get(3), "TOKYO:");
assertEquals(map.get(4), "LONDON:");
assertEquals(map.get(5), "PARIS:");
assertEquals(map.get(6), "CAIRO:");
assertEquals(map.get(7), "HONG KONG:");
map.removeInterceptor(id);
map.put(8, "Moscow");
assertEquals(map.get(8), "Moscow");
assertEquals(map.get(1), null);
assertEquals(map.get(2), "ISTANBUL");
assertEquals(map.get(3), "TOKYO");
assertEquals(map.get(4), "LONDON");
assertEquals(map.get(5), "PARIS");
assertEquals(map.get(6), "CAIRO");
assertEquals(map.get(7), "HONG KONG");
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_SimpleClientMapInterceptorTest.java
|
1,717 |
@Service("blDynamicEntityRemoteService")
@Transactional(value="blTransactionManager", rollbackFor = ServiceException.class)
public class DynamicEntityRemoteService implements DynamicEntityService, DynamicEntityRemote, ApplicationContextAware {
public static final String DEFAULTPERSISTENCEMANAGERREF = "blPersistenceManager";
private static final Log LOG = LogFactory.getLog(DynamicEntityRemoteService.class);
protected static final Map<BatchPersistencePackage, BatchDynamicResultSet> METADATA_CACHE = MapUtils.synchronizedMap(new LRUMap<BatchPersistencePackage, BatchDynamicResultSet>(100, 1000));
protected String persistenceManagerRef = DEFAULTPERSISTENCEMANAGERREF;
private ApplicationContext applicationContext;
@Resource(name="blExploitProtectionService")
protected ExploitProtectionService exploitProtectionService;
@Override
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
this.applicationContext = applicationContext;
}
protected ServiceException recreateSpecificServiceException(ServiceException e, String message, Throwable cause) {
try {
ServiceException newException;
if (cause == null) {
Constructor constructor = e.getClass().getConstructor(String.class);
newException = (ServiceException) constructor.newInstance(message);
} else {
Constructor constructor = e.getClass().getConstructor(String.class, Throwable.class);
newException = (ServiceException) constructor.newInstance(message, cause);
}
return newException;
} catch (Exception e1) {
throw new RuntimeException(e1);
}
}
@Override
public DynamicResultSet inspect(PersistencePackage persistencePackage) throws ServiceException {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
try {
PersistenceManager persistenceManager = (PersistenceManager) applicationContext.getBean(persistenceManagerRef);
persistenceManager.setTargetMode(TargetModeType.SANDBOX);
return persistenceManager.inspect(persistencePackage);
} catch (ServiceException e) {
String message = exploitProtectionService.cleanString(e.getMessage());
throw recreateSpecificServiceException(e, message, e.getCause());
} catch (Exception e) {
LOG.error("Problem inspecting results for " + ceilingEntityFullyQualifiedClassname, e);
throw new ServiceException(exploitProtectionService.cleanString("Unable to fetch results for " + ceilingEntityFullyQualifiedClassname), e);
}
}
@Override
public DynamicResultSet fetch(PersistencePackage persistencePackage, CriteriaTransferObject cto) throws ServiceException {
try {
PersistenceManager persistenceManager = (PersistenceManager) applicationContext.getBean(persistenceManagerRef);
persistenceManager.setTargetMode(TargetModeType.SANDBOX);
return persistenceManager.fetch(persistencePackage, cto);
} catch (ServiceException e) {
LOG.error("Problem fetching results for " + persistencePackage.getCeilingEntityFullyQualifiedClassname(), e);
String message = exploitProtectionService.cleanString(e.getMessage());
throw recreateSpecificServiceException(e, message, e.getCause());
}
}
protected void cleanEntity(Entity entity) throws ServiceException {
Property currentProperty = null;
try {
for (Property property : entity.getProperties()) {
currentProperty = property;
property.setRawValue(property.getValue());
property.setValue(exploitProtectionService.cleanStringWithResults(property.getValue()));
property.setUnHtmlEncodedValue(StringEscapeUtils.unescapeHtml(property.getValue()));
}
} catch (CleanStringException e) {
StringBuilder sb = new StringBuilder();
for (int j=0;j<e.getCleanResults().getNumberOfErrors();j++){
sb.append(j+1);
sb.append(") ");
sb.append((String) e.getCleanResults().getErrorMessages().get(j));
sb.append("\n");
}
sb.append("\nNote - ");
sb.append(exploitProtectionService.getAntiSamyPolicyFileLocation());
sb.append(" policy in effect. Set a new policy file to modify validation behavior/strictness.");
entity.addValidationError(currentProperty.getName(), sb.toString());
}
}
@Override
public Entity add(PersistencePackage persistencePackage) throws ServiceException {
cleanEntity(persistencePackage.getEntity());
if (persistencePackage.getEntity().isValidationFailure()) {
return persistencePackage.getEntity();
}
try {
PersistenceManager persistenceManager = (PersistenceManager) applicationContext.getBean(persistenceManagerRef);
persistenceManager.setTargetMode(TargetModeType.SANDBOX);
return persistenceManager.add(persistencePackage);
} catch (ServiceException e) {
//immediately throw validation exceptions without printing a stack trace
if (e instanceof ValidationException) {
throw e;
} else if (e.getCause() instanceof ValidationException) {
throw (ValidationException) e.getCause();
}
String message = exploitProtectionService.cleanString(e.getMessage());
LOG.error("Problem adding new " + persistencePackage.getCeilingEntityFullyQualifiedClassname(), e);
throw recreateSpecificServiceException(e, message, e.getCause());
}
}
@Override
public Entity update(PersistencePackage persistencePackage) throws ServiceException {
cleanEntity(persistencePackage.getEntity());
if (persistencePackage.getEntity().isValidationFailure()) {
return persistencePackage.getEntity();
}
try {
PersistenceManager persistenceManager = (PersistenceManager) applicationContext.getBean(persistenceManagerRef);
persistenceManager.setTargetMode(TargetModeType.SANDBOX);
return persistenceManager.update(persistencePackage);
} catch (ServiceException e) {
//immediately throw validation exceptions without printing a stack trace
if (e instanceof ValidationException) {
throw e;
} else if (e.getCause() instanceof ValidationException) {
throw (ValidationException) e.getCause();
}
LOG.error("Problem updating " + persistencePackage.getCeilingEntityFullyQualifiedClassname(), e);
String message = exploitProtectionService.cleanString(e.getMessage());
throw recreateSpecificServiceException(e, message, e.getCause());
}
}
@Override
public void remove(PersistencePackage persistencePackage) throws ServiceException {
try {
PersistenceManager persistenceManager = (PersistenceManager) applicationContext.getBean(persistenceManagerRef);
persistenceManager.setTargetMode(TargetModeType.SANDBOX);
persistenceManager.remove(persistencePackage);
} catch (ServiceException e) {
LOG.error("Problem removing " + persistencePackage.getCeilingEntityFullyQualifiedClassname(), e);
String message = exploitProtectionService.cleanString(e.getMessage());
throw recreateSpecificServiceException(e, message, e.getCause());
}
}
@Override
public String getPersistenceManagerRef() {
return persistenceManagerRef;
}
@Override
public void setPersistenceManagerRef(String persistenceManagerRef) {
this.persistenceManagerRef = persistenceManagerRef;
}
}
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_service_DynamicEntityRemoteService.java
|
1,098 |
public class OSQLFunctionDistinct extends OSQLFunctionAbstract {
public static final String NAME = "distinct";
private Set<Object> context = new LinkedHashSet<Object>();
public OSQLFunctionDistinct() {
super(NAME, 1, 1);
}
public Object execute(final OIdentifiable iCurrentRecord, Object iCurrentResult, final Object[] iParameters,
OCommandContext iContext) {
final Object value = iParameters[0];
if (value != null && !context.contains(value)) {
context.add(value);
return value;
}
return null;
}
@Override
public boolean filterResult() {
return true;
}
public String getSyntax() {
return "Syntax error: distinct(<field>)";
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_functions_coll_OSQLFunctionDistinct.java
|
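A minimal usage sketch, assuming an in-memory document database and an illustrative Person class (auto-created on first save): execute() returns the value the first time it is seen and null for duplicates, and filterResult() tells the engine to drop those nulls from the result set.
import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.sql.query.OSQLSynchQuery;
import java.util.List;

public class DistinctSketch {
    public static void main(String[] args) {
        ODatabaseDocumentTx db = new ODatabaseDocumentTx("memory:demo").create();
        try {
            new ODocument("Person").field("name", "Ann").save();
            new ODocument("Person").field("name", "Ann").save();
            new ODocument("Person").field("name", "Bob").save();
            List<ODocument> rows = db.query(
                    new OSQLSynchQuery<ODocument>("select distinct(name) as name from Person"));
            for (ODocument row : rows)
                System.out.println((String) row.field("name")); // Ann, Bob (insertion order)
        } finally {
            db.drop();
        }
    }
}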
75 |
public class OSharedResourceAdaptiveExternal extends OSharedResourceAdaptive implements OSharedResource {
public OSharedResourceAdaptiveExternal(final boolean iConcurrent, final int iTimeout, final boolean ignoreThreadInterruption) {
super(iConcurrent, iTimeout, ignoreThreadInterruption);
}
@Override
public void acquireExclusiveLock() {
super.acquireExclusiveLock();
}
public boolean tryAcquireExclusiveLock() {
return super.tryAcquireExclusiveLock();
}
@Override
public void acquireSharedLock() {
super.acquireSharedLock();
}
public boolean tryAcquireSharedLock() {
return super.tryAcquireSharedLock();
}
@Override
public void releaseExclusiveLock() {
super.releaseExclusiveLock();
}
@Override
public void releaseSharedLock() {
super.releaseSharedLock();
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_concur_resource_OSharedResourceAdaptiveExternal.java
|
2,767 |
public class NettyHttpRequest extends HttpRequest {
private final org.jboss.netty.handler.codec.http.HttpRequest request;
private final Channel channel;
private final Map<String, String> params;
private final String rawPath;
private final BytesReference content;
public NettyHttpRequest(org.jboss.netty.handler.codec.http.HttpRequest request, Channel channel) {
this.request = request;
this.channel = channel;
this.params = new HashMap<String, String>();
if (request.getContent().readable()) {
this.content = new ChannelBufferBytesReference(request.getContent());
} else {
this.content = BytesArray.EMPTY;
}
String uri = request.getUri();
int pathEndPos = uri.indexOf('?');
if (pathEndPos < 0) {
this.rawPath = uri;
} else {
this.rawPath = uri.substring(0, pathEndPos);
RestUtils.decodeQueryString(uri, pathEndPos + 1, params);
}
}
@Override
public Method method() {
HttpMethod httpMethod = request.getMethod();
if (httpMethod == HttpMethod.GET)
return Method.GET;
if (httpMethod == HttpMethod.POST)
return Method.POST;
if (httpMethod == HttpMethod.PUT)
return Method.PUT;
if (httpMethod == HttpMethod.DELETE)
return Method.DELETE;
if (httpMethod == HttpMethod.HEAD) {
return Method.HEAD;
}
if (httpMethod == HttpMethod.OPTIONS) {
return Method.OPTIONS;
}
return Method.GET;
}
@Override
public String uri() {
return request.getUri();
}
@Override
public String rawPath() {
return rawPath;
}
@Override
public Map<String, String> params() {
return params;
}
@Override
public boolean hasContent() {
return content.length() > 0;
}
@Override
public boolean contentUnsafe() {
// Netty http decoder always copies over the http content
return false;
}
@Override
public BytesReference content() {
return content;
}
/**
* Returns the remote address where this rest request channel is "connected to". The
* returned {@link SocketAddress} is supposed to be down-cast into more
* concrete type such as {@link java.net.InetSocketAddress} to retrieve
* the detailed information.
*/
@Override
public SocketAddress getRemoteAddress() {
return channel.getRemoteAddress();
}
/**
* Returns the local address where this request channel is bound to. The returned
* {@link SocketAddress} is supposed to be down-cast into more concrete
* type such as {@link java.net.InetSocketAddress} to retrieve the detailed
* information.
*/
@Override
public SocketAddress getLocalAddress() {
return channel.getLocalAddress();
}
@Override
public String header(String name) {
return request.headers().get(name);
}
@Override
public Iterable<Map.Entry<String, String>> headers() {
return request.headers().entries();
}
@Override
public boolean hasParam(String key) {
return params.containsKey(key);
}
@Override
public String param(String key) {
return params.get(key);
}
@Override
public String param(String key, String defaultValue) {
String value = params.get(key);
if (value == null) {
return defaultValue;
}
return value;
}
}
| 1no label
|
src_main_java_org_elasticsearch_http_netty_NettyHttpRequest.java
|
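A minimal sketch of the down-cast pattern the javadoc above suggests; the helper name is illustrative, and the request is assumed to come from a live channel so the address is resolved.
import org.elasticsearch.http.netty.NettyHttpRequest;
import java.net.InetSocketAddress;
import java.net.SocketAddress;

public class RemoteAddressSketch {
    // Down-cast SocketAddress to InetSocketAddress to read host and port.
    static String clientAddress(NettyHttpRequest request) {
        SocketAddress remote = request.getRemoteAddress();
        if (remote instanceof InetSocketAddress) {
            InetSocketAddress inet = (InetSocketAddress) remote;
            return inet.getAddress().getHostAddress() + ":" + inet.getPort();
        }
        return remote.toString();
    }
}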
207 |
public class MapperQueryParser extends QueryParser {
public static final ImmutableMap<String, FieldQueryExtension> fieldQueryExtensions;
static {
fieldQueryExtensions = ImmutableMap.<String, FieldQueryExtension>builder()
.put(ExistsFieldQueryExtension.NAME, new ExistsFieldQueryExtension())
.put(MissingFieldQueryExtension.NAME, new MissingFieldQueryExtension())
.build();
}
private final QueryParseContext parseContext;
private QueryParserSettings settings;
private Analyzer quoteAnalyzer;
private boolean forcedAnalyzer;
private boolean forcedQuoteAnalyzer;
private FieldMapper currentMapper;
private boolean analyzeWildcard;
private String quoteFieldSuffix;
public MapperQueryParser(QueryParseContext parseContext) {
super(Lucene.QUERYPARSER_VERSION, null, null);
this.parseContext = parseContext;
}
public MapperQueryParser(QueryParserSettings settings, QueryParseContext parseContext) {
super(Lucene.QUERYPARSER_VERSION, settings.defaultField(), settings.defaultAnalyzer());
this.parseContext = parseContext;
reset(settings);
}
public void reset(QueryParserSettings settings) {
this.settings = settings;
this.field = settings.defaultField();
if (settings.fields() != null) {
if (settings.fields.size() == 1) {
// just mark it as the default field
this.field = settings.fields().get(0);
} else {
// otherwise, we need to have the default field being null...
this.field = null;
}
}
this.forcedAnalyzer = settings.forcedAnalyzer() != null;
this.setAnalyzer(forcedAnalyzer ? settings.forcedAnalyzer() : settings.defaultAnalyzer());
if (settings.forcedQuoteAnalyzer() != null) {
this.forcedQuoteAnalyzer = true;
this.quoteAnalyzer = settings.forcedQuoteAnalyzer();
} else if (forcedAnalyzer) {
this.forcedQuoteAnalyzer = true;
this.quoteAnalyzer = settings.forcedAnalyzer();
} else {
this.forcedQuoteAnalyzer = false;
this.quoteAnalyzer = settings.defaultQuoteAnalyzer();
}
this.quoteFieldSuffix = settings.quoteFieldSuffix();
setMultiTermRewriteMethod(settings.rewriteMethod());
setEnablePositionIncrements(settings.enablePositionIncrements());
setAutoGeneratePhraseQueries(settings.autoGeneratePhraseQueries());
setAllowLeadingWildcard(settings.allowLeadingWildcard());
setLowercaseExpandedTerms(settings.lowercaseExpandedTerms());
setPhraseSlop(settings.phraseSlop());
setDefaultOperator(settings.defaultOperator());
setFuzzyMinSim(settings.fuzzyMinSim());
setFuzzyPrefixLength(settings.fuzzyPrefixLength());
this.analyzeWildcard = settings.analyzeWildcard();
}
/**
* We override this one so we can get the fuzzy part to be treated as string, so people can do: "age:10~5" or "timestamp:2012-10-10~5d"
*/
@Override
Query handleBareFuzzy(String qfield, Token fuzzySlop, String termImage) throws ParseException {
if (fuzzySlop.image.length() == 1) {
return getFuzzyQuery(qfield, termImage, Float.toString(fuzzyMinSim));
}
return getFuzzyQuery(qfield, termImage, fuzzySlop.image.substring(1));
}
@Override
protected Query newTermQuery(Term term) {
if (currentMapper != null) {
Query termQuery = currentMapper.queryStringTermQuery(term);
if (termQuery != null) {
return termQuery;
}
}
return super.newTermQuery(term);
}
@Override
protected Query newMatchAllDocsQuery() {
return Queries.newMatchAllQuery();
}
@Override
public Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException {
FieldQueryExtension fieldQueryExtension = fieldQueryExtensions.get(field);
if (fieldQueryExtension != null) {
return fieldQueryExtension.query(parseContext, queryText);
}
Collection<String> fields = extractMultiFields(field);
if (fields != null) {
if (fields.size() == 1) {
return getFieldQuerySingle(fields.iterator().next(), queryText, quoted);
}
if (settings.useDisMax()) {
DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
boolean added = false;
for (String mField : fields) {
Query q = getFieldQuerySingle(mField, queryText, quoted);
if (q != null) {
added = true;
applyBoost(mField, q);
disMaxQuery.add(q);
}
}
if (!added) {
return null;
}
return disMaxQuery;
} else {
List<BooleanClause> clauses = new ArrayList<BooleanClause>();
for (String mField : fields) {
Query q = getFieldQuerySingle(mField, queryText, quoted);
if (q != null) {
applyBoost(mField, q);
clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
}
}
if (clauses.size() == 0) // happens for stopwords
return null;
return getBooleanQuery(clauses, true);
}
} else {
return getFieldQuerySingle(field, queryText, quoted);
}
}
private Query getFieldQuerySingle(String field, String queryText, boolean quoted) throws ParseException {
if (!quoted && queryText.length() > 1) {
if (queryText.charAt(0) == '>') {
if (queryText.length() > 2) {
if (queryText.charAt(1) == '=') {
return getRangeQuerySingle(field, queryText.substring(2), null, true, true);
}
}
return getRangeQuerySingle(field, queryText.substring(1), null, false, true);
} else if (queryText.charAt(0) == '<') {
if (queryText.length() > 2) {
if (queryText.charAt(1) == '=') {
return getRangeQuerySingle(field, null, queryText.substring(2), true, true);
}
}
return getRangeQuerySingle(field, null, queryText.substring(1), true, false);
}
}
currentMapper = null;
Analyzer oldAnalyzer = getAnalyzer();
try {
MapperService.SmartNameFieldMappers fieldMappers = null;
if (quoted) {
setAnalyzer(quoteAnalyzer);
if (quoteFieldSuffix != null) {
fieldMappers = parseContext.smartFieldMappers(field + quoteFieldSuffix);
}
}
if (fieldMappers == null) {
fieldMappers = parseContext.smartFieldMappers(field);
}
if (fieldMappers != null) {
if (quoted) {
if (!forcedQuoteAnalyzer) {
setAnalyzer(fieldMappers.searchQuoteAnalyzer());
}
} else {
if (!forcedAnalyzer) {
setAnalyzer(fieldMappers.searchAnalyzer());
}
}
currentMapper = fieldMappers.fieldMappers().mapper();
if (currentMapper != null) {
Query query = null;
if (currentMapper.useTermQueryWithQueryString()) {
try {
if (fieldMappers.explicitTypeInNameWithDocMapper()) {
String[] previousTypes = QueryParseContext.setTypesWithPrevious(new String[]{fieldMappers.docMapper().type()});
try {
query = currentMapper.termQuery(queryText, parseContext);
} finally {
QueryParseContext.setTypes(previousTypes);
}
} else {
query = currentMapper.termQuery(queryText, parseContext);
}
} catch (RuntimeException e) {
if (settings.lenient()) {
return null;
} else {
throw e;
}
}
}
if (query == null) {
query = super.getFieldQuery(currentMapper.names().indexName(), queryText, quoted);
}
return wrapSmartNameQuery(query, fieldMappers, parseContext);
}
}
return super.getFieldQuery(field, queryText, quoted);
} finally {
setAnalyzer(oldAnalyzer);
}
}
@Override
protected Query getFieldQuery(String field, String queryText, int slop) throws ParseException {
Collection<String> fields = extractMultiFields(field);
if (fields != null) {
if (settings.useDisMax()) {
DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
boolean added = false;
for (String mField : fields) {
Query q = super.getFieldQuery(mField, queryText, slop);
if (q != null) {
added = true;
applyBoost(mField, q);
applySlop(q, slop);
disMaxQuery.add(q);
}
}
if (!added) {
return null;
}
return disMaxQuery;
} else {
List<BooleanClause> clauses = new ArrayList<BooleanClause>();
for (String mField : fields) {
Query q = super.getFieldQuery(mField, queryText, slop);
if (q != null) {
applyBoost(mField, q);
applySlop(q, slop);
clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
}
}
if (clauses.size() == 0) // happens for stopwords
return null;
return getBooleanQuery(clauses, true);
}
} else {
return super.getFieldQuery(field, queryText, slop);
}
}
@Override
protected Query getRangeQuery(String field, String part1, String part2, boolean startInclusive, boolean endInclusive) throws ParseException {
if ("*".equals(part1)) {
part1 = null;
}
if ("*".equals(part2)) {
part2 = null;
}
Collection<String> fields = extractMultiFields(field);
if (fields == null) {
return getRangeQuerySingle(field, part1, part2, startInclusive, endInclusive);
}
if (fields.size() == 1) {
return getRangeQuerySingle(fields.iterator().next(), part1, part2, startInclusive, endInclusive);
}
if (settings.useDisMax()) {
DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
boolean added = false;
for (String mField : fields) {
Query q = getRangeQuerySingle(mField, part1, part2, startInclusive, endInclusive);
if (q != null) {
added = true;
applyBoost(mField, q);
disMaxQuery.add(q);
}
}
if (!added) {
return null;
}
return disMaxQuery;
} else {
List<BooleanClause> clauses = new ArrayList<BooleanClause>();
for (String mField : fields) {
Query q = getRangeQuerySingle(mField, part1, part2, startInclusive, endInclusive);
if (q != null) {
applyBoost(mField, q);
clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
}
}
if (clauses.size() == 0) // happens for stopwords
return null;
return getBooleanQuery(clauses, true);
}
}
private Query getRangeQuerySingle(String field, String part1, String part2, boolean startInclusive, boolean endInclusive) {
currentMapper = null;
MapperService.SmartNameFieldMappers fieldMappers = parseContext.smartFieldMappers(field);
if (fieldMappers != null) {
currentMapper = fieldMappers.fieldMappers().mapper();
if (currentMapper != null) {
if (lowercaseExpandedTerms && !currentMapper.isNumeric()) {
part1 = part1 == null ? null : part1.toLowerCase(locale);
part2 = part2 == null ? null : part2.toLowerCase(locale);
}
try {
Query rangeQuery = currentMapper.rangeQuery(part1, part2, startInclusive, endInclusive, parseContext);
return wrapSmartNameQuery(rangeQuery, fieldMappers, parseContext);
} catch (RuntimeException e) {
if (settings.lenient()) {
return null;
}
throw e;
}
}
}
return newRangeQuery(field, part1, part2, startInclusive, endInclusive);
}
protected Query getFuzzyQuery(String field, String termStr, String minSimilarity) throws ParseException {
if (lowercaseExpandedTerms) {
termStr = termStr.toLowerCase(locale);
}
Collection<String> fields = extractMultiFields(field);
if (fields != null) {
if (fields.size() == 1) {
return getFuzzyQuerySingle(fields.iterator().next(), termStr, minSimilarity);
}
if (settings.useDisMax()) {
DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
boolean added = false;
for (String mField : fields) {
Query q = getFuzzyQuerySingle(mField, termStr, minSimilarity);
if (q != null) {
added = true;
applyBoost(mField, q);
disMaxQuery.add(q);
}
}
if (!added) {
return null;
}
return disMaxQuery;
} else {
List<BooleanClause> clauses = new ArrayList<BooleanClause>();
for (String mField : fields) {
Query q = getFuzzyQuerySingle(mField, termStr, minSimilarity);
if (q != null) {
applyBoost(mField, q);
clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
}
}
if (clauses.size() == 0) // happens for stopwords
return null;
return getBooleanQuery(clauses, true);
}
} else {
return getFuzzyQuerySingle(field, termStr, minSimilarity);
}
}
private Query getFuzzyQuerySingle(String field, String termStr, String minSimilarity) throws ParseException {
currentMapper = null;
MapperService.SmartNameFieldMappers fieldMappers = parseContext.smartFieldMappers(field);
if (fieldMappers != null) {
currentMapper = fieldMappers.fieldMappers().mapper();
if (currentMapper != null) {
try {
//LUCENE 4 UPGRADE I disabled transpositions here by default - maybe this needs to be changed
Query fuzzyQuery = currentMapper.fuzzyQuery(termStr, Fuzziness.build(minSimilarity), fuzzyPrefixLength, settings.fuzzyMaxExpansions(), false);
return wrapSmartNameQuery(fuzzyQuery, fieldMappers, parseContext);
} catch (RuntimeException e) {
if (settings.lenient()) {
return null;
}
throw e;
}
}
}
return super.getFuzzyQuery(field, termStr, Float.parseFloat(minSimilarity));
}
@Override
protected Query newFuzzyQuery(Term term, float minimumSimilarity, int prefixLength) {
String text = term.text();
int numEdits = FuzzyQuery.floatToEdits(minimumSimilarity, text.codePointCount(0, text.length()));
//LUCENE 4 UPGRADE I disabled transpositions here by default - maybe this needs to be changed
FuzzyQuery query = new FuzzyQuery(term, numEdits, prefixLength, settings.fuzzyMaxExpansions(), false);
QueryParsers.setRewriteMethod(query, settings.fuzzyRewriteMethod());
return query;
}
@Override
protected Query getPrefixQuery(String field, String termStr) throws ParseException {
if (lowercaseExpandedTerms) {
termStr = termStr.toLowerCase(locale);
}
Collection<String> fields = extractMultiFields(field);
if (fields != null) {
if (fields.size() == 1) {
return getPrefixQuerySingle(fields.iterator().next(), termStr);
}
if (settings.useDisMax()) {
DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
boolean added = false;
for (String mField : fields) {
Query q = getPrefixQuerySingle(mField, termStr);
if (q != null) {
added = true;
applyBoost(mField, q);
disMaxQuery.add(q);
}
}
if (!added) {
return null;
}
return disMaxQuery;
} else {
List<BooleanClause> clauses = new ArrayList<BooleanClause>();
for (String mField : fields) {
Query q = getPrefixQuerySingle(mField, termStr);
if (q != null) {
applyBoost(mField, q);
clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
}
}
if (clauses.size() == 0) // happens for stopwords
return null;
return getBooleanQuery(clauses, true);
}
} else {
return getPrefixQuerySingle(field, termStr);
}
}
private Query getPrefixQuerySingle(String field, String termStr) throws ParseException {
currentMapper = null;
Analyzer oldAnalyzer = getAnalyzer();
try {
MapperService.SmartNameFieldMappers fieldMappers = parseContext.smartFieldMappers(field);
if (fieldMappers != null) {
if (!forcedAnalyzer) {
setAnalyzer(fieldMappers.searchAnalyzer());
}
currentMapper = fieldMappers.fieldMappers().mapper();
if (currentMapper != null) {
Query query = null;
if (currentMapper.useTermQueryWithQueryString()) {
if (fieldMappers.explicitTypeInNameWithDocMapper()) {
String[] previousTypes = QueryParseContext.setTypesWithPrevious(new String[]{fieldMappers.docMapper().type()});
try {
query = currentMapper.prefixQuery(termStr, multiTermRewriteMethod, parseContext);
} finally {
QueryParseContext.setTypes(previousTypes);
}
} else {
query = currentMapper.prefixQuery(termStr, multiTermRewriteMethod, parseContext);
}
}
if (query == null) {
query = getPossiblyAnalyzedPrefixQuery(currentMapper.names().indexName(), termStr);
}
return wrapSmartNameQuery(query, fieldMappers, parseContext);
}
}
return getPossiblyAnalyzedPrefixQuery(field, termStr);
} catch (RuntimeException e) {
if (settings.lenient()) {
return null;
}
throw e;
} finally {
setAnalyzer(oldAnalyzer);
}
}
private Query getPossiblyAnalyzedPrefixQuery(String field, String termStr) throws ParseException {
if (!analyzeWildcard) {
return super.getPrefixQuery(field, termStr);
}
// get Analyzer from superclass and tokenize the term
TokenStream source;
try {
source = getAnalyzer().tokenStream(field, termStr);
source.reset();
} catch (IOException e) {
return super.getPrefixQuery(field, termStr);
}
List<String> tlist = new ArrayList<String>();
CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
while (true) {
try {
if (!source.incrementToken()) break;
} catch (IOException e) {
break;
}
tlist.add(termAtt.toString());
}
try {
source.close();
} catch (IOException e) {
// ignore
}
if (tlist.size() == 1) {
return super.getPrefixQuery(field, tlist.get(0));
} else {
// build a boolean query with prefix on each one...
List<BooleanClause> clauses = new ArrayList<BooleanClause>();
for (String token : tlist) {
clauses.add(new BooleanClause(super.getPrefixQuery(field, token), BooleanClause.Occur.SHOULD));
}
return getBooleanQuery(clauses, true);
//return super.getPrefixQuery(field, termStr);
/* this means that the analyzer used either added or consumed
* (common for a stemmer) tokens, and we can't build a PrefixQuery */
// throw new ParseException("Cannot build PrefixQuery with analyzer "
// + getAnalyzer().getClass()
// + (tlist.size() > 1 ? " - token(s) added" : " - token consumed"));
}
}
@Override
protected Query getWildcardQuery(String field, String termStr) throws ParseException {
if (termStr.equals("*")) {
// we want to optimize for match all query for the "*:*", and "*" cases
if ("*".equals(field) || Objects.equal(field, this.field)) {
String actualField = field;
if (actualField == null) {
actualField = this.field;
}
if (actualField == null) {
return newMatchAllDocsQuery();
}
if ("*".equals(actualField) || "_all".equals(actualField)) {
return newMatchAllDocsQuery();
}
// effectively, we check if a field exists or not
return fieldQueryExtensions.get(ExistsFieldQueryExtension.NAME).query(parseContext, actualField);
}
}
if (lowercaseExpandedTerms) {
termStr = termStr.toLowerCase(locale);
}
Collection<String> fields = extractMultiFields(field);
if (fields != null) {
if (fields.size() == 1) {
return getWildcardQuerySingle(fields.iterator().next(), termStr);
}
if (settings.useDisMax()) {
DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
boolean added = false;
for (String mField : fields) {
Query q = getWildcardQuerySingle(mField, termStr);
if (q != null) {
added = true;
applyBoost(mField, q);
disMaxQuery.add(q);
}
}
if (!added) {
return null;
}
return disMaxQuery;
} else {
List<BooleanClause> clauses = new ArrayList<BooleanClause>();
for (String mField : fields) {
Query q = getWildcardQuerySingle(mField, termStr);
if (q != null) {
applyBoost(mField, q);
clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
}
}
if (clauses.size() == 0) // happens for stopwords
return null;
return getBooleanQuery(clauses, true);
}
} else {
return getWildcardQuerySingle(field, termStr);
}
}
private Query getWildcardQuerySingle(String field, String termStr) throws ParseException {
String indexedNameField = field;
currentMapper = null;
Analyzer oldAnalyzer = getAnalyzer();
try {
MapperService.SmartNameFieldMappers fieldMappers = parseContext.smartFieldMappers(field);
if (fieldMappers != null) {
if (!forcedAnalyzer) {
setAnalyzer(fieldMappers.searchAnalyzer());
}
currentMapper = fieldMappers.fieldMappers().mapper();
if (currentMapper != null) {
indexedNameField = currentMapper.names().indexName();
}
return wrapSmartNameQuery(getPossiblyAnalyzedWildcardQuery(indexedNameField, termStr), fieldMappers, parseContext);
}
return getPossiblyAnalyzedWildcardQuery(indexedNameField, termStr);
} catch (RuntimeException e) {
if (settings.lenient()) {
return null;
}
throw e;
} finally {
setAnalyzer(oldAnalyzer);
}
}
private Query getPossiblyAnalyzedWildcardQuery(String field, String termStr) throws ParseException {
if (!analyzeWildcard) {
return super.getWildcardQuery(field, termStr);
}
boolean isWithinToken = (!termStr.startsWith("?") && !termStr.startsWith("*"));
StringBuilder aggStr = new StringBuilder();
StringBuilder tmp = new StringBuilder();
for (int i = 0; i < termStr.length(); i++) {
char c = termStr.charAt(i);
if (c == '?' || c == '*') {
if (isWithinToken) {
try {
TokenStream source = getAnalyzer().tokenStream(field, tmp.toString());
source.reset();
CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
if (source.incrementToken()) {
String term = termAtt.toString();
if (term.length() == 0) {
// no tokens, just use what we have now
aggStr.append(tmp);
} else {
aggStr.append(term);
}
} else {
// no tokens, just use what we have now
aggStr.append(tmp);
}
source.close();
} catch (IOException e) {
aggStr.append(tmp);
}
tmp.setLength(0);
}
isWithinToken = false;
aggStr.append(c);
} else {
tmp.append(c);
isWithinToken = true;
}
}
if (isWithinToken) {
try {
TokenStream source = getAnalyzer().tokenStream(field, tmp.toString());
source.reset();
CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
if (source.incrementToken()) {
String term = termAtt.toString();
if (term.length() == 0) {
// no tokens, just use what we have now
aggStr.append(tmp);
} else {
aggStr.append(term);
}
} else {
// no tokens, just use what we have now
aggStr.append(tmp);
}
source.close();
} catch (IOException e) {
aggStr.append(tmp);
}
}
return super.getWildcardQuery(field, aggStr.toString());
}
@Override
protected Query getRegexpQuery(String field, String termStr) throws ParseException {
if (lowercaseExpandedTerms) {
termStr = termStr.toLowerCase(locale);
}
Collection<String> fields = extractMultiFields(field);
if (fields != null) {
if (fields.size() == 1) {
return getRegexpQuerySingle(fields.iterator().next(), termStr);
}
if (settings.useDisMax()) {
DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(settings.tieBreaker());
boolean added = false;
for (String mField : fields) {
Query q = getRegexpQuerySingle(mField, termStr);
if (q != null) {
added = true;
applyBoost(mField, q);
disMaxQuery.add(q);
}
}
if (!added) {
return null;
}
return disMaxQuery;
} else {
List<BooleanClause> clauses = new ArrayList<BooleanClause>();
for (String mField : fields) {
Query q = getRegexpQuerySingle(mField, termStr);
if (q != null) {
applyBoost(mField, q);
clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
}
}
if (clauses.size() == 0) // happens for stopwords
return null;
return getBooleanQuery(clauses, true);
}
} else {
return getRegexpQuerySingle(field, termStr);
}
}
private Query getRegexpQuerySingle(String field, String termStr) throws ParseException {
currentMapper = null;
Analyzer oldAnalyzer = getAnalyzer();
try {
MapperService.SmartNameFieldMappers fieldMappers = parseContext.smartFieldMappers(field);
if (fieldMappers != null) {
if (!forcedAnalyzer) {
setAnalyzer(fieldMappers.searchAnalyzer());
}
currentMapper = fieldMappers.fieldMappers().mapper();
if (currentMapper != null) {
Query query = null;
if (currentMapper.useTermQueryWithQueryString()) {
if (fieldMappers.explicitTypeInNameWithDocMapper()) {
String[] previousTypes = QueryParseContext.setTypesWithPrevious(new String[]{fieldMappers.docMapper().type()});
try {
query = currentMapper.regexpQuery(termStr, RegExp.ALL, multiTermRewriteMethod, parseContext);
} finally {
QueryParseContext.setTypes(previousTypes);
}
} else {
query = currentMapper.regexpQuery(termStr, RegExp.ALL, multiTermRewriteMethod, parseContext);
}
}
if (query == null) {
query = super.getRegexpQuery(field, termStr);
}
return wrapSmartNameQuery(query, fieldMappers, parseContext);
}
}
return super.getRegexpQuery(field, termStr);
} catch (RuntimeException e) {
if (settings.lenient()) {
return null;
}
throw e;
} finally {
setAnalyzer(oldAnalyzer);
}
}
@Override
protected Query getBooleanQuery(List<BooleanClause> clauses, boolean disableCoord) throws ParseException {
Query q = super.getBooleanQuery(clauses, disableCoord);
if (q == null) {
return null;
}
return optimizeQuery(fixNegativeQueryIfNeeded(q));
}
private void applyBoost(String field, Query q) {
if (settings.boosts() != null) {
float boost = 1f;
if (settings.boosts().containsKey(field)) {
boost = settings.boosts().lget();
}
q.setBoost(boost);
}
}
private void applySlop(Query q, int slop) {
if (q instanceof XFilteredQuery) {
applySlop(((XFilteredQuery)q).getQuery(), slop);
}
if (q instanceof PhraseQuery) {
((PhraseQuery) q).setSlop(slop);
} else if (q instanceof MultiPhraseQuery) {
((MultiPhraseQuery) q).setSlop(slop);
}
}
private Collection<String> extractMultiFields(String field) {
Collection<String> fields = null;
if (field != null) {
fields = parseContext.simpleMatchToIndexNames(field);
} else {
fields = settings.fields();
}
return fields;
}
public Query parse(String query) throws ParseException {
if (query.trim().isEmpty()) {
// if the query string is empty we return no docs / empty result
// the behavior is simple to change in the client if all docs is required
// or a default query
return new MatchNoDocsQuery();
}
return super.parse(query);
}
}
| 0true
|
src_main_java_org_apache_lucene_queryparser_classic_MapperQueryParser.java
|
70 |
public interface OSharedContainer {
public boolean existsResource(final String iName);
public <T> T removeResource(final String iName);
public <T> T getResource(final String iName, final Callable<T> iCallback);
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_concur_resource_OSharedContainer.java
|
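A minimal sketch of one way to satisfy this contract, assuming a synchronized map-backed registry where getResource lazily creates a resource on first access; this is an illustration, not OrientDB's actual implementation.
import com.orientechnologies.common.concur.resource.OSharedContainer;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Callable;

public class SimpleSharedContainer implements OSharedContainer {
    private final Map<String, Object> resources = new HashMap<String, Object>();

    public synchronized boolean existsResource(final String iName) {
        return resources.containsKey(iName);
    }

    @SuppressWarnings("unchecked")
    public synchronized <T> T removeResource(final String iName) {
        return (T) resources.remove(iName);
    }

    @SuppressWarnings("unchecked")
    public synchronized <T> T getResource(final String iName, final Callable<T> iCallback) {
        Object value = resources.get(iName);
        if (value == null) {
            try {
                value = iCallback.call(); // lazily create on first access
            } catch (Exception e) {
                throw new RuntimeException("Error on creation of shared resource " + iName, e);
            }
            resources.put(iName, value);
        }
        return (T) value;
    }
}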
1,274 |
return nodesService.execute(new TransportClientNodesService.NodeCallback<ActionFuture<Response>>() {
@Override
public ActionFuture<Response> doWithNode(DiscoveryNode node) throws ElasticsearchException {
return proxy.execute(node, request);
}
});
| 1no label
|
src_main_java_org_elasticsearch_client_transport_support_InternalTransportIndicesAdminClient.java
|
663 |
public class TransportValidateQueryAction extends TransportBroadcastOperationAction<ValidateQueryRequest, ValidateQueryResponse, ShardValidateQueryRequest, ShardValidateQueryResponse> {
private final IndicesService indicesService;
private final ScriptService scriptService;
private final CacheRecycler cacheRecycler;
private final PageCacheRecycler pageCacheRecycler;
@Inject
public TransportValidateQueryAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService, ScriptService scriptService, CacheRecycler cacheRecycler, PageCacheRecycler pageCacheRecycler) {
super(settings, threadPool, clusterService, transportService);
this.indicesService = indicesService;
this.scriptService = scriptService;
this.cacheRecycler = cacheRecycler;
this.pageCacheRecycler = pageCacheRecycler;
}
@Override
protected void doExecute(ValidateQueryRequest request, ActionListener<ValidateQueryResponse> listener) {
request.nowInMillis = System.currentTimeMillis();
super.doExecute(request, listener);
}
@Override
protected String executor() {
return ThreadPool.Names.SEARCH;
}
@Override
protected String transportAction() {
return ValidateQueryAction.NAME;
}
@Override
protected ValidateQueryRequest newRequest() {
return new ValidateQueryRequest();
}
@Override
protected ShardValidateQueryRequest newShardRequest() {
return new ShardValidateQueryRequest();
}
@Override
protected ShardValidateQueryRequest newShardRequest(ShardRouting shard, ValidateQueryRequest request) {
String[] filteringAliases = clusterService.state().metaData().filteringAliases(shard.index(), request.indices());
return new ShardValidateQueryRequest(shard.index(), shard.id(), filteringAliases, request);
}
@Override
protected ShardValidateQueryResponse newShardResponse() {
return new ShardValidateQueryResponse();
}
@Override
protected GroupShardsIterator shards(ClusterState clusterState, ValidateQueryRequest request, String[] concreteIndices) {
// Hard-code routing to limit request to a single shard, but still, randomize it...
Map<String, Set<String>> routingMap = clusterState.metaData().resolveSearchRouting(Integer.toString(ThreadLocalRandom.current().nextInt(1000)), request.indices());
return clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, routingMap, "_local");
}
@Override
protected ClusterBlockException checkGlobalBlock(ClusterState state, ValidateQueryRequest request) {
return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
}
@Override
protected ClusterBlockException checkRequestBlock(ClusterState state, ValidateQueryRequest countRequest, String[] concreteIndices) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices);
}
@Override
protected ValidateQueryResponse newResponse(ValidateQueryRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
int successfulShards = 0;
int failedShards = 0;
boolean valid = true;
List<ShardOperationFailedException> shardFailures = null;
List<QueryExplanation> queryExplanations = null;
for (int i = 0; i < shardsResponses.length(); i++) {
Object shardResponse = shardsResponses.get(i);
if (shardResponse == null) {
// simply ignore non-active shards
} else if (shardResponse instanceof BroadcastShardOperationFailedException) {
failedShards++;
if (shardFailures == null) {
shardFailures = newArrayList();
}
shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
} else {
ShardValidateQueryResponse validateQueryResponse = (ShardValidateQueryResponse) shardResponse;
valid = valid && validateQueryResponse.isValid();
if (request.explain()) {
if (queryExplanations == null) {
queryExplanations = newArrayList();
}
queryExplanations.add(new QueryExplanation(
validateQueryResponse.getIndex(),
validateQueryResponse.isValid(),
validateQueryResponse.getExplanation(),
validateQueryResponse.getError()
));
}
successfulShards++;
}
}
return new ValidateQueryResponse(valid, queryExplanations, shardsResponses.length(), successfulShards, failedShards, shardFailures);
}
@Override
protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) throws ElasticsearchException {
IndexQueryParserService queryParserService = indicesService.indexServiceSafe(request.index()).queryParserService();
IndexService indexService = indicesService.indexServiceSafe(request.index());
IndexShard indexShard = indexService.shardSafe(request.shardId());
boolean valid;
String explanation = null;
String error = null;
if (request.source().length() == 0) {
valid = true;
} else {
SearchContext.setCurrent(new DefaultSearchContext(0,
new ShardSearchRequest().types(request.types()).nowInMillis(request.nowInMillis()),
null, indexShard.acquireSearcher("validate_query"), indexService, indexShard,
scriptService, cacheRecycler, pageCacheRecycler));
try {
ParsedQuery parsedQuery = queryParserService.parseQuery(request.source());
valid = true;
if (request.explain()) {
explanation = parsedQuery.query().toString();
}
} catch (QueryParsingException e) {
valid = false;
error = e.getDetailedMessage();
} catch (AssertionError e) {
valid = false;
error = e.getMessage();
} finally {
SearchContext.current().release();
SearchContext.removeCurrent();
}
}
return new ShardValidateQueryResponse(request.index(), request.shardId(), valid, explanation, error);
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_admin_indices_validate_query_TransportValidateQueryAction.java
|
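A minimal client-side sketch of driving this action, assuming an already-connected Client; the index, field, and value names are illustrative only.
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilders;

public class ValidateSketch {
    static boolean isQueryValid(Client client) {
        ValidateQueryResponse resp = client.admin().indices()
                .prepareValidateQuery("my_index")
                .setQuery(QueryBuilders.termQuery("user", "kimchy"))
                .setExplain(true) // ask each shard for the rewritten query string
                .execute().actionGet();
        return resp.isValid();
    }
}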
259 |
@Entity
@Table(name = "BLC_EMAIL_TRACKING")
public class EmailTrackingImpl implements EmailTracking {
/** The Constant serialVersionUID. */
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "EmailTrackingId")
@GenericGenerator(
name="EmailTrackingId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="EmailTrackingImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.common.email.domain.EmailTrackingImpl")
}
)
@Column(name = "EMAIL_TRACKING_ID")
protected Long id;
@Column(name = "EMAIL_ADDRESS")
@Index(name="EMAILTRACKING_INDEX", columnNames={"EMAIL_ADDRESS"})
protected String emailAddress;
@Column(name = "DATE_SENT")
protected Date dateSent;
@Column(name = "TYPE")
protected String type;
@OneToMany(mappedBy = "emailTracking", targetEntity = EmailTrackingClicksImpl.class)
protected Set<EmailTrackingClicks> emailTrackingClicks = new HashSet<EmailTrackingClicks>();
@OneToMany(mappedBy = "emailTracking", targetEntity = EmailTrackingOpensImpl.class)
protected Set<EmailTrackingOpens> emailTrackingOpens = new HashSet<EmailTrackingOpens>();
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTracking#getId()
*/
@Override
public Long getId() {
return id;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTracking#setId(java.lang.Long)
*/
@Override
public void setId(Long id) {
this.id = id;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTracking#getEmailAddress()
*/
@Override
public String getEmailAddress() {
return emailAddress;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTracking#setEmailAddress(java.lang.String)
*/
@Override
public void setEmailAddress(String emailAddress) {
this.emailAddress = emailAddress;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTracking#getDateSent()
*/
@Override
public Date getDateSent() {
return dateSent;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTracking#setDateSent(java.util.Date)
*/
@Override
public void setDateSent(Date dateSent) {
this.dateSent = dateSent;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTracking#getType()
*/
@Override
public String getType() {
return type;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTracking#setType(java.lang.String)
*/
@Override
public void setType(String type) {
this.type = type;
}
/**
* @return the emailTrackingClicks
*/
public Set<EmailTrackingClicks> getEmailTrackingClicks() {
return emailTrackingClicks;
}
/**
* @param emailTrackingClicks the emailTrackingClicks to set
*/
public void setEmailTrackingClicks(Set<EmailTrackingClicks> emailTrackingClicks) {
this.emailTrackingClicks = emailTrackingClicks;
}
/**
* @return the emailTrackingOpens
*/
public Set<EmailTrackingOpens> getEmailTrackingOpens() {
return emailTrackingOpens;
}
/**
* @param emailTrackingOpens the emailTrackingOpens to set
*/
public void setEmailTrackingOpens(Set<EmailTrackingOpens> emailTrackingOpens) {
this.emailTrackingOpens = emailTrackingOpens;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((dateSent == null) ? 0 : dateSent.hashCode());
result = prime * result + ((emailAddress == null) ? 0 : emailAddress.hashCode());
result = prime * result + ((emailTrackingClicks == null) ? 0 : emailTrackingClicks.hashCode());
result = prime * result + ((emailTrackingOpens == null) ? 0 : emailTrackingOpens.hashCode());
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
EmailTrackingImpl other = (EmailTrackingImpl) obj;
if (id != null && other.id != null) {
return id.equals(other.id);
}
if (dateSent == null) {
if (other.dateSent != null)
return false;
} else if (!dateSent.equals(other.dateSent))
return false;
if (emailAddress == null) {
if (other.emailAddress != null)
return false;
} else if (!emailAddress.equals(other.emailAddress))
return false;
if (emailTrackingClicks == null) {
if (other.emailTrackingClicks != null)
return false;
} else if (!emailTrackingClicks.equals(other.emailTrackingClicks))
return false;
if (emailTrackingOpens == null) {
if (other.emailTrackingOpens != null)
return false;
} else if (!emailTrackingOpens.equals(other.emailTrackingOpens))
return false;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1no label
|
common_src_main_java_org_broadleafcommerce_common_email_domain_EmailTrackingImpl.java
|
68 |
public class StorageType implements Serializable {
private static final long serialVersionUID = 1L;
private static final Map<String, StorageType> TYPES = new HashMap<String, StorageType>();
public static final StorageType DATABASE = new StorageType("DATABASE", "Database");
public static final StorageType FILESYSTEM = new StorageType("FILESYSTEM", "FileSystem");
public static StorageType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public StorageType() {
//do nothing
}
public StorageType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
private void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
StorageType other = (StorageType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1no label
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_field_type_StorageType.java
|
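A minimal sketch of the extensible "type-safe enum" pattern above: constructing an instance self-registers it in the static TYPES map, so getInstance resolves both built-ins and later additions. The S3 type is a hypothetical extension.
import org.broadleafcommerce.cms.field.type.StorageType;

public class StorageTypeSketch {
    public static void main(String[] args) {
        // Built-in lookup resolves through the static registry.
        System.out.println(StorageType.getInstance("DATABASE").getFriendlyType()); // Database

        // Modules can extend the "enum" at runtime; "S3" is a hypothetical addition.
        StorageType s3 = new StorageType("S3", "Amazon S3");
        System.out.println(StorageType.getInstance("S3") == s3); // true
    }
}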
280 |
public class OCommandExecutorScript extends OCommandExecutorAbstract {
protected OCommandScript request;
public OCommandExecutorScript() {
}
@SuppressWarnings("unchecked")
public OCommandExecutorScript parse(final OCommandRequest iRequest) {
request = (OCommandScript) iRequest;
return this;
}
public Object execute(final Map<Object, Object> iArgs) {
return executeInContext(context, iArgs);
}
public Object executeInContext(final OCommandContext iContext, final Map<Object, Object> iArgs) {
final String language = request.getLanguage();
parserText = request.getText();
ODatabaseRecord db = ODatabaseRecordThreadLocal.INSTANCE.getIfDefined();
if (db != null && !(db instanceof ODatabaseRecordTx))
db = db.getUnderlying();
final OScriptManager scriptManager = Orient.instance().getScriptManager();
CompiledScript compiledScript = request.getCompiledScript();
if (compiledScript == null) {
ScriptEngine scriptEngine = scriptManager.getEngine(language);
// COMPILE FUNCTION LIBRARY
String lib = scriptManager.getLibrary(db, language);
if (lib == null)
lib = "";
parserText = lib + parserText;
Compilable c = (Compilable) scriptEngine;
try {
compiledScript = c.compile(parserText);
} catch (ScriptException e) {
scriptManager.getErrorMessage(e, parserText);
}
request.setCompiledScript(compiledScript);
}
final Bindings binding = scriptManager.bind(compiledScript.getEngine().createBindings(), (ODatabaseRecordTx) db, iContext,
iArgs);
try {
return compiledScript.eval(binding);
} catch (ScriptException e) {
throw new OCommandScriptException("Error on execution of the script", request.getText(), e.getColumnNumber(), e);
} finally {
scriptManager.unbind(binding);
}
}
public boolean isIdempotent() {
return false;
}
@Override
protected void throwSyntaxErrorException(String iText) {
throw new OCommandScriptException("Error on execution of the script: " + iText, request.getText(), 0);
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_command_script_OCommandExecutorScript.java
|
254 |
service.submit(runnable, selector, new ExecutionCallback() {
public void onResponse(Object response) {
result.set(response);
responseLatch.countDown();
}
public void onFailure(Throwable t) {
// failure path intentionally ignored in this test fragment
}
});
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_executor_ClientExecutorServiceSubmitTest.java
|
344 |
public class NodesShutdownRequestBuilder extends MasterNodeOperationRequestBuilder<NodesShutdownRequest, NodesShutdownResponse, NodesShutdownRequestBuilder> {
public NodesShutdownRequestBuilder(ClusterAdminClient clusterClient) {
super((InternalClusterAdminClient) clusterClient, new NodesShutdownRequest());
}
/**
* The nodes ids to shut down.
*/
public NodesShutdownRequestBuilder setNodesIds(String... nodesIds) {
request.nodesIds(nodesIds);
return this;
}
/**
* The delay before the shutdown occurs. Defaults to <tt>1s</tt>.
*/
public NodesShutdownRequestBuilder setDelay(TimeValue delay) {
request.delay(delay);
return this;
}
/**
* The delay before the shutdown occurs. Defaults to <tt>1s</tt>.
*/
public NodesShutdownRequestBuilder setDelay(String delay) {
request.delay(delay);
return this;
}
/**
* Should the JVM be exited as well or not. Defaults to <tt>true</tt>.
*/
public NodesShutdownRequestBuilder setExit(boolean exit) {
request.exit(exit);
return this;
}
@Override
protected void doExecute(ActionListener<NodesShutdownResponse> listener) {
((ClusterAdminClient) client).nodesShutdown(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_shutdown_NodesShutdownRequestBuilder.java
|
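A minimal sketch of the builder's intended call chain, assuming an already-connected Client; the node ids and delay are illustrative only.
import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownResponse;
import org.elasticsearch.client.Client;

public class ShutdownSketch {
    static NodesShutdownResponse shutdown(Client client) {
        return client.admin().cluster()
                .prepareNodesShutdown("node1", "node2") // hypothetical node ids
                .setDelay("5s")
                .setExit(true)
                .execute().actionGet();
    }
}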
330 |
map.addEntryListener(new EntryAdapter<String, String>() {
public void entryEvicted(EntryEvent<String, String> event) {
latch.countDown();
}
}, true);
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTest.java
|
1,081 |
public abstract class OSQLFilterItemAbstract implements OSQLFilterItem {
protected List<OPair<OSQLMethod, Object[]>> operationsChain = null;
public OSQLFilterItemAbstract(final OBaseParser iQueryToParse, final String iText) {
final List<String> parts = OStringSerializerHelper.smartSplit(iText, '.');
setRoot(iQueryToParse, parts.get(0));
if (parts.size() > 1) {
operationsChain = new ArrayList<OPair<OSQLMethod, Object[]>>();
// GET ALL SPECIAL OPERATIONS
for (int i = 1; i < parts.size(); ++i) {
final String part = parts.get(i);
final int pindex = part.indexOf('(');
if (pindex > -1) {
final String methodName = part.substring(0, pindex).trim().toLowerCase(Locale.ENGLISH);
OSQLMethod method = OSQLHelper.getMethodByName(methodName);
final Object[] arguments;
if (method != null) {
if (method.getMaxParams() == -1 || method.getMaxParams() > 0) {
arguments = OStringSerializerHelper.getParameters(part).toArray();
if (arguments.length < method.getMinParams()
|| (method.getMaxParams() > -1 && arguments.length > method.getMaxParams()))
throw new OQueryParsingException(iQueryToParse.parserText, "Syntax error: field operator '"
+ method.getName()
+ "' needs "
+ (method.getMinParams() == method.getMaxParams() ? method.getMinParams() : method.getMinParams() + "-"
+ method.getMaxParams()) + " argument(s) while has been received " + arguments.length, 0);
} else
arguments = null;
} else {
// LOOK FOR FUNCTION
final OSQLFunction f = OSQLEngine.getInstance().getFunction(methodName);
if (f == null)
// ERROR: METHOD/FUNCTION NOT FOUND OR MISSPELLED
throw new OQueryParsingException(iQueryToParse.parserText,
"Syntax error: function or field operator not recognized between the supported ones: "
+ Arrays.toString(OSQLHelper.getAllMethodNames()), 0);
if (f.getMaxParams() == -1 || f.getMaxParams() > 0) {
arguments = OStringSerializerHelper.getParameters(part).toArray();
if (arguments.length < f.getMinParams() || (f.getMaxParams() > -1 && arguments.length > f.getMaxParams()))
throw new OQueryParsingException(iQueryToParse.parserText, "Syntax error: function '" + f.getName() + "' needs "
+ (f.getMinParams() == f.getMaxParams() ? f.getMinParams() : f.getMinParams() + "-" + f.getMaxParams())
+ " argument(s) while has been received " + arguments.length, 0);
} else
arguments = null;
method = new OSQLMethodFunctionDelegate(f);
}
// SPECIAL OPERATION FOUND: ADD IT IN TO THE CHAIN
operationsChain.add(new OPair<OSQLMethod, Object[]>(method, arguments));
} else {
operationsChain.add(new OPair<OSQLMethod, Object[]>(OSQLHelper.getMethodByName(OSQLMethodField.NAME),
new Object[] { part }));
}
}
}
}
public abstract String getRoot();
protected abstract void setRoot(OBaseParser iQueryToParse, final String iRoot);
public Object transformValue(final OIdentifiable iRecord, final OCommandContext iContext, Object ioResult) {
if (ioResult != null && operationsChain != null) {
// APPLY OPERATIONS FOLLOWING THE STACK ORDER
OSQLMethod operator = null;
try {
for (OPair<OSQLMethod, Object[]> op : operationsChain) {
operator = op.getKey();
// DON'T PASS THE CURRENT RECORD TO FORCE EVALUATING TEMPORARY RESULT
ioResult = operator.execute(iRecord, iContext, ioResult, op.getValue());
}
} catch (ParseException e) {
OLogManager.instance().exception("Error on conversion of value '%s' using field operator %s", e,
OCommandExecutionException.class, ioResult, operator.getName());
}
}
return ioResult;
}
public boolean hasChainOperators() {
return operationsChain != null;
}
@Override
public String toString() {
final StringBuilder buffer = new StringBuilder();
final String root = getRoot();
if (root != null)
buffer.append(root);
if (operationsChain != null) {
for (OPair<OSQLMethod, Object[]> op : operationsChain) {
buffer.append('.');
buffer.append(op.getKey());
if (op.getValue() != null) {
final Object[] values = op.getValue();
buffer.append('(');
int i = 0;
for (Object v : values) {
if (i++ > 0)
buffer.append(',');
buffer.append(v);
}
buffer.append(')');
}
}
}
return buffer.toString();
}
protected OCollate getCollateForField(final ODocument doc, final String iFieldName) {
if (doc.getSchemaClass() != null) {
final OProperty p = doc.getSchemaClass().getProperty(iFieldName);
if (p != null)
return p.getCollate();
}
return null;
}
}
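(The class above parses a dotted expression such as name.toUpperCase().left(3) into an
operations chain and then folds that chain over the starting value in transformValue().
A minimal, self-contained sketch of the same fold, with hypothetical step names standing
in for OSQLMethod implementations, assuming each step simply maps a value to a value:)

import java.util.ArrayList;
import java.util.List;

public class ChainSketch {
    /** One step of the chain: receives the previous result, returns the next one. */
    interface Step { Object apply(Object input); }

    public static void main(String[] args) {
        // Build a chain equivalent to: value.toUpperCase().left(3)
        List<Step> chain = new ArrayList<Step>();
        chain.add(new Step() { public Object apply(Object in) { return in.toString().toUpperCase(); } });
        chain.add(new Step() { public Object apply(Object in) { return in.toString().substring(0, 3); } });

        // Fold the chain in stack order, exactly as transformValue() does:
        Object result = "orientdb";
        for (Step s : chain)
            result = s.apply(result);
        System.out.println(result); // prints ORI
    }
}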
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_filter_OSQLFilterItemAbstract.java
|
378 |
.ackTimeout(request.timeout()), new ActionListener<RepositoriesService.RegisterRepositoryResponse>() {
@Override
public void onResponse(RepositoriesService.RegisterRepositoryResponse response) {
listener.onResponse(new PutRepositoryResponse(response.isAcknowledged()));
}
@Override
public void onFailure(Throwable e) {
listener.onFailure(e);
}
});
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_repositories_put_TransportPutRepositoryAction.java
|
145 |
public class AtomicDouble extends Number implements java.io.Serializable {
private static final long serialVersionUID = -8405198993435143622L;
private transient volatile long value;
/**
* Creates a new {@code AtomicDouble} with the given initial value.
*
* @param initialValue the initial value
*/
public AtomicDouble(double initialValue) {
value = doubleToRawLongBits(initialValue);
}
/**
* Creates a new {@code AtomicDouble} with initial value {@code 0.0}.
*/
public AtomicDouble() {
// assert doubleToRawLongBits(0.0) == 0L;
}
/**
* Gets the current value.
*
* @return the current value
*/
public final double get() {
return longBitsToDouble(value);
}
/**
* Sets to the given value.
*
* @param newValue the new value
*/
public final void set(double newValue) {
long next = doubleToRawLongBits(newValue);
value = next;
}
/**
* Eventually sets to the given value.
*
* @param newValue the new value
*/
public final void lazySet(double newValue) {
long next = doubleToRawLongBits(newValue);
unsafe.putOrderedLong(this, valueOffset, next);
}
/**
* Atomically sets to the given value and returns the old value.
*
* @param newValue the new value
* @return the previous value
*/
public final double getAndSet(double newValue) {
long next = doubleToRawLongBits(newValue);
while (true) {
long current = value;
if (unsafe.compareAndSwapLong(this, valueOffset, current, next))
return longBitsToDouble(current);
}
}
/**
* Atomically sets the value to the given updated value
* if the current value is <a href="#bitEquals">bitwise equal</a>
* to the expected value.
*
* @param expect the expected value
* @param update the new value
* @return {@code true} if successful. False return indicates that
* the actual value was not bitwise equal to the expected value.
*/
public final boolean compareAndSet(double expect, double update) {
return unsafe.compareAndSwapLong(this, valueOffset,
doubleToRawLongBits(expect),
doubleToRawLongBits(update));
}
/**
* Atomically sets the value to the given updated value
* if the current value is <a href="#bitEquals">bitwise equal</a>
* to the expected value.
*
* <p><a
* href="http://download.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/package-summary.html#Spurious">
* May fail spuriously and does not provide ordering guarantees</a>,
* so is only rarely an appropriate alternative to {@code compareAndSet}.
*
* @param expect the expected value
* @param update the new value
* @return {@code true} if successful
*/
public final boolean weakCompareAndSet(double expect, double update) {
return compareAndSet(expect, update);
}
/**
* Atomically adds the given value to the current value.
*
* @param delta the value to add
* @return the previous value
*/
public final double getAndAdd(double delta) {
while (true) {
long current = value;
double currentVal = longBitsToDouble(current);
double nextVal = currentVal + delta;
long next = doubleToRawLongBits(nextVal);
if (unsafe.compareAndSwapLong(this, valueOffset, current, next))
return currentVal;
}
}
/**
* Atomically adds the given value to the current value.
*
* @param delta the value to add
* @return the updated value
*/
public final double addAndGet(double delta) {
while (true) {
long current = value;
double currentVal = longBitsToDouble(current);
double nextVal = currentVal + delta;
long next = doubleToRawLongBits(nextVal);
if (unsafe.compareAndSwapLong(this, valueOffset, current, next))
return nextVal;
}
}
/**
* Returns the String representation of the current value.
* @return the String representation of the current value
*/
public String toString() {
return Double.toString(get());
}
/**
* Returns the value of this {@code AtomicDouble} as an {@code int}
* after a narrowing primitive conversion.
*/
public int intValue() {
return (int) get();
}
/**
* Returns the value of this {@code AtomicDouble} as a {@code long}
* after a narrowing primitive conversion.
*/
public long longValue() {
return (long) get();
}
/**
* Returns the value of this {@code AtomicDouble} as a {@code float}
* after a narrowing primitive conversion.
*/
public float floatValue() {
return (float) get();
}
/**
* Returns the value of this {@code AtomicDouble} as a {@code double}.
*/
public double doubleValue() {
return get();
}
/**
* Saves the state to a stream (that is, serializes it).
*
* @param s the stream
* @throws java.io.IOException if an I/O error occurs
* @serialData The current value is emitted (a {@code double}).
*/
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
s.defaultWriteObject();
s.writeDouble(get());
}
/**
* Reconstitutes the instance from a stream (that is, deserializes it).
* @param s the stream
* @throws ClassNotFoundException if the class of a serialized object
* could not be found
* @throws java.io.IOException if an I/O error occurs
*/
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
set(s.readDouble());
}
// Unsafe mechanics
private static final sun.misc.Unsafe unsafe = getUnsafe();
private static final long valueOffset;
static {
try {
valueOffset = unsafe.objectFieldOffset
(AtomicDouble.class.getDeclaredField("value"));
} catch (Exception ex) { throw new Error(ex); }
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
private static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException tryReflectionInstead) {}
try {
return java.security.AccessController.doPrivileged
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
}
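(AtomicDouble above stores the double as raw long bits and CASes the bits, which makes
compareAndSet a bitwise rather than numeric comparison: -0.0 does not match 0.0, while
a NaN with identical bits matches itself. A small sketch of the same technique built
only on the standard java.util.concurrent.atomic.AtomicLong, so it runs without
sun.misc.Unsafe:)

import java.util.concurrent.atomic.AtomicLong;
import static java.lang.Double.doubleToRawLongBits;
import static java.lang.Double.longBitsToDouble;

public class BitwiseDoubleSketch {
    private final AtomicLong bits = new AtomicLong(doubleToRawLongBits(0.0));

    double get() { return longBitsToDouble(bits.get()); }

    boolean compareAndSet(double expect, double update) {
        // Bitwise, not numeric, equality -- the same semantics as AtomicDouble above.
        return bits.compareAndSet(doubleToRawLongBits(expect), doubleToRawLongBits(update));
    }

    double addAndGet(double delta) {
        // Classic CAS retry loop over the encoded bits.
        while (true) {
            long cur = bits.get();
            double next = longBitsToDouble(cur) + delta;
            if (bits.compareAndSet(cur, doubleToRawLongBits(next)))
                return next;
        }
    }

    public static void main(String[] args) {
        BitwiseDoubleSketch d = new BitwiseDoubleSketch();
        System.out.println(d.compareAndSet(-0.0, 1.0)); // false: -0.0 is not bitwise 0.0
        System.out.println(d.compareAndSet(0.0, 1.0));  // true
        System.out.println(d.addAndGet(0.5));           // 1.5
    }
}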
| 0true
|
src_main_java_jsr166e_extra_AtomicDouble.java
|
1 |
public class CompletableFuture<T> implements Future<T> {
// jsr166e nested interfaces
/** Interface describing a void action of one argument */
public interface Action<A> { void accept(A a); }
/** Interface describing a void action of two arguments */
public interface BiAction<A,B> { void accept(A a, B b); }
/** Interface describing a function of one argument */
public interface Fun<A,T> { T apply(A a); }
/** Interface describing a function of two arguments */
public interface BiFun<A,B,T> { T apply(A a, B b); }
/** Interface describing a function of no arguments */
public interface Generator<T> { T get(); }
/*
* Overview:
*
* 1. Non-nullness of field result (set via CAS) indicates done.
* An AltResult is used to box null as a result, as well as to
* hold exceptions. Using a single field makes completion fast
* and simple to detect and trigger, at the expense of a lot of
* encoding and decoding that infiltrates many methods. One minor
* simplification relies on the (static) NIL (to box null results)
* being the only AltResult with a null exception field, so we
* don't usually need explicit comparisons with NIL. The CF
* exception propagation mechanics surrounding decoding rely on
* unchecked casts of decoded results really being unchecked,
* where user type errors are caught at point of use, as is
* currently the case in Java. These are highlighted by using
* SuppressWarnings-annotated temporaries.
*
* 2. Waiters are held in a Treiber stack similar to the one used
* in FutureTask, Phaser, and SynchronousQueue. See their
* internal documentation for algorithmic details.
*
* 3. Completions are also kept in a list/stack, and pulled off
* and run when completion is triggered. (We could even use the
* same stack as for waiters, but would give up the potential
* parallelism obtained because woken waiters help release/run
* others -- see method postComplete). Because post-processing
* may race with direct calls, class Completion opportunistically
* extends AtomicInteger so callers can claim the action via
* compareAndSet(0, 1). The Completion.run methods are all
* written a boringly similar uniform way (that sometimes includes
* unnecessary-looking checks, kept to maintain uniformity).
* There are enough dimensions upon which they differ that
* attempts to factor commonalities while maintaining efficiency
* require more lines of code than they would save.
*
* 4. The exported then/and/or methods do support a bit of
* factoring (see doThenApply etc). They must cope with the
* intrinsic races surrounding addition of a dependent action
* versus performing the action directly because the task is
* already complete. For example, a CF may not be complete upon
* entry, so a dependent completion is added, but by the time it
* is added, the target CF is complete, so must be directly
* executed. This is all done while avoiding unnecessary object
* construction in safe-bypass cases.
*/
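    // Added illustration (not part of the original jsr166e source): the Treiber
    // stack referenced in points 2-3 above is a lock-free LIFO maintained through
    // a single CAS'd head pointer. The push used for both waiters and completions
    // boils down to:
    //
    //     do { node.next = head.get(); } while (!head.compareAndSet(node.next, node));
    //
    // and the pop in postComplete() is the mirror image:
    //
    //     do { if ((h = head.get()) == null) return; } while (!head.compareAndSet(h, h.next));
    //
    // This class performs the same CASes via UNSAFE.compareAndSwapObject on the
    // WAITERS and COMPLETIONS field offsets instead of an AtomicReference.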
// preliminaries
static final class AltResult {
final Throwable ex; // null only for NIL
AltResult(Throwable ex) { this.ex = ex; }
}
static final AltResult NIL = new AltResult(null);
// Fields
volatile Object result; // Either the result or boxed AltResult
volatile WaitNode waiters; // Treiber stack of threads blocked on get()
volatile CompletionNode completions; // list (Treiber stack) of completions
// Basic utilities for triggering and processing completions
/**
* Removes and signals all waiting threads and runs all completions.
*/
final void postComplete() {
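        // Pop each WaitNode off the waiters stack with a CAS and unpark its
        // recorded thread, then pop and run each queued Completion the same way.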
WaitNode q; Thread t;
while ((q = waiters) != null) {
if (UNSAFE.compareAndSwapObject(this, WAITERS, q, q.next) &&
(t = q.thread) != null) {
q.thread = null;
LockSupport.unpark(t);
}
}
CompletionNode h; Completion c;
while ((h = completions) != null) {
if (UNSAFE.compareAndSwapObject(this, COMPLETIONS, h, h.next) &&
(c = h.completion) != null)
c.run();
}
}
/**
* Triggers completion with the encoding of the given arguments:
* if the exception is non-null, encodes it as a wrapped
* CompletionException unless it is one already. Otherwise uses
* the given result, boxed as NIL if null.
*/
final void internalComplete(T v, Throwable ex) {
if (result == null)
UNSAFE.compareAndSwapObject
(this, RESULT, null,
(ex == null) ? (v == null) ? NIL : v :
new AltResult((ex instanceof CompletionException) ? ex :
new CompletionException(ex)));
postComplete(); // help out even if not triggered
}
/**
* If triggered, helps release and/or process completions.
*/
final void helpPostComplete() {
if (result != null)
postComplete();
}
/* ------------- waiting for completions -------------- */
/** Number of processors, for spin control */
static final int NCPU = Runtime.getRuntime().availableProcessors();
/**
* Heuristic spin value for waitingGet() before blocking on
* multiprocessors
*/
static final int SPINS = (NCPU > 1) ? 1 << 8 : 0;
/**
* Linked nodes to record waiting threads in a Treiber stack. See
* other classes such as Phaser and SynchronousQueue for more
* detailed explanation. This class implements ManagedBlocker to
* avoid starvation when blocking actions pile up in
* ForkJoinPools.
*/
static final class WaitNode implements ForkJoinPool.ManagedBlocker {
long nanos; // wait time if timed
final long deadline; // non-zero if timed
volatile int interruptControl; // > 0: interruptible, < 0: interrupted
volatile Thread thread;
volatile WaitNode next;
WaitNode(boolean interruptible, long nanos, long deadline) {
this.thread = Thread.currentThread();
this.interruptControl = interruptible ? 1 : 0;
this.nanos = nanos;
this.deadline = deadline;
}
public boolean isReleasable() {
if (thread == null)
return true;
if (Thread.interrupted()) {
int i = interruptControl;
interruptControl = -1;
if (i > 0)
return true;
}
if (deadline != 0L &&
(nanos <= 0L || (nanos = deadline - System.nanoTime()) <= 0L)) {
thread = null;
return true;
}
return false;
}
public boolean block() {
if (isReleasable())
return true;
else if (deadline == 0L)
LockSupport.park(this);
else if (nanos > 0L)
LockSupport.parkNanos(this, nanos);
return isReleasable();
}
}
/**
* Returns raw result after waiting, or null if interruptible and
* interrupted.
*/
private Object waitingGet(boolean interruptible) {
WaitNode q = null;
boolean queued = false;
int spins = SPINS;
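        // Spin briefly on multiprocessors before enqueuing a WaitNode and
        // blocking through ForkJoinPool.managedBlock (see WaitNode above).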
for (Object r;;) {
if ((r = result) != null) {
if (q != null) { // suppress unpark
q.thread = null;
if (q.interruptControl < 0) {
if (interruptible) {
removeWaiter(q);
return null;
}
Thread.currentThread().interrupt();
}
}
postComplete(); // help release others
return r;
}
else if (spins > 0) {
int rnd = ThreadLocalRandom.current().nextInt();
if (rnd >= 0)
--spins;
}
else if (q == null)
q = new WaitNode(interruptible, 0L, 0L);
else if (!queued)
queued = UNSAFE.compareAndSwapObject(this, WAITERS,
q.next = waiters, q);
else if (interruptible && q.interruptControl < 0) {
removeWaiter(q);
return null;
}
else if (q.thread != null && result == null) {
try {
ForkJoinPool.managedBlock(q);
} catch (InterruptedException ex) {
q.interruptControl = -1;
}
}
}
}
/**
* Awaits completion or aborts on interrupt or timeout.
*
* @param nanos time to wait
* @return raw result
*/
private Object timedAwaitDone(long nanos)
throws InterruptedException, TimeoutException {
WaitNode q = null;
boolean queued = false;
for (Object r;;) {
if ((r = result) != null) {
if (q != null) {
q.thread = null;
if (q.interruptControl < 0) {
removeWaiter(q);
throw new InterruptedException();
}
}
postComplete();
return r;
}
else if (q == null) {
if (nanos <= 0L)
throw new TimeoutException();
long d = System.nanoTime() + nanos;
q = new WaitNode(true, nanos, d == 0L ? 1L : d); // avoid 0
}
else if (!queued)
queued = UNSAFE.compareAndSwapObject(this, WAITERS,
q.next = waiters, q);
else if (q.interruptControl < 0) {
removeWaiter(q);
throw new InterruptedException();
}
else if (q.nanos <= 0L) {
if (result == null) {
removeWaiter(q);
throw new TimeoutException();
}
}
else if (q.thread != null && result == null) {
try {
ForkJoinPool.managedBlock(q);
} catch (InterruptedException ex) {
q.interruptControl = -1;
}
}
}
}
/**
* Tries to unlink a timed-out or interrupted wait node to avoid
* accumulating garbage. Internal nodes are simply unspliced
* without CAS since it is harmless if they are traversed anyway
* by releasers. To avoid effects of unsplicing from already
* removed nodes, the list is retraversed in case of an apparent
* race. This is slow when there are a lot of nodes, but we don't
* expect lists to be long enough to outweigh higher-overhead
* schemes.
*/
private void removeWaiter(WaitNode node) {
if (node != null) {
node.thread = null;
retry:
for (;;) { // restart on removeWaiter race
for (WaitNode pred = null, q = waiters, s; q != null; q = s) {
s = q.next;
if (q.thread != null)
pred = q;
else if (pred != null) {
pred.next = s;
if (pred.thread == null) // check for race
continue retry;
}
else if (!UNSAFE.compareAndSwapObject(this, WAITERS, q, s))
continue retry;
}
break;
}
}
}
/* ------------- Async tasks -------------- */
/**
* A marker interface identifying asynchronous tasks produced by
* {@code async} methods. This may be useful for monitoring,
* debugging, and tracking asynchronous activities.
*
* @since 1.8
*/
public static interface AsynchronousCompletionTask {
}
/** Base class can act as either FJ or plain Runnable */
abstract static class Async extends ForkJoinTask<Void>
implements Runnable, AsynchronousCompletionTask {
public final Void getRawResult() { return null; }
public final void setRawResult(Void v) { }
public final void run() { exec(); }
}
static final class AsyncRun extends Async {
final Runnable fn;
final CompletableFuture<Void> dst;
AsyncRun(Runnable fn, CompletableFuture<Void> dst) {
this.fn = fn; this.dst = dst;
}
public final boolean exec() {
CompletableFuture<Void> d; Throwable ex;
if ((d = this.dst) != null && d.result == null) {
try {
fn.run();
ex = null;
} catch (Throwable rex) {
ex = rex;
}
d.internalComplete(null, ex);
}
return true;
}
private static final long serialVersionUID = 5232453952276885070L;
}
static final class AsyncSupply<U> extends Async {
final Generator<U> fn;
final CompletableFuture<U> dst;
AsyncSupply(Generator<U> fn, CompletableFuture<U> dst) {
this.fn = fn; this.dst = dst;
}
public final boolean exec() {
CompletableFuture<U> d; U u; Throwable ex;
if ((d = this.dst) != null && d.result == null) {
try {
u = fn.get();
ex = null;
} catch (Throwable rex) {
ex = rex;
u = null;
}
d.internalComplete(u, ex);
}
return true;
}
private static final long serialVersionUID = 5232453952276885070L;
}
static final class AsyncApply<T,U> extends Async {
final T arg;
final Fun<? super T,? extends U> fn;
final CompletableFuture<U> dst;
AsyncApply(T arg, Fun<? super T,? extends U> fn,
CompletableFuture<U> dst) {
this.arg = arg; this.fn = fn; this.dst = dst;
}
public final boolean exec() {
CompletableFuture<U> d; U u; Throwable ex;
if ((d = this.dst) != null && d.result == null) {
try {
u = fn.apply(arg);
ex = null;
} catch (Throwable rex) {
ex = rex;
u = null;
}
d.internalComplete(u, ex);
}
return true;
}
private static final long serialVersionUID = 5232453952276885070L;
}
static final class AsyncCombine<T,U,V> extends Async {
final T arg1;
final U arg2;
final BiFun<? super T,? super U,? extends V> fn;
final CompletableFuture<V> dst;
AsyncCombine(T arg1, U arg2,
BiFun<? super T,? super U,? extends V> fn,
CompletableFuture<V> dst) {
this.arg1 = arg1; this.arg2 = arg2; this.fn = fn; this.dst = dst;
}
public final boolean exec() {
CompletableFuture<V> d; V v; Throwable ex;
if ((d = this.dst) != null && d.result == null) {
try {
v = fn.apply(arg1, arg2);
ex = null;
} catch (Throwable rex) {
ex = rex;
v = null;
}
d.internalComplete(v, ex);
}
return true;
}
private static final long serialVersionUID = 5232453952276885070L;
}
static final class AsyncAccept<T> extends Async {
final T arg;
final Action<? super T> fn;
final CompletableFuture<Void> dst;
AsyncAccept(T arg, Action<? super T> fn,
CompletableFuture<Void> dst) {
this.arg = arg; this.fn = fn; this.dst = dst;
}
public final boolean exec() {
CompletableFuture<Void> d; Throwable ex;
if ((d = this.dst) != null && d.result == null) {
try {
fn.accept(arg);
ex = null;
} catch (Throwable rex) {
ex = rex;
}
d.internalComplete(null, ex);
}
return true;
}
private static final long serialVersionUID = 5232453952276885070L;
}
static final class AsyncAcceptBoth<T,U> extends Async {
final T arg1;
final U arg2;
final BiAction<? super T,? super U> fn;
final CompletableFuture<Void> dst;
AsyncAcceptBoth(T arg1, U arg2,
BiAction<? super T,? super U> fn,
CompletableFuture<Void> dst) {
this.arg1 = arg1; this.arg2 = arg2; this.fn = fn; this.dst = dst;
}
public final boolean exec() {
CompletableFuture<Void> d; Throwable ex;
if ((d = this.dst) != null && d.result == null) {
try {
fn.accept(arg1, arg2);
ex = null;
} catch (Throwable rex) {
ex = rex;
}
d.internalComplete(null, ex);
}
return true;
}
private static final long serialVersionUID = 5232453952276885070L;
}
static final class AsyncCompose<T,U> extends Async {
final T arg;
final Fun<? super T, CompletableFuture<U>> fn;
final CompletableFuture<U> dst;
AsyncCompose(T arg,
Fun<? super T, CompletableFuture<U>> fn,
CompletableFuture<U> dst) {
this.arg = arg; this.fn = fn; this.dst = dst;
}
public final boolean exec() {
CompletableFuture<U> d, fr; U u; Throwable ex;
if ((d = this.dst) != null && d.result == null) {
try {
fr = fn.apply(arg);
ex = (fr == null) ? new NullPointerException() : null;
} catch (Throwable rex) {
ex = rex;
fr = null;
}
if (ex != null)
u = null;
else {
Object r = fr.result;
if (r == null)
r = fr.waitingGet(false);
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
u = null;
}
else {
@SuppressWarnings("unchecked") U ur = (U) r;
u = ur;
}
}
d.internalComplete(u, ex);
}
return true;
}
private static final long serialVersionUID = 5232453952276885070L;
}
/* ------------- Completions -------------- */
/**
* Simple linked list nodes to record completions, used in
* basically the same way as WaitNodes. (We separate nodes from
* the Completions themselves mainly because for the And and Or
* methods, the same Completion object resides in two lists.)
*/
static final class CompletionNode {
final Completion completion;
volatile CompletionNode next;
CompletionNode(Completion completion) { this.completion = completion; }
}
// Opportunistically subclass AtomicInteger to use compareAndSet to claim.
abstract static class Completion extends AtomicInteger implements Runnable {
}
static final class ThenApply<T,U> extends Completion {
final CompletableFuture<? extends T> src;
final Fun<? super T,? extends U> fn;
final CompletableFuture<U> dst;
final Executor executor;
ThenApply(CompletableFuture<? extends T> src,
Fun<? super T,? extends U> fn,
CompletableFuture<U> dst,
Executor executor) {
this.src = src; this.fn = fn; this.dst = dst;
this.executor = executor;
}
public final void run() {
final CompletableFuture<? extends T> a;
final Fun<? super T,? extends U> fn;
final CompletableFuture<U> dst;
Object r; T t; Throwable ex;
if ((dst = this.dst) != null &&
(fn = this.fn) != null &&
(a = this.src) != null &&
(r = a.result) != null &&
compareAndSet(0, 1)) {
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
Executor e = executor;
U u = null;
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncApply<T,U>(t, fn, dst));
else
u = fn.apply(t);
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(u, ex);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
static final class ThenAccept<T> extends Completion {
final CompletableFuture<? extends T> src;
final Action<? super T> fn;
final CompletableFuture<Void> dst;
final Executor executor;
ThenAccept(CompletableFuture<? extends T> src,
Action<? super T> fn,
CompletableFuture<Void> dst,
Executor executor) {
this.src = src; this.fn = fn; this.dst = dst;
this.executor = executor;
}
public final void run() {
final CompletableFuture<? extends T> a;
final Action<? super T> fn;
final CompletableFuture<Void> dst;
Object r; T t; Throwable ex;
if ((dst = this.dst) != null &&
(fn = this.fn) != null &&
(a = this.src) != null &&
(r = a.result) != null &&
compareAndSet(0, 1)) {
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
Executor e = executor;
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncAccept<T>(t, fn, dst));
else
fn.accept(t);
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(null, ex);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
static final class ThenRun extends Completion {
final CompletableFuture<?> src;
final Runnable fn;
final CompletableFuture<Void> dst;
final Executor executor;
ThenRun(CompletableFuture<?> src,
Runnable fn,
CompletableFuture<Void> dst,
Executor executor) {
this.src = src; this.fn = fn; this.dst = dst;
this.executor = executor;
}
public final void run() {
final CompletableFuture<?> a;
final Runnable fn;
final CompletableFuture<Void> dst;
Object r; Throwable ex;
if ((dst = this.dst) != null &&
(fn = this.fn) != null &&
(a = this.src) != null &&
(r = a.result) != null &&
compareAndSet(0, 1)) {
if (r instanceof AltResult)
ex = ((AltResult)r).ex;
else
ex = null;
Executor e = executor;
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncRun(fn, dst));
else
fn.run();
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(null, ex);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
static final class ThenCombine<T,U,V> extends Completion {
final CompletableFuture<? extends T> src;
final CompletableFuture<? extends U> snd;
final BiFun<? super T,? super U,? extends V> fn;
final CompletableFuture<V> dst;
final Executor executor;
ThenCombine(CompletableFuture<? extends T> src,
CompletableFuture<? extends U> snd,
BiFun<? super T,? super U,? extends V> fn,
CompletableFuture<V> dst,
Executor executor) {
this.src = src; this.snd = snd;
this.fn = fn; this.dst = dst;
this.executor = executor;
}
public final void run() {
final CompletableFuture<? extends T> a;
final CompletableFuture<? extends U> b;
final BiFun<? super T,? super U,? extends V> fn;
final CompletableFuture<V> dst;
Object r, s; T t; U u; Throwable ex;
if ((dst = this.dst) != null &&
(fn = this.fn) != null &&
(a = this.src) != null &&
(r = a.result) != null &&
(b = this.snd) != null &&
(s = b.result) != null &&
compareAndSet(0, 1)) {
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
if (ex != null)
u = null;
else if (s instanceof AltResult) {
ex = ((AltResult)s).ex;
u = null;
}
else {
@SuppressWarnings("unchecked") U us = (U) s;
u = us;
}
Executor e = executor;
V v = null;
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncCombine<T,U,V>(t, u, fn, dst));
else
v = fn.apply(t, u);
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(v, ex);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
static final class ThenAcceptBoth<T,U> extends Completion {
final CompletableFuture<? extends T> src;
final CompletableFuture<? extends U> snd;
final BiAction<? super T,? super U> fn;
final CompletableFuture<Void> dst;
final Executor executor;
ThenAcceptBoth(CompletableFuture<? extends T> src,
CompletableFuture<? extends U> snd,
BiAction<? super T,? super U> fn,
CompletableFuture<Void> dst,
Executor executor) {
this.src = src; this.snd = snd;
this.fn = fn; this.dst = dst;
this.executor = executor;
}
public final void run() {
final CompletableFuture<? extends T> a;
final CompletableFuture<? extends U> b;
final BiAction<? super T,? super U> fn;
final CompletableFuture<Void> dst;
Object r, s; T t; U u; Throwable ex;
if ((dst = this.dst) != null &&
(fn = this.fn) != null &&
(a = this.src) != null &&
(r = a.result) != null &&
(b = this.snd) != null &&
(s = b.result) != null &&
compareAndSet(0, 1)) {
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
if (ex != null)
u = null;
else if (s instanceof AltResult) {
ex = ((AltResult)s).ex;
u = null;
}
else {
@SuppressWarnings("unchecked") U us = (U) s;
u = us;
}
Executor e = executor;
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncAcceptBoth<T,U>(t, u, fn, dst));
else
fn.accept(t, u);
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(null, ex);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
static final class RunAfterBoth extends Completion {
final CompletableFuture<?> src;
final CompletableFuture<?> snd;
final Runnable fn;
final CompletableFuture<Void> dst;
final Executor executor;
RunAfterBoth(CompletableFuture<?> src,
CompletableFuture<?> snd,
Runnable fn,
CompletableFuture<Void> dst,
Executor executor) {
this.src = src; this.snd = snd;
this.fn = fn; this.dst = dst;
this.executor = executor;
}
public final void run() {
final CompletableFuture<?> a;
final CompletableFuture<?> b;
final Runnable fn;
final CompletableFuture<Void> dst;
Object r, s; Throwable ex;
if ((dst = this.dst) != null &&
(fn = this.fn) != null &&
(a = this.src) != null &&
(r = a.result) != null &&
(b = this.snd) != null &&
(s = b.result) != null &&
compareAndSet(0, 1)) {
if (r instanceof AltResult)
ex = ((AltResult)r).ex;
else
ex = null;
if (ex == null && (s instanceof AltResult))
ex = ((AltResult)s).ex;
Executor e = executor;
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncRun(fn, dst));
else
fn.run();
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(null, ex);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
static final class AndCompletion extends Completion {
final CompletableFuture<?> src;
final CompletableFuture<?> snd;
final CompletableFuture<Void> dst;
AndCompletion(CompletableFuture<?> src,
CompletableFuture<?> snd,
CompletableFuture<Void> dst) {
this.src = src; this.snd = snd; this.dst = dst;
}
public final void run() {
final CompletableFuture<?> a;
final CompletableFuture<?> b;
final CompletableFuture<Void> dst;
Object r, s; Throwable ex;
if ((dst = this.dst) != null &&
(a = this.src) != null &&
(r = a.result) != null &&
(b = this.snd) != null &&
(s = b.result) != null &&
compareAndSet(0, 1)) {
if (r instanceof AltResult)
ex = ((AltResult)r).ex;
else
ex = null;
if (ex == null && (s instanceof AltResult))
ex = ((AltResult)s).ex;
dst.internalComplete(null, ex);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
static final class ApplyToEither<T,U> extends Completion {
final CompletableFuture<? extends T> src;
final CompletableFuture<? extends T> snd;
final Fun<? super T,? extends U> fn;
final CompletableFuture<U> dst;
final Executor executor;
ApplyToEither(CompletableFuture<? extends T> src,
CompletableFuture<? extends T> snd,
Fun<? super T,? extends U> fn,
CompletableFuture<U> dst,
Executor executor) {
this.src = src; this.snd = snd;
this.fn = fn; this.dst = dst;
this.executor = executor;
}
public final void run() {
final CompletableFuture<? extends T> a;
final CompletableFuture<? extends T> b;
final Fun<? super T,? extends U> fn;
final CompletableFuture<U> dst;
Object r; T t; Throwable ex;
if ((dst = this.dst) != null &&
(fn = this.fn) != null &&
(((a = this.src) != null && (r = a.result) != null) ||
((b = this.snd) != null && (r = b.result) != null)) &&
compareAndSet(0, 1)) {
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
Executor e = executor;
U u = null;
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncApply<T,U>(t, fn, dst));
else
u = fn.apply(t);
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(u, ex);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
static final class AcceptEither<T> extends Completion {
final CompletableFuture<? extends T> src;
final CompletableFuture<? extends T> snd;
final Action<? super T> fn;
final CompletableFuture<Void> dst;
final Executor executor;
AcceptEither(CompletableFuture<? extends T> src,
CompletableFuture<? extends T> snd,
Action<? super T> fn,
CompletableFuture<Void> dst,
Executor executor) {
this.src = src; this.snd = snd;
this.fn = fn; this.dst = dst;
this.executor = executor;
}
public final void run() {
final CompletableFuture<? extends T> a;
final CompletableFuture<? extends T> b;
final Action<? super T> fn;
final CompletableFuture<Void> dst;
Object r; T t; Throwable ex;
if ((dst = this.dst) != null &&
(fn = this.fn) != null &&
(((a = this.src) != null && (r = a.result) != null) ||
((b = this.snd) != null && (r = b.result) != null)) &&
compareAndSet(0, 1)) {
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
Executor e = executor;
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncAccept<T>(t, fn, dst));
else
fn.accept(t);
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(null, ex);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
static final class RunAfterEither extends Completion {
final CompletableFuture<?> src;
final CompletableFuture<?> snd;
final Runnable fn;
final CompletableFuture<Void> dst;
final Executor executor;
RunAfterEither(CompletableFuture<?> src,
CompletableFuture<?> snd,
Runnable fn,
CompletableFuture<Void> dst,
Executor executor) {
this.src = src; this.snd = snd;
this.fn = fn; this.dst = dst;
this.executor = executor;
}
public final void run() {
final CompletableFuture<?> a;
final CompletableFuture<?> b;
final Runnable fn;
final CompletableFuture<Void> dst;
Object r; Throwable ex;
if ((dst = this.dst) != null &&
(fn = this.fn) != null &&
(((a = this.src) != null && (r = a.result) != null) ||
((b = this.snd) != null && (r = b.result) != null)) &&
compareAndSet(0, 1)) {
if (r instanceof AltResult)
ex = ((AltResult)r).ex;
else
ex = null;
Executor e = executor;
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncRun(fn, dst));
else
fn.run();
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(null, ex);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
static final class OrCompletion extends Completion {
final CompletableFuture<?> src;
final CompletableFuture<?> snd;
final CompletableFuture<Object> dst;
OrCompletion(CompletableFuture<?> src,
CompletableFuture<?> snd,
CompletableFuture<Object> dst) {
this.src = src; this.snd = snd; this.dst = dst;
}
public final void run() {
final CompletableFuture<?> a;
final CompletableFuture<?> b;
final CompletableFuture<Object> dst;
Object r, t; Throwable ex;
if ((dst = this.dst) != null &&
(((a = this.src) != null && (r = a.result) != null) ||
((b = this.snd) != null && (r = b.result) != null)) &&
compareAndSet(0, 1)) {
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
t = r;
}
dst.internalComplete(t, ex);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
static final class ExceptionCompletion<T> extends Completion {
final CompletableFuture<? extends T> src;
final Fun<? super Throwable, ? extends T> fn;
final CompletableFuture<T> dst;
ExceptionCompletion(CompletableFuture<? extends T> src,
Fun<? super Throwable, ? extends T> fn,
CompletableFuture<T> dst) {
this.src = src; this.fn = fn; this.dst = dst;
}
public final void run() {
final CompletableFuture<? extends T> a;
final Fun<? super Throwable, ? extends T> fn;
final CompletableFuture<T> dst;
Object r; T t = null; Throwable ex, dx = null;
if ((dst = this.dst) != null &&
(fn = this.fn) != null &&
(a = this.src) != null &&
(r = a.result) != null &&
compareAndSet(0, 1)) {
if ((r instanceof AltResult) &&
(ex = ((AltResult)r).ex) != null) {
try {
t = fn.apply(ex);
} catch (Throwable rex) {
dx = rex;
}
}
else {
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
dst.internalComplete(t, dx);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
static final class ThenCopy<T> extends Completion {
final CompletableFuture<?> src;
final CompletableFuture<T> dst;
ThenCopy(CompletableFuture<?> src,
CompletableFuture<T> dst) {
this.src = src; this.dst = dst;
}
public final void run() {
final CompletableFuture<?> a;
final CompletableFuture<T> dst;
Object r; T t; Throwable ex;
if ((dst = this.dst) != null &&
(a = this.src) != null &&
(r = a.result) != null &&
compareAndSet(0, 1)) {
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
dst.internalComplete(t, ex);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
// version of ThenCopy for CompletableFuture<Void> dst
static final class ThenPropagate extends Completion {
final CompletableFuture<?> src;
final CompletableFuture<Void> dst;
ThenPropagate(CompletableFuture<?> src,
CompletableFuture<Void> dst) {
this.src = src; this.dst = dst;
}
public final void run() {
final CompletableFuture<?> a;
final CompletableFuture<Void> dst;
Object r; Throwable ex;
if ((dst = this.dst) != null &&
(a = this.src) != null &&
(r = a.result) != null &&
compareAndSet(0, 1)) {
if (r instanceof AltResult)
ex = ((AltResult)r).ex;
else
ex = null;
dst.internalComplete(null, ex);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
static final class HandleCompletion<T,U> extends Completion {
final CompletableFuture<? extends T> src;
final BiFun<? super T, Throwable, ? extends U> fn;
final CompletableFuture<U> dst;
HandleCompletion(CompletableFuture<? extends T> src,
BiFun<? super T, Throwable, ? extends U> fn,
CompletableFuture<U> dst) {
this.src = src; this.fn = fn; this.dst = dst;
}
public final void run() {
final CompletableFuture<? extends T> a;
final BiFun<? super T, Throwable, ? extends U> fn;
final CompletableFuture<U> dst;
Object r; T t; Throwable ex;
if ((dst = this.dst) != null &&
(fn = this.fn) != null &&
(a = this.src) != null &&
(r = a.result) != null &&
compareAndSet(0, 1)) {
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
U u = null; Throwable dx = null;
try {
u = fn.apply(t, ex);
} catch (Throwable rex) {
dx = rex;
}
dst.internalComplete(u, dx);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
static final class ThenCompose<T,U> extends Completion {
final CompletableFuture<? extends T> src;
final Fun<? super T, CompletableFuture<U>> fn;
final CompletableFuture<U> dst;
final Executor executor;
ThenCompose(CompletableFuture<? extends T> src,
Fun<? super T, CompletableFuture<U>> fn,
CompletableFuture<U> dst,
Executor executor) {
this.src = src; this.fn = fn; this.dst = dst;
this.executor = executor;
}
public final void run() {
final CompletableFuture<? extends T> a;
final Fun<? super T, CompletableFuture<U>> fn;
final CompletableFuture<U> dst;
Object r; T t; Throwable ex; Executor e;
if ((dst = this.dst) != null &&
(fn = this.fn) != null &&
(a = this.src) != null &&
(r = a.result) != null &&
compareAndSet(0, 1)) {
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
CompletableFuture<U> c = null;
U u = null;
boolean complete = false;
if (ex == null) {
if ((e = executor) != null)
e.execute(new AsyncCompose<T,U>(t, fn, dst));
else {
try {
if ((c = fn.apply(t)) == null)
ex = new NullPointerException();
} catch (Throwable rex) {
ex = rex;
}
}
}
if (c != null) {
ThenCopy<U> d = null;
Object s;
if ((s = c.result) == null) {
CompletionNode p = new CompletionNode
(d = new ThenCopy<U>(c, dst));
while ((s = c.result) == null) {
if (UNSAFE.compareAndSwapObject
(c, COMPLETIONS, p.next = c.completions, p))
break;
}
}
if (s != null && (d == null || d.compareAndSet(0, 1))) {
complete = true;
if (s instanceof AltResult) {
ex = ((AltResult)s).ex; // no rewrap
u = null;
}
else {
@SuppressWarnings("unchecked") U us = (U) s;
u = us;
}
}
}
if (complete || ex != null)
dst.internalComplete(u, ex);
if (c != null)
c.helpPostComplete();
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
// public methods
/**
* Creates a new incomplete CompletableFuture.
*/
public CompletableFuture() {
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* by a task running in the {@link ForkJoinPool#commonPool()} with
* the value obtained by calling the given Generator.
*
* @param supplier a function returning the value to be used
* to complete the returned CompletableFuture
* @param <U> the function's return type
* @return the new CompletableFuture
*/
public static <U> CompletableFuture<U> supplyAsync(Generator<U> supplier) {
if (supplier == null) throw new NullPointerException();
CompletableFuture<U> f = new CompletableFuture<U>();
ForkJoinPool.commonPool().
execute((ForkJoinTask<?>)new AsyncSupply<U>(supplier, f));
return f;
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* by a task running in the given executor with the value obtained
* by calling the given Generator.
*
* @param supplier a function returning the value to be used
* to complete the returned CompletableFuture
* @param executor the executor to use for asynchronous execution
* @param <U> the function's return type
* @return the new CompletableFuture
*/
public static <U> CompletableFuture<U> supplyAsync(Generator<U> supplier,
Executor executor) {
if (executor == null || supplier == null)
throw new NullPointerException();
CompletableFuture<U> f = new CompletableFuture<U>();
executor.execute(new AsyncSupply<U>(supplier, f));
return f;
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* by a task running in the {@link ForkJoinPool#commonPool()} after
* it runs the given action.
*
* @param runnable the action to run before completing the
* returned CompletableFuture
* @return the new CompletableFuture
*/
public static CompletableFuture<Void> runAsync(Runnable runnable) {
if (runnable == null) throw new NullPointerException();
CompletableFuture<Void> f = new CompletableFuture<Void>();
ForkJoinPool.commonPool().
execute((ForkJoinTask<?>)new AsyncRun(runnable, f));
return f;
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* by a task running in the given executor after it runs the given
* action.
*
* @param runnable the action to run before completing the
* returned CompletableFuture
* @param executor the executor to use for asynchronous execution
* @return the new CompletableFuture
*/
public static CompletableFuture<Void> runAsync(Runnable runnable,
Executor executor) {
if (executor == null || runnable == null)
throw new NullPointerException();
CompletableFuture<Void> f = new CompletableFuture<Void>();
executor.execute(new AsyncRun(runnable, f));
return f;
}
/**
* Returns a new CompletableFuture that is already completed with
* the given value.
*
* @param value the value
* @param <U> the type of the value
* @return the completed CompletableFuture
*/
public static <U> CompletableFuture<U> completedFuture(U value) {
CompletableFuture<U> f = new CompletableFuture<U>();
f.result = (value == null) ? NIL : value;
return f;
}
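    // Added usage illustration (not part of the original jsr166e source), using
    // the nested functional interfaces declared at the top of this class:
    //
    //     CompletableFuture<String> f = CompletableFuture.supplyAsync(
    //         new CompletableFuture.Generator<String>() {
    //             public String get() { return "hello"; }
    //         });
    //     CompletableFuture<Integer> g = f.thenApply(
    //         new CompletableFuture.Fun<String, Integer>() {
    //             public Integer apply(String s) { return s.length(); }
    //         });
    //     // g.join() == 5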
/**
* Returns {@code true} if completed in any fashion: normally,
* exceptionally, or via cancellation.
*
* @return {@code true} if completed
*/
public boolean isDone() {
return result != null;
}
/**
* Waits if necessary for this future to complete, and then
* returns its result.
*
* @return the result value
* @throws CancellationException if this future was cancelled
* @throws ExecutionException if this future completed exceptionally
* @throws InterruptedException if the current thread was interrupted
* while waiting
*/
public T get() throws InterruptedException, ExecutionException {
Object r; Throwable ex, cause;
if ((r = result) == null && (r = waitingGet(true)) == null)
throw new InterruptedException();
if (!(r instanceof AltResult)) {
@SuppressWarnings("unchecked") T tr = (T) r;
return tr;
}
if ((ex = ((AltResult)r).ex) == null)
return null;
if (ex instanceof CancellationException)
throw (CancellationException)ex;
if ((ex instanceof CompletionException) &&
(cause = ex.getCause()) != null)
ex = cause;
throw new ExecutionException(ex);
}
/**
* Waits if necessary for at most the given time for this future
* to complete, and then returns its result, if available.
*
* @param timeout the maximum time to wait
* @param unit the time unit of the timeout argument
* @return the result value
* @throws CancellationException if this future was cancelled
* @throws ExecutionException if this future completed exceptionally
* @throws InterruptedException if the current thread was interrupted
* while waiting
* @throws TimeoutException if the wait timed out
*/
public T get(long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
Object r; Throwable ex, cause;
long nanos = unit.toNanos(timeout);
if (Thread.interrupted())
throw new InterruptedException();
if ((r = result) == null)
r = timedAwaitDone(nanos);
if (!(r instanceof AltResult)) {
@SuppressWarnings("unchecked") T tr = (T) r;
return tr;
}
if ((ex = ((AltResult)r).ex) == null)
return null;
if (ex instanceof CancellationException)
throw (CancellationException)ex;
if ((ex instanceof CompletionException) &&
(cause = ex.getCause()) != null)
ex = cause;
throw new ExecutionException(ex);
}
/**
* Returns the result value when complete, or throws an
* (unchecked) exception if completed exceptionally. To better
* conform with the use of common functional forms, if a
* computation involved in the completion of this
* CompletableFuture threw an exception, this method throws an
* (unchecked) {@link CompletionException} with the underlying
* exception as its cause.
*
* @return the result value
* @throws CancellationException if the computation was cancelled
* @throws CompletionException if this future completed
* exceptionally or a completion computation threw an exception
*/
public T join() {
Object r; Throwable ex;
if ((r = result) == null)
r = waitingGet(false);
if (!(r instanceof AltResult)) {
@SuppressWarnings("unchecked") T tr = (T) r;
return tr;
}
if ((ex = ((AltResult)r).ex) == null)
return null;
if (ex instanceof CancellationException)
throw (CancellationException)ex;
if (ex instanceof CompletionException)
throw (CompletionException)ex;
throw new CompletionException(ex);
}
/**
* Returns the result value (or throws any encountered exception)
* if completed, else returns the given valueIfAbsent.
*
* @param valueIfAbsent the value to return if not completed
* @return the result value, if completed, else the given valueIfAbsent
* @throws CancellationException if the computation was cancelled
* @throws CompletionException if this future completed
* exceptionally or a completion computation threw an exception
*/
public T getNow(T valueIfAbsent) {
Object r; Throwable ex;
if ((r = result) == null)
return valueIfAbsent;
if (!(r instanceof AltResult)) {
@SuppressWarnings("unchecked") T tr = (T) r;
return tr;
}
if ((ex = ((AltResult)r).ex) == null)
return null;
if (ex instanceof CancellationException)
throw (CancellationException)ex;
if (ex instanceof CompletionException)
throw (CompletionException)ex;
throw new CompletionException(ex);
}
/**
* If not already completed, sets the value returned by {@link
* #get()} and related methods to the given value.
*
* @param value the result value
* @return {@code true} if this invocation caused this CompletableFuture
* to transition to a completed state, else {@code false}
*/
public boolean complete(T value) {
boolean triggered = result == null &&
UNSAFE.compareAndSwapObject(this, RESULT, null,
value == null ? NIL : value);
postComplete();
return triggered;
}
/**
* If not already completed, causes invocations of {@link #get()}
* and related methods to throw the given exception.
*
* @param ex the exception
* @return {@code true} if this invocation caused this CompletableFuture
* to transition to a completed state, else {@code false}
*/
public boolean completeExceptionally(Throwable ex) {
if (ex == null) throw new NullPointerException();
boolean triggered = result == null &&
UNSAFE.compareAndSwapObject(this, RESULT, null, new AltResult(ex));
postComplete();
return triggered;
}
/**
* Returns a new CompletableFuture that is completed
* when this CompletableFuture completes, with the result of the
* given function of this CompletableFuture's result.
*
* <p>If this CompletableFuture completes exceptionally, or the
* supplied function throws an exception, then the returned
* CompletableFuture completes exceptionally with a
* CompletionException holding the exception as its cause.
*
* @param fn the function to use to compute the value of
* the returned CompletableFuture
* @return the new CompletableFuture
*/
public <U> CompletableFuture<U> thenApply(Fun<? super T,? extends U> fn) {
return doThenApply(fn, null);
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* when this CompletableFuture completes, with the result of the
* given function of this CompletableFuture's result from a
* task running in the {@link ForkJoinPool#commonPool()}.
*
* <p>If this CompletableFuture completes exceptionally, or the
* supplied function throws an exception, then the returned
* CompletableFuture completes exceptionally with a
* CompletionException holding the exception as its cause.
*
* @param fn the function to use to compute the value of
* the returned CompletableFuture
* @return the new CompletableFuture
*/
public <U> CompletableFuture<U> thenApplyAsync
(Fun<? super T,? extends U> fn) {
return doThenApply(fn, ForkJoinPool.commonPool());
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* when this CompletableFuture completes, with the result of the
* given function of this CompletableFuture's result from a
* task running in the given executor.
*
* <p>If this CompletableFuture completes exceptionally, or the
* supplied function throws an exception, then the returned
* CompletableFuture completes exceptionally with a
* CompletionException holding the exception as its cause.
*
* @param fn the function to use to compute the value of
* the returned CompletableFuture
* @param executor the executor to use for asynchronous execution
* @return the new CompletableFuture
*/
public <U> CompletableFuture<U> thenApplyAsync
(Fun<? super T,? extends U> fn,
Executor executor) {
if (executor == null) throw new NullPointerException();
return doThenApply(fn, executor);
}
private <U> CompletableFuture<U> doThenApply
(Fun<? super T,? extends U> fn,
Executor e) {
if (fn == null) throw new NullPointerException();
CompletableFuture<U> dst = new CompletableFuture<U>();
ThenApply<T,U> d = null;
Object r;
if ((r = result) == null) {
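            // Not yet complete: enqueue a ThenApply completion, rechecking the
            // result on each CAS attempt so a racing completion is not missed.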
CompletionNode p = new CompletionNode
(d = new ThenApply<T,U>(this, fn, dst, e));
while ((r = result) == null) {
if (UNSAFE.compareAndSwapObject
(this, COMPLETIONS, p.next = completions, p))
break;
}
}
if (r != null && (d == null || d.compareAndSet(0, 1))) {
T t; Throwable ex;
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
U u = null;
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncApply<T,U>(t, fn, dst));
else
u = fn.apply(t);
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(u, ex);
}
helpPostComplete();
return dst;
}
/**
* Returns a new CompletableFuture that is completed
* when this CompletableFuture completes, after performing the given
* action with this CompletableFuture's result.
*
* <p>If this CompletableFuture completes exceptionally, or the
* supplied action throws an exception, then the returned
* CompletableFuture completes exceptionally with a
* CompletionException holding the exception as its cause.
*
* @param block the action to perform before completing the
* returned CompletableFuture
* @return the new CompletableFuture
*/
public CompletableFuture<Void> thenAccept(Action<? super T> block) {
return doThenAccept(block, null);
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* when this CompletableFuture completes, after performing the given
* action with this CompletableFuture's result from a task running
* in the {@link ForkJoinPool#commonPool()}.
*
* <p>If this CompletableFuture completes exceptionally, or the
* supplied action throws an exception, then the returned
* CompletableFuture completes exceptionally with a
* CompletionException holding the exception as its cause.
*
* @param block the action to perform before completing the
* returned CompletableFuture
* @return the new CompletableFuture
*/
public CompletableFuture<Void> thenAcceptAsync(Action<? super T> block) {
return doThenAccept(block, ForkJoinPool.commonPool());
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* when this CompletableFuture completes, after performing the given
* action with this CompletableFuture's result from a task running
* in the given executor.
*
* <p>If this CompletableFuture completes exceptionally, or the
* supplied action throws an exception, then the returned
* CompletableFuture completes exceptionally with a
* CompletionException holding the exception as its cause.
*
* @param block the action to perform before completing the
* returned CompletableFuture
* @param executor the executor to use for asynchronous execution
* @return the new CompletableFuture
*/
public CompletableFuture<Void> thenAcceptAsync(Action<? super T> block,
Executor executor) {
if (executor == null) throw new NullPointerException();
return doThenAccept(block, executor);
}
private CompletableFuture<Void> doThenAccept(Action<? super T> fn,
Executor e) {
if (fn == null) throw new NullPointerException();
CompletableFuture<Void> dst = new CompletableFuture<Void>();
ThenAccept<T> d = null;
Object r;
if ((r = result) == null) {
CompletionNode p = new CompletionNode
(d = new ThenAccept<T>(this, fn, dst, e));
while ((r = result) == null) {
if (UNSAFE.compareAndSwapObject
(this, COMPLETIONS, p.next = completions, p))
break;
}
}
if (r != null && (d == null || d.compareAndSet(0, 1))) {
T t; Throwable ex;
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncAccept<T>(t, fn, dst));
else
fn.accept(t);
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(null, ex);
}
helpPostComplete();
return dst;
}
/**
* Returns a new CompletableFuture that is completed
* when this CompletableFuture completes, after performing the given
* action.
*
* <p>If this CompletableFuture completes exceptionally, or the
* supplied action throws an exception, then the returned
* CompletableFuture completes exceptionally with a
* CompletionException holding the exception as its cause.
*
* @param action the action to perform before completing the
* returned CompletableFuture
* @return the new CompletableFuture
*/
public CompletableFuture<Void> thenRun(Runnable action) {
return doThenRun(action, null);
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* when this CompletableFuture completes, after performing the given
* action from a task running in the {@link ForkJoinPool#commonPool()}.
*
* <p>If this CompletableFuture completes exceptionally, or the
* supplied action throws an exception, then the returned
* CompletableFuture completes exceptionally with a
* CompletionException holding the exception as its cause.
*
* @param action the action to perform before completing the
* returned CompletableFuture
* @return the new CompletableFuture
*/
public CompletableFuture<Void> thenRunAsync(Runnable action) {
return doThenRun(action, ForkJoinPool.commonPool());
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* when this CompletableFuture completes, after performing the given
* action from a task running in the given executor.
*
* <p>If this CompletableFuture completes exceptionally, or the
* supplied action throws an exception, then the returned
* CompletableFuture completes exceptionally with a
* CompletionException holding the exception as its cause.
*
* @param action the action to perform before completing the
* returned CompletableFuture
* @param executor the executor to use for asynchronous execution
* @return the new CompletableFuture
*/
public CompletableFuture<Void> thenRunAsync(Runnable action,
Executor executor) {
if (executor == null) throw new NullPointerException();
return doThenRun(action, executor);
}
private CompletableFuture<Void> doThenRun(Runnable action,
Executor e) {
if (action == null) throw new NullPointerException();
CompletableFuture<Void> dst = new CompletableFuture<Void>();
ThenRun d = null;
Object r;
if ((r = result) == null) {
CompletionNode p = new CompletionNode
(d = new ThenRun(this, action, dst, e));
while ((r = result) == null) {
if (UNSAFE.compareAndSwapObject
(this, COMPLETIONS, p.next = completions, p))
break;
}
}
if (r != null && (d == null || d.compareAndSet(0, 1))) {
Throwable ex;
if (r instanceof AltResult)
ex = ((AltResult)r).ex;
else
ex = null;
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncRun(action, dst));
else
action.run();
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(null, ex);
}
helpPostComplete();
return dst;
}
/**
* Returns a new CompletableFuture that is completed
* when both this and the other given CompletableFuture complete,
* with the result of the given function of the results of the two
* CompletableFutures.
*
* <p>If this and/or the other CompletableFuture complete
* exceptionally, or the supplied function throws an exception,
* then the returned CompletableFuture completes exceptionally
* with a CompletionException holding the exception as its cause.
*
* @param other the other CompletableFuture
* @param fn the function to use to compute the value of
* the returned CompletableFuture
* @return the new CompletableFuture
*/
public <U,V> CompletableFuture<V> thenCombine
(CompletableFuture<? extends U> other,
BiFun<? super T,? super U,? extends V> fn) {
return doThenCombine(other, fn, null);
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* when both this and the other given CompletableFuture complete,
* with the result of the given function of the results of the two
* CompletableFutures from a task running in the
* {@link ForkJoinPool#commonPool()}.
*
* <p>If this and/or the other CompletableFuture complete
* exceptionally, or the supplied function throws an exception,
* then the returned CompletableFuture completes exceptionally
* with a CompletionException holding the exception as its cause.
*
* @param other the other CompletableFuture
* @param fn the function to use to compute the value of
* the returned CompletableFuture
* @return the new CompletableFuture
*/
public <U,V> CompletableFuture<V> thenCombineAsync
(CompletableFuture<? extends U> other,
BiFun<? super T,? super U,? extends V> fn) {
return doThenCombine(other, fn, ForkJoinPool.commonPool());
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* when both this and the other given CompletableFuture complete,
* with the result of the given function of the results of the two
* CompletableFutures from a task running in the given executor.
*
* <p>If this and/or the other CompletableFuture complete
* exceptionally, or the supplied function throws an exception,
* then the returned CompletableFuture completes exceptionally
* with a CompletionException holding the exception as its cause.
*
* @param other the other CompletableFuture
* @param fn the function to use to compute the value of
* the returned CompletableFuture
* @param executor the executor to use for asynchronous execution
* @return the new CompletableFuture
*/
public <U,V> CompletableFuture<V> thenCombineAsync
(CompletableFuture<? extends U> other,
BiFun<? super T,? super U,? extends V> fn,
Executor executor) {
if (executor == null) throw new NullPointerException();
return doThenCombine(other, fn, executor);
}
private <U,V> CompletableFuture<V> doThenCombine
(CompletableFuture<? extends U> other,
BiFun<? super T,? super U,? extends V> fn,
Executor e) {
if (other == null || fn == null) throw new NullPointerException();
CompletableFuture<V> dst = new CompletableFuture<V>();
ThenCombine<T,U,V> d = null;
Object r, s = null;
if ((r = result) == null || (s = other.result) == null) {
d = new ThenCombine<T,U,V>(this, other, fn, dst, e);
CompletionNode q = null, p = new CompletionNode(d);
while ((r == null && (r = result) == null) ||
(s == null && (s = other.result) == null)) {
if (q != null) {
if (s != null ||
UNSAFE.compareAndSwapObject
(other, COMPLETIONS, q.next = other.completions, q))
break;
}
else if (r != null ||
UNSAFE.compareAndSwapObject
(this, COMPLETIONS, p.next = completions, p)) {
if (s != null)
break;
q = new CompletionNode(d);
}
}
}
if (r != null && s != null && (d == null || d.compareAndSet(0, 1))) {
T t; U u; Throwable ex;
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
if (ex != null)
u = null;
else if (s instanceof AltResult) {
ex = ((AltResult)s).ex;
u = null;
}
else {
@SuppressWarnings("unchecked") U us = (U) s;
u = us;
}
V v = null;
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncCombine<T,U,V>(t, u, fn, dst));
else
v = fn.apply(t, u);
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(v, ex);
}
helpPostComplete();
other.helpPostComplete();
return dst;
}
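    // Illustrative usage sketch, assuming this file's nested BiFun shape
    // (the "price" and "quantity" futures are hypothetical):
    //
    //   CompletableFuture<Integer> total = price.thenCombine(quantity,
    //       new BiFun<Integer, Integer, Integer>() {
    //           public Integer apply(Integer p, Integer q) { return p * q; }
    //       });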
/**
* Returns a new CompletableFuture that is completed
* when both this and the other given CompletableFuture complete,
* after performing the given action with the results of the two
* CompletableFutures.
*
* <p>If this and/or the other CompletableFuture complete
* exceptionally, or the supplied action throws an exception,
* then the returned CompletableFuture completes exceptionally
* with a CompletionException holding the exception as its cause.
*
* @param other the other CompletableFuture
* @param block the action to perform before completing the
* returned CompletableFuture
* @return the new CompletableFuture
*/
public <U> CompletableFuture<Void> thenAcceptBoth
(CompletableFuture<? extends U> other,
BiAction<? super T, ? super U> block) {
return doThenAcceptBoth(other, block, null);
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* when both this and the other given CompletableFuture complete,
* after performing the given action with the results of the two
* CompletableFutures from a task running in the {@link
* ForkJoinPool#commonPool()}.
*
* <p>If this and/or the other CompletableFuture complete
* exceptionally, or the supplied action throws an exception,
* then the returned CompletableFuture completes exceptionally
* with a CompletionException holding the exception as its cause.
*
* @param other the other CompletableFuture
* @param block the action to perform before completing the
* returned CompletableFuture
* @return the new CompletableFuture
*/
public <U> CompletableFuture<Void> thenAcceptBothAsync
(CompletableFuture<? extends U> other,
BiAction<? super T, ? super U> block) {
return doThenAcceptBoth(other, block, ForkJoinPool.commonPool());
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* when both this and the other given CompletableFuture complete,
* after performing the given action with the results of the two
* CompletableFutures from a task running in the given executor.
*
* <p>If this and/or the other CompletableFuture complete
* exceptionally, or the supplied action throws an exception,
* then the returned CompletableFuture completes exceptionally
* with a CompletionException holding the exception as its cause.
*
* @param other the other CompletableFuture
* @param block the action to perform before completing the
* returned CompletableFuture
* @param executor the executor to use for asynchronous execution
* @return the new CompletableFuture
*/
public <U> CompletableFuture<Void> thenAcceptBothAsync
(CompletableFuture<? extends U> other,
BiAction<? super T, ? super U> block,
Executor executor) {
if (executor == null) throw new NullPointerException();
return doThenAcceptBoth(other, block, executor);
}
private <U> CompletableFuture<Void> doThenAcceptBoth
(CompletableFuture<? extends U> other,
BiAction<? super T,? super U> fn,
Executor e) {
if (other == null || fn == null) throw new NullPointerException();
CompletableFuture<Void> dst = new CompletableFuture<Void>();
ThenAcceptBoth<T,U> d = null;
Object r, s = null;
if ((r = result) == null || (s = other.result) == null) {
d = new ThenAcceptBoth<T,U>(this, other, fn, dst, e);
CompletionNode q = null, p = new CompletionNode(d);
while ((r == null && (r = result) == null) ||
(s == null && (s = other.result) == null)) {
if (q != null) {
if (s != null ||
UNSAFE.compareAndSwapObject
(other, COMPLETIONS, q.next = other.completions, q))
break;
}
else if (r != null ||
UNSAFE.compareAndSwapObject
(this, COMPLETIONS, p.next = completions, p)) {
if (s != null)
break;
q = new CompletionNode(d);
}
}
}
if (r != null && s != null && (d == null || d.compareAndSet(0, 1))) {
T t; U u; Throwable ex;
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
if (ex != null)
u = null;
else if (s instanceof AltResult) {
ex = ((AltResult)s).ex;
u = null;
}
else {
@SuppressWarnings("unchecked") U us = (U) s;
u = us;
}
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncAcceptBoth<T,U>(t, u, fn, dst));
else
fn.accept(t, u);
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(null, ex);
}
helpPostComplete();
other.helpPostComplete();
return dst;
}
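    // Illustrative usage sketch, assuming this file's nested BiAction shape
    // (the futures are hypothetical):
    //
    //   price.thenAcceptBoth(quantity, new BiAction<Integer, Integer>() {
    //       public void accept(Integer p, Integer q) {
    //           System.out.println("line total: " + (p * q));
    //       }
    //   });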
/**
* Returns a new CompletableFuture that is completed
* when both this and the other given CompletableFuture complete,
* after performing the given action.
*
* <p>If this and/or the other CompletableFuture complete
* exceptionally, or the supplied action throws an exception,
* then the returned CompletableFuture completes exceptionally
* with a CompletionException holding the exception as its cause.
*
* @param other the other CompletableFuture
* @param action the action to perform before completing the
* returned CompletableFuture
* @return the new CompletableFuture
*/
public CompletableFuture<Void> runAfterBoth(CompletableFuture<?> other,
Runnable action) {
return doRunAfterBoth(other, action, null);
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* when both this and the other given CompletableFuture complete,
* after performing the given action from a task running in the
* {@link ForkJoinPool#commonPool()}.
*
* <p>If this and/or the other CompletableFuture complete
* exceptionally, or the supplied action throws an exception,
* then the returned CompletableFuture completes exceptionally
* with a CompletionException holding the exception as its cause.
*
* @param other the other CompletableFuture
* @param action the action to perform before completing the
* returned CompletableFuture
* @return the new CompletableFuture
*/
public CompletableFuture<Void> runAfterBothAsync(CompletableFuture<?> other,
Runnable action) {
return doRunAfterBoth(other, action, ForkJoinPool.commonPool());
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* when both this and the other given CompletableFuture complete,
* after performing the given action from a task running in the
* given executor.
*
* <p>If this and/or the other CompletableFuture complete
* exceptionally, or the supplied action throws an exception,
* then the returned CompletableFuture completes exceptionally
* with a CompletionException holding the exception as its cause.
*
* @param other the other CompletableFuture
* @param action the action to perform before completing the
* returned CompletableFuture
* @param executor the executor to use for asynchronous execution
* @return the new CompletableFuture
*/
public CompletableFuture<Void> runAfterBothAsync(CompletableFuture<?> other,
Runnable action,
Executor executor) {
if (executor == null) throw new NullPointerException();
return doRunAfterBoth(other, action, executor);
}
private CompletableFuture<Void> doRunAfterBoth(CompletableFuture<?> other,
Runnable action,
Executor e) {
if (other == null || action == null) throw new NullPointerException();
CompletableFuture<Void> dst = new CompletableFuture<Void>();
RunAfterBoth d = null;
Object r, s = null;
if ((r = result) == null || (s = other.result) == null) {
d = new RunAfterBoth(this, other, action, dst, e);
CompletionNode q = null, p = new CompletionNode(d);
while ((r == null && (r = result) == null) ||
(s == null && (s = other.result) == null)) {
if (q != null) {
if (s != null ||
UNSAFE.compareAndSwapObject
(other, COMPLETIONS, q.next = other.completions, q))
break;
}
else if (r != null ||
UNSAFE.compareAndSwapObject
(this, COMPLETIONS, p.next = completions, p)) {
if (s != null)
break;
q = new CompletionNode(d);
}
}
}
if (r != null && s != null && (d == null || d.compareAndSet(0, 1))) {
Throwable ex;
if (r instanceof AltResult)
ex = ((AltResult)r).ex;
else
ex = null;
if (ex == null && (s instanceof AltResult))
ex = ((AltResult)s).ex;
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncRun(action, dst));
else
action.run();
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(null, ex);
}
helpPostComplete();
other.helpPostComplete();
return dst;
}
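    // Illustrative usage sketch (the "download" and "checksum" futures are
    // hypothetical); the Runnable sees neither result:
    //
    //   download.runAfterBoth(checksum, new Runnable() {
    //       public void run() { System.out.println("both stages finished"); }
    //   });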
/**
* Returns a new CompletableFuture that is completed
* when either this or the other given CompletableFuture completes,
* with the result of the given function of either this or the other
* CompletableFuture's result.
*
* <p>If this and/or the other CompletableFuture complete
* exceptionally, then the returned CompletableFuture may also do so,
* with a CompletionException holding one of these exceptions as its
* cause. No guarantees are made about which result or exception is
* used in the returned CompletableFuture. If the supplied function
* throws an exception, then the returned CompletableFuture completes
* exceptionally with a CompletionException holding the exception as
* its cause.
*
* @param other the other CompletableFuture
* @param fn the function to use to compute the value of
* the returned CompletableFuture
* @return the new CompletableFuture
*/
public <U> CompletableFuture<U> applyToEither
(CompletableFuture<? extends T> other,
Fun<? super T, U> fn) {
return doApplyToEither(other, fn, null);
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* when either this or the other given CompletableFuture completes,
* with the result of the given function of either this or the other
* CompletableFuture's result from a task running in the
* {@link ForkJoinPool#commonPool()}.
*
* <p>If this and/or the other CompletableFuture complete
* exceptionally, then the returned CompletableFuture may also do so,
* with a CompletionException holding one of these exceptions as its
* cause. No guarantees are made about which result or exception is
* used in the returned CompletableFuture. If the supplied function
* throws an exception, then the returned CompletableFuture completes
* exceptionally with a CompletionException holding the exception as
* its cause.
*
* @param other the other CompletableFuture
* @param fn the function to use to compute the value of
* the returned CompletableFuture
* @return the new CompletableFuture
*/
public <U> CompletableFuture<U> applyToEitherAsync
(CompletableFuture<? extends T> other,
Fun<? super T, U> fn) {
return doApplyToEither(other, fn, ForkJoinPool.commonPool());
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* when either this or the other given CompletableFuture completes,
* with the result of the given function of either this or the other
* CompletableFuture's result from a task running in the
* given executor.
*
* <p>If this and/or the other CompletableFuture complete
* exceptionally, then the returned CompletableFuture may also do so,
* with a CompletionException holding one of these exceptions as its
* cause. No guarantees are made about which result or exception is
* used in the returned CompletableFuture. If the supplied function
* throws an exception, then the returned CompletableFuture completes
* exceptionally with a CompletionException holding the exception as
* its cause.
*
* @param other the other CompletableFuture
* @param fn the function to use to compute the value of
* the returned CompletableFuture
* @param executor the executor to use for asynchronous execution
* @return the new CompletableFuture
*/
public <U> CompletableFuture<U> applyToEitherAsync
(CompletableFuture<? extends T> other,
Fun<? super T, U> fn,
Executor executor) {
if (executor == null) throw new NullPointerException();
return doApplyToEither(other, fn, executor);
}
private <U> CompletableFuture<U> doApplyToEither
(CompletableFuture<? extends T> other,
Fun<? super T, U> fn,
Executor e) {
if (other == null || fn == null) throw new NullPointerException();
CompletableFuture<U> dst = new CompletableFuture<U>();
ApplyToEither<T,U> d = null;
Object r;
if ((r = result) == null && (r = other.result) == null) {
d = new ApplyToEither<T,U>(this, other, fn, dst, e);
CompletionNode q = null, p = new CompletionNode(d);
while ((r = result) == null && (r = other.result) == null) {
if (q != null) {
if (UNSAFE.compareAndSwapObject
(other, COMPLETIONS, q.next = other.completions, q))
break;
}
else if (UNSAFE.compareAndSwapObject
(this, COMPLETIONS, p.next = completions, p))
q = new CompletionNode(d);
}
}
if (r != null && (d == null || d.compareAndSet(0, 1))) {
T t; Throwable ex;
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
U u = null;
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncApply<T,U>(t, fn, dst));
else
u = fn.apply(t);
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(u, ex);
}
helpPostComplete();
other.helpPostComplete();
return dst;
}
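    // Illustrative usage sketch, assuming this file's nested Fun shape
    // ("primary" and "replica" are hypothetical same-typed futures):
    //
    //   CompletableFuture<String> first = primary.applyToEither(replica,
    //       new Fun<String, String>() {
    //           public String apply(String v) { return "winner: " + v; }
    //       });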
/**
* Returns a new CompletableFuture that is completed
* when either this or the other given CompletableFuture completes,
* after performing the given action with the result of either this
* or the other CompletableFuture's result.
*
* <p>If this and/or the other CompletableFuture complete
* exceptionally, then the returned CompletableFuture may also do so,
* with a CompletionException holding one of these exceptions as its
* cause. No guarantees are made about which result or exception is
* used in the returned CompletableFuture. If the supplied action
* throws an exception, then the returned CompletableFuture completes
* exceptionally with a CompletionException holding the exception as
* its cause.
*
* @param other the other CompletableFuture
* @param block the action to perform before completing the
* returned CompletableFuture
* @return the new CompletableFuture
*/
public CompletableFuture<Void> acceptEither
(CompletableFuture<? extends T> other,
Action<? super T> block) {
return doAcceptEither(other, block, null);
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* when either this or the other given CompletableFuture completes,
* after performing the given action with the result of either this
* or the other CompletableFuture's result from a task running in
* the {@link ForkJoinPool#commonPool()}.
*
* <p>If this and/or the other CompletableFuture complete
* exceptionally, then the returned CompletableFuture may also do so,
* with a CompletionException holding one of these exceptions as its
* cause. No guarantees are made about which result or exception is
* used in the returned CompletableFuture. If the supplied action
* throws an exception, then the returned CompletableFuture completes
* exceptionally with a CompletionException holding the exception as
* its cause.
*
* @param other the other CompletableFuture
* @param block the action to perform before completing the
* returned CompletableFuture
* @return the new CompletableFuture
*/
public CompletableFuture<Void> acceptEitherAsync
(CompletableFuture<? extends T> other,
Action<? super T> block) {
return doAcceptEither(other, block, ForkJoinPool.commonPool());
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* when either this or the other given CompletableFuture completes,
* after performing the given action with the result of either this
* or the other CompletableFuture's result from a task running in
* the given executor.
*
* <p>If this and/or the other CompletableFuture complete
* exceptionally, then the returned CompletableFuture may also do so,
* with a CompletionException holding one of these exceptions as its
* cause. No guarantees are made about which result or exception is
* used in the returned CompletableFuture. If the supplied action
* throws an exception, then the returned CompletableFuture completes
* exceptionally with a CompletionException holding the exception as
* its cause.
*
* @param other the other CompletableFuture
* @param block the action to perform before completing the
* returned CompletableFuture
* @param executor the executor to use for asynchronous execution
* @return the new CompletableFuture
*/
public CompletableFuture<Void> acceptEitherAsync
(CompletableFuture<? extends T> other,
Action<? super T> block,
Executor executor) {
if (executor == null) throw new NullPointerException();
return doAcceptEither(other, block, executor);
}
private CompletableFuture<Void> doAcceptEither
(CompletableFuture<? extends T> other,
Action<? super T> fn,
Executor e) {
if (other == null || fn == null) throw new NullPointerException();
CompletableFuture<Void> dst = new CompletableFuture<Void>();
AcceptEither<T> d = null;
Object r;
if ((r = result) == null && (r = other.result) == null) {
d = new AcceptEither<T>(this, other, fn, dst, e);
CompletionNode q = null, p = new CompletionNode(d);
while ((r = result) == null && (r = other.result) == null) {
if (q != null) {
if (UNSAFE.compareAndSwapObject
(other, COMPLETIONS, q.next = other.completions, q))
break;
}
else if (UNSAFE.compareAndSwapObject
(this, COMPLETIONS, p.next = completions, p))
q = new CompletionNode(d);
}
}
if (r != null && (d == null || d.compareAndSet(0, 1))) {
T t; Throwable ex;
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncAccept<T>(t, fn, dst));
else
fn.accept(t);
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(null, ex);
}
helpPostComplete();
other.helpPostComplete();
return dst;
}
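    // Illustrative usage sketch, assuming this file's nested Action shape
    // (futures hypothetical as above):
    //
    //   primary.acceptEither(replica, new Action<String>() {
    //       public void accept(String v) { System.out.println("got " + v); }
    //   });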
/**
* Returns a new CompletableFuture that is completed
* when either this or the other given CompletableFuture completes,
* after performing the given action.
*
* <p>If this and/or the other CompletableFuture complete
* exceptionally, then the returned CompletableFuture may also do so,
* with a CompletionException holding one of these exceptions as its
* cause. No guarantees are made about which result or exception is
* used in the returned CompletableFuture. If the supplied action
* throws an exception, then the returned CompletableFuture completes
* exceptionally with a CompletionException holding the exception as
* its cause.
*
* @param other the other CompletableFuture
* @param action the action to perform before completing the
* returned CompletableFuture
* @return the new CompletableFuture
*/
public CompletableFuture<Void> runAfterEither(CompletableFuture<?> other,
Runnable action) {
return doRunAfterEither(other, action, null);
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* when either this or the other given CompletableFuture completes,
* after performing the given action from a task running in the
* {@link ForkJoinPool#commonPool()}.
*
* <p>If this and/or the other CompletableFuture complete
* exceptionally, then the returned CompletableFuture may also do so,
* with a CompletionException holding one of these exceptions as its
* cause. No guarantees are made about which result or exception is
* used in the returned CompletableFuture. If the supplied action
* throws an exception, then the returned CompletableFuture completes
* exceptionally with a CompletionException holding the exception as
* its cause.
*
* @param other the other CompletableFuture
* @param action the action to perform before completing the
* returned CompletableFuture
* @return the new CompletableFuture
*/
public CompletableFuture<Void> runAfterEitherAsync
(CompletableFuture<?> other,
Runnable action) {
return doRunAfterEither(other, action, ForkJoinPool.commonPool());
}
/**
* Returns a new CompletableFuture that is asynchronously completed
* when either this or the other given CompletableFuture completes,
* after performing the given action from a task running in the
* given executor.
*
* <p>If this and/or the other CompletableFuture complete
* exceptionally, then the returned CompletableFuture may also do so,
* with a CompletionException holding one of these exceptions as its
* cause. No guarantees are made about which result or exception is
* used in the returned CompletableFuture. If the supplied action
* throws an exception, then the returned CompletableFuture completes
* exceptionally with a CompletionException holding the exception as
* its cause.
*
* @param other the other CompletableFuture
* @param action the action to perform before completing the
* returned CompletableFuture
* @param executor the executor to use for asynchronous execution
* @return the new CompletableFuture
*/
public CompletableFuture<Void> runAfterEitherAsync
(CompletableFuture<?> other,
Runnable action,
Executor executor) {
if (executor == null) throw new NullPointerException();
return doRunAfterEither(other, action, executor);
}
private CompletableFuture<Void> doRunAfterEither
(CompletableFuture<?> other,
Runnable action,
Executor e) {
if (other == null || action == null) throw new NullPointerException();
CompletableFuture<Void> dst = new CompletableFuture<Void>();
RunAfterEither d = null;
Object r;
if ((r = result) == null && (r = other.result) == null) {
d = new RunAfterEither(this, other, action, dst, e);
CompletionNode q = null, p = new CompletionNode(d);
while ((r = result) == null && (r = other.result) == null) {
if (q != null) {
if (UNSAFE.compareAndSwapObject
(other, COMPLETIONS, q.next = other.completions, q))
break;
}
else if (UNSAFE.compareAndSwapObject
(this, COMPLETIONS, p.next = completions, p))
q = new CompletionNode(d);
}
}
if (r != null && (d == null || d.compareAndSet(0, 1))) {
Throwable ex;
if (r instanceof AltResult)
ex = ((AltResult)r).ex;
else
ex = null;
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncRun(action, dst));
else
action.run();
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(null, ex);
}
helpPostComplete();
other.helpPostComplete();
return dst;
}
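    // Illustrative usage sketch; the action runs once the faster of the two
    // hypothetical futures completes:
    //
    //   primary.runAfterEither(replica, new Runnable() {
    //       public void run() { System.out.println("one of them is done"); }
    //   });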
/**
* Returns a CompletableFuture that upon completion, has the same
* value as produced by the given function of the result of this
* CompletableFuture.
*
* <p>If this CompletableFuture completes exceptionally, then the
* returned CompletableFuture also does so, with a
* CompletionException holding this exception as its cause.
* Similarly, if the computed CompletableFuture completes
* exceptionally, then so does the returned CompletableFuture.
*
* @param fn the function returning a new CompletableFuture
* @return the CompletableFuture
*/
public <U> CompletableFuture<U> thenCompose
(Fun<? super T, CompletableFuture<U>> fn) {
return doThenCompose(fn, null);
}
/**
* Returns a CompletableFuture that upon completion, has the same
* value as that produced asynchronously using the {@link
* ForkJoinPool#commonPool()} by the given function of the result
* of this CompletableFuture.
*
* <p>If this CompletableFuture completes exceptionally, then the
* returned CompletableFuture also does so, with a
* CompletionException holding this exception as its cause.
* Similarly, if the computed CompletableFuture completes
* exceptionally, then so does the returned CompletableFuture.
*
* @param fn the function returning a new CompletableFuture
* @return the CompletableFuture
*/
public <U> CompletableFuture<U> thenComposeAsync
(Fun<? super T, CompletableFuture<U>> fn) {
return doThenCompose(fn, ForkJoinPool.commonPool());
}
/**
* Returns a CompletableFuture that upon completion, has the same
     * value as that produced asynchronously using the given executor
     * by the given function of the result of this CompletableFuture.
*
* <p>If this CompletableFuture completes exceptionally, then the
* returned CompletableFuture also does so, with a
* CompletionException holding this exception as its cause.
* Similarly, if the computed CompletableFuture completes
* exceptionally, then so does the returned CompletableFuture.
*
* @param fn the function returning a new CompletableFuture
* @param executor the executor to use for asynchronous execution
* @return the CompletableFuture
*/
public <U> CompletableFuture<U> thenComposeAsync
(Fun<? super T, CompletableFuture<U>> fn,
Executor executor) {
if (executor == null) throw new NullPointerException();
return doThenCompose(fn, executor);
}
private <U> CompletableFuture<U> doThenCompose
(Fun<? super T, CompletableFuture<U>> fn,
Executor e) {
if (fn == null) throw new NullPointerException();
CompletableFuture<U> dst = null;
ThenCompose<T,U> d = null;
Object r;
if ((r = result) == null) {
dst = new CompletableFuture<U>();
CompletionNode p = new CompletionNode
(d = new ThenCompose<T,U>(this, fn, dst, e));
while ((r = result) == null) {
if (UNSAFE.compareAndSwapObject
(this, COMPLETIONS, p.next = completions, p))
break;
}
}
if (r != null && (d == null || d.compareAndSet(0, 1))) {
T t; Throwable ex;
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
if (ex == null) {
if (e != null) {
if (dst == null)
dst = new CompletableFuture<U>();
e.execute(new AsyncCompose<T,U>(t, fn, dst));
}
else {
try {
if ((dst = fn.apply(t)) == null)
ex = new NullPointerException();
} catch (Throwable rex) {
ex = rex;
}
}
}
if (dst == null)
dst = new CompletableFuture<U>();
if (ex != null)
dst.internalComplete(null, ex);
}
helpPostComplete();
dst.helpPostComplete();
return dst;
}
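    // Illustrative usage sketch; "loadOrderAsync" is a hypothetical method
    // returning a CompletableFuture, which thenCompose flattens into the
    // returned future rather than nesting it:
    //
    //   CompletableFuture<Order> order = userFuture.thenCompose(
    //       new Fun<User, CompletableFuture<Order>>() {
    //           public CompletableFuture<Order> apply(User u) {
    //               return loadOrderAsync(u);
    //           }
    //       });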
/**
* Returns a new CompletableFuture that is completed when this
* CompletableFuture completes, with the result of the given
* function of the exception triggering this CompletableFuture's
* completion when it completes exceptionally; otherwise, if this
* CompletableFuture completes normally, then the returned
* CompletableFuture also completes normally with the same value.
*
* @param fn the function to use to compute the value of the
* returned CompletableFuture if this CompletableFuture completed
* exceptionally
* @return the new CompletableFuture
*/
public CompletableFuture<T> exceptionally
(Fun<Throwable, ? extends T> fn) {
if (fn == null) throw new NullPointerException();
CompletableFuture<T> dst = new CompletableFuture<T>();
ExceptionCompletion<T> d = null;
Object r;
if ((r = result) == null) {
CompletionNode p =
new CompletionNode(d = new ExceptionCompletion<T>(this, fn, dst));
while ((r = result) == null) {
if (UNSAFE.compareAndSwapObject(this, COMPLETIONS,
p.next = completions, p))
break;
}
}
if (r != null && (d == null || d.compareAndSet(0, 1))) {
T t = null; Throwable ex, dx = null;
if (r instanceof AltResult) {
if ((ex = ((AltResult)r).ex) != null) {
try {
t = fn.apply(ex);
} catch (Throwable rex) {
dx = rex;
}
}
}
else {
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
dst.internalComplete(t, dx);
}
helpPostComplete();
return dst;
}
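    // Illustrative usage sketch; a fallback value substitutes for any
    // exception from the hypothetical "risky" future:
    //
    //   CompletableFuture<Integer> safe = risky.exceptionally(
    //       new Fun<Throwable, Integer>() {
    //           public Integer apply(Throwable ex) { return -1; }
    //       });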
/**
* Returns a new CompletableFuture that is completed when this
* CompletableFuture completes, with the result of the given
* function of the result and exception of this CompletableFuture's
* completion. The given function is invoked with the result (or
* {@code null} if none) and the exception (or {@code null} if none)
* of this CompletableFuture when complete.
*
* @param fn the function to use to compute the value of the
* returned CompletableFuture
* @return the new CompletableFuture
*/
public <U> CompletableFuture<U> handle
(BiFun<? super T, Throwable, ? extends U> fn) {
if (fn == null) throw new NullPointerException();
CompletableFuture<U> dst = new CompletableFuture<U>();
HandleCompletion<T,U> d = null;
Object r;
if ((r = result) == null) {
CompletionNode p =
new CompletionNode(d = new HandleCompletion<T,U>(this, fn, dst));
while ((r = result) == null) {
if (UNSAFE.compareAndSwapObject(this, COMPLETIONS,
p.next = completions, p))
break;
}
}
if (r != null && (d == null || d.compareAndSet(0, 1))) {
T t; Throwable ex;
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
U u; Throwable dx;
try {
u = fn.apply(t, ex);
dx = null;
} catch (Throwable rex) {
dx = rex;
u = null;
}
dst.internalComplete(u, dx);
}
helpPostComplete();
return dst;
}
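    // Illustrative usage sketch; exactly one of (v, ex) is non-null when the
    // hypothetical future "f" completes:
    //
    //   CompletableFuture<String> summary = f.handle(
    //       new BiFun<Integer, Throwable, String>() {
    //           public String apply(Integer v, Throwable ex) {
    //               return (ex == null) ? "value: " + v : "failed: " + ex;
    //           }
    //       });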
/* ------------- Arbitrary-arity constructions -------------- */
/*
* The basic plan of attack is to recursively form binary
* completion trees of elements. This can be overkill for small
* sets, but scales nicely. The And/All vs Or/Any forms use the
* same idea, but details differ.
*/
/**
* Returns a new CompletableFuture that is completed when all of
* the given CompletableFutures complete. If any of the given
* CompletableFutures complete exceptionally, then the returned
* CompletableFuture also does so, with a CompletionException
* holding this exception as its cause. Otherwise, the results,
* if any, of the given CompletableFutures are not reflected in
* the returned CompletableFuture, but may be obtained by
* inspecting them individually. If no CompletableFutures are
* provided, returns a CompletableFuture completed with the value
* {@code null}.
*
* <p>Among the applications of this method is to await completion
* of a set of independent CompletableFutures before continuing a
* program, as in: {@code CompletableFuture.allOf(c1, c2,
* c3).join();}.
*
* @param cfs the CompletableFutures
* @return a new CompletableFuture that is completed when all of the
* given CompletableFutures complete
* @throws NullPointerException if the array or any of its elements are
* {@code null}
*/
public static CompletableFuture<Void> allOf(CompletableFuture<?>... cfs) {
int len = cfs.length; // Directly handle empty and singleton cases
if (len > 1)
return allTree(cfs, 0, len - 1);
else {
CompletableFuture<Void> dst = new CompletableFuture<Void>();
CompletableFuture<?> f;
if (len == 0)
dst.result = NIL;
else if ((f = cfs[0]) == null)
throw new NullPointerException();
else {
ThenPropagate d = null;
CompletionNode p = null;
Object r;
while ((r = f.result) == null) {
if (d == null)
d = new ThenPropagate(f, dst);
else if (p == null)
p = new CompletionNode(d);
else if (UNSAFE.compareAndSwapObject
(f, COMPLETIONS, p.next = f.completions, p))
break;
}
if (r != null && (d == null || d.compareAndSet(0, 1)))
dst.internalComplete(null, (r instanceof AltResult) ?
((AltResult)r).ex : null);
f.helpPostComplete();
}
return dst;
}
}
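    // Illustrative usage sketch, mirroring the javadoc above
    // (c1..c3 are hypothetical futures):
    //
    //   CompletableFuture.allOf(c1, c2, c3).join(); // waits for all three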
/**
* Recursively constructs an And'ed tree of CompletableFutures.
* Called only when array known to have at least two elements.
*/
private static CompletableFuture<Void> allTree(CompletableFuture<?>[] cfs,
int lo, int hi) {
CompletableFuture<?> fst, snd;
int mid = (lo + hi) >>> 1;
if ((fst = (lo == mid ? cfs[lo] : allTree(cfs, lo, mid))) == null ||
(snd = (hi == mid+1 ? cfs[hi] : allTree(cfs, mid+1, hi))) == null)
throw new NullPointerException();
CompletableFuture<Void> dst = new CompletableFuture<Void>();
AndCompletion d = null;
CompletionNode p = null, q = null;
Object r = null, s = null;
while ((r = fst.result) == null || (s = snd.result) == null) {
if (d == null)
d = new AndCompletion(fst, snd, dst);
else if (p == null)
p = new CompletionNode(d);
else if (q == null) {
if (UNSAFE.compareAndSwapObject
(fst, COMPLETIONS, p.next = fst.completions, p))
q = new CompletionNode(d);
}
else if (UNSAFE.compareAndSwapObject
(snd, COMPLETIONS, q.next = snd.completions, q))
break;
}
if ((r != null || (r = fst.result) != null) &&
(s != null || (s = snd.result) != null) &&
(d == null || d.compareAndSet(0, 1))) {
Throwable ex;
if (r instanceof AltResult)
ex = ((AltResult)r).ex;
else
ex = null;
if (ex == null && (s instanceof AltResult))
ex = ((AltResult)s).ex;
dst.internalComplete(null, ex);
}
fst.helpPostComplete();
snd.helpPostComplete();
return dst;
}
/**
* Returns a new CompletableFuture that is completed when any of
* the given CompletableFutures complete, with the same result.
* Otherwise, if it completed exceptionally, the returned
* CompletableFuture also does so, with a CompletionException
* holding this exception as its cause. If no CompletableFutures
* are provided, returns an incomplete CompletableFuture.
*
* @param cfs the CompletableFutures
* @return a new CompletableFuture that is completed with the
* result or exception of any of the given CompletableFutures when
* one completes
* @throws NullPointerException if the array or any of its elements are
* {@code null}
*/
public static CompletableFuture<Object> anyOf(CompletableFuture<?>... cfs) {
int len = cfs.length; // Same idea as allOf
if (len > 1)
return anyTree(cfs, 0, len - 1);
else {
CompletableFuture<Object> dst = new CompletableFuture<Object>();
CompletableFuture<?> f;
if (len == 0)
; // skip
else if ((f = cfs[0]) == null)
throw new NullPointerException();
else {
ThenCopy<Object> d = null;
CompletionNode p = null;
Object r;
while ((r = f.result) == null) {
if (d == null)
d = new ThenCopy<Object>(f, dst);
else if (p == null)
p = new CompletionNode(d);
else if (UNSAFE.compareAndSwapObject
(f, COMPLETIONS, p.next = f.completions, p))
break;
}
if (r != null && (d == null || d.compareAndSet(0, 1))) {
Throwable ex; Object t;
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
t = r;
}
dst.internalComplete(t, ex);
}
f.helpPostComplete();
}
return dst;
}
}
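    // Illustrative usage sketch (c1..c3 are hypothetical futures); the result
    // type is Object because the inputs may carry different value types:
    //
    //   Object fastest = CompletableFuture.anyOf(c1, c2, c3).join();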
/**
* Recursively constructs an Or'ed tree of CompletableFutures.
*/
private static CompletableFuture<Object> anyTree(CompletableFuture<?>[] cfs,
int lo, int hi) {
CompletableFuture<?> fst, snd;
int mid = (lo + hi) >>> 1;
if ((fst = (lo == mid ? cfs[lo] : anyTree(cfs, lo, mid))) == null ||
(snd = (hi == mid+1 ? cfs[hi] : anyTree(cfs, mid+1, hi))) == null)
throw new NullPointerException();
CompletableFuture<Object> dst = new CompletableFuture<Object>();
OrCompletion d = null;
CompletionNode p = null, q = null;
Object r;
while ((r = fst.result) == null && (r = snd.result) == null) {
if (d == null)
d = new OrCompletion(fst, snd, dst);
else if (p == null)
p = new CompletionNode(d);
else if (q == null) {
if (UNSAFE.compareAndSwapObject
(fst, COMPLETIONS, p.next = fst.completions, p))
q = new CompletionNode(d);
}
else if (UNSAFE.compareAndSwapObject
(snd, COMPLETIONS, q.next = snd.completions, q))
break;
}
if ((r != null || (r = fst.result) != null ||
(r = snd.result) != null) &&
(d == null || d.compareAndSet(0, 1))) {
Throwable ex; Object t;
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
t = r;
}
dst.internalComplete(t, ex);
}
fst.helpPostComplete();
snd.helpPostComplete();
return dst;
}
/* ------------- Control and status methods -------------- */
/**
* If not already completed, completes this CompletableFuture with
* a {@link CancellationException}. Dependent CompletableFutures
* that have not already completed will also complete
* exceptionally, with a {@link CompletionException} caused by
* this {@code CancellationException}.
*
* @param mayInterruptIfRunning this value has no effect in this
* implementation because interrupts are not used to control
* processing.
*
* @return {@code true} if this task is now cancelled
*/
public boolean cancel(boolean mayInterruptIfRunning) {
boolean cancelled = (result == null) &&
UNSAFE.compareAndSwapObject
(this, RESULT, null, new AltResult(new CancellationException()));
postComplete();
return cancelled || isCancelled();
}
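    // Illustrative usage sketch; the boolean argument is ignored by this
    // implementation, as noted in the javadoc above:
    //
    //   boolean nowCancelled = f.cancel(true);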
/**
* Returns {@code true} if this CompletableFuture was cancelled
* before it completed normally.
*
* @return {@code true} if this CompletableFuture was cancelled
* before it completed normally
*/
public boolean isCancelled() {
Object r;
return ((r = result) instanceof AltResult) &&
(((AltResult)r).ex instanceof CancellationException);
}
/**
* Forcibly sets or resets the value subsequently returned by
* method {@link #get()} and related methods, whether or not
* already completed. This method is designed for use only in
* error recovery actions, and even in such situations may result
* in ongoing dependent completions using established versus
* overwritten outcomes.
*
* @param value the completion value
*/
public void obtrudeValue(T value) {
result = (value == null) ? NIL : value;
postComplete();
}
/**
* Forcibly causes subsequent invocations of method {@link #get()}
* and related methods to throw the given exception, whether or
* not already completed. This method is designed for use only in
* recovery actions, and even in such situations may result in
* ongoing dependent completions using established versus
* overwritten outcomes.
*
* @param ex the exception
*/
public void obtrudeException(Throwable ex) {
if (ex == null) throw new NullPointerException();
result = new AltResult(ex);
postComplete();
}
/**
* Returns the estimated number of CompletableFutures whose
* completions are awaiting completion of this CompletableFuture.
* This method is designed for use in monitoring system state, not
* for synchronization control.
*
* @return the number of dependent CompletableFutures
*/
public int getNumberOfDependents() {
int count = 0;
for (CompletionNode p = completions; p != null; p = p.next)
++count;
return count;
}
/**
* Returns a string identifying this CompletableFuture, as well as
* its completion state. The state, in brackets, contains the
* String {@code "Completed Normally"} or the String {@code
* "Completed Exceptionally"}, or the String {@code "Not
* completed"} followed by the number of CompletableFutures
* dependent upon its completion, if any.
*
* @return a string identifying this CompletableFuture, as well as its state
*/
public String toString() {
Object r = result;
int count;
return super.toString() +
((r == null) ?
(((count = getNumberOfDependents()) == 0) ?
"[Not completed]" :
"[Not completed, " + count + " dependents]") :
(((r instanceof AltResult) && ((AltResult)r).ex != null) ?
"[Completed exceptionally]" :
"[Completed normally]"));
}
// Unsafe mechanics
private static final sun.misc.Unsafe UNSAFE;
private static final long RESULT;
private static final long WAITERS;
private static final long COMPLETIONS;
static {
try {
UNSAFE = getUnsafe();
Class<?> k = CompletableFuture.class;
RESULT = UNSAFE.objectFieldOffset
(k.getDeclaredField("result"));
WAITERS = UNSAFE.objectFieldOffset
(k.getDeclaredField("waiters"));
COMPLETIONS = UNSAFE.objectFieldOffset
(k.getDeclaredField("completions"));
} catch (Exception e) {
throw new Error(e);
}
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
private static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException tryReflectionInstead) {}
try {
return java.security.AccessController.doPrivileged
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
}
| 0true
|
src_main_java_jsr166e_CompletableFuture.java
|
3,804 |
public class BoolQueryParser implements QueryParser {
public static final String NAME = "bool";
@Inject
public BoolQueryParser(Settings settings) {
BooleanQuery.setMaxClauseCount(settings.getAsInt("index.query.bool.max_clause_count", settings.getAsInt("indices.query.bool.max_clause_count", BooleanQuery.getMaxClauseCount())));
}
@Override
public String[] names() {
return new String[]{NAME};
}
@Override
public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
boolean disableCoord = false;
float boost = 1.0f;
String minimumShouldMatch = null;
List<BooleanClause> clauses = newArrayList();
boolean adjustPureNegative = true;
String queryName = null;
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("must".equals(currentFieldName)) {
Query query = parseContext.parseInnerQuery();
if (query != null) {
clauses.add(new BooleanClause(query, BooleanClause.Occur.MUST));
}
} else if ("must_not".equals(currentFieldName) || "mustNot".equals(currentFieldName)) {
Query query = parseContext.parseInnerQuery();
if (query != null) {
clauses.add(new BooleanClause(query, BooleanClause.Occur.MUST_NOT));
}
} else if ("should".equals(currentFieldName)) {
Query query = parseContext.parseInnerQuery();
if (query != null) {
clauses.add(new BooleanClause(query, BooleanClause.Occur.SHOULD));
}
} else {
throw new QueryParsingException(parseContext.index(), "[bool] query does not support [" + currentFieldName + "]");
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("must".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
Query query = parseContext.parseInnerQuery();
if (query != null) {
clauses.add(new BooleanClause(query, BooleanClause.Occur.MUST));
}
}
} else if ("must_not".equals(currentFieldName) || "mustNot".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
Query query = parseContext.parseInnerQuery();
if (query != null) {
clauses.add(new BooleanClause(query, BooleanClause.Occur.MUST_NOT));
}
}
} else if ("should".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
Query query = parseContext.parseInnerQuery();
if (query != null) {
clauses.add(new BooleanClause(query, BooleanClause.Occur.SHOULD));
}
}
} else {
throw new QueryParsingException(parseContext.index(), "bool query does not support [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if ("disable_coord".equals(currentFieldName) || "disableCoord".equals(currentFieldName)) {
disableCoord = parser.booleanValue();
} else if ("minimum_should_match".equals(currentFieldName) || "minimumShouldMatch".equals(currentFieldName)) {
minimumShouldMatch = parser.textOrNull();
} else if ("boost".equals(currentFieldName)) {
boost = parser.floatValue();
} else if ("minimum_number_should_match".equals(currentFieldName) || "minimumNumberShouldMatch".equals(currentFieldName)) {
minimumShouldMatch = parser.textOrNull();
} else if ("adjust_pure_negative".equals(currentFieldName) || "adjustPureNegative".equals(currentFieldName)) {
adjustPureNegative = parser.booleanValue();
} else if ("_name".equals(currentFieldName)) {
queryName = parser.text();
} else {
throw new QueryParsingException(parseContext.index(), "[bool] query does not support [" + currentFieldName + "]");
}
}
}
if (clauses.isEmpty()) {
return null;
}
BooleanQuery booleanQuery = new BooleanQuery(disableCoord);
for (BooleanClause clause : clauses) {
booleanQuery.add(clause);
}
booleanQuery.setBoost(boost);
Queries.applyMinimumShouldMatch(booleanQuery, minimumShouldMatch);
Query query = optimizeQuery(adjustPureNegative ? fixNegativeQueryIfNeeded(booleanQuery) : booleanQuery);
if (queryName != null) {
parseContext.addNamedQuery(queryName, query);
}
return query;
}
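// Illustrative request body this parser accepts (a sketch; field names and
// values are hypothetical):
//   { "bool" : {
//       "must"     : { "term"  : { "user" : "kimchy" } },
//       "must_not" : { "range" : { "age"  : { "from" : 10, "to" : 20 } } },
//       "should"   : [ { "term" : { "tag" : "wow" } } ],
//       "minimum_should_match" : 1,
//       "boost" : 1.0
//   } }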
}
| 1no label
|
src_main_java_org_elasticsearch_index_query_BoolQueryParser.java
|
5,292 |
public static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
@Override
public DoubleTerms readResult(StreamInput in) throws IOException {
DoubleTerms buckets = new DoubleTerms();
buckets.readFrom(in);
return buckets;
}
};
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_bucket_terms_DoubleTerms.java
|
1,182 |
public class OQueryOperatorIn extends OQueryOperatorEqualityNotNulls {
public OQueryOperatorIn() {
super("IN", 5, false);
}
@Override
@SuppressWarnings("unchecked")
protected boolean evaluateExpression(final OIdentifiable iRecord, final OSQLFilterCondition iCondition, final Object iLeft,
final Object iRight, OCommandContext iContext) {
if (iLeft instanceof Collection<?>) {
final Collection<Object> sourceCollection = (Collection<Object>) iLeft;
if (iRight instanceof Collection<?>) {
// AGAINST COLLECTION OF ITEMS
final Collection<Object> collectionToMatch = (Collection<Object>) iRight;
for (final Object o1 : sourceCollection) {
for (final Object o2 : collectionToMatch) {
if (OQueryOperatorEquals.equals(o1, o2))
// a single matching pair is enough; stop scanning both collections
return true;
}
}
return false;
} else {
// AGAINST SINGLE ITEM
if (sourceCollection instanceof Set<?>)
return sourceCollection.contains(iRight);
for (final Object o : sourceCollection) {
if (OQueryOperatorEquals.equals(iRight, o))
return true;
}
}
} else if (iRight instanceof Collection<?>) {
final Collection<Object> sourceCollection = (Collection<Object>) iRight;
if (sourceCollection instanceof Set<?>)
return sourceCollection.contains(iLeft);
for (final Object o : sourceCollection) {
if (OQueryOperatorEquals.equals(iLeft, o))
return true;
}
} else if (iLeft.getClass().isArray()) {
for (final Object o : (Object[]) iLeft) {
if (OQueryOperatorEquals.equals(iRight, o))
return true;
}
} else if (iRight.getClass().isArray()) {
for (final Object o : (Object[]) iRight) {
if (OQueryOperatorEquals.equals(iLeft, o))
return true;
}
}
return iLeft.equals(iRight);
}
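// Illustrative SQL this operator evaluates (a sketch; class and field names
// are hypothetical):
//   SELECT FROM Person WHERE city IN ['Rome', 'London']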
@Override
public OIndexReuseType getIndexReuseType(final Object iLeft, final Object iRight) {
return OIndexReuseType.INDEX_METHOD;
}
@SuppressWarnings("unchecked")
@Override
public Object executeIndexQuery(OCommandContext iContext, OIndex<?> index, INDEX_OPERATION_TYPE iOperationType,
List<Object> keyParams, IndexResultListener resultListener, int fetchLimit) {
final OIndexDefinition indexDefinition = index.getDefinition();
final Object result;
final OIndexInternal<?> internalIndex = index.getInternal();
if (!internalIndex.canBeUsedInEqualityOperators())
return null;
if (indexDefinition.getParamCount() == 1) {
final Object inKeyValue = keyParams.get(0);
final List<Object> inParams;
if (inKeyValue instanceof List<?>)
inParams = (List<Object>) inKeyValue;
else if (inKeyValue instanceof OSQLFilterItem)
inParams = (List<Object>) ((OSQLFilterItem) inKeyValue).getValue(null, iContext);
else
throw new IllegalArgumentException("Key '" + inKeyValue + "' is not valid");
final List<Object> inKeys = new ArrayList<Object>();
boolean containsNotCompatibleKey = false;
for (final Object keyValue : inParams) {
final Object key = indexDefinition.createValue(OSQLHelper.getValue(keyValue));
if (key == null) {
containsNotCompatibleKey = true;
break;
}
inKeys.add(key);
}
if (containsNotCompatibleKey)
return null;
if (INDEX_OPERATION_TYPE.COUNT.equals(iOperationType))
result = index.getValues(inKeys).size();
else if (resultListener != null) {
index.getValues(inKeys, resultListener);
result = resultListener.getResult();
} else
result = index.getValues(inKeys);
} else
return null;
updateProfiler(iContext, internalIndex, keyParams, indexDefinition);
return result;
}
@Override
public ORID getBeginRidRange(Object iLeft, Object iRight) {
final Iterable<?> ridCollection;
final int ridSize;
if (iRight instanceof OSQLFilterItemField && ODocumentHelper.ATTRIBUTE_RID.equals(((OSQLFilterItemField) iRight).getRoot())) {
if (iLeft instanceof OSQLFilterItem)
iLeft = ((OSQLFilterItem) iLeft).getValue(null, null);
ridCollection = OMultiValue.getMultiValueIterable(iLeft);
ridSize = OMultiValue.getSize(iLeft);
} else if (iLeft instanceof OSQLFilterItemField
&& ODocumentHelper.ATTRIBUTE_RID.equals(((OSQLFilterItemField) iLeft).getRoot())) {
if (iRight instanceof OSQLFilterItem)
iRight = ((OSQLFilterItem) iRight).getValue(null, null);
ridCollection = OMultiValue.getMultiValueIterable(iRight);
ridSize = OMultiValue.getSize(iRight);
} else
return null;
final List<ORID> rids = addRangeResults(ridCollection, ridSize);
return rids == null ? null : Collections.min(rids);
}
@Override
public ORID getEndRidRange(Object iLeft, Object iRight) {
final Iterable<?> ridCollection;
final int ridSize;
if (iRight instanceof OSQLFilterItemField && ODocumentHelper.ATTRIBUTE_RID.equals(((OSQLFilterItemField) iRight).getRoot())) {
if (iLeft instanceof OSQLFilterItem)
iLeft = ((OSQLFilterItem) iLeft).getValue(null, null);
ridCollection = OMultiValue.getMultiValueIterable(iLeft);
ridSize = OMultiValue.getSize(iLeft);
} else if (iLeft instanceof OSQLFilterItemField
&& ODocumentHelper.ATTRIBUTE_RID.equals(((OSQLFilterItemField) iLeft).getRoot())) {
if (iRight instanceof OSQLFilterItem)
iRight = ((OSQLFilterItem) iRight).getValue(null, null);
ridCollection = OMultiValue.getMultiValueIterable(iRight);
ridSize = OMultiValue.getSize(iRight);
} else
return null;
final List<ORID> rids = addRangeResults(ridCollection, ridSize);
return rids == null ? null : Collections.max(rids);
}
protected List<ORID> addRangeResults(final Iterable<?> ridCollection, final int ridSize) {
if (ridCollection == null)
return null;
List<ORID> rids = null;
for (Object rid : ridCollection) {
if (rid instanceof OSQLFilterItemParameter)
rid = ((OSQLFilterItemParameter) rid).getValue(null, null);
if (rid instanceof OIdentifiable) {
final ORID r = ((OIdentifiable) rid).getIdentity();
if (r.isPersistent()) {
if (rids == null)
// LAZY CREATE IT
rids = new ArrayList<ORID>(ridSize);
rids.add(r);
}
}
}
return rids;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_operator_OQueryOperatorIn.java
|
1,415 |
public abstract class OChannel extends OListenerManger<OChannelListener> {
private static final OProfilerMBean PROFILER = Orient.instance().getProfiler();
public Socket socket;
public InputStream inStream;
public OutputStream outStream;
protected final OAdaptiveLock lockRead = new OAdaptiveLock();
protected final OAdaptiveLock lockWrite = new OAdaptiveLock();
protected long timeout;
public int socketBufferSize;
private long metricTransmittedBytes = 0;
private long metricReceivedBytes = 0;
private long metricFlushes = 0;
private static final AtomicLong metricGlobalTransmittedBytes = new AtomicLong();
private static final AtomicLong metricGlobalReceivedBytes = new AtomicLong();
private static final AtomicLong metricGlobalFlushes = new AtomicLong();
private String profilerMetric;
static {
final String profilerMetric = PROFILER.getProcessMetric("network.channel.binary");
PROFILER.registerHookValue(profilerMetric + ".transmittedBytes", "Bytes transmitted to all the network channels",
METRIC_TYPE.SIZE, new OProfilerHookValue() {
public Object getValue() {
return metricGlobalTransmittedBytes.get();
}
});
PROFILER.registerHookValue(profilerMetric + ".receivedBytes", "Bytes received from all the network channels", METRIC_TYPE.SIZE,
new OProfilerHookValue() {
public Object getValue() {
return metricGlobalReceivedBytes.get();
}
});
PROFILER.registerHookValue(profilerMetric + ".flushes", "Number of times the network channels have been flushed",
METRIC_TYPE.COUNTER, new OProfilerHookValue() {
public Object getValue() {
return metricGlobalFlushes.get();
}
});
}
public OChannel(final Socket iSocket, final OContextConfiguration iConfig) throws IOException {
socket = iSocket;
socketBufferSize = iConfig.getValueAsInteger(OGlobalConfiguration.NETWORK_SOCKET_BUFFER_SIZE);
socket.setTcpNoDelay(true);
}
public void acquireWriteLock() {
lockWrite.lock();
}
public void releaseWriteLock() {
lockWrite.unlock();
}
public void acquireReadLock() {
lockRead.lock();
}
public void releaseReadLock() {
lockRead.unlock();
}
public void flush() throws IOException {
outStream.flush();
}
public void close() {
PROFILER.unregisterHookValue(profilerMetric + ".transmittedBytes");
PROFILER.unregisterHookValue(profilerMetric + ".receivedBytes");
PROFILER.unregisterHookValue(profilerMetric + ".flushes");
try {
if (socket != null)
socket.close();
} catch (IOException e) {
}
try {
if (inStream != null)
inStream.close();
} catch (IOException e) {
}
try {
if (outStream != null)
outStream.close();
} catch (IOException e) {
}
for (OChannelListener l : browseListeners())
try {
l.onChannelClose(this);
} catch (Exception e) {
// IGNORE ANY EXCEPTION
}
}
public void connected() {
final String dictProfilerMetric = PROFILER.getProcessMetric("network.channel.binary.*");
profilerMetric = PROFILER.getProcessMetric("network.channel.binary." + socket.getRemoteSocketAddress().toString()
+ socket.getLocalPort() + "".replace('.', '_'));
PROFILER.registerHookValue(profilerMetric + ".transmittedBytes", "Bytes transmitted to a network channel", METRIC_TYPE.SIZE,
new OProfilerHookValue() {
public Object getValue() {
return metricTransmittedBytes;
}
}, dictProfilerMetric + ".transmittedBytes");
PROFILER.registerHookValue(profilerMetric + ".receivedBytes", "Bytes received from a network channel", METRIC_TYPE.SIZE,
new OProfilerHookValue() {
public Object getValue() {
return metricReceivedBytes;
}
}, dictProfilerMetric + ".receivedBytes");
PROFILER.registerHookValue(profilerMetric + ".flushes", "Number of times the network channel has been flushed",
METRIC_TYPE.COUNTER, new OProfilerHookValue() {
public Object getValue() {
return metricFlushes;
}
}, dictProfilerMetric + ".flushes");
}
@Override
public String toString() {
return socket != null ? socket.getRemoteSocketAddress().toString() : "Not connected";
}
protected void updateMetricTransmittedBytes(final int iDelta) {
metricGlobalTransmittedBytes.addAndGet(iDelta);
metricTransmittedBytes += iDelta;
}
protected void updateMetricReceivedBytes(final int iDelta) {
metricGlobalReceivedBytes.addAndGet(iDelta);
metricReceivedBytes += iDelta;
}
protected void updateMetricFlushes() {
metricGlobalFlushes.incrementAndGet();
metricFlushes++;
}
}
| 1no label
|
enterprise_src_main_java_com_orientechnologies_orient_enterprise_channel_OChannel.java
|
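A minimal standalone sketch of the metric-name sanitization that connected() above is meant to perform; the class and method names here are illustrative, not part of the original source. Dots in the remote socket address must be replaced before the address can serve as a profiler metric segment:

public final class ChannelMetricNameSketch {
    // Hypothetical helper mirroring the fixed expression in connected():
    // the whole "<remoteAddress><localPort>" suffix is sanitized, not an
    // empty string literal.
    static String channelMetricName(String remoteAddress, int localPort) {
        return "network.channel.binary." + (remoteAddress + localPort).replace('.', '_');
    }

    public static void main(String[] args) {
        // prints network.channel.binary./127_0_0_1:242452341
        System.out.println(channelMetricName("/127.0.0.1:2424", 52341));
    }
}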
609 |
public class UpdateSettingsRequest extends AcknowledgedRequest<UpdateSettingsRequest> {
private String[] indices;
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, true);
private Settings settings = EMPTY_SETTINGS;
UpdateSettingsRequest() {
}
/**
* Constructs a new request to update settings for one or more indices
*/
public UpdateSettingsRequest(String... indices) {
this.indices = indices;
}
/**
* Constructs a new request to update settings for one or more indices
*/
public UpdateSettingsRequest(Settings settings, String... indices) {
this.indices = indices;
this.settings = settings;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (settings.getAsMap().isEmpty()) {
validationException = addValidationError("no settings to update", validationException);
}
return validationException;
}
String[] indices() {
return indices;
}
Settings settings() {
return settings;
}
/**
* Sets the indices to apply to settings update to
*/
public UpdateSettingsRequest indices(String... indices) {
this.indices = indices;
return this;
}
public IndicesOptions indicesOptions() {
return indicesOptions;
}
public UpdateSettingsRequest indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
/**
* Sets the settings to be updated
*/
public UpdateSettingsRequest settings(Settings settings) {
this.settings = settings;
return this;
}
/**
* Sets the settings to be updated
*/
public UpdateSettingsRequest settings(Settings.Builder settings) {
this.settings = settings.build();
return this;
}
/**
* Sets the settings to be updated (either json/yaml/properties format)
*/
public UpdateSettingsRequest settings(String source) {
this.settings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
return this;
}
/**
* Sets the settings to be updated (either json/yaml/properties format)
*/
@SuppressWarnings("unchecked")
public UpdateSettingsRequest settings(Map source) {
try {
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
builder.map(source);
settings(builder.string());
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
}
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
settings = readSettingsFromStream(in);
readTimeout(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArrayNullable(indices);
indicesOptions.writeIndicesOptions(out);
writeSettingsToStream(settings, out);
writeTimeout(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_settings_put_UpdateSettingsRequest.java
|
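A hedged usage sketch of the request type above; the index name and setting key are illustrative, and ImmutableSettings.settingsBuilder() is the builder already referenced in settings(String):

UpdateSettingsRequest request = new UpdateSettingsRequest("my-index")
        .settings(ImmutableSettings.settingsBuilder()
                .put("index.number_of_replicas", 2));
// validate() returns null once at least one setting is present
assert request.validate() == null;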
1,711 |
@Service("blAdminEntityService")
public class AdminEntityServiceImpl implements AdminEntityService {
@Resource(name = "blDynamicEntityRemoteService")
protected DynamicEntityService service;
@Resource(name = "blPersistencePackageFactory")
protected PersistencePackageFactory persistencePackageFactory;
@PersistenceContext(unitName = "blPU")
protected EntityManager em;
@Resource(name = "blEntityConfiguration")
protected EntityConfiguration entityConfiguration;
protected DynamicDaoHelper dynamicDaoHelper = new DynamicDaoHelperImpl();
@Override
public ClassMetadata getClassMetadata(PersistencePackageRequest request)
throws ServiceException {
ClassMetadata cmd = inspect(request).getClassMetaData();
cmd.setCeilingType(request.getCeilingEntityClassname());
return cmd;
}
@Override
public DynamicResultSet getRecords(PersistencePackageRequest request) throws ServiceException {
return fetch(request);
}
@Override
public Entity getRecord(PersistencePackageRequest request, String id, ClassMetadata cmd, boolean isCollectionRequest)
throws ServiceException {
String idProperty = getIdProperty(cmd);
FilterAndSortCriteria fasc = new FilterAndSortCriteria(idProperty);
fasc.setFilterValue(id);
request.addFilterAndSortCriteria(fasc);
Entity[] entities = fetch(request).getRecords();
Assert.isTrue(entities != null && entities.length == 1, "Entity not found");
Entity entity = entities[0];
return entity;
}
@Override
public Entity addEntity(EntityForm entityForm, String[] customCriteria)
throws ServiceException {
PersistencePackageRequest ppr = getRequestForEntityForm(entityForm, customCriteria);
// If the entity form has dynamic forms inside of it, we need to persist those as well.
// They are typically done in their own custom persistence handlers, which will get triggered
// based on the criteria specified in the PersistencePackage.
for (Entry<String, EntityForm> entry : entityForm.getDynamicForms().entrySet()) {
DynamicEntityFormInfo info = entityForm.getDynamicFormInfo(entry.getKey());
customCriteria = new String[] {info.getCriteriaName()};
PersistencePackageRequest subRequest = getRequestForEntityForm(entry.getValue(), customCriteria);
ppr.addSubRequest(info.getPropertyName(), subRequest);
}
return add(ppr);
}
@Override
public Entity updateEntity(EntityForm entityForm, String[] customCriteria)
throws ServiceException {
PersistencePackageRequest ppr = getRequestForEntityForm(entityForm, customCriteria);
// If the entity form has dynamic forms inside of it, we need to persist those as well.
// They are typically done in their own custom persistence handlers, which will get triggered
// based on the criteria specified in the PersistencePackage.
for (Entry<String, EntityForm> entry : entityForm.getDynamicForms().entrySet()) {
DynamicEntityFormInfo info = entityForm.getDynamicFormInfo(entry.getKey());
String propertyName = info.getPropertyName();
String propertyValue = entityForm.getFields().get(propertyName).getValue();
customCriteria = new String[] { info.getCriteriaName(), entityForm.getId(), propertyName, propertyValue };
PersistencePackageRequest subRequest = getRequestForEntityForm(entry.getValue(), customCriteria);
ppr.addSubRequest(info.getPropertyName(), subRequest);
}
return update(ppr);
}
@Override
public void removeEntity(EntityForm entityForm, String[] customCriteria)
throws ServiceException {
PersistencePackageRequest ppr = getRequestForEntityForm(entityForm, customCriteria);
remove(ppr);
}
protected List<Property> getPropertiesFromEntityForm(EntityForm entityForm) {
List<Property> properties = new ArrayList<Property>(entityForm.getFields().size());
for (Entry<String, Field> entry : entityForm.getFields().entrySet()) {
Property p = new Property();
p.setName(entry.getKey());
p.setValue(entry.getValue().getValue());
properties.add(p);
}
return properties;
}
protected PersistencePackageRequest getRequestForEntityForm(EntityForm entityForm, String[] customCriteria) {
// Ensure the ID property is on the form
Field idField = entityForm.getFields().get(entityForm.getIdProperty());
if (idField == null) {
idField = new Field();
idField.setName(entityForm.getIdProperty());
idField.setValue(entityForm.getId());
entityForm.getFields().put(entityForm.getIdProperty(), idField);
} else {
idField.setValue(entityForm.getId());
}
List<Property> propList = getPropertiesFromEntityForm(entityForm);
Property[] properties = new Property[propList.size()];
properties = propList.toArray(properties);
Entity entity = new Entity();
entity.setProperties(properties);
String entityType = entityForm.getEntityType();
if (StringUtils.isEmpty(entityType)) {
entityType = entityForm.getCeilingEntityClassname();
}
entity.setType(new String[] { entityType });
PersistencePackageRequest ppr = PersistencePackageRequest.standard()
.withEntity(entity)
.withCustomCriteria(customCriteria)
.withCeilingEntityClassname(entityForm.getCeilingEntityClassname());
return ppr;
}
@Override
public Entity getAdvancedCollectionRecord(ClassMetadata containingClassMetadata, Entity containingEntity,
Property collectionProperty, String collectionItemId)
throws ServiceException {
PersistencePackageRequest ppr = PersistencePackageRequest.fromMetadata(collectionProperty.getMetadata());
FieldMetadata md = collectionProperty.getMetadata();
String containingEntityId = getContextSpecificRelationshipId(containingClassMetadata, containingEntity,
collectionProperty.getName());
Entity entity = null;
if (md instanceof AdornedTargetCollectionMetadata) {
FilterAndSortCriteria fasc = new FilterAndSortCriteria(ppr.getAdornedList().getCollectionFieldName());
fasc.setFilterValue(containingEntityId);
ppr.addFilterAndSortCriteria(fasc);
fasc = new FilterAndSortCriteria(ppr.getAdornedList().getCollectionFieldName() + "Target");
fasc.setFilterValue(collectionItemId);
ppr.addFilterAndSortCriteria(fasc);
Entity[] entities = fetch(ppr).getRecords();
Assert.isTrue(entities != null && entities.length == 1, "Entity not found");
entity = entities[0];
} else if (md instanceof MapMetadata) {
MapMetadata mmd = (MapMetadata) md;
FilterAndSortCriteria fasc = new FilterAndSortCriteria(ppr.getForeignKey().getManyToField());
fasc.setFilterValue(containingEntityId);
ppr.addFilterAndSortCriteria(fasc);
Entity[] entities = fetch(ppr).getRecords();
for (Entity e : entities) {
String idProperty = getIdProperty(containingClassMetadata);
if (mmd.isSimpleValue()) {
idProperty = "key";
}
Property p = e.getPMap().get(idProperty);
if (p.getValue().equals(collectionItemId)) {
entity = e;
break;
}
}
} else {
throw new IllegalArgumentException(String.format("The specified field [%s] for class [%s] was not an " +
"advanced collection field.", collectionProperty.getName(), containingClassMetadata.getCeilingType()));
}
if (entity == null) {
throw new NoResultException(String.format("Could not find record for class [%s], field [%s], main entity id " +
"[%s], collection entity id [%s]", containingClassMetadata.getCeilingType(),
collectionProperty.getName(), containingEntityId, collectionItemId));
}
return entity;
}
@Override
public DynamicResultSet getRecordsForCollection(ClassMetadata containingClassMetadata, Entity containingEntity,
Property collectionProperty, FilterAndSortCriteria[] fascs, Integer startIndex, Integer maxIndex)
throws ServiceException {
return getRecordsForCollection(containingClassMetadata, containingEntity, collectionProperty, fascs, startIndex,
maxIndex, null);
}
@Override
public DynamicResultSet getRecordsForCollection(ClassMetadata containingClassMetadata, Entity containingEntity,
Property collectionProperty, FilterAndSortCriteria[] fascs, Integer startIndex, Integer maxIndex,
String idValueOverride) throws ServiceException {
PersistencePackageRequest ppr = PersistencePackageRequest.fromMetadata(collectionProperty.getMetadata())
.withFilterAndSortCriteria(fascs)
.withStartIndex(startIndex)
.withMaxIndex(maxIndex);
FilterAndSortCriteria fasc;
FieldMetadata md = collectionProperty.getMetadata();
if (md instanceof BasicCollectionMetadata) {
fasc = new FilterAndSortCriteria(ppr.getForeignKey().getManyToField());
} else if (md instanceof AdornedTargetCollectionMetadata) {
fasc = new FilterAndSortCriteria(ppr.getAdornedList().getCollectionFieldName());
} else if (md instanceof MapMetadata) {
fasc = new FilterAndSortCriteria(ppr.getForeignKey().getManyToField());
} else {
throw new IllegalArgumentException(String.format("The specified field [%s] for class [%s] was not a " +
"collection field.", collectionProperty.getName(), containingClassMetadata.getCeilingType()));
}
String id;
if (idValueOverride == null) {
id = getContextSpecificRelationshipId(containingClassMetadata, containingEntity, collectionProperty.getName());
} else {
id = idValueOverride;
}
fasc.setFilterValue(id);
ppr.addFilterAndSortCriteria(fasc);
return fetch(ppr);
}
@Override
public Map<String, DynamicResultSet> getRecordsForAllSubCollections(PersistencePackageRequest ppr, Entity containingEntity)
throws ServiceException {
Map<String, DynamicResultSet> map = new HashMap<String, DynamicResultSet>();
ClassMetadata cmd = getClassMetadata(ppr);
for (Property p : cmd.getProperties()) {
if (p.getMetadata() instanceof CollectionMetadata) {
DynamicResultSet drs = getRecordsForCollection(cmd, containingEntity, p, null, null, null);
map.put(p.getName(), drs);
}
}
return map;
}
@Override
public Entity addSubCollectionEntity(EntityForm entityForm, ClassMetadata mainMetadata, Property field,
Entity parentEntity)
throws ServiceException, ClassNotFoundException {
// Assemble the properties from the entity form
List<Property> properties = new ArrayList<Property>();
for (Entry<String, Field> entry : entityForm.getFields().entrySet()) {
Property p = new Property();
p.setName(entry.getKey());
p.setValue(entry.getValue().getValue());
properties.add(p);
}
FieldMetadata md = field.getMetadata();
PersistencePackageRequest ppr = PersistencePackageRequest.fromMetadata(md)
.withEntity(new Entity());
if (md instanceof BasicCollectionMetadata) {
BasicCollectionMetadata fmd = (BasicCollectionMetadata) md;
ppr.getEntity().setType(new String[] { entityForm.getEntityType() });
// If we're looking up an entity instead of trying to create one on the fly, let's make sure
// that we're not changing the target entity at all and only creating the association to the id
if (fmd.getAddMethodType().equals(AddMethodType.LOOKUP)) {
List<String> fieldsToRemove = new ArrayList<String>();
String idProp = getIdProperty(mainMetadata);
for (String key : entityForm.getFields().keySet()) {
if (!idProp.equals(key)) {
fieldsToRemove.add(key);
}
}
for (String key : fieldsToRemove) {
ListIterator<Property> li = properties.listIterator();
while (li.hasNext()) {
if (li.next().getName().equals(key)) {
li.remove();
}
}
}
ppr.setValidateUnsubmittedProperties(false);
}
Property fp = new Property();
fp.setName(ppr.getForeignKey().getManyToField());
fp.setValue(getContextSpecificRelationshipId(mainMetadata, parentEntity, field.getName()));
properties.add(fp);
} else if (md instanceof AdornedTargetCollectionMetadata) {
ppr.getEntity().setType(new String[] { ppr.getAdornedList().getAdornedTargetEntityClassname() });
String[] maintainedFields = ((AdornedTargetCollectionMetadata) md).getMaintainedAdornedTargetFields();
if (maintainedFields == null || maintainedFields.length == 0) {
ppr.setValidateUnsubmittedProperties(false);
}
} else if (md instanceof MapMetadata) {
ppr.getEntity().setType(new String[] { entityForm.getEntityType() });
Property p = new Property();
p.setName("symbolicId");
p.setValue(getContextSpecificRelationshipId(mainMetadata, parentEntity, field.getName()));
properties.add(p);
} else {
throw new IllegalArgumentException(String.format("The specified field [%s] for class [%s] was" +
" not a collection field.", field.getName(), mainMetadata.getCeilingType()));
}
ppr.setCeilingEntityClassname(ppr.getEntity().getType()[0]);
Property[] propArr = new Property[properties.size()];
properties.toArray(propArr);
ppr.getEntity().setProperties(propArr);
return add(ppr);
}
@Override
public Entity updateSubCollectionEntity(EntityForm entityForm, ClassMetadata mainMetadata, Property field,
Entity parentEntity, String collectionItemId)
throws ServiceException, ClassNotFoundException {
List<Property> properties = getPropertiesFromEntityForm(entityForm);
FieldMetadata md = field.getMetadata();
PersistencePackageRequest ppr = PersistencePackageRequest.fromMetadata(md)
.withEntity(new Entity());
if (md instanceof BasicCollectionMetadata) {
BasicCollectionMetadata fmd = (BasicCollectionMetadata) md;
ppr.getEntity().setType(new String[] { fmd.getCollectionCeilingEntity() });
Property fp = new Property();
fp.setName(ppr.getForeignKey().getManyToField());
fp.setValue(getContextSpecificRelationshipId(mainMetadata, parentEntity, field.getName()));
properties.add(fp);
} else if (md instanceof AdornedTargetCollectionMetadata) {
ppr.getEntity().setType(new String[] { ppr.getAdornedList().getAdornedTargetEntityClassname() });
} else if (md instanceof MapMetadata) {
ppr.getEntity().setType(new String[] { entityForm.getEntityType() });
Property p = new Property();
p.setName("symbolicId");
p.setValue(getContextSpecificRelationshipId(mainMetadata, parentEntity, field.getName()));
properties.add(p);
} else {
throw new IllegalArgumentException(String.format("The specified field [%s] for class [%s] was" +
" not a collection field.", field.getName(), mainMetadata.getCeilingType()));
}
ppr.setCeilingEntityClassname(ppr.getEntity().getType()[0]);
Property p = new Property();
p.setName(entityForm.getIdProperty());
p.setValue(collectionItemId);
properties.add(p);
Property[] propArr = new Property[properties.size()];
properties.toArray(propArr);
ppr.getEntity().setProperties(propArr);
return update(ppr);
}
@Override
public void removeSubCollectionEntity(ClassMetadata mainMetadata, Property field, Entity parentEntity, String itemId,
String priorKey)
throws ServiceException {
List<Property> properties = new ArrayList<Property>();
Property p;
String parentId = getContextSpecificRelationshipId(mainMetadata, parentEntity, field.getName());
Entity entity = new Entity();
PersistencePackageRequest ppr = PersistencePackageRequest.fromMetadata(field.getMetadata())
.withEntity(entity);
if (field.getMetadata() instanceof BasicCollectionMetadata) {
BasicCollectionMetadata fmd = (BasicCollectionMetadata) field.getMetadata();
p = new Property();
p.setName("id");
p.setValue(itemId);
properties.add(p);
p = new Property();
p.setName(ppr.getForeignKey().getManyToField());
p.setValue(parentId);
properties.add(p);
entity.setType(new String[] { fmd.getCollectionCeilingEntity() });
} else if (field.getMetadata() instanceof AdornedTargetCollectionMetadata) {
AdornedTargetList adornedList = ppr.getAdornedList();
p = new Property();
p.setName(adornedList.getLinkedObjectPath() + "." + adornedList.getLinkedIdProperty());
p.setValue(parentId);
properties.add(p);
p = new Property();
p.setName(adornedList.getTargetObjectPath() + "." + adornedList.getTargetIdProperty());
p.setValue(itemId);
properties.add(p);
entity.setType(new String[] { adornedList.getAdornedTargetEntityClassname() });
} else if (field.getMetadata() instanceof MapMetadata) {
MapMetadata fmd = (MapMetadata) field.getMetadata();
p = new Property();
p.setName("symbolicId");
p.setValue(getContextSpecificRelationshipId(mainMetadata, parentEntity, field.getName()));
properties.add(p);
p = new Property();
p.setName("priorKey");
p.setValue(priorKey);
properties.add(p);
MapStructure mapStructure = ppr.getMapStructure();
p = new Property();
p.setName(mapStructure.getKeyPropertyName());
p.setValue(itemId);
properties.add(p);
entity.setType(new String[] { fmd.getTargetClass() });
}
Property[] propArr = new Property[properties.size()];
properties.toArray(propArr);
ppr.getEntity().setProperties(propArr);
remove(ppr);
}
@Override
public String getContextSpecificRelationshipId(ClassMetadata cmd, Entity entity, String propertyName) {
String prefix;
if (propertyName.contains(".")) {
prefix = propertyName.substring(0, propertyName.lastIndexOf("."));
} else {
prefix = "";
}
if (prefix.equals("")) {
return entity.findProperty("id").getValue();
} else {
//we need to check all the parts of the prefix. For example, the prefix could include an @Embedded class like
//defaultSku.dimension. In this case, we want the id from the defaultSku property, since the @Embedded does
//not have an id property - nor should it.
String[] prefixParts = prefix.split("\\.");
for (int j = 0; j < prefixParts.length; j++) {
StringBuilder sb = new StringBuilder();
for (int x = 0; x < prefixParts.length - j; x++) {
sb.append(prefixParts[x]);
if (x < prefixParts.length - j - 1) {
sb.append(".");
}
}
String tempPrefix = sb.toString();
for (Property property : entity.getProperties()) {
if (property.getName().startsWith(tempPrefix)) {
BasicFieldMetadata md = (BasicFieldMetadata) cmd.getPMap().get(property.getName()).getMetadata();
if (md.getFieldType().equals(SupportedFieldType.ID)) {
return property.getValue();
}
}
}
}
}
if (!prefix.contains(".")) {
//this may be an embedded class directly on the root entity (e.g. embeddablePriceList.restrictedPriceLists on OfferImpl)
return entity.findProperty("id").getValue();
}
throw new RuntimeException("Unable to establish a relationship id");
}
@Override
public String getIdProperty(ClassMetadata cmd) throws ServiceException {
for (Property p : cmd.getProperties()) {
if (p.getMetadata() instanceof BasicFieldMetadata) {
BasicFieldMetadata fmd = (BasicFieldMetadata) p.getMetadata();
//check for ID type and also make sure the field we're looking at is not a "ToOne" association
if (SupportedFieldType.ID.equals(fmd.getFieldType()) && !p.getName().contains(".")) {
return p.getName();
}
}
}
throw new ServiceException("Could not determine ID field for " + cmd.getCeilingType());
}
protected Entity add(PersistencePackageRequest request)
throws ServiceException {
PersistencePackage pkg = persistencePackageFactory.create(request);
try {
return service.add(pkg);
} catch (ValidationException e) {
return e.getEntity();
}
}
protected Entity update(PersistencePackageRequest request)
throws ServiceException {
PersistencePackage pkg = persistencePackageFactory.create(request);
try {
return service.update(pkg);
} catch (ValidationException e) {
return e.getEntity();
}
}
protected DynamicResultSet inspect(PersistencePackageRequest request)
throws ServiceException {
PersistencePackage pkg = persistencePackageFactory.create(request);
return service.inspect(pkg);
}
protected void remove(PersistencePackageRequest request)
throws ServiceException {
PersistencePackage pkg = persistencePackageFactory.create(request);
service.remove(pkg);
}
protected DynamicResultSet fetch(PersistencePackageRequest request)
throws ServiceException {
PersistencePackage pkg = persistencePackageFactory.create(request);
CriteriaTransferObject cto = getDefaultCto();
if (request.getFilterAndSortCriteria() != null) {
cto.addAll(Arrays.asList(request.getFilterAndSortCriteria()));
}
if (request.getStartIndex() == null) {
cto.setFirstResult(0);
} else {
cto.setFirstResult(request.getStartIndex());
}
        if (request.getMaxIndex() != null) {
            // guard against a null start index (treated as 0 above) to avoid an NPE on unboxing
            int startIndex = request.getStartIndex() == null ? 0 : request.getStartIndex();
            int requestedMaxResults = request.getMaxIndex() - startIndex + 1;
if (requestedMaxResults >= 0 && requestedMaxResults < cto.getMaxResults()) {
cto.setMaxResults(requestedMaxResults);
}
}
return service.fetch(pkg, cto);
}
protected CriteriaTransferObject getDefaultCto() {
CriteriaTransferObject cto = new CriteriaTransferObject();
cto.setMaxResults(50);
return cto;
}
}
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_service_AdminEntityServiceImpl.java
|
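The prefix walk inside getContextSpecificRelationshipId() above is dense; this small standalone sketch (all names hypothetical) shows the order in which prefixes are probed, so that an @Embedded segment without its own id falls back to the nearest owning entity:

import java.util.ArrayList;
import java.util.List;

final class PrefixWalkSketch {
    // For "defaultSku.dimension.width" this yields
    // ["defaultSku.dimension", "defaultSku"]: the same progressively
    // shorter prefixes the service scans for an ID-typed property.
    static List<String> prefixCandidates(String propertyName) {
        List<String> candidates = new ArrayList<String>();
        int idx = propertyName.lastIndexOf('.');
        String prefix = idx < 0 ? "" : propertyName.substring(0, idx);
        while (!prefix.isEmpty()) {
            candidates.add(prefix);
            int dot = prefix.lastIndexOf('.');
            prefix = dot < 0 ? "" : prefix.substring(0, dot);
        }
        return candidates;
    }
}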
10 |
public abstract class BrowserInput {
private final BrowserInput fPrevious;
private BrowserInput fNext;
/**
* Create a new Browser input.
*
* @param previous the input previous to this or <code>null</code> if this is the first
*/
public BrowserInput(BrowserInput previous) {
fPrevious= previous;
if (previous != null)
previous.fNext= this;
}
/**
* The previous input or <code>null</code> if this
* is the first.
*
* @return the previous input or <code>null</code>
*/
public BrowserInput getPrevious() {
return fPrevious;
}
/**
* The next input or <code>null</code> if this
* is the last.
*
* @return the next input or <code>null</code>
*/
public BrowserInput getNext() {
return fNext;
}
/**
* @return the HTML contents
*/
public abstract String getHtml();
/**
* A human readable name for the input.
*
* @return the input name
*/
public abstract String getInputName();
/**
* Returns the HTML from {@link #getHtml()}.
* This is a fallback mode for platforms where the {@link BrowserInformationControl}
* is not available and this input is passed to a {@link DefaultInformationControl}.
*
* @return {@link #getHtml()}
*/
public String toString() {
return getHtml();
}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_browser_BrowserInput.java
|
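A brief usage sketch of the navigation chain above; the anonymous subclasses are hypothetical. Passing a predecessor to the constructor wires both directions of the history list:

BrowserInput first = new BrowserInput(null) {
    public String getHtml() { return "<p>first</p>"; }
    public String getInputName() { return "first"; }
};
BrowserInput second = new BrowserInput(first) {
    public String getHtml() { return "<p>second</p>"; }
    public String getInputName() { return "second"; }
};
// the second constructor linked the two inputs in both directions
assert first.getNext() == second && second.getPrevious() == first;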
182 |
private static class NullFunction implements IFunction<String,String> {
@Override
public String apply(String input) {
return null;
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_atomicreference_ClientAtomicReferenceTest.java
|
11 |
.withPredicateProvider(new PredicateProvider() {
@Override
public Predicate buildPredicate(CriteriaBuilder builder, FieldPathBuilder fieldPathBuilder,
From root, String ceilingEntity,
String fullPropertyName, Path explicitPath, List directValues) {
return explicitPath.as(Long.class).in(directValues);
}
})
| 0true
|
admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_server_service_handler_SkuCustomPersistenceHandler.java
|
3,492 |
public static class Names {
private final String name;
private final String indexName;
private final String indexNameClean;
private final String fullName;
private final String sourcePath;
public Names(String name) {
this(name, name, name, name);
}
public Names(String name, String indexName, String indexNameClean, String fullName) {
this(name, indexName, indexNameClean, fullName, fullName);
}
public Names(String name, String indexName, String indexNameClean, String fullName, @Nullable String sourcePath) {
this.name = name.intern();
this.indexName = indexName.intern();
this.indexNameClean = indexNameClean.intern();
this.fullName = fullName.intern();
this.sourcePath = sourcePath == null ? this.fullName : sourcePath.intern();
}
/**
* The logical name of the field.
*/
public String name() {
return name;
}
/**
* The indexed name of the field. This is the name under which we will
* store it in the index.
*/
public String indexName() {
return indexName;
}
/**
* The cleaned index name, before any "path" modifications performed on it.
*/
public String indexNameClean() {
return indexNameClean;
}
/**
* The full name, including dot path.
*/
public String fullName() {
return fullName;
}
/**
* The dot path notation to extract the value from source.
*/
public String sourcePath() {
return sourcePath;
}
/**
* Creates a new index term based on the provided value.
*/
public Term createIndexNameTerm(String value) {
return new Term(indexName, value);
}
/**
* Creates a new index term based on the provided value.
*/
public Term createIndexNameTerm(BytesRef value) {
return new Term(indexName, value);
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_mapper_FieldMapper.java
|
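A short sketch of how the name variants above relate; the field values are illustrative and Term is Lucene's, as used in createIndexNameTerm():

Names names = new Names("city", "address.city", "city", "address.city");
Term term = names.createIndexNameTerm("amsterdam");
assert "address.city".equals(term.field());       // terms are keyed by the index name
assert "address.city".equals(names.sourcePath()); // sourcePath defaults to fullName when null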
10 |
public interface TextCommand extends TextCommandConstants, SocketWritable, SocketReadable {
TextCommandType getType();
void init(SocketTextReader socketTextReader, long requestId);
SocketTextReader getSocketTextReader();
SocketTextWriter getSocketTextWriter();
long getRequestId();
boolean shouldReply();
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_TextCommand.java
|
3,231 |
public class ReplicationPublisher<K, V>
implements ReplicationChannel {
private static final String SERVICE_NAME = ReplicatedMapService.SERVICE_NAME;
private static final String EVENT_TOPIC_NAME = ReplicatedMapService.EVENT_TOPIC_NAME;
private static final String EXECUTOR_NAME = "hz:replicated-map";
private static final int MAX_MESSAGE_CACHE_SIZE = 1000;
private static final int MAX_CLEAR_EXECUTION_RETRY = 5;
private final List<ReplicationMessage> replicationMessageCache = new ArrayList<ReplicationMessage>();
private final Lock replicationMessageCacheLock = new ReentrantLock();
private final Random memberRandomizer = new Random();
private final ScheduledExecutorService executorService;
private final ExecutionService executionService;
private final OperationService operationService;
private final ClusterService clusterService;
private final EventService eventService;
private final NodeEngine nodeEngine;
private final AbstractBaseReplicatedRecordStore<K, V> replicatedRecordStore;
private final InternalReplicatedMapStorage<K, V> storage;
private final ReplicatedMapConfig replicatedMapConfig;
private final LocalReplicatedMapStatsImpl mapStats;
private final Member localMember;
private final String name;
private final boolean allowReplicationHooks;
private volatile PreReplicationHook preReplicationHook;
ReplicationPublisher(AbstractBaseReplicatedRecordStore<K, V> replicatedRecordStore, NodeEngine nodeEngine) {
this.replicatedRecordStore = replicatedRecordStore;
this.nodeEngine = nodeEngine;
this.name = replicatedRecordStore.getName();
this.storage = replicatedRecordStore.storage;
this.mapStats = replicatedRecordStore.mapStats;
this.eventService = nodeEngine.getEventService();
this.localMember = replicatedRecordStore.localMember;
this.clusterService = nodeEngine.getClusterService();
this.executionService = nodeEngine.getExecutionService();
this.operationService = nodeEngine.getOperationService();
this.replicatedMapConfig = replicatedRecordStore.replicatedMapConfig;
this.executorService = getExecutorService(nodeEngine, replicatedMapConfig);
this.allowReplicationHooks = Boolean.parseBoolean(System.getProperty("hazelcast.repmap.hooks.allowed", "false"));
}
@Override
public void replicate(MultiReplicationMessage message) {
distributeReplicationMessage(message, true);
}
@Override
public void replicate(ReplicationMessage message) {
distributeReplicationMessage(message, true);
}
public void setPreReplicationHook(PreReplicationHook preReplicationHook) {
this.preReplicationHook = preReplicationHook;
}
public void publishReplicatedMessage(ReplicationMessage message) {
if (replicatedMapConfig.getReplicationDelayMillis() == 0) {
distributeReplicationMessage(message, false);
} else {
replicationMessageCacheLock.lock();
try {
replicationMessageCache.add(message);
if (replicationMessageCache.size() == 1) {
ReplicationCachedSenderTask task = new ReplicationCachedSenderTask(this);
long replicationDelayMillis = replicatedMapConfig.getReplicationDelayMillis();
executorService.schedule(task, replicationDelayMillis, TimeUnit.MILLISECONDS);
} else {
if (replicationMessageCache.size() > MAX_MESSAGE_CACHE_SIZE) {
processMessageCache();
}
}
} finally {
replicationMessageCacheLock.unlock();
}
}
}
public void queueUpdateMessage(final ReplicationMessage update) {
executorService.execute(new Runnable() {
@Override
public void run() {
processUpdateMessage(update);
}
});
}
public void queueUpdateMessages(final MultiReplicationMessage updates) {
executorService.execute(new Runnable() {
@Override
public void run() {
for (ReplicationMessage update : updates.getReplicationMessages()) {
processUpdateMessage(update);
}
}
});
}
void destroy() {
executorService.shutdownNow();
}
void processMessageCache() {
ReplicationMessage[] replicationMessages = null;
replicationMessageCacheLock.lock();
try {
final int size = replicationMessageCache.size();
if (size > 0) {
replicationMessages = replicationMessageCache.toArray(new ReplicationMessage[size]);
replicationMessageCache.clear();
}
} finally {
replicationMessageCacheLock.unlock();
}
if (replicationMessages != null) {
MultiReplicationMessage message = new MultiReplicationMessage(name, replicationMessages);
distributeReplicationMessage(message, false);
}
}
void distributeReplicationMessage(final Object message, final boolean forceSend) {
final PreReplicationHook preReplicationHook = getPreReplicationHook();
if (forceSend || preReplicationHook == null) {
Collection<EventRegistration> eventRegistrations = eventService.getRegistrations(SERVICE_NAME, EVENT_TOPIC_NAME);
Collection<EventRegistration> registrations = filterEventRegistrations(eventRegistrations);
eventService.publishEvent(ReplicatedMapService.SERVICE_NAME, registrations, message, name.hashCode());
} else {
executionService.execute(EXECUTOR_NAME, new Runnable() {
@Override
public void run() {
if (message instanceof MultiReplicationMessage) {
preReplicationHook.preReplicateMultiMessage((MultiReplicationMessage) message, ReplicationPublisher.this);
} else {
preReplicationHook.preReplicateMessage((ReplicationMessage) message, ReplicationPublisher.this);
}
}
});
}
}
public void queuePreProvision(Address callerAddress, int chunkSize) {
RemoteProvisionTask task = new RemoteProvisionTask(replicatedRecordStore, nodeEngine, callerAddress, chunkSize);
executionService.execute(EXECUTOR_NAME, task);
}
public void retryWithDifferentReplicationNode(Member member) {
List<MemberImpl> members = new ArrayList<MemberImpl>(nodeEngine.getClusterService().getMemberList());
members.remove(member);
// If there are fewer than two members there is no other possible candidate to replicate from
if (members.size() < 2) {
return;
}
sendPreProvisionRequest(members);
}
public void distributeClear(boolean emptyReplicationQueue) {
executeRemoteClear(emptyReplicationQueue);
}
public void emptyReplicationQueue() {
replicationMessageCacheLock.lock();
try {
replicationMessageCache.clear();
} finally {
replicationMessageCacheLock.unlock();
}
}
void sendPreProvisionRequest(List<MemberImpl> members) {
if (members.size() == 0) {
return;
}
int randomMember = memberRandomizer.nextInt(members.size());
MemberImpl newMember = members.get(randomMember);
ReplicatedMapPostJoinOperation.MemberMapPair[] memberMapPairs = new ReplicatedMapPostJoinOperation.MemberMapPair[1];
memberMapPairs[0] = new ReplicatedMapPostJoinOperation.MemberMapPair(newMember, name);
OperationService operationService = nodeEngine.getOperationService();
int defaultChunkSize = ReplicatedMapPostJoinOperation.DEFAULT_CHUNK_SIZE;
ReplicatedMapPostJoinOperation op = new ReplicatedMapPostJoinOperation(memberMapPairs, defaultChunkSize);
operationService.send(op, newMember.getAddress());
}
private void executeRemoteClear(boolean emptyReplicationQueue) {
List<MemberImpl> failedMembers = new ArrayList<MemberImpl>(clusterService.getMemberList());
for (int i = 0; i < MAX_CLEAR_EXECUTION_RETRY; i++) {
Map<MemberImpl, InternalCompletableFuture> futures = executeClearOnMembers(failedMembers, emptyReplicationQueue);
// Clear to collect new failing members
failedMembers.clear();
for (Map.Entry<MemberImpl, InternalCompletableFuture> future : futures.entrySet()) {
try {
future.getValue().get();
} catch (Exception e) {
nodeEngine.getLogger(ReplicationPublisher.class).finest(e);
failedMembers.add(future.getKey());
}
}
if (failedMembers.size() == 0) {
return;
}
}
// If we get here the operation does not seem to have finished
throw new CallTimeoutException("ReplicatedMap::clear couldn't be finished, failed nodes: " + failedMembers);
}
private Map executeClearOnMembers(Collection<MemberImpl> members, boolean emptyReplicationQueue) {
Address thisAddress = clusterService.getThisAddress();
Map<MemberImpl, InternalCompletableFuture> futures = new HashMap<MemberImpl, InternalCompletableFuture>(members.size());
for (MemberImpl member : members) {
Address address = member.getAddress();
if (!thisAddress.equals(address)) {
Operation operation = new ReplicatedMapClearOperation(name, emptyReplicationQueue);
InvocationBuilder ib = operationService.createInvocationBuilder(SERVICE_NAME, operation, address);
futures.put(member, ib.invoke());
}
}
return futures;
}
private void processUpdateMessage(ReplicationMessage update) {
if (localMember.equals(update.getOrigin())) {
return;
}
mapStats.incrementReceivedReplicationEvents();
if (update.getKey() instanceof String) {
String key = (String) update.getKey();
if (AbstractReplicatedRecordStore.CLEAR_REPLICATION_MAGIC_KEY.equals(key)) {
storage.clear();
return;
}
}
K marshalledKey = (K) replicatedRecordStore.marshallKey(update.getKey());
synchronized (replicatedRecordStore.getMutex(marshalledKey)) {
final ReplicatedRecord<K, V> localEntry = storage.get(marshalledKey);
if (localEntry == null) {
if (!update.isRemove()) {
V marshalledValue = (V) replicatedRecordStore.marshallValue(update.getValue());
VectorClock vectorClock = update.getVectorClock();
int updateHash = update.getUpdateHash();
long ttlMillis = update.getTtlMillis();
storage.put(marshalledKey,
new ReplicatedRecord<K, V>(marshalledKey, marshalledValue, vectorClock, updateHash, ttlMillis));
if (ttlMillis > 0) {
replicatedRecordStore.scheduleTtlEntry(ttlMillis, marshalledKey, null);
} else {
replicatedRecordStore.cancelTtlEntry(marshalledKey);
}
replicatedRecordStore.fireEntryListenerEvent(update.getKey(), null, update.getValue());
}
} else {
final VectorClock currentVectorClock = localEntry.getVectorClock();
final VectorClock updateVectorClock = update.getVectorClock();
if (VectorClock.happenedBefore(updateVectorClock, currentVectorClock)) {
// ignore the update. This is an old update
return;
} else if (VectorClock.happenedBefore(currentVectorClock, updateVectorClock)) {
// A new update happened
applyTheUpdate(update, localEntry);
} else {
if (localEntry.getLatestUpdateHash() >= update.getUpdateHash()) {
applyTheUpdate(update, localEntry);
} else {
currentVectorClock.applyVector(updateVectorClock);
currentVectorClock.incrementClock(localMember);
Object key = update.getKey();
V value = localEntry.getValue();
long ttlMillis = update.getTtlMillis();
int latestUpdateHash = localEntry.getLatestUpdateHash();
ReplicationMessage message = new ReplicationMessage(name, key, value, currentVectorClock, localMember,
latestUpdateHash, ttlMillis);
distributeReplicationMessage(message, true);
}
}
}
}
}
private void applyTheUpdate(ReplicationMessage<K, V> update, ReplicatedRecord<K, V> localEntry) {
VectorClock localVectorClock = localEntry.getVectorClock();
VectorClock remoteVectorClock = update.getVectorClock();
K marshalledKey = (K) replicatedRecordStore.marshallKey(update.getKey());
V marshalledValue = (V) replicatedRecordStore.marshallValue(update.getValue());
long ttlMillis = update.getTtlMillis();
long oldTtlMillis = localEntry.getTtlMillis();
Object oldValue = localEntry.setValue(marshalledValue, update.getUpdateHash(), ttlMillis);
if (update.isRemove()) {
// Force removal of the underlying stored entry
storage.remove(marshalledKey, localEntry);
}
localVectorClock.applyVector(remoteVectorClock);
if (ttlMillis > 0) {
replicatedRecordStore.scheduleTtlEntry(ttlMillis, marshalledKey, null);
} else {
replicatedRecordStore.cancelTtlEntry(marshalledKey);
}
V unmarshalledOldValue = (V) replicatedRecordStore.unmarshallValue(oldValue);
if (unmarshalledOldValue == null || !unmarshalledOldValue.equals(update.getValue())
|| update.getTtlMillis() != oldTtlMillis) {
replicatedRecordStore.fireEntryListenerEvent(update.getKey(), unmarshalledOldValue, update.getValue());
}
}
private Collection<EventRegistration> filterEventRegistrations(Collection<EventRegistration> eventRegistrations) {
Address address = ((MemberImpl) localMember).getAddress();
List<EventRegistration> registrations = new ArrayList<EventRegistration>(eventRegistrations);
Iterator<EventRegistration> iterator = registrations.iterator();
while (iterator.hasNext()) {
EventRegistration registration = iterator.next();
if (address.equals(registration.getSubscriber())) {
iterator.remove();
}
}
return registrations;
}
private PreReplicationHook getPreReplicationHook() {
if (!allowReplicationHooks) {
return null;
}
return preReplicationHook;
}
private ScheduledExecutorService getExecutorService(NodeEngine nodeEngine, ReplicatedMapConfig replicatedMapConfig) {
ScheduledExecutorService es = replicatedMapConfig.getReplicatorExecutorService();
if (es == null) {
es = nodeEngine.getExecutionService().getDefaultScheduledExecutor();
}
return new WrappedExecutorService(es);
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_replicatedmap_record_ReplicationPublisher.java
|
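A simplified sketch of the conflict resolution performed in processUpdateMessage() above: strictly older updates are dropped, strictly newer ones applied, and concurrent ones broken deterministically by comparing update hashes. VectorClock.happenedBefore is the method used in the snippet; everything else here is illustrative:

enum Decision { IGNORE, APPLY, RESOLVE_AND_REBROADCAST }

static Decision decide(VectorClock current, VectorClock incoming,
                       int localUpdateHash, int incomingUpdateHash) {
    if (VectorClock.happenedBefore(incoming, current)) {
        return Decision.IGNORE;                  // stale: the update is strictly older
    } else if (VectorClock.happenedBefore(current, incoming)) {
        return Decision.APPLY;                   // strictly newer: apply it
    } else if (localUpdateHash >= incomingUpdateHash) {
        return Decision.APPLY;                   // concurrent: deterministic tie-break
    } else {
        return Decision.RESOLVE_AND_REBROADCAST; // concurrent: merge clocks, re-send local value
    }
}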
4,744 |
public class URLRepository extends BlobStoreRepository {
public final static String TYPE = "url";
private final URLBlobStore blobStore;
private final BlobPath basePath;
private boolean listDirectories;
/**
* Constructs new read-only URL-based repository
*
* @param name repository name
* @param repositorySettings repository settings
* @param indexShardRepository shard repository
* @throws IOException
*/
@Inject
public URLRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository) throws IOException {
super(name.getName(), repositorySettings, indexShardRepository);
URL url;
String path = repositorySettings.settings().get("url", componentSettings.get("url"));
if (path == null) {
throw new RepositoryException(name.name(), "missing url");
} else {
url = new URL(path);
}
int concurrentStreams = repositorySettings.settings().getAsInt("concurrent_streams", componentSettings.getAsInt("concurrent_streams", 5));
ExecutorService concurrentStreamPool = EsExecutors.newScaling(1, concurrentStreams, 60, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[fs_stream]"));
listDirectories = repositorySettings.settings().getAsBoolean("list_directories", componentSettings.getAsBoolean("list_directories", true));
blobStore = new URLBlobStore(componentSettings, concurrentStreamPool, url);
basePath = BlobPath.cleanPath();
}
/**
* {@inheritDoc}
*/
@Override
protected BlobStore blobStore() {
return blobStore;
}
@Override
protected BlobPath basePath() {
return basePath;
}
@Override
public ImmutableList<SnapshotId> snapshots() {
if (listDirectories) {
return super.snapshots();
} else {
try {
return readSnapshotList();
} catch (IOException ex) {
throw new RepositoryException(repositoryName, "failed to get snapshot list in repository", ex);
}
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_repositories_uri_URLRepository.java
|
307 |
new Thread() {
public void run() {
try {
map.lock(key);
map.put(key, value);
putWhileLocked.countDown();
checkingKeySet.await();
map.unlock(key);
} catch (Exception e) {
    // ignored: the test fails via latch timeouts instead
}
}
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapLockTest.java
|
1,547 |
public class PropertyMapMap {
public static final String CLASS = Tokens.makeNamespace(PropertyMapMap.class) + ".class";
public enum Counters {
VERTICES_PROCESSED,
OUT_EDGES_PROCESSED
}
public static Configuration createConfiguration(final Class<? extends Element> klass) {
final Configuration configuration = new EmptyConfiguration();
configuration.setClass(CLASS, klass, Element.class);
return configuration;
}
public static class Map extends Mapper<NullWritable, FaunusVertex, LongWritable, Text> {
private boolean isVertex;
private SafeMapperOutputs outputs;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
this.isVertex = context.getConfiguration().getClass(CLASS, Element.class, Element.class).equals(Vertex.class);
this.outputs = new SafeMapperOutputs(context);
}
private LongWritable longWritable = new LongWritable();
private Text text = new Text();
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, LongWritable, Text>.Context context) throws IOException, InterruptedException {
if (this.isVertex) {
if (value.hasPaths()) {
this.longWritable.set(value.getLongId());
this.text.set(ElementPicker.getPropertyAsString(value, Tokens._PROPERTIES));
for (int i = 0; i < value.pathCount(); i++) {
this.outputs.write(Tokens.SIDEEFFECT, this.longWritable, this.text);
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_PROCESSED, 1L);
}
} else {
long edgesProcessed = 0;
for (final Edge e : value.getEdges(Direction.OUT)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
this.longWritable.set(edge.getLongId());
this.text.set(ElementPicker.getPropertyAsString(edge, Tokens._PROPERTIES));
for (int i = 0; i < edge.pathCount(); i++) {
this.outputs.write(Tokens.SIDEEFFECT, this.longWritable, this.text);
}
edgesProcessed++;
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.OUT_EDGES_PROCESSED, edgesProcessed);
}
this.outputs.write(Tokens.GRAPH, NullWritable.get(), value);
}
@Override
public void cleanup(final Mapper<NullWritable, FaunusVertex, LongWritable, Text>.Context context) throws IOException, InterruptedException {
this.outputs.close();
}
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_transform_PropertyMapMap.java
|
6,272 |
public class LengthAssertion extends Assertion {
private static final ESLogger logger = Loggers.getLogger(LengthAssertion.class);
public LengthAssertion(String field, Object expectedValue) {
super(field, expectedValue);
}
@Override
protected void doAssert(Object actualValue, Object expectedValue) {
logger.trace("assert that [{}] has length [{}]", actualValue, expectedValue);
assertThat(expectedValue, instanceOf(Number.class));
int length = ((Number) expectedValue).intValue();
if (actualValue instanceof String) {
assertThat(errorMessage(), ((String) actualValue).length(), equalTo(length));
} else if (actualValue instanceof List) {
assertThat(errorMessage(), ((List) actualValue).size(), equalTo(length));
} else if (actualValue instanceof Map) {
assertThat(errorMessage(), ((Map) actualValue).keySet().size(), equalTo(length));
} else {
throw new UnsupportedOperationException("value is of unsupported type [" + actualValue.getClass().getSimpleName() + "]");
}
}
private String errorMessage() {
return "field [" + getField() + "] doesn't have length [" + getExpectedValue() + "]";
}
}
| 1no label
|
src_test_java_org_elasticsearch_test_rest_section_LengthAssertion.java
|
250 |
service.submitToMember(getUuidCallable, member, new ExecutionCallback() {
@Override
public void onResponse(Object response) {
result.set(response);
responseLatch.countDown();
}
@Override
public void onFailure(Throwable t) {
}
});
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_executor_ClientExecutorServiceSubmitTest.java
|
6 |
public class OIterableObjectArray<T> implements Iterable<T> {
private final Object object;
private int length;
public OIterableObjectArray(Object o) {
object = o;
length = Array.getLength(o);
}
/**
* Returns an iterator over a set of elements of type T.
*
* @return an Iterator.
*/
public Iterator<T> iterator() {
return new ObjIterator();
}
private class ObjIterator implements Iterator<T> {
private int p = 0;
/**
* Returns <tt>true</tt> if the iteration has more elements. (In other words, returns <tt>true</tt> if <tt>next</tt> would
* return an element rather than throwing an exception.)
*
* @return <tt>true</tt> if the iterator has more elements.
*/
public boolean hasNext() {
return p < length;
}
/**
* Returns the next element in the iteration.
*
* @return the next element in the iteration.
* @throws java.util.NoSuchElementException
* iteration has no more elements.
*/
@SuppressWarnings("unchecked")
public T next() {
if (p < length) {
return (T) Array.get(object, p++);
} else {
throw new NoSuchElementException();
}
}
/**
* Removes from the underlying collection the last element returned by the iterator (optional operation). This method can be
* called only once per call to <tt>next</tt>. The behavior of an iterator is unspecified if the underlying collection is
* modified while the iteration is in progress in any way other than by calling this method.
*
* @throws UnsupportedOperationException
* if the <tt>remove</tt> operation is not supported by this Iterator.
* @throws IllegalStateException
* if the <tt>next</tt> method has not yet been called, or the <tt>remove</tt> method has already been called after
* the last call to the <tt>next</tt> method.
*/
public void remove() {
throw new UnsupportedOperationException();
}
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_collection_OIterableObjectArray.java
|
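A hedged usage sketch: because java.lang.reflect.Array.get boxes primitives, the wrapper above also works for primitive arrays (the values here are illustrative):

OIterableObjectArray<Integer> ints = new OIterableObjectArray<Integer>(new int[] { 1, 2, 3 });
for (Integer i : ints) {
    System.out.println(i); // prints 1, 2, 3 on separate lines
}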
165 |
public class ForkJoinPool extends AbstractExecutorService {
/*
* Implementation Overview
*
* This class and its nested classes provide the main
* functionality and control for a set of worker threads:
* Submissions from non-FJ threads enter into submission queues.
* Workers take these tasks and typically split them into subtasks
* that may be stolen by other workers. Preference rules give
* first priority to processing tasks from their own queues (LIFO
* or FIFO, depending on mode), then to randomized FIFO steals of
* tasks in other queues.
*
* WorkQueues
* ==========
*
* Most operations occur within work-stealing queues (in nested
* class WorkQueue). These are special forms of Deques that
* support only three of the four possible end-operations -- push,
* pop, and poll (aka steal), under the further constraints that
* push and pop are called only from the owning thread (or, as
* extended here, under a lock), while poll may be called from
* other threads. (If you are unfamiliar with them, you probably
* want to read Herlihy and Shavit's book "The Art of
* Multiprocessor Programming", chapter 16 describing these in
* more detail before proceeding.) The main work-stealing queue
* design is roughly similar to those in the papers "Dynamic
* Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005
* (http://research.sun.com/scalable/pubs/index.html) and
* "Idempotent work stealing" by Michael, Saraswat, and Vechev,
* PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186).
* The main differences ultimately stem from GC requirements that
* we null out taken slots as soon as we can, to maintain as small
* a footprint as possible even in programs generating huge
* numbers of tasks. To accomplish this, we shift the CAS
* arbitrating pop vs poll (steal) from being on the indices
* ("base" and "top") to the slots themselves. So, both a
* successful pop and poll mainly entail a CAS of a slot from
* non-null to null. Because we rely on CASes of references, we
* do not need tag bits on base or top. They are simple ints as
* used in any circular array-based queue (see for example
* ArrayDeque). Updates to the indices must still be ordered in a
* way that guarantees that top == base means the queue is empty,
* but otherwise may err on the side of possibly making the queue
* appear nonempty when a push, pop, or poll have not fully
* committed. Note that this means that the poll operation,
* considered individually, is not wait-free. One thief cannot
* successfully continue until another in-progress one (or, if
* previously empty, a push) completes. However, in the
* aggregate, we ensure at least probabilistic non-blockingness.
* If an attempted steal fails, a thief always chooses a different
* random victim target to try next. So, in order for one thief to
* progress, it suffices for any in-progress poll or new push on
* any empty queue to complete. (This is why we normally use
* method pollAt and its variants that try once at the apparent
* base index, else consider alternative actions, rather than
* method poll.)
*
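* A simplified sketch of the slot-CAS poll described above (not the
* actual implementation: bounds checks, memory fences, and contention
* handling are elided, and casSlotToNull stands in for the internal
* Unsafe-based CAS):
*
*   ForkJoinTask<?> poll() {
*     int b; ForkJoinTask<?>[] a; ForkJoinTask<?> t;
*     while ((b = base) - top < 0 && (a = array) != null) {
*       int j = (a.length - 1) & b;
*       if ((t = a[j]) != null && base == b &&
*           casSlotToNull(a, j, t)) {  // the slot, not an index, is CASed
*         base = b + 1;                // publish the new base after winning
*         return t;
*       }
*     }
*     return null;
*   }
*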
* This approach also enables support of a user mode in which local
* task processing is in FIFO, not LIFO order, simply by using
* poll rather than pop. This can be useful in message-passing
* frameworks in which tasks are never joined. However neither
* mode considers affinities, loads, cache localities, etc, so
* rarely provide the best possible performance on a given
* machine, but portably provide good throughput by averaging over
* these factors. (Further, even if we did try to use such
* information, we do not usually have a basis for exploiting it.
* For example, some sets of tasks profit from cache affinities,
* but others are harmed by cache pollution effects.)
*
* WorkQueues are also used in a similar way for tasks submitted
* to the pool. We cannot mix these tasks in the same queues used
* for work-stealing (this would contaminate lifo/fifo
* processing). Instead, we randomly associate submission queues
* with submitting threads, using a form of hashing. The
* ThreadLocal Submitter class contains a value initially used as
* a hash code for choosing existing queues, but may be randomly
* repositioned upon contention with other submitters. In
* essence, submitters act like workers except that they are
* restricted to executing local tasks that they submitted (or in
* the case of CountedCompleters, others with the same root task).
* However, because most shared/external queue operations are more
* expensive than internal, and because, at steady state, external
* submitters will compete for CPU with workers, ForkJoinTask.join
* and related methods disable them from repeatedly helping to
* process tasks if all workers are active. Insertion of tasks in
* shared mode requires a lock (mainly to protect in the case of
* resizing) but we use only a simple spinlock (using bits in
* field qlock), because submitters encountering a busy queue move
* on to try or create other queues -- they block only when
* creating and registering new queues.
*
* Management
* ==========
*
* The main throughput advantages of work-stealing stem from
* decentralized control -- workers mostly take tasks from
* themselves or each other. We cannot negate this in the
* implementation of other management responsibilities. The main
* tactic for avoiding bottlenecks is packing nearly all
* essentially atomic control state into two volatile variables
* that are by far most often read (not written) as status and
* consistency checks.
*
* Field "ctl" contains 64 bits holding all the information needed
* to atomically decide to add, inactivate, enqueue (on an event
* queue), dequeue, and/or re-activate workers. To enable this
* packing, we restrict maximum parallelism to (1<<15)-1 (which is
* far in excess of normal operating range) to allow ids, counts,
* and their negations (used for thresholding) to fit into 16bit
* fields.
*
* Field "plock" is a form of sequence lock with a saturating
* shutdown bit (similarly for per-queue "qlocks"), mainly
* protecting updates to the workQueues array, as well as to
* enable shutdown. When used as a lock, it is normally only very
* briefly held, so is nearly always available after at most a
* brief spin, but we use a monitor-based backup strategy to
* block when needed.
*
* Recording WorkQueues. WorkQueues are recorded in the
* "workQueues" array that is created upon first use and expanded
* if necessary. Updates to the array while recording new workers
* and unrecording terminated ones are protected from each other
* by a lock but the array is otherwise concurrently readable, and
* accessed directly. To simplify index-based operations, the
* array size is always a power of two, and all readers must
* tolerate null slots. Worker queues are at odd indices. Shared
* (submission) queues are at even indices, up to a maximum of 64
* slots, to limit growth even if array needs to expand to add
* more workers. Grouping them together in this way simplifies and
* speeds up task scanning.
*
* All worker thread creation is on-demand, triggered by task
* submissions, replacement of terminated workers, and/or
* compensation for blocked workers. However, all other support
* code is set up to work with other policies. To ensure that we
* do not hold on to worker references that would prevent GC, ALL
* accesses to workQueues are via indices into the workQueues
* array (which is one source of some of the messy code
* constructions here). In essence, the workQueues array serves as
* a weak reference mechanism. Thus for example the wait queue
* field of ctl stores indices, not references. Access to the
* workQueues in associated methods (for example signalWork) must
* both index-check and null-check the IDs. All such accesses
* ignore bad IDs by returning out early from what they are doing,
* since this can only be associated with termination, in which
* case it is OK to give up. All uses of the workQueues array
* also check that it is non-null (even if previously
* non-null). This allows nulling during termination, which is
* currently not necessary, but remains an option for
* resource-revocation-based shutdown schemes. It also helps
* reduce JIT issuance of uncommon-trap code, which tends to
* unnecessarily complicate control flow in some methods.
*
* Event Queuing. Unlike HPC work-stealing frameworks, we cannot
* let workers spin indefinitely scanning for tasks when none can
* be found immediately, and we cannot start/resume workers unless
* there appear to be tasks available. On the other hand, we must
* quickly prod them into action when new tasks are submitted or
* generated. In many usages, ramp-up time to activate workers is
* the main limiting factor in overall performance (this is
* compounded at program start-up by JIT compilation and
* allocation). So we try to streamline this as much as possible.
* We park/unpark workers after placing in an event wait queue
* when they cannot find work. This "queue" is actually a simple
* Treiber stack, headed by the "id" field of ctl, plus a 15bit
* counter value (that reflects the number of times a worker has
* been inactivated) to avoid ABA effects (we need only as many
* version numbers as worker threads). Successors are held in
* field WorkQueue.nextWait. Queuing deals with several intrinsic
* races, mainly that a task-producing thread can miss seeing (and
* signalling) another thread that gave up looking for work but
* has not yet entered the wait queue. We solve this by requiring
* a full sweep of all workers (via repeated calls to method
* scan()) both before and after a newly waiting worker is added
* to the wait queue. During a rescan, the worker might release
* some other queued worker rather than itself, which has the same
* net effect. Because enqueued workers may actually be rescanning
* rather than waiting, we set and clear the "parker" field of
* WorkQueues to reduce unnecessary calls to unpark. (This
* requires a secondary recheck to avoid missed signals.) Note
* the unusual conventions about Thread.interrupts surrounding
* parking and other blocking: Because interrupts are used solely
* to alert threads to check termination, which is checked anyway
* upon blocking, we clear status (using Thread.interrupted)
* before any call to park, so that park does not immediately
* return due to status being set via some other unrelated call to
* interrupt in user code.
*
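 * A minimal sketch of the parking protocol just described (field
 * names as used below in method scan, where the real version is
 * inlined with extra rechecks):
 *
 *   Thread.interrupted();              // clear status before park
 *   w.parker = Thread.currentThread();
 *   if (w.eventCount < 0)              // recheck to avoid lost signals
 *       U.park(false, 0L);             // block until unparked
 *   w.parker = null;
 *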
* Signalling. We create or wake up workers only when there
* appears to be at least one task they might be able to find and
* execute. However, many other threads may notice the same task
* and each signal to wake up a thread that might take it. So in
* general, pools will be over-signalled. When a submission is
* added or another worker adds a task to a queue that has fewer
* than two tasks, they signal waiting workers (or trigger
* creation of new ones if fewer than the given parallelism level
 * -- signalWork), and may leave a hint to the unparked worker to
 * help signal others upon wakeup. These primary signals are
* buttressed by others (see method helpSignal) whenever other
* threads scan for work or do not have a task to process. On
* most platforms, signalling (unpark) overhead time is noticeably
* long, and the time between signalling a thread and it actually
* making progress can be very noticeably long, so it is worth
* offloading these delays from critical paths as much as
* possible.
*
* Trimming workers. To release resources after periods of lack of
* use, a worker starting to wait when the pool is quiescent will
* time out and terminate if the pool has remained quiescent for a
* given period -- a short period if there are more threads than
* parallelism, longer as the number of threads decreases. This
* will slowly propagate, eventually terminating all workers after
* periods of non-use.
*
* Shutdown and Termination. A call to shutdownNow atomically sets
* a plock bit and then (non-atomically) sets each worker's
* qlock status, cancels all unprocessed tasks, and wakes up
* all waiting workers. Detecting whether termination should
* commence after a non-abrupt shutdown() call requires more work
* and bookkeeping. We need consensus about quiescence (i.e., that
* there is no more work). The active count provides a primary
* indication but non-abrupt shutdown still requires a rechecking
* scan for any workers that are inactive but not queued.
*
* Joining Tasks
* =============
*
* Any of several actions may be taken when one worker is waiting
* to join a task stolen (or always held) by another. Because we
* are multiplexing many tasks on to a pool of workers, we can't
* just let them block (as in Thread.join). We also cannot just
* reassign the joiner's run-time stack with another and replace
 * it later, which would be a form of "continuation" that, even if
 * possible, is not necessarily a good idea since we sometimes need
 * both an unblocked task and its continuation to progress.
* Instead we combine two tactics:
*
* Helping: Arranging for the joiner to execute some task that it
* would be running if the steal had not occurred.
*
* Compensating: Unless there are already enough live threads,
* method tryCompensate() may create or re-activate a spare
* thread to compensate for blocked joiners until they unblock.
*
* A third form (implemented in tryRemoveAndExec) amounts to
* helping a hypothetical compensator: If we can readily tell that
* a possible action of a compensator is to steal and execute the
* task being joined, the joining thread can do so directly,
* without the need for a compensation thread (although at the
* expense of larger run-time stacks, but the tradeoff is
* typically worthwhile).
*
 * The ManagedBlocker extension API can't use helping, so it relies
 * only on compensation in method awaitBlocker.
*
* The algorithm in tryHelpStealer entails a form of "linear"
* helping: Each worker records (in field currentSteal) the most
* recent task it stole from some other worker. Plus, it records
* (in field currentJoin) the task it is currently actively
* joining. Method tryHelpStealer uses these markers to try to
* find a worker to help (i.e., steal back a task from and execute
* it) that could hasten completion of the actively joined task.
* In essence, the joiner executes a task that would be on its own
* local deque had the to-be-joined task not been stolen. This may
* be seen as a conservative variant of the approach in Wagner &
* Calder "Leapfrogging: a portable technique for implementing
* efficient futures" SIGPLAN Notices, 1993
* (http://portal.acm.org/citation.cfm?id=155354). It differs in
* that: (1) We only maintain dependency links across workers upon
* steals, rather than use per-task bookkeeping. This sometimes
* requires a linear scan of workQueues array to locate stealers,
* but often doesn't because stealers leave hints (that may become
* stale/wrong) of where to locate them. It is only a hint
* because a worker might have had multiple steals and the hint
* records only one of them (usually the most current). Hinting
* isolates cost to when it is needed, rather than adding to
* per-task overhead. (2) It is "shallow", ignoring nesting and
* potentially cyclic mutual steals. (3) It is intentionally
* racy: field currentJoin is updated only while actively joining,
* which means that we miss links in the chain during long-lived
* tasks, GC stalls etc (which is OK since blocking in such cases
* is usually a good idea). (4) We bound the number of attempts
* to find work (see MAX_HELP) and fall back to suspending the
* worker and if necessary replacing it with another.
*
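 * Conceptually, the loop reduces to the following loose sketch
 * (findStealerOf is a hypothetical stand-in for the hint-guided
 * scan; the real code in tryHelpStealer adds staleness and cycle
 * checks at every step):
 *
 *   for (WorkQueue j = joiner; task.status >= 0;) {
 *       WorkQueue v = findStealerOf(j.currentJoin);
 *       if (v == null)
 *           break;                    // no known stealer
 *       ForkJoinTask<?> t = v.poll();
 *       if (t != null)
 *           joiner.runSubtask(t);     // steal back and execute
 *       else
 *           j = v;                    // descend to v's own join
 *   }
 *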
* Helping actions for CountedCompleters are much simpler: Method
* helpComplete can take and execute any task with the same root
* as the task being waited on. However, this still entails some
* traversal of completer chains, so is less efficient than using
* CountedCompleters without explicit joins.
*
* It is impossible to keep exactly the target parallelism number
* of threads running at any given time. Determining the
* existence of conservatively safe helping targets, the
* availability of already-created spares, and the apparent need
* to create new spares are all racy, so we rely on multiple
* retries of each. Compensation in the apparent absence of
* helping opportunities is challenging to control on JVMs, where
* GC and other activities can stall progress of tasks that in
* turn stall out many other dependent tasks, without us being
* able to determine whether they will ever require compensation.
* Even though work-stealing otherwise encounters little
* degradation in the presence of more threads than cores,
* aggressively adding new threads in such cases entails risk of
* unwanted positive feedback control loops in which more threads
* cause more dependent stalls (as well as delayed progress of
* unblocked threads to the point that we know they are available)
* leading to more situations requiring more threads, and so
* on. This aspect of control can be seen as an (analytically
* intractable) game with an opponent that may choose the worst
* (for us) active thread to stall at any time. We take several
* precautions to bound losses (and thus bound gains), mainly in
* methods tryCompensate and awaitJoin.
*
* Common Pool
* ===========
*
* The static common Pool always exists after static
* initialization. Since it (or any other created pool) need
* never be used, we minimize initial construction overhead and
* footprint to the setup of about a dozen fields, with no nested
* allocation. Most bootstrapping occurs within method
* fullExternalPush during the first submission to the pool.
*
* When external threads submit to the common pool, they can
* perform some subtask processing (see externalHelpJoin and
* related methods). We do not need to record whether these
* submissions are to the common pool -- if not, externalHelpJoin
* returns quickly (at the most helping to signal some common pool
* workers). These submitters would otherwise be blocked waiting
* for completion, so the extra effort (with liberally sprinkled
* task status checks) in inapplicable cases amounts to an odd
* form of limited spin-wait before blocking in ForkJoinTask.join.
*
* Style notes
* ===========
*
* There is a lot of representation-level coupling among classes
* ForkJoinPool, ForkJoinWorkerThread, and ForkJoinTask. The
* fields of WorkQueue maintain data structures managed by
* ForkJoinPool, so are directly accessed. There is little point
* trying to reduce this, since any associated future changes in
* representations will need to be accompanied by algorithmic
* changes anyway. Several methods intrinsically sprawl because
* they must accumulate sets of consistent reads of volatiles held
* in local variables. Methods signalWork() and scan() are the
* main bottlenecks, so are especially heavily
* micro-optimized/mangled. There are lots of inline assignments
* (of form "while ((local = field) != 0)") which are usually the
* simplest way to ensure the required read orderings (which are
* sometimes critical). This leads to a "C"-like style of listing
* declarations of these locals at the heads of methods or blocks.
* There are several occurrences of the unusual "do {} while
* (!cas...)" which is the simplest way to force an update of a
* CAS'ed variable. There are also other coding oddities (including
* several unnecessary-looking hoisted null checks) that help
* some methods perform reasonably even when interpreted (not
* compiled).
*
* The order of declarations in this file is:
* (1) Static utility functions
* (2) Nested (static) classes
* (3) Static fields
* (4) Fields, along with constants used when unpacking some of them
* (5) Internal control methods
* (6) Callbacks and other support for ForkJoinTask methods
* (7) Exported methods
* (8) Static block initializing statics in minimally dependent order
*/
// Static utilities
/**
* If there is a security manager, makes sure caller has
* permission to modify threads.
*/
private static void checkPermission() {
SecurityManager security = System.getSecurityManager();
if (security != null)
security.checkPermission(modifyThreadPermission);
}
// Nested classes
/**
* Factory for creating new {@link ForkJoinWorkerThread}s.
* A {@code ForkJoinWorkerThreadFactory} must be defined and used
* for {@code ForkJoinWorkerThread} subclasses that extend base
* functionality or initialize threads with different contexts.
*/
public static interface ForkJoinWorkerThreadFactory {
/**
* Returns a new worker thread operating in the given pool.
*
* @param pool the pool this thread works in
* @throws NullPointerException if the pool is null
*/
public ForkJoinWorkerThread newThread(ForkJoinPool pool);
}
/**
* Default ForkJoinWorkerThreadFactory implementation; creates a
* new ForkJoinWorkerThread.
*/
static final class DefaultForkJoinWorkerThreadFactory
implements ForkJoinWorkerThreadFactory {
public final ForkJoinWorkerThread newThread(ForkJoinPool pool) {
return new ForkJoinWorkerThread(pool);
}
}
/**
* Per-thread records for threads that submit to pools. Currently
* holds only pseudo-random seed / index that is used to choose
* submission queues in method externalPush. In the future, this may
* also incorporate a means to implement different task rejection
* and resubmission policies.
*
* Seeds for submitters and workers/workQueues work in basically
* the same way but are initialized and updated using slightly
* different mechanics. Both are initialized using the same
* approach as in class ThreadLocal, where successive values are
* unlikely to collide with previous values. Seeds are then
* randomly modified upon collisions using xorshifts, which
* requires a non-zero seed.
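     *
     * The xorshift used on collision is the same three-step form
     * that appears in fullExternalPush and WorkQueue.nextSeed:
     *
     *   r ^= r << 13; r ^= r >>> 17; r ^= r << 5;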
*/
static final class Submitter {
int seed;
Submitter(int s) { seed = s; }
}
/**
* Class for artificial tasks that are used to replace the target
* of local joins if they are removed from an interior queue slot
* in WorkQueue.tryRemoveAndExec. We don't need the proxy to
* actually do anything beyond having a unique identity.
*/
static final class EmptyTask extends ForkJoinTask<Void> {
private static final long serialVersionUID = -7721805057305804111L;
EmptyTask() { status = ForkJoinTask.NORMAL; } // force done
public final Void getRawResult() { return null; }
public final void setRawResult(Void x) {}
public final boolean exec() { return true; }
}
/**
* Queues supporting work-stealing as well as external task
* submission. See above for main rationale and algorithms.
* Implementation relies heavily on "Unsafe" intrinsics
* and selective use of "volatile":
*
* Field "base" is the index (mod array.length) of the least valid
* queue slot, which is always the next position to steal (poll)
* from if nonempty. Reads and writes require volatile orderings
* but not CAS, because updates are only performed after slot
* CASes.
*
* Field "top" is the index (mod array.length) of the next queue
* slot to push to or pop from. It is written only by owner thread
* for push, or under lock for external/shared push, and accessed
* by other threads only after reading (volatile) base. Both top
* and base are allowed to wrap around on overflow, but (top -
* base) (or more commonly -(base - top) to force volatile read of
* base before top) still estimates size. The lock ("qlock") is
* forced to -1 on termination, causing all further lock attempts
* to fail. (Note: we don't need CAS for termination state because
* upon pool shutdown, all shared-queues will stop being used
* anyway.) Nearly all lock bodies are set up so that exceptions
* within lock bodies are "impossible" (modulo JVM errors that
* would cause failure anyway.)
*
* The array slots are read and written using the emulation of
* volatiles/atomics provided by Unsafe. Insertions must in
* general use putOrderedObject as a form of releasing store to
* ensure that all writes to the task object are ordered before
* its publication in the queue. All removals entail a CAS to
* null. The array is always a power of two. To ensure safety of
* Unsafe array operations, all accesses perform explicit null
* checks and implicit bounds checks via power-of-two masking.
*
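     * For example (a sketch of the slot addressing used throughout),
     * the byte offset of logical slot i of array a, and a volatile
     * read of it, are:
     *
     *   long off = ((i & (a.length - 1)) << ASHIFT) + ABASE;
     *   ForkJoinTask<?> t = (ForkJoinTask<?>)U.getObjectVolatile(a, off);
     *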
* In addition to basic queuing support, this class contains
* fields described elsewhere to control execution. It turns out
* to work better memory-layout-wise to include them in this class
* rather than a separate class.
*
* Performance on most platforms is very sensitive to placement of
* instances of both WorkQueues and their arrays -- we absolutely
* do not want multiple WorkQueue instances or multiple queue
* arrays sharing cache lines. (It would be best for queue objects
* and their arrays to share, but there is nothing available to
* help arrange that). Unfortunately, because they are recorded
* in a common array, WorkQueue instances are often moved to be
* adjacent by garbage collectors. To reduce impact, we use field
* padding that works OK on common platforms; this effectively
* trades off slightly slower average field access for the sake of
* avoiding really bad worst-case access. (Until better JVM
* support is in place, this padding is dependent on transient
* properties of JVM field layout rules.) We also take care in
* allocating, sizing and resizing the array. Non-shared queue
* arrays are initialized by workers before use. Others are
* allocated on first use.
*/
static final class WorkQueue {
/**
* Capacity of work-stealing queue array upon initialization.
* Must be a power of two; at least 4, but should be larger to
* reduce or eliminate cacheline sharing among queues.
* Currently, it is much larger, as a partial workaround for
* the fact that JVMs often place arrays in locations that
* share GC bookkeeping (especially cardmarks) such that
* per-write accesses encounter serious memory contention.
*/
static final int INITIAL_QUEUE_CAPACITY = 1 << 13;
/**
* Maximum size for queue arrays. Must be a power of two less
* than or equal to 1 << (31 - width of array entry) to ensure
* lack of wraparound of index calculations, but defined to a
* value a bit less than this to help users trap runaway
* programs before saturating systems.
*/
static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M
// Heuristic padding to ameliorate unfortunate memory placements
volatile long pad00, pad01, pad02, pad03, pad04, pad05, pad06;
int seed; // for random scanning; initialize nonzero
volatile int eventCount; // encoded inactivation count; < 0 if inactive
int nextWait; // encoded record of next event waiter
int hint; // steal or signal hint (index)
int poolIndex; // index of this queue in pool (or 0)
final int mode; // 0: lifo, > 0: fifo, < 0: shared
int nsteals; // number of steals
volatile int qlock; // 1: locked, -1: terminate; else 0
volatile int base; // index of next slot for poll
int top; // index of next slot for push
ForkJoinTask<?>[] array; // the elements (initially unallocated)
final ForkJoinPool pool; // the containing pool (may be null)
final ForkJoinWorkerThread owner; // owning thread or null if shared
volatile Thread parker; // == owner during call to park; else null
volatile ForkJoinTask<?> currentJoin; // task being joined in awaitJoin
ForkJoinTask<?> currentSteal; // current non-local task being executed
volatile Object pad10, pad11, pad12, pad13, pad14, pad15, pad16, pad17;
volatile Object pad18, pad19, pad1a, pad1b, pad1c, pad1d;
WorkQueue(ForkJoinPool pool, ForkJoinWorkerThread owner, int mode,
int seed) {
this.pool = pool;
this.owner = owner;
this.mode = mode;
this.seed = seed;
// Place indices in the center of array (that is not yet allocated)
base = top = INITIAL_QUEUE_CAPACITY >>> 1;
}
/**
* Returns the approximate number of tasks in the queue.
*/
final int queueSize() {
int n = base - top; // non-owner callers must read base first
return (n >= 0) ? 0 : -n; // ignore transient negative
}
/**
* Provides a more accurate estimate of whether this queue has
* any tasks than does queueSize, by checking whether a
* near-empty queue has at least one unclaimed task.
*/
final boolean isEmpty() {
ForkJoinTask<?>[] a; int m, s;
int n = base - (s = top);
return (n >= 0 ||
(n == -1 &&
((a = array) == null ||
(m = a.length - 1) < 0 ||
U.getObject
(a, (long)((m & (s - 1)) << ASHIFT) + ABASE) == null)));
}
/**
* Pushes a task. Call only by owner in unshared queues. (The
* shared-queue version is embedded in method externalPush.)
*
* @param task the task. Caller must ensure non-null.
* @throws RejectedExecutionException if array cannot be resized
*/
final void push(ForkJoinTask<?> task) {
ForkJoinTask<?>[] a; ForkJoinPool p;
int s = top, m, n;
if ((a = array) != null) { // ignore if queue removed
int j = (((m = a.length - 1) & s) << ASHIFT) + ABASE;
U.putOrderedObject(a, j, task);
if ((n = (top = s + 1) - base) <= 2) {
if ((p = pool) != null)
p.signalWork(this);
}
else if (n >= m)
growArray();
}
}
/**
* Initializes or doubles the capacity of array. Call either
* by owner or with lock held -- it is OK for base, but not
* top, to move while resizings are in progress.
*/
final ForkJoinTask<?>[] growArray() {
ForkJoinTask<?>[] oldA = array;
int size = oldA != null ? oldA.length << 1 : INITIAL_QUEUE_CAPACITY;
if (size > MAXIMUM_QUEUE_CAPACITY)
throw new RejectedExecutionException("Queue capacity exceeded");
int oldMask, t, b;
ForkJoinTask<?>[] a = array = new ForkJoinTask<?>[size];
if (oldA != null && (oldMask = oldA.length - 1) >= 0 &&
(t = top) - (b = base) > 0) {
int mask = size - 1;
do {
ForkJoinTask<?> x;
int oldj = ((b & oldMask) << ASHIFT) + ABASE;
int j = ((b & mask) << ASHIFT) + ABASE;
x = (ForkJoinTask<?>)U.getObjectVolatile(oldA, oldj);
if (x != null &&
U.compareAndSwapObject(oldA, oldj, x, null))
U.putObjectVolatile(a, j, x);
} while (++b != t);
}
return a;
}
/**
* Takes next task, if one exists, in LIFO order. Call only
* by owner in unshared queues.
*/
final ForkJoinTask<?> pop() {
ForkJoinTask<?>[] a; ForkJoinTask<?> t; int m;
if ((a = array) != null && (m = a.length - 1) >= 0) {
for (int s; (s = top - 1) - base >= 0;) {
long j = ((m & s) << ASHIFT) + ABASE;
if ((t = (ForkJoinTask<?>)U.getObject(a, j)) == null)
break;
if (U.compareAndSwapObject(a, j, t, null)) {
top = s;
return t;
}
}
}
return null;
}
/**
* Takes a task in FIFO order if b is base of queue and a task
* can be claimed without contention. Specialized versions
* appear in ForkJoinPool methods scan and tryHelpStealer.
*/
final ForkJoinTask<?> pollAt(int b) {
ForkJoinTask<?> t; ForkJoinTask<?>[] a;
if ((a = array) != null) {
int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) != null &&
base == b &&
U.compareAndSwapObject(a, j, t, null)) {
base = b + 1;
return t;
}
}
return null;
}
/**
* Takes next task, if one exists, in FIFO order.
*/
final ForkJoinTask<?> poll() {
ForkJoinTask<?>[] a; int b; ForkJoinTask<?> t;
while ((b = base) - top < 0 && (a = array) != null) {
int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
if (t != null) {
if (base == b &&
U.compareAndSwapObject(a, j, t, null)) {
base = b + 1;
return t;
}
}
else if (base == b) {
if (b + 1 == top)
break;
Thread.yield(); // wait for lagging update (very rare)
}
}
return null;
}
/**
* Takes next task, if one exists, in order specified by mode.
*/
final ForkJoinTask<?> nextLocalTask() {
return mode == 0 ? pop() : poll();
}
/**
* Returns next task, if one exists, in order specified by mode.
*/
final ForkJoinTask<?> peek() {
ForkJoinTask<?>[] a = array; int m;
if (a == null || (m = a.length - 1) < 0)
return null;
int i = mode == 0 ? top - 1 : base;
int j = ((i & m) << ASHIFT) + ABASE;
return (ForkJoinTask<?>)U.getObjectVolatile(a, j);
}
/**
* Pops the given task only if it is at the current top.
* (A shared version is available only via FJP.tryExternalUnpush)
*/
final boolean tryUnpush(ForkJoinTask<?> t) {
ForkJoinTask<?>[] a; int s;
if ((a = array) != null && (s = top) != base &&
U.compareAndSwapObject
(a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) {
top = s;
return true;
}
return false;
}
/**
* Removes and cancels all known tasks, ignoring any exceptions.
*/
final void cancelAll() {
ForkJoinTask.cancelIgnoringExceptions(currentJoin);
ForkJoinTask.cancelIgnoringExceptions(currentSteal);
for (ForkJoinTask<?> t; (t = poll()) != null; )
ForkJoinTask.cancelIgnoringExceptions(t);
}
/**
* Computes next value for random probes. Scans don't require
* a very high quality generator, but also not a crummy one.
* Marsaglia xor-shift is cheap and works well enough. Note:
* This is manually inlined in its usages in ForkJoinPool to
* avoid writes inside busy scan loops.
*/
final int nextSeed() {
int r = seed;
r ^= r << 13;
r ^= r >>> 17;
return seed = r ^= r << 5;
}
// Specialized execution methods
/**
* Pops and runs tasks until empty.
*/
private void popAndExecAll() {
// A bit faster than repeated pop calls
ForkJoinTask<?>[] a; int m, s; long j; ForkJoinTask<?> t;
while ((a = array) != null && (m = a.length - 1) >= 0 &&
(s = top - 1) - base >= 0 &&
(t = ((ForkJoinTask<?>)
U.getObject(a, j = ((m & s) << ASHIFT) + ABASE)))
!= null) {
if (U.compareAndSwapObject(a, j, t, null)) {
top = s;
t.doExec();
}
}
}
/**
* Polls and runs tasks until empty.
*/
private void pollAndExecAll() {
for (ForkJoinTask<?> t; (t = poll()) != null;)
t.doExec();
}
/**
     * If present, removes from queue and executes the given task,
     * or any other cancelled task. Returns true on any CAS
     * or consistency check failure so caller can retry.
*
* @return false if no progress can be made, else true
*/
final boolean tryRemoveAndExec(ForkJoinTask<?> task) {
boolean stat = true, removed = false, empty = true;
ForkJoinTask<?>[] a; int m, s, b, n;
if ((a = array) != null && (m = a.length - 1) >= 0 &&
(n = (s = top) - (b = base)) > 0) {
for (ForkJoinTask<?> t;;) { // traverse from s to b
int j = ((--s & m) << ASHIFT) + ABASE;
t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
if (t == null) // inconsistent length
break;
else if (t == task) {
if (s + 1 == top) { // pop
if (!U.compareAndSwapObject(a, j, task, null))
break;
top = s;
removed = true;
}
else if (base == b) // replace with proxy
removed = U.compareAndSwapObject(a, j, task,
new EmptyTask());
break;
}
else if (t.status >= 0)
empty = false;
else if (s + 1 == top) { // pop and throw away
if (U.compareAndSwapObject(a, j, t, null))
top = s;
break;
}
if (--n == 0) {
if (!empty && base == b)
stat = false;
break;
}
}
}
if (removed)
task.doExec();
return stat;
}
/**
* Polls for and executes the given task or any other task in
* its CountedCompleter computation.
*/
final boolean pollAndExecCC(ForkJoinTask<?> root) {
ForkJoinTask<?>[] a; int b; Object o;
outer: while ((b = base) - top < 0 && (a = array) != null) {
long j = (((a.length - 1) & b) << ASHIFT) + ABASE;
if ((o = U.getObject(a, j)) == null ||
!(o instanceof CountedCompleter))
break;
for (CountedCompleter<?> t = (CountedCompleter<?>)o, r = t;;) {
if (r == root) {
if (base == b &&
U.compareAndSwapObject(a, j, t, null)) {
base = b + 1;
t.doExec();
return true;
}
else
break; // restart
}
if ((r = r.completer) == null)
break outer; // not part of root computation
}
}
return false;
}
/**
* Executes a top-level task and any local tasks remaining
* after execution.
*/
final void runTask(ForkJoinTask<?> t) {
if (t != null) {
(currentSteal = t).doExec();
currentSteal = null;
++nsteals;
if (base - top < 0) { // process remaining local tasks
if (mode == 0)
popAndExecAll();
else
pollAndExecAll();
}
}
}
/**
* Executes a non-top-level (stolen) task.
*/
final void runSubtask(ForkJoinTask<?> t) {
if (t != null) {
ForkJoinTask<?> ps = currentSteal;
(currentSteal = t).doExec();
currentSteal = ps;
}
}
/**
* Returns true if owned and not known to be blocked.
*/
final boolean isApparentlyUnblocked() {
Thread wt; Thread.State s;
return (eventCount >= 0 &&
(wt = owner) != null &&
(s = wt.getState()) != Thread.State.BLOCKED &&
s != Thread.State.WAITING &&
s != Thread.State.TIMED_WAITING);
}
// Unsafe mechanics
private static final sun.misc.Unsafe U;
private static final long QLOCK;
private static final int ABASE;
private static final int ASHIFT;
static {
try {
U = getUnsafe();
Class<?> k = WorkQueue.class;
Class<?> ak = ForkJoinTask[].class;
QLOCK = U.objectFieldOffset
(k.getDeclaredField("qlock"));
ABASE = U.arrayBaseOffset(ak);
int scale = U.arrayIndexScale(ak);
if ((scale & (scale - 1)) != 0)
throw new Error("data type scale not a power of two");
ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
} catch (Exception e) {
throw new Error(e);
}
}
}
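    /*
     * A hedged usage sketch of the WorkQueue protocol above (the
     * owner operates LIFO at top; stealers take FIFO at base):
     *
     *   q.push(task);                  // owner only: push at top
     *   ForkJoinTask<?> t = q.pop();   // owner only: pop at top
     *   ForkJoinTask<?> u = q.poll();  // any thread: take at base
     */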
// static fields (initialized in static initializer below)
/**
* Creates a new ForkJoinWorkerThread. This factory is used unless
* overridden in ForkJoinPool constructors.
*/
public static final ForkJoinWorkerThreadFactory
defaultForkJoinWorkerThreadFactory;
/**
* Per-thread submission bookkeeping. Shared across all pools
* to reduce ThreadLocal pollution and because random motion
* to avoid contention in one pool is likely to hold for others.
* Lazily initialized on first submission (but null-checked
* in other contexts to avoid unnecessary initialization).
*/
static final ThreadLocal<Submitter> submitters;
/**
* Permission required for callers of methods that may start or
* kill threads.
*/
private static final RuntimePermission modifyThreadPermission;
/**
     * Common (static) pool. Non-null for public use unless a static
     * construction exception occurs, but internal usages null-check
     * on use to paranoically avoid potential initialization
     * circularities as well as to simplify generated code.
*/
static final ForkJoinPool common;
/**
* Common pool parallelism. Must equal common.parallelism.
*/
static final int commonParallelism;
/**
* Sequence number for creating workerNamePrefix.
*/
private static int poolNumberSequence;
/**
* Returns the next sequence number. We don't expect this to
* ever contend, so use simple builtin sync.
*/
private static final synchronized int nextPoolId() {
return ++poolNumberSequence;
}
// static constants
/**
* Initial timeout value (in nanoseconds) for the thread
* triggering quiescence to park waiting for new work. On timeout,
* the thread will instead try to shrink the number of
* workers. The value should be large enough to avoid overly
* aggressive shrinkage during most transient stalls (long GCs
* etc).
*/
private static final long IDLE_TIMEOUT = 2000L * 1000L * 1000L; // 2sec
/**
* Timeout value when there are more threads than parallelism level
*/
private static final long FAST_IDLE_TIMEOUT = 200L * 1000L * 1000L;
/**
* Tolerance for idle timeouts, to cope with timer undershoots
*/
private static final long TIMEOUT_SLOP = 2000000L;
/**
* The maximum stolen->joining link depth allowed in method
* tryHelpStealer. Must be a power of two. Depths for legitimate
* chains are unbounded, but we use a fixed constant to avoid
* (otherwise unchecked) cycles and to bound staleness of
* traversal parameters at the expense of sometimes blocking when
* we could be helping.
*/
private static final int MAX_HELP = 64;
/**
* Increment for seed generators. See class ThreadLocal for
* explanation.
*/
private static final int SEED_INCREMENT = 0x61c88647;
/*
* Bits and masks for control variables
*
* Field ctl is a long packed with:
* AC: Number of active running workers minus target parallelism (16 bits)
* TC: Number of total workers minus target parallelism (16 bits)
* ST: true if pool is terminating (1 bit)
* EC: the wait count of top waiting thread (15 bits)
* ID: poolIndex of top of Treiber stack of waiters (16 bits)
*
* When convenient, we can extract the upper 32 bits of counts and
* the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e =
* (int)ctl. The ec field is never accessed alone, but always
* together with id and st. The offsets of counts by the target
* parallelism and the positionings of fields makes it possible to
* perform the most common checks via sign tests of fields: When
* ac is negative, there are not enough active workers, when tc is
* negative, there are not enough total workers, and when e is
* negative, the pool is terminating. To deal with these possibly
* negative fields, we use casts in and out of "short" and/or
* signed shifts to maintain signedness.
*
* When a thread is queued (inactivated), its eventCount field is
* set negative, which is the only way to tell if a worker is
* prevented from executing tasks, even though it must continue to
* scan for them to avoid queuing races. Note however that
* eventCount updates lag releases so usage requires care.
*
* Field plock is an int packed with:
* SHUTDOWN: true if shutdown is enabled (1 bit)
* SEQ: a sequence lock, with PL_LOCK bit set if locked (30 bits)
* SIGNAL: set when threads may be waiting on the lock (1 bit)
*
* The sequence number enables simple consistency checks:
* Staleness of read-only operations on the workQueues array can
* be checked by comparing plock before vs after the reads.
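     *
     * A worked unpacking sketch (illustrative only; the
     * implementation inlines these shifts at use sites):
     *
     *   int u  = (int)(ctl >>> 32);        // upper half: AC|TC
     *   int ac = (short)(u >> UAC_SHIFT);  // signed active-count delta
     *   int tc = (short)u;                 // signed total-count delta
     *   int e  = (int)ctl;                 // lower half: ST|EC|ID
     *   boolean terminating = e < 0;       // STOP_BIT set
     *   boolean locked = (plock & PL_LOCK) != 0;
     *   boolean shutdown = (plock & SHUTDOWN) != 0;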
*/
// bit positions/shifts for fields
private static final int AC_SHIFT = 48;
private static final int TC_SHIFT = 32;
private static final int ST_SHIFT = 31;
private static final int EC_SHIFT = 16;
// bounds
private static final int SMASK = 0xffff; // short bits
private static final int MAX_CAP = 0x7fff; // max #workers - 1
private static final int EVENMASK = 0xfffe; // even short bits
private static final int SQMASK = 0x007e; // max 64 (even) slots
private static final int SHORT_SIGN = 1 << 15;
private static final int INT_SIGN = 1 << 31;
// masks
private static final long STOP_BIT = 0x0001L << ST_SHIFT;
private static final long AC_MASK = ((long)SMASK) << AC_SHIFT;
private static final long TC_MASK = ((long)SMASK) << TC_SHIFT;
// units for incrementing and decrementing
private static final long TC_UNIT = 1L << TC_SHIFT;
private static final long AC_UNIT = 1L << AC_SHIFT;
// masks and units for dealing with u = (int)(ctl >>> 32)
private static final int UAC_SHIFT = AC_SHIFT - 32;
private static final int UTC_SHIFT = TC_SHIFT - 32;
private static final int UAC_MASK = SMASK << UAC_SHIFT;
private static final int UTC_MASK = SMASK << UTC_SHIFT;
private static final int UAC_UNIT = 1 << UAC_SHIFT;
private static final int UTC_UNIT = 1 << UTC_SHIFT;
// masks and units for dealing with e = (int)ctl
private static final int E_MASK = 0x7fffffff; // no STOP_BIT
private static final int E_SEQ = 1 << EC_SHIFT;
// plock bits
private static final int SHUTDOWN = 1 << 31;
private static final int PL_LOCK = 2;
private static final int PL_SIGNAL = 1;
private static final int PL_SPINS = 1 << 8;
// access mode for WorkQueue
static final int LIFO_QUEUE = 0;
static final int FIFO_QUEUE = 1;
static final int SHARED_QUEUE = -1;
// bounds for #steps in scan loop -- must be power 2 minus 1
private static final int MIN_SCAN = 0x1ff; // cover estimation slop
private static final int MAX_SCAN = 0x1ffff; // 4 * max workers
// Instance fields
/*
* Field layout of this class tends to matter more than one would
* like. Runtime layout order is only loosely related to
* declaration order and may differ across JVMs, but the following
* empirically works OK on current JVMs.
*/
// Heuristic padding to ameliorate unfortunate memory placements
volatile long pad00, pad01, pad02, pad03, pad04, pad05, pad06;
volatile long stealCount; // collects worker counts
volatile long ctl; // main pool control
volatile int plock; // shutdown status and seqLock
volatile int indexSeed; // worker/submitter index seed
final int config; // mode and parallelism level
WorkQueue[] workQueues; // main registry
final ForkJoinWorkerThreadFactory factory;
final Thread.UncaughtExceptionHandler ueh; // per-worker UEH
final String workerNamePrefix; // to create worker name string
volatile Object pad10, pad11, pad12, pad13, pad14, pad15, pad16, pad17;
volatile Object pad18, pad19, pad1a, pad1b;
/**
* Acquires the plock lock to protect worker array and related
* updates. This method is called only if an initial CAS on plock
* fails. This acts as a spinlock for normal cases, but falls back
* to builtin monitor to block when (rarely) needed. This would be
* a terrible idea for a highly contended lock, but works fine as
* a more conservative alternative to a pure spinlock.
*/
private int acquirePlock() {
int spins = PL_SPINS, r = 0, ps, nps;
for (;;) {
if (((ps = plock) & PL_LOCK) == 0 &&
U.compareAndSwapInt(this, PLOCK, ps, nps = ps + PL_LOCK))
return nps;
else if (r == 0) { // randomize spins if possible
Thread t = Thread.currentThread(); WorkQueue w; Submitter z;
if ((t instanceof ForkJoinWorkerThread) &&
(w = ((ForkJoinWorkerThread)t).workQueue) != null)
r = w.seed;
else if ((z = submitters.get()) != null)
r = z.seed;
else
r = 1;
}
else if (spins >= 0) {
r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift
if (r >= 0)
--spins;
}
else if (U.compareAndSwapInt(this, PLOCK, ps, ps | PL_SIGNAL)) {
synchronized (this) {
if ((plock & PL_SIGNAL) != 0) {
try {
wait();
} catch (InterruptedException ie) {
try {
Thread.currentThread().interrupt();
} catch (SecurityException ignore) {
}
}
}
else
notifyAll();
}
}
}
}
/**
* Unlocks and signals any thread waiting for plock. Called only
* when CAS of seq value for unlock fails.
*/
private void releasePlock(int ps) {
plock = ps;
synchronized (this) { notifyAll(); }
}
/**
* Tries to create and start one worker if fewer than target
* parallelism level exist. Adjusts counts etc on failure.
*/
private void tryAddWorker() {
long c; int u;
while ((u = (int)((c = ctl) >>> 32)) < 0 &&
(u & SHORT_SIGN) != 0 && (int)c == 0) {
long nc = (long)(((u + UTC_UNIT) & UTC_MASK) |
((u + UAC_UNIT) & UAC_MASK)) << 32;
if (U.compareAndSwapLong(this, CTL, c, nc)) {
ForkJoinWorkerThreadFactory fac;
Throwable ex = null;
ForkJoinWorkerThread wt = null;
try {
if ((fac = factory) != null &&
(wt = fac.newThread(this)) != null) {
wt.start();
break;
}
} catch (Throwable e) {
ex = e;
}
deregisterWorker(wt, ex);
break;
}
}
}
// Registering and deregistering workers
/**
* Callback from ForkJoinWorkerThread to establish and record its
* WorkQueue. To avoid scanning bias due to packing entries in
* front of the workQueues array, we treat the array as a simple
* power-of-two hash table using per-thread seed as hash,
* expanding as needed.
*
* @param wt the worker thread
* @return the worker's queue
*/
final WorkQueue registerWorker(ForkJoinWorkerThread wt) {
Thread.UncaughtExceptionHandler handler; WorkQueue[] ws; int s, ps;
wt.setDaemon(true);
if ((handler = ueh) != null)
wt.setUncaughtExceptionHandler(handler);
do {} while (!U.compareAndSwapInt(this, INDEXSEED, s = indexSeed,
s += SEED_INCREMENT) ||
s == 0); // skip 0
WorkQueue w = new WorkQueue(this, wt, config >>> 16, s);
if (((ps = plock) & PL_LOCK) != 0 ||
!U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
ps = acquirePlock();
int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
try {
if ((ws = workQueues) != null) { // skip if shutting down
int n = ws.length, m = n - 1;
int r = (s << 1) | 1; // use odd-numbered indices
if (ws[r &= m] != null) { // collision
int probes = 0; // step by approx half size
int step = (n <= 4) ? 2 : ((n >>> 1) & EVENMASK) + 2;
while (ws[r = (r + step) & m] != null) {
if (++probes >= n) {
workQueues = ws = Arrays.copyOf(ws, n <<= 1);
m = n - 1;
probes = 0;
}
}
}
w.eventCount = w.poolIndex = r; // volatile write orders
ws[r] = w;
}
} finally {
if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
releasePlock(nps);
}
wt.setName(workerNamePrefix.concat(Integer.toString(w.poolIndex)));
return w;
}
/**
* Final callback from terminating worker, as well as upon failure
* to construct or start a worker. Removes record of worker from
* array, and adjusts counts. If pool is shutting down, tries to
* complete termination.
*
* @param wt the worker thread or null if construction failed
* @param ex the exception causing failure, or null if none
*/
final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) {
WorkQueue w = null;
if (wt != null && (w = wt.workQueue) != null) {
int ps;
w.qlock = -1; // ensure set
long ns = w.nsteals, sc; // collect steal count
do {} while (!U.compareAndSwapLong(this, STEALCOUNT,
sc = stealCount, sc + ns));
if (((ps = plock) & PL_LOCK) != 0 ||
!U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
ps = acquirePlock();
int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
try {
int idx = w.poolIndex;
WorkQueue[] ws = workQueues;
if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w)
ws[idx] = null;
} finally {
if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
releasePlock(nps);
}
}
long c; // adjust ctl counts
do {} while (!U.compareAndSwapLong
(this, CTL, c = ctl, (((c - AC_UNIT) & AC_MASK) |
((c - TC_UNIT) & TC_MASK) |
(c & ~(AC_MASK|TC_MASK)))));
if (!tryTerminate(false, false) && w != null && w.array != null) {
w.cancelAll(); // cancel remaining tasks
WorkQueue[] ws; WorkQueue v; Thread p; int u, i, e;
while ((u = (int)((c = ctl) >>> 32)) < 0 && (e = (int)c) >= 0) {
if (e > 0) { // activate or create replacement
if ((ws = workQueues) == null ||
(i = e & SMASK) >= ws.length ||
(v = ws[i]) == null)
break;
long nc = (((long)(v.nextWait & E_MASK)) |
((long)(u + UAC_UNIT) << 32));
if (v.eventCount != (e | INT_SIGN))
break;
if (U.compareAndSwapLong(this, CTL, c, nc)) {
v.eventCount = (e + E_SEQ) & E_MASK;
if ((p = v.parker) != null)
U.unpark(p);
break;
}
}
else {
if ((short)u < 0)
tryAddWorker();
break;
}
}
}
if (ex == null) // help clean refs on way out
ForkJoinTask.helpExpungeStaleExceptions();
else // rethrow
ForkJoinTask.rethrow(ex);
}
// Submissions
/**
* Unless shutting down, adds the given task to a submission queue
* at submitter's current queue index (modulo submission
* range). Only the most common path is directly handled in this
* method. All others are relayed to fullExternalPush.
*
* @param task the task. Caller must ensure non-null.
*/
final void externalPush(ForkJoinTask<?> task) {
WorkQueue[] ws; WorkQueue q; Submitter z; int m; ForkJoinTask<?>[] a;
if ((z = submitters.get()) != null && plock > 0 &&
(ws = workQueues) != null && (m = (ws.length - 1)) >= 0 &&
(q = ws[m & z.seed & SQMASK]) != null &&
U.compareAndSwapInt(q, QLOCK, 0, 1)) { // lock
int b = q.base, s = q.top, n, an;
if ((a = q.array) != null && (an = a.length) > (n = s + 1 - b)) {
int j = (((an - 1) & s) << ASHIFT) + ABASE;
U.putOrderedObject(a, j, task);
q.top = s + 1; // push on to deque
q.qlock = 0;
if (n <= 2)
signalWork(q);
return;
}
q.qlock = 0;
}
fullExternalPush(task);
}
/**
* Full version of externalPush. This method is called, among
* other times, upon the first submission of the first task to the
* pool, so must perform secondary initialization. It also
* detects first submission by an external thread by looking up
     * its ThreadLocal, and creates a new shared queue if the one at
     * index is empty or contended. The plock lock body must be
* exception-free (so no try/finally) so we optimistically
* allocate new queues outside the lock and throw them away if
* (very rarely) not needed.
*
* Secondary initialization occurs when plock is zero, to create
* workQueue array and set plock to a valid value. This lock body
* must also be exception-free. Because the plock seq value can
* eventually wrap around zero, this method harmlessly fails to
* reinitialize if workQueues exists, while still advancing plock.
*/
private void fullExternalPush(ForkJoinTask<?> task) {
int r = 0; // random index seed
for (Submitter z = submitters.get();;) {
WorkQueue[] ws; WorkQueue q; int ps, m, k;
if (z == null) {
if (U.compareAndSwapInt(this, INDEXSEED, r = indexSeed,
r += SEED_INCREMENT) && r != 0)
submitters.set(z = new Submitter(r));
}
else if (r == 0) { // move to a different index
r = z.seed;
r ^= r << 13; // same xorshift as WorkQueues
r ^= r >>> 17;
z.seed = r ^ (r << 5);
}
else if ((ps = plock) < 0)
throw new RejectedExecutionException();
else if (ps == 0 || (ws = workQueues) == null ||
(m = ws.length - 1) < 0) { // initialize workQueues
int p = config & SMASK; // find power of two table size
int n = (p > 1) ? p - 1 : 1; // ensure at least 2 slots
n |= n >>> 1; n |= n >>> 2; n |= n >>> 4;
n |= n >>> 8; n |= n >>> 16; n = (n + 1) << 1;
WorkQueue[] nws = ((ws = workQueues) == null || ws.length == 0 ?
new WorkQueue[n] : null);
if (((ps = plock) & PL_LOCK) != 0 ||
!U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
ps = acquirePlock();
if (((ws = workQueues) == null || ws.length == 0) && nws != null)
workQueues = nws;
int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
releasePlock(nps);
}
else if ((q = ws[k = r & m & SQMASK]) != null) {
if (q.qlock == 0 && U.compareAndSwapInt(q, QLOCK, 0, 1)) {
ForkJoinTask<?>[] a = q.array;
int s = q.top;
boolean submitted = false;
try { // locked version of push
if ((a != null && a.length > s + 1 - q.base) ||
(a = q.growArray()) != null) { // must presize
int j = (((a.length - 1) & s) << ASHIFT) + ABASE;
U.putOrderedObject(a, j, task);
q.top = s + 1;
submitted = true;
}
} finally {
q.qlock = 0; // unlock
}
if (submitted) {
signalWork(q);
return;
}
}
r = 0; // move on failure
}
else if (((ps = plock) & PL_LOCK) == 0) { // create new queue
q = new WorkQueue(this, null, SHARED_QUEUE, r);
if (((ps = plock) & PL_LOCK) != 0 ||
!U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
ps = acquirePlock();
if ((ws = workQueues) != null && k < ws.length && ws[k] == null)
ws[k] = q;
int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
releasePlock(nps);
}
else
r = 0; // try elsewhere while lock held
}
}
// Maintaining ctl counts
/**
* Increments active count; mainly called upon return from blocking.
*/
final void incrementActiveCount() {
long c;
do {} while (!U.compareAndSwapLong(this, CTL, c = ctl, c + AC_UNIT));
}
/**
* Tries to create or activate a worker if too few are active.
*
* @param q the (non-null) queue holding tasks to be signalled
*/
final void signalWork(WorkQueue q) {
int hint = q.poolIndex;
long c; int e, u, i, n; WorkQueue[] ws; WorkQueue w; Thread p;
while ((u = (int)((c = ctl) >>> 32)) < 0) {
if ((e = (int)c) > 0) {
if ((ws = workQueues) != null && ws.length > (i = e & SMASK) &&
(w = ws[i]) != null && w.eventCount == (e | INT_SIGN)) {
long nc = (((long)(w.nextWait & E_MASK)) |
((long)(u + UAC_UNIT) << 32));
if (U.compareAndSwapLong(this, CTL, c, nc)) {
w.hint = hint;
w.eventCount = (e + E_SEQ) & E_MASK;
if ((p = w.parker) != null)
U.unpark(p);
break;
}
if (q.top - q.base <= 0)
break;
}
else
break;
}
else {
if ((short)u < 0)
tryAddWorker();
break;
}
}
}
// Scanning for tasks
/**
* Top-level runloop for workers, called by ForkJoinWorkerThread.run.
*/
final void runWorker(WorkQueue w) {
w.growArray(); // allocate queue
do { w.runTask(scan(w)); } while (w.qlock >= 0);
}
/**
* Scans for and, if found, returns one task, else possibly
* inactivates the worker. This method operates on single reads of
* volatile state and is designed to be re-invoked continuously,
* in part because it returns upon detecting inconsistencies,
* contention, or state changes that indicate possible success on
* re-invocation.
*
* The scan searches for tasks across queues (starting at a random
* index, and relying on registerWorker to irregularly scatter
* them within array to avoid bias), checking each at least twice.
* The scan terminates upon either finding a non-empty queue, or
* completing the sweep. If the worker is not inactivated, it
* takes and returns a task from this queue. Otherwise, if not
* activated, it signals workers (that may include itself) and
     * returns so caller can retry. It also returns if the worker
     * array may have changed during an empty scan. On failure
* to find a task, we take one of the following actions, after
* which the caller will retry calling this method unless
* terminated.
*
* * If pool is terminating, terminate the worker.
*
* * If not already enqueued, try to inactivate and enqueue the
* worker on wait queue. Or, if inactivating has caused the pool
* to be quiescent, relay to idleAwaitWork to possibly shrink
* pool.
*
* * If already enqueued and none of the above apply, possibly
* park awaiting signal, else lingering to help scan and signal.
*
* * If a non-empty queue discovered or left as a hint,
* help wake up other workers before return.
*
* @param w the worker (via its WorkQueue)
* @return a task or null if none found
*/
private final ForkJoinTask<?> scan(WorkQueue w) {
WorkQueue[] ws; int m;
int ps = plock; // read plock before ws
if (w != null && (ws = workQueues) != null && (m = ws.length - 1) >= 0) {
int ec = w.eventCount; // ec is negative if inactive
int r = w.seed; r ^= r << 13; r ^= r >>> 17; w.seed = r ^= r << 5;
w.hint = -1; // update seed and clear hint
int j = ((m + m + 1) | MIN_SCAN) & MAX_SCAN;
do {
WorkQueue q; ForkJoinTask<?>[] a; int b;
if ((q = ws[(r + j) & m]) != null && (b = q.base) - q.top < 0 &&
(a = q.array) != null) { // probably nonempty
int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
ForkJoinTask<?> t = (ForkJoinTask<?>)
U.getObjectVolatile(a, i);
if (q.base == b && ec >= 0 && t != null &&
U.compareAndSwapObject(a, i, t, null)) {
if ((q.base = b + 1) - q.top < 0)
signalWork(q);
return t; // taken
}
else if ((ec < 0 || j < m) && (int)(ctl >> AC_SHIFT) <= 0) {
w.hint = (r + j) & m; // help signal below
break; // cannot take
}
}
} while (--j >= 0);
int h, e, ns; long c, sc; WorkQueue q;
if ((ns = w.nsteals) != 0) {
if (U.compareAndSwapLong(this, STEALCOUNT,
sc = stealCount, sc + ns))
w.nsteals = 0; // collect steals and rescan
}
else if (plock != ps) // consistency check
; // skip
else if ((e = (int)(c = ctl)) < 0)
w.qlock = -1; // pool is terminating
else {
if ((h = w.hint) < 0) {
if (ec >= 0) { // try to enqueue/inactivate
long nc = (((long)ec |
((c - AC_UNIT) & (AC_MASK|TC_MASK))));
w.nextWait = e; // link and mark inactive
w.eventCount = ec | INT_SIGN;
if (ctl != c || !U.compareAndSwapLong(this, CTL, c, nc))
w.eventCount = ec; // unmark on CAS failure
else if ((int)(c >> AC_SHIFT) == 1 - (config & SMASK))
idleAwaitWork(w, nc, c);
}
else if (w.eventCount < 0 && ctl == c) {
Thread wt = Thread.currentThread();
Thread.interrupted(); // clear status
U.putObject(wt, PARKBLOCKER, this);
w.parker = wt; // emulate LockSupport.park
if (w.eventCount < 0) // recheck
U.park(false, 0L); // block
w.parker = null;
U.putObject(wt, PARKBLOCKER, null);
}
}
if ((h >= 0 || (h = w.hint) >= 0) &&
(ws = workQueues) != null && h < ws.length &&
(q = ws[h]) != null) { // signal others before retry
WorkQueue v; Thread p; int u, i, s;
for (int n = (config & SMASK) - 1;;) {
int idleCount = (w.eventCount < 0) ? 0 : -1;
if (((s = idleCount - q.base + q.top) <= n &&
(n = s) <= 0) ||
(u = (int)((c = ctl) >>> 32)) >= 0 ||
(e = (int)c) <= 0 || m < (i = e & SMASK) ||
(v = ws[i]) == null)
break;
long nc = (((long)(v.nextWait & E_MASK)) |
((long)(u + UAC_UNIT) << 32));
if (v.eventCount != (e | INT_SIGN) ||
!U.compareAndSwapLong(this, CTL, c, nc))
break;
v.hint = h;
v.eventCount = (e + E_SEQ) & E_MASK;
if ((p = v.parker) != null)
U.unpark(p);
if (--n <= 0)
break;
}
}
}
}
return null;
}
/**
* If inactivating worker w has caused the pool to become
* quiescent, checks for pool termination, and, so long as this is
* not the only worker, waits for event for up to a given
* duration. On timeout, if ctl has not changed, terminates the
* worker, which will in turn wake up another worker to possibly
* repeat this process.
*
* @param w the calling worker
* @param currentCtl the ctl value triggering possible quiescence
* @param prevCtl the ctl value to restore if thread is terminated
*/
private void idleAwaitWork(WorkQueue w, long currentCtl, long prevCtl) {
if (w != null && w.eventCount < 0 &&
!tryTerminate(false, false) && (int)prevCtl != 0 &&
ctl == currentCtl) {
int dc = -(short)(currentCtl >>> TC_SHIFT);
long parkTime = dc < 0 ? FAST_IDLE_TIMEOUT: (dc + 1) * IDLE_TIMEOUT;
long deadline = System.nanoTime() + parkTime - TIMEOUT_SLOP;
Thread wt = Thread.currentThread();
while (ctl == currentCtl) {
Thread.interrupted(); // timed variant of version in scan()
U.putObject(wt, PARKBLOCKER, this);
w.parker = wt;
if (ctl == currentCtl)
U.park(false, parkTime);
w.parker = null;
U.putObject(wt, PARKBLOCKER, null);
if (ctl != currentCtl)
break;
if (deadline - System.nanoTime() <= 0L &&
U.compareAndSwapLong(this, CTL, currentCtl, prevCtl)) {
w.eventCount = (w.eventCount + E_SEQ) | E_MASK;
w.hint = -1;
w.qlock = -1; // shrink
break;
}
}
}
}
/**
* Scans through queues looking for work while joining a task; if
* any present, signals. May return early if more signalling is
* detectably unneeded.
*
* @param task return early if done
* @param origin an index to start scan
*/
private void helpSignal(ForkJoinTask<?> task, int origin) {
WorkQueue[] ws; WorkQueue w; Thread p; long c; int m, u, e, i, s;
if (task != null && task.status >= 0 &&
(u = (int)(ctl >>> 32)) < 0 && (u >> UAC_SHIFT) < 0 &&
(ws = workQueues) != null && (m = ws.length - 1) >= 0) {
outer: for (int k = origin, j = m; j >= 0; --j) {
WorkQueue q = ws[k++ & m];
for (int n = m;;) { // limit to at most m signals
if (task.status < 0)
break outer;
if (q == null ||
((s = -q.base + q.top) <= n && (n = s) <= 0))
break;
if ((u = (int)((c = ctl) >>> 32)) >= 0 ||
(e = (int)c) <= 0 || m < (i = e & SMASK) ||
(w = ws[i]) == null)
break outer;
long nc = (((long)(w.nextWait & E_MASK)) |
((long)(u + UAC_UNIT) << 32));
if (w.eventCount != (e | INT_SIGN))
break outer;
if (U.compareAndSwapLong(this, CTL, c, nc)) {
w.eventCount = (e + E_SEQ) & E_MASK;
if ((p = w.parker) != null)
U.unpark(p);
if (--n <= 0)
break;
}
}
}
}
}
/**
* Tries to locate and execute tasks for a stealer of the given
     * task, or in turn one of its stealers. Traces currentSteal ->
* currentJoin links looking for a thread working on a descendant
* of the given task and with a non-empty queue to steal back and
* execute tasks from. The first call to this method upon a
* waiting join will often entail scanning/search, (which is OK
* because the joiner has nothing better to do), but this method
* leaves hints in workers to speed up subsequent calls. The
* implementation is very branchy to cope with potential
* inconsistencies or loops encountering chains that are stale,
* unknown, or so long that they are likely cyclic.
*
* @param joiner the joining worker
* @param task the task to join
* @return 0 if no progress can be made, negative if task
* known complete, else positive
*/
private int tryHelpStealer(WorkQueue joiner, ForkJoinTask<?> task) {
int stat = 0, steps = 0; // bound to avoid cycles
if (joiner != null && task != null) { // hoist null checks
restart: for (;;) {
ForkJoinTask<?> subtask = task; // current target
for (WorkQueue j = joiner, v;;) { // v is stealer of subtask
WorkQueue[] ws; int m, s, h;
if ((s = task.status) < 0) {
stat = s;
break restart;
}
if ((ws = workQueues) == null || (m = ws.length - 1) <= 0)
break restart; // shutting down
if ((v = ws[h = (j.hint | 1) & m]) == null ||
v.currentSteal != subtask) {
for (int origin = h;;) { // find stealer
if (((h = (h + 2) & m) & 15) == 1 &&
(subtask.status < 0 || j.currentJoin != subtask))
continue restart; // occasional staleness check
if ((v = ws[h]) != null &&
v.currentSteal == subtask) {
j.hint = h; // save hint
break;
}
if (h == origin)
break restart; // cannot find stealer
}
}
for (;;) { // help stealer or descend to its stealer
                        ForkJoinTask<?>[] a; int b;
if (subtask.status < 0) // surround probes with
continue restart; // consistency checks
if ((b = v.base) - v.top < 0 && (a = v.array) != null) {
int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
ForkJoinTask<?> t =
(ForkJoinTask<?>)U.getObjectVolatile(a, i);
if (subtask.status < 0 || j.currentJoin != subtask ||
v.currentSteal != subtask)
continue restart; // stale
stat = 1; // apparent progress
if (t != null && v.base == b &&
U.compareAndSwapObject(a, i, t, null)) {
v.base = b + 1; // help stealer
joiner.runSubtask(t);
}
else if (v.base == b && ++steps == MAX_HELP)
break restart; // v apparently stalled
}
else { // empty -- try to descend
ForkJoinTask<?> next = v.currentJoin;
if (subtask.status < 0 || j.currentJoin != subtask ||
v.currentSteal != subtask)
continue restart; // stale
else if (next == null || ++steps == MAX_HELP)
break restart; // dead-end or maybe cyclic
else {
subtask = next;
j = v;
break;
}
}
}
}
}
}
return stat;
}
/**
* Analog of tryHelpStealer for CountedCompleters. Tries to steal
* and run tasks within the target's computation.
*
* @param task the task to join
* @param mode if shared, exit upon completing any task
* if all workers are active
*/
private int helpComplete(ForkJoinTask<?> task, int mode) {
WorkQueue[] ws; WorkQueue q; int m, n, s, u;
if (task != null && (ws = workQueues) != null &&
(m = ws.length - 1) >= 0) {
for (int j = 1, origin = j;;) {
if ((s = task.status) < 0)
return s;
if ((q = ws[j & m]) != null && q.pollAndExecCC(task)) {
origin = j;
if (mode == SHARED_QUEUE &&
((u = (int)(ctl >>> 32)) >= 0 || (u >> UAC_SHIFT) >= 0))
break;
}
else if ((j = (j + 2) & m) == origin)
break;
}
}
return 0;
}
/**
* Tries to decrement active count (sometimes implicitly) and
* possibly release or create a compensating worker in preparation
* for blocking. Fails on contention or termination. Otherwise,
* adds a new thread if no idle workers are available and pool
* may become starved.
*/
final boolean tryCompensate() {
int pc = config & SMASK, e, i, tc; long c;
WorkQueue[] ws; WorkQueue w; Thread p;
if ((ws = workQueues) != null && (e = (int)(c = ctl)) >= 0) {
if (e != 0 && (i = e & SMASK) < ws.length &&
(w = ws[i]) != null && w.eventCount == (e | INT_SIGN)) {
long nc = ((long)(w.nextWait & E_MASK) |
(c & (AC_MASK|TC_MASK)));
if (U.compareAndSwapLong(this, CTL, c, nc)) {
w.eventCount = (e + E_SEQ) & E_MASK;
if ((p = w.parker) != null)
U.unpark(p);
return true; // replace with idle worker
}
}
else if ((tc = (short)(c >>> TC_SHIFT)) >= 0 &&
(int)(c >> AC_SHIFT) + pc > 1) {
long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK);
if (U.compareAndSwapLong(this, CTL, c, nc))
return true; // no compensation
}
else if (tc + pc < MAX_CAP) {
long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK);
if (U.compareAndSwapLong(this, CTL, c, nc)) {
ForkJoinWorkerThreadFactory fac;
Throwable ex = null;
ForkJoinWorkerThread wt = null;
try {
if ((fac = factory) != null &&
(wt = fac.newThread(this)) != null) {
wt.start();
return true;
}
} catch (Throwable rex) {
ex = rex;
}
deregisterWorker(wt, ex); // clean up and return false
}
}
}
return false;
}
/**
* Helps and/or blocks until the given task is done.
*
* @param joiner the joining worker
* @param task the task
* @return task status on exit
*/
final int awaitJoin(WorkQueue joiner, ForkJoinTask<?> task) {
int s = 0;
if (joiner != null && task != null && (s = task.status) >= 0) {
ForkJoinTask<?> prevJoin = joiner.currentJoin;
joiner.currentJoin = task;
do {} while ((s = task.status) >= 0 && !joiner.isEmpty() &&
joiner.tryRemoveAndExec(task)); // process local tasks
if (s >= 0 && (s = task.status) >= 0) {
helpSignal(task, joiner.poolIndex);
if ((s = task.status) >= 0 &&
(task instanceof CountedCompleter))
s = helpComplete(task, LIFO_QUEUE);
}
while (s >= 0 && (s = task.status) >= 0) {
if ((!joiner.isEmpty() || // try helping
(s = tryHelpStealer(joiner, task)) == 0) &&
(s = task.status) >= 0) {
helpSignal(task, joiner.poolIndex);
if ((s = task.status) >= 0 && tryCompensate()) {
if (task.trySetSignal() && (s = task.status) >= 0) {
synchronized (task) {
if (task.status >= 0) {
try { // see ForkJoinTask
task.wait(); // for explanation
} catch (InterruptedException ie) {
}
}
else
task.notifyAll();
}
}
long c; // re-activate
do {} while (!U.compareAndSwapLong
(this, CTL, c = ctl, c + AC_UNIT));
}
}
}
joiner.currentJoin = prevJoin;
}
return s;
}
/**
* Stripped-down variant of awaitJoin used by timed joins. Tries
* to help join only while there is continuous progress. (Caller
* will then enter a timed wait.)
*
* @param joiner the joining worker
* @param task the task
*/
final void helpJoinOnce(WorkQueue joiner, ForkJoinTask<?> task) {
int s;
if (joiner != null && task != null && (s = task.status) >= 0) {
ForkJoinTask<?> prevJoin = joiner.currentJoin;
joiner.currentJoin = task;
do {} while ((s = task.status) >= 0 && !joiner.isEmpty() &&
joiner.tryRemoveAndExec(task));
if (s >= 0 && (s = task.status) >= 0) {
helpSignal(task, joiner.poolIndex);
if ((s = task.status) >= 0 &&
(task instanceof CountedCompleter))
s = helpComplete(task, LIFO_QUEUE);
}
if (s >= 0 && joiner.isEmpty()) {
do {} while (task.status >= 0 &&
tryHelpStealer(joiner, task) > 0);
}
joiner.currentJoin = prevJoin;
}
}
/**
* Returns a (probably) non-empty steal queue, if one is found
* during a scan, else null. This method must be retried by
* caller if, by the time it tries to use the queue, it is empty.
* @param r a (random) seed for scanning
*/
private WorkQueue findNonEmptyStealQueue(int r) {
for (;;) {
int ps = plock, m; WorkQueue[] ws; WorkQueue q;
if ((ws = workQueues) != null && (m = ws.length - 1) >= 0) {
for (int j = (m + 1) << 2; j >= 0; --j) {
if ((q = ws[(((r + j) << 1) | 1) & m]) != null &&
q.base - q.top < 0)
return q;
}
}
if (plock == ps)
return null;
}
}
/**
* Runs tasks until {@code isQuiescent()}. We piggyback on
* active count ctl maintenance, but rather than blocking
* when tasks cannot be found, we rescan until all others cannot
* find tasks either.
*/
final void helpQuiescePool(WorkQueue w) {
for (boolean active = true;;) {
long c; WorkQueue q; ForkJoinTask<?> t; int b;
while ((t = w.nextLocalTask()) != null) {
if (w.base - w.top < 0)
signalWork(w);
t.doExec();
}
if ((q = findNonEmptyStealQueue(w.nextSeed())) != null) {
if (!active) { // re-establish active count
active = true;
do {} while (!U.compareAndSwapLong
(this, CTL, c = ctl, c + AC_UNIT));
}
if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null) {
if (q.base - q.top < 0)
signalWork(q);
w.runSubtask(t);
}
}
else if (active) { // decrement active count without queuing
long nc = (c = ctl) - AC_UNIT;
if ((int)(nc >> AC_SHIFT) + (config & SMASK) == 0)
return; // bypass decrement-then-increment
if (U.compareAndSwapLong(this, CTL, c, nc))
active = false;
}
else if ((int)((c = ctl) >> AC_SHIFT) + (config & SMASK) == 0 &&
U.compareAndSwapLong(this, CTL, c, c + AC_UNIT))
return;
}
}
/**
* Gets and removes a local or stolen task for the given worker.
*
* @return a task, if available
*/
final ForkJoinTask<?> nextTaskFor(WorkQueue w) {
for (ForkJoinTask<?> t;;) {
WorkQueue q; int b;
if ((t = w.nextLocalTask()) != null)
return t;
if ((q = findNonEmptyStealQueue(w.nextSeed())) == null)
return null;
if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null) {
if (q.base - q.top < 0)
signalWork(q);
return t;
}
}
}
/**
* Returns a cheap heuristic guide for task partitioning when
* programmers, frameworks, tools, or languages have little or no
* idea about task granularity. In essence by offering this
* method, we ask users only about tradeoffs in overhead vs
* expected throughput and its variance, rather than how finely to
* partition tasks.
*
* In a steady state strict (tree-structured) computation, each
* thread makes available for stealing enough tasks for other
* threads to remain active. Inductively, if all threads play by
* the same rules, each thread should make available only a
* constant number of tasks.
*
* The minimum useful constant is just 1. But using a value of 1
* would require immediate replenishment upon each steal to
* maintain enough tasks, which is infeasible. Further,
* partitionings/granularities of offered tasks should minimize
* steal rates, which in general means that threads nearer the top
* of computation tree should generate more than those nearer the
* bottom. In perfect steady state, each thread is at
* approximately the same level of computation tree. However,
* producing extra tasks amortizes the uncertainty of progress and
* diffusion assumptions.
*
* So, users will want to use values larger (but not much larger)
* than 1 to both smooth over transient shortages and hedge
* against uneven progress; as traded off against the cost of
* extra task overhead. We leave the user to pick a threshold
* value to compare with the results of this call to guide
* decisions, but recommend values such as 3.
*
* When all threads are active, it is on average OK to estimate
* surplus strictly locally. In steady-state, if one thread is
* maintaining say 2 surplus tasks, then so are others. So we can
* just use estimated queue length. However, this strategy alone
* leads to serious mis-estimates in some non-steady-state
* conditions (ramp-up, ramp-down, other stalls). We can detect
* many of these by further considering the number of "idle"
* threads, that are known to have zero queued tasks, so
* compensate by a factor of (#idle/#active) threads.
*
* Note: The approximation of #busy workers as #active workers is
* not very good under current signalling scheme, and should be
* improved.
*/
static int getSurplusQueuedTaskCount() {
Thread t; ForkJoinWorkerThread wt; ForkJoinPool pool; WorkQueue q;
if (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)) {
int p = (pool = (wt = (ForkJoinWorkerThread)t).pool).config & SMASK;
int n = (q = wt.workQueue).top - q.base;
int a = (int)(pool.ctl >> AC_SHIFT) + p;
return n - (a > (p >>>= 1) ? 0 :
a > (p >>>= 1) ? 1 :
a > (p >>>= 1) ? 2 :
a > (p >>>= 1) ? 4 :
8);
}
return 0;
}
// Termination
/**
* Possibly initiates and/or completes termination. The caller
* triggering termination runs three passes through workQueues:
* (0) Setting termination status, followed by wakeups of queued
* workers; (1) cancelling all tasks; (2) interrupting lagging
* threads (likely in external tasks, but possibly also blocked in
* joins). Each pass repeats previous steps because of potential
* lagging thread creation.
*
* @param now if true, unconditionally terminate, else only
* if no work and no active workers
* @param enable if true, enable shutdown when next possible
* @return true if now terminating or terminated
*/
private boolean tryTerminate(boolean now, boolean enable) {
int ps;
if (this == common) // cannot shut down
return false;
if ((ps = plock) >= 0) { // enable by setting plock
if (!enable)
return false;
if ((ps & PL_LOCK) != 0 ||
!U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
ps = acquirePlock();
int nps = ((ps + PL_LOCK) & ~SHUTDOWN) | SHUTDOWN;
if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
releasePlock(nps);
}
for (long c;;) {
if (((c = ctl) & STOP_BIT) != 0) { // already terminating
if ((short)(c >>> TC_SHIFT) == -(config & SMASK)) {
synchronized (this) {
notifyAll(); // signal when 0 workers
}
}
return true;
}
if (!now) { // check if idle & no tasks
WorkQueue[] ws; WorkQueue w;
if ((int)(c >> AC_SHIFT) != -(config & SMASK))
return false;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; ++i) {
if ((w = ws[i]) != null) {
if (!w.isEmpty()) { // signal unprocessed tasks
signalWork(w);
return false;
}
if ((i & 1) != 0 && w.eventCount >= 0)
return false; // unqueued inactive worker
}
}
}
}
if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) {
for (int pass = 0; pass < 3; ++pass) {
WorkQueue[] ws; WorkQueue w; Thread wt;
if ((ws = workQueues) != null) {
int n = ws.length;
for (int i = 0; i < n; ++i) {
if ((w = ws[i]) != null) {
w.qlock = -1;
if (pass > 0) {
w.cancelAll();
if (pass > 1 && (wt = w.owner) != null) {
if (!wt.isInterrupted()) {
try {
wt.interrupt();
} catch (Throwable ignore) {
}
}
U.unpark(wt);
}
}
}
}
// Wake up workers parked on event queue
int i, e; long cc; Thread p;
while ((e = (int)(cc = ctl) & E_MASK) != 0 &&
(i = e & SMASK) < n && i >= 0 &&
(w = ws[i]) != null) {
long nc = ((long)(w.nextWait & E_MASK) |
((cc + AC_UNIT) & AC_MASK) |
(cc & (TC_MASK|STOP_BIT)));
if (w.eventCount == (e | INT_SIGN) &&
U.compareAndSwapLong(this, CTL, cc, nc)) {
w.eventCount = (e + E_SEQ) & E_MASK;
w.qlock = -1;
if ((p = w.parker) != null)
U.unpark(p);
}
}
}
}
}
}
}
// external operations on common pool
/**
* Returns common pool queue for a thread that has submitted at
* least one task.
*/
static WorkQueue commonSubmitterQueue() {
ForkJoinPool p; WorkQueue[] ws; int m; Submitter z;
return ((z = submitters.get()) != null &&
(p = common) != null &&
(ws = p.workQueues) != null &&
(m = ws.length - 1) >= 0) ?
ws[m & z.seed & SQMASK] : null;
}
/**
* Tries to pop the given task from submitter's queue in common pool.
*/
static boolean tryExternalUnpush(ForkJoinTask<?> t) {
ForkJoinPool p; WorkQueue[] ws; WorkQueue q; Submitter z;
ForkJoinTask<?>[] a; int m, s;
if (t != null &&
(z = submitters.get()) != null &&
(p = common) != null &&
(ws = p.workQueues) != null &&
(m = ws.length - 1) >= 0 &&
(q = ws[m & z.seed & SQMASK]) != null &&
(s = q.top) != q.base &&
(a = q.array) != null) {
long j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE;
if (U.getObject(a, j) == t &&
U.compareAndSwapInt(q, QLOCK, 0, 1)) {
if (q.array == a && q.top == s && // recheck
U.compareAndSwapObject(a, j, t, null)) {
q.top = s - 1;
q.qlock = 0;
return true;
}
q.qlock = 0;
}
}
return false;
}
/**
* Tries to pop and run local tasks within the same computation
* as the given root. On failure, tries to help complete from
* other queues via helpComplete.
*/
private void externalHelpComplete(WorkQueue q, ForkJoinTask<?> root) {
ForkJoinTask<?>[] a; int m;
if (q != null && (a = q.array) != null && (m = (a.length - 1)) >= 0 &&
root != null && root.status >= 0) {
for (;;) {
int s, u; Object o; CountedCompleter<?> task = null;
if ((s = q.top) - q.base > 0) {
long j = ((m & (s - 1)) << ASHIFT) + ABASE;
if ((o = U.getObject(a, j)) != null &&
(o instanceof CountedCompleter)) {
CountedCompleter<?> t = (CountedCompleter<?>)o, r = t;
do {
if (r == root) {
if (U.compareAndSwapInt(q, QLOCK, 0, 1)) {
if (q.array == a && q.top == s &&
U.compareAndSwapObject(a, j, t, null)) {
q.top = s - 1;
task = t;
}
q.qlock = 0;
}
break;
}
} while ((r = r.completer) != null);
}
}
if (task != null)
task.doExec();
if (root.status < 0 ||
(u = (int)(ctl >>> 32)) >= 0 || (u >> UAC_SHIFT) >= 0)
break;
if (task == null) {
helpSignal(root, q.poolIndex);
if (root.status >= 0)
helpComplete(root, SHARED_QUEUE);
break;
}
}
}
}
/**
* Tries to help execute or signal availability of the given task
* from submitter's queue in common pool.
*/
static void externalHelpJoin(ForkJoinTask<?> t) {
// Some hard-to-avoid overlap with tryExternalUnpush
ForkJoinPool p; WorkQueue[] ws; WorkQueue q, w; Submitter z;
ForkJoinTask<?>[] a; int m, s, n;
if (t != null &&
(z = submitters.get()) != null &&
(p = common) != null &&
(ws = p.workQueues) != null &&
(m = ws.length - 1) >= 0 &&
(q = ws[m & z.seed & SQMASK]) != null &&
(a = q.array) != null) {
int am = a.length - 1;
if ((s = q.top) != q.base) {
long j = ((am & (s - 1)) << ASHIFT) + ABASE;
if (U.getObject(a, j) == t &&
U.compareAndSwapInt(q, QLOCK, 0, 1)) {
if (q.array == a && q.top == s &&
U.compareAndSwapObject(a, j, t, null)) {
q.top = s - 1;
q.qlock = 0;
t.doExec();
}
else
q.qlock = 0;
}
}
if (t.status >= 0) {
if (t instanceof CountedCompleter)
p.externalHelpComplete(q, t);
else
p.helpSignal(t, q.poolIndex);
}
}
}
// Exported methods
// Constructors
/**
* Creates a {@code ForkJoinPool} with parallelism equal to {@link
* java.lang.Runtime#availableProcessors}, using the {@linkplain
* #defaultForkJoinWorkerThreadFactory default thread factory},
* no UncaughtExceptionHandler, and non-async LIFO processing mode.
*
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public ForkJoinPool() {
this(Math.min(MAX_CAP, Runtime.getRuntime().availableProcessors()),
defaultForkJoinWorkerThreadFactory, null, false);
}
/**
* Creates a {@code ForkJoinPool} with the indicated parallelism
* level, the {@linkplain
* #defaultForkJoinWorkerThreadFactory default thread factory},
* no UncaughtExceptionHandler, and non-async LIFO processing mode.
*
* @param parallelism the parallelism level
* @throws IllegalArgumentException if parallelism less than or
* equal to zero, or greater than implementation limit
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public ForkJoinPool(int parallelism) {
this(parallelism, defaultForkJoinWorkerThreadFactory, null, false);
}
/**
* Creates a {@code ForkJoinPool} with the given parameters.
*
* @param parallelism the parallelism level. For default value,
* use {@link java.lang.Runtime#availableProcessors}.
* @param factory the factory for creating new threads. For default value,
* use {@link #defaultForkJoinWorkerThreadFactory}.
* @param handler the handler for internal worker threads that
* terminate due to unrecoverable errors encountered while executing
* tasks. For default value, use {@code null}.
* @param asyncMode if true,
* establishes local first-in-first-out scheduling mode for forked
* tasks that are never joined. This mode may be more appropriate
* than default locally stack-based mode in applications in which
* worker threads only process event-style asynchronous tasks.
* For default value, use {@code false}.
* @throws IllegalArgumentException if parallelism less than or
* equal to zero, or greater than implementation limit
* @throws NullPointerException if the factory is null
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public ForkJoinPool(int parallelism,
ForkJoinWorkerThreadFactory factory,
Thread.UncaughtExceptionHandler handler,
boolean asyncMode) {
checkPermission();
if (factory == null)
throw new NullPointerException();
if (parallelism <= 0 || parallelism > MAX_CAP)
throw new IllegalArgumentException();
this.factory = factory;
this.ueh = handler;
this.config = parallelism | (asyncMode ? (FIFO_QUEUE << 16) : 0);
long np = (long)(-parallelism); // offset ctl counts
this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
int pn = nextPoolId();
StringBuilder sb = new StringBuilder("ForkJoinPool-");
sb.append(Integer.toString(pn));
sb.append("-worker-");
this.workerNamePrefix = sb.toString();
}
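// Illustrative sketch (not part of the original source): constructing an
// async-mode pool for event-style tasks that are never joined, using the
// defaults named in the javadoc above:
//
//   ForkJoinPool eventPool = new ForkJoinPool(
//       Runtime.getRuntime().availableProcessors(),
//       ForkJoinPool.defaultForkJoinWorkerThreadFactory,
//       null,    // no UncaughtExceptionHandler
//       true);   // asyncMode: local FIFO scheduling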
/**
* Constructor for common pool, suitable only for static initialization.
* Basically the same as above, but uses smallest possible initial footprint.
*/
ForkJoinPool(int parallelism, long ctl,
ForkJoinWorkerThreadFactory factory,
Thread.UncaughtExceptionHandler handler) {
this.config = parallelism;
this.ctl = ctl;
this.factory = factory;
this.ueh = handler;
this.workerNamePrefix = "ForkJoinPool.commonPool-worker-";
}
/**
* Returns the common pool instance. This pool is statically
* constructed; its run state is unaffected by attempts to {@link
* #shutdown} or {@link #shutdownNow}. However this pool and any
* ongoing processing are automatically terminated upon program
* {@link System#exit}. Any program that relies on asynchronous
* task processing to complete before program termination should
* invoke {@code commonPool().}{@link #awaitQuiescence}, before
* exit.
*
* @return the common pool instance
* @since 1.8
*/
public static ForkJoinPool commonPool() {
// assert common != null : "static init error";
return common;
}
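// Illustrative sketch (not part of the original source): as the javadoc
// above advises, a program relying on asynchronous tasks completing before
// exit can quiesce the common pool first ("someTask" is hypothetical):
//
//   ForkJoinPool.commonPool().execute(someTask);
//   ForkJoinPool.commonPool().awaitQuiescence(1L, TimeUnit.MINUTES);
//   System.exit(0);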
// Execution methods
/**
* Performs the given task, returning its result upon completion.
* If the computation encounters an unchecked Exception or Error,
* it is rethrown as the outcome of this invocation. Rethrown
* exceptions behave in the same way as regular exceptions, but,
* when possible, contain stack traces (as displayed for example
* using {@code ex.printStackTrace()}) of both the current thread
* as well as the thread actually encountering the exception;
* minimally only the latter.
*
* @param task the task
* @return the task's result
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public <T> T invoke(ForkJoinTask<T> task) {
if (task == null)
throw new NullPointerException();
externalPush(task);
return task.join();
}
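// Illustrative sketch (not part of the original source): invoke pushes the
// task externally and then joins it, so a caller outside the pool gets a
// synchronous result. SumTask is a hypothetical RecursiveTask<Long>:
//
//   ForkJoinPool pool = new ForkJoinPool();
//   long total = pool.invoke(new SumTask(array, 0, array.length));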
/**
* Arranges for (asynchronous) execution of the given task.
*
* @param task the task
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public void execute(ForkJoinTask<?> task) {
if (task == null)
throw new NullPointerException();
externalPush(task);
}
// AbstractExecutorService methods
/**
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public void execute(Runnable task) {
if (task == null)
throw new NullPointerException();
ForkJoinTask<?> job;
if (task instanceof ForkJoinTask<?>) // avoid re-wrap
job = (ForkJoinTask<?>) task;
else
job = new ForkJoinTask.AdaptedRunnableAction(task);
externalPush(job);
}
/**
* Submits a ForkJoinTask for execution.
*
* @param task the task to submit
* @return the task
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public <T> ForkJoinTask<T> submit(ForkJoinTask<T> task) {
if (task == null)
throw new NullPointerException();
externalPush(task);
return task;
}
/**
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public <T> ForkJoinTask<T> submit(Callable<T> task) {
ForkJoinTask<T> job = new ForkJoinTask.AdaptedCallable<T>(task);
externalPush(job);
return job;
}
/**
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public <T> ForkJoinTask<T> submit(Runnable task, T result) {
ForkJoinTask<T> job = new ForkJoinTask.AdaptedRunnable<T>(task, result);
externalPush(job);
return job;
}
/**
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public ForkJoinTask<?> submit(Runnable task) {
if (task == null)
throw new NullPointerException();
ForkJoinTask<?> job;
if (task instanceof ForkJoinTask<?>) // avoid re-wrap
job = (ForkJoinTask<?>) task;
else
job = new ForkJoinTask.AdaptedRunnableAction(task);
externalPush(job);
return job;
}
/**
* @throws NullPointerException {@inheritDoc}
* @throws RejectedExecutionException {@inheritDoc}
*/
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) {
// In previous versions of this class, this method constructed
// a task to run ForkJoinTask.invokeAll, but now external
// invocation of multiple tasks is at least as efficient.
ArrayList<Future<T>> futures = new ArrayList<Future<T>>(tasks.size());
boolean done = false;
try {
for (Callable<T> t : tasks) {
ForkJoinTask<T> f = new ForkJoinTask.AdaptedCallable<T>(t);
futures.add(f);
externalPush(f);
}
for (int i = 0, size = futures.size(); i < size; i++)
((ForkJoinTask<?>)futures.get(i)).quietlyJoin();
done = true;
return futures;
} finally {
if (!done)
for (int i = 0, size = futures.size(); i < size; i++)
futures.get(i).cancel(false);
}
}
/**
* Returns the factory used for constructing new workers.
*
* @return the factory used for constructing new workers
*/
public ForkJoinWorkerThreadFactory getFactory() {
return factory;
}
/**
* Returns the handler for internal worker threads that terminate
* due to unrecoverable errors encountered while executing tasks.
*
* @return the handler, or {@code null} if none
*/
public Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() {
return ueh;
}
/**
* Returns the targeted parallelism level of this pool.
*
* @return the targeted parallelism level of this pool
*/
public int getParallelism() {
return config & SMASK;
}
/**
* Returns the targeted parallelism level of the common pool.
*
* @return the targeted parallelism level of the common pool
* @since 1.8
*/
public static int getCommonPoolParallelism() {
return commonParallelism;
}
/**
* Returns the number of worker threads that have started but not
* yet terminated. The result returned by this method may differ
* from {@link #getParallelism} when threads are created to
* maintain parallelism when others are cooperatively blocked.
*
* @return the number of worker threads
*/
public int getPoolSize() {
return (config & SMASK) + (short)(ctl >>> TC_SHIFT);
}
/**
* Returns {@code true} if this pool uses local first-in-first-out
* scheduling mode for forked tasks that are never joined.
*
* @return {@code true} if this pool uses async mode
*/
public boolean getAsyncMode() {
return (config >>> 16) == FIFO_QUEUE;
}
/**
* Returns an estimate of the number of worker threads that are
* not blocked waiting to join tasks or for other managed
* synchronization. This method may overestimate the
* number of running threads.
*
* @return the number of worker threads
*/
public int getRunningThreadCount() {
int rc = 0;
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 1; i < ws.length; i += 2) {
if ((w = ws[i]) != null && w.isApparentlyUnblocked())
++rc;
}
}
return rc;
}
/**
* Returns an estimate of the number of threads that are currently
* stealing or executing tasks. This method may overestimate the
* number of active threads.
*
* @return the number of active threads
*/
public int getActiveThreadCount() {
int r = (config & SMASK) + (int)(ctl >> AC_SHIFT);
return (r <= 0) ? 0 : r; // suppress momentarily negative values
}
/**
* Returns {@code true} if all worker threads are currently idle.
* An idle worker is one that cannot obtain a task to execute
* because none are available to steal from other threads, and
* there are no pending submissions to the pool. This method is
* conservative; it might not return {@code true} immediately upon
* idleness of all threads, but will eventually become true if
* threads remain inactive.
*
* @return {@code true} if all threads are currently idle
*/
public boolean isQuiescent() {
return (int)(ctl >> AC_SHIFT) + (config & SMASK) == 0;
}
/**
* Returns an estimate of the total number of tasks stolen from
* one thread's work queue by another. The reported value
* underestimates the actual total number of steals when the pool
* is not quiescent. This value may be useful for monitoring and
* tuning fork/join programs: in general, steal counts should be
* high enough to keep threads busy, but low enough to avoid
* overhead and contention across threads.
*
* @return the number of steals
*/
public long getStealCount() {
long count = stealCount;
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 1; i < ws.length; i += 2) {
if ((w = ws[i]) != null)
count += w.nsteals;
}
}
return count;
}
/**
* Returns an estimate of the total number of tasks currently held
* in queues by worker threads (but not including tasks submitted
* to the pool that have not begun executing). This value is only
* an approximation, obtained by iterating across all threads in
* the pool. This method may be useful for tuning task
* granularities.
*
* @return the number of queued tasks
*/
public long getQueuedTaskCount() {
long count = 0;
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 1; i < ws.length; i += 2) {
if ((w = ws[i]) != null)
count += w.queueSize();
}
}
return count;
}
/**
* Returns an estimate of the number of tasks submitted to this
* pool that have not yet begun executing. This method may take
* time proportional to the number of submissions.
*
* @return the number of queued submissions
*/
public int getQueuedSubmissionCount() {
int count = 0;
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; i += 2) {
if ((w = ws[i]) != null)
count += w.queueSize();
}
}
return count;
}
/**
* Returns {@code true} if there are any tasks submitted to this
* pool that have not yet begun executing.
*
* @return {@code true} if there are any queued submissions
*/
public boolean hasQueuedSubmissions() {
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; i += 2) {
if ((w = ws[i]) != null && !w.isEmpty())
return true;
}
}
return false;
}
/**
* Removes and returns the next unexecuted submission if one is
* available. This method may be useful in extensions to this
* class that re-assign work in systems with multiple pools.
*
* @return the next submission, or {@code null} if none
*/
protected ForkJoinTask<?> pollSubmission() {
WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; i += 2) {
if ((w = ws[i]) != null && (t = w.poll()) != null)
return t;
}
}
return null;
}
/**
* Removes all available unexecuted submitted and forked tasks
* from scheduling queues and adds them to the given collection,
* without altering their execution status. These may include
* artificially generated or wrapped tasks. This method is
* designed to be invoked only when the pool is known to be
* quiescent. Invocations at other times may not remove all
* tasks. A failure encountered while attempting to add elements
* to collection {@code c} may result in elements being in
* neither, either or both collections when the associated
* exception is thrown. The behavior of this operation is
* undefined if the specified collection is modified while the
* operation is in progress.
*
* @param c the collection to transfer elements into
* @return the number of elements transferred
*/
protected int drainTasksTo(Collection<? super ForkJoinTask<?>> c) {
int count = 0;
WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; ++i) {
if ((w = ws[i]) != null) {
while ((t = w.poll()) != null) {
c.add(t);
++count;
}
}
}
}
return count;
}
/**
* Returns a string identifying this pool, as well as its state,
* including indications of run state, parallelism level, and
* worker and task counts.
*
* @return a string identifying this pool, as well as its state
*/
public String toString() {
// Use a single pass through workQueues to collect counts
long qt = 0L, qs = 0L; int rc = 0;
long st = stealCount;
long c = ctl;
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; ++i) {
if ((w = ws[i]) != null) {
int size = w.queueSize();
if ((i & 1) == 0)
qs += size;
else {
qt += size;
st += w.nsteals;
if (w.isApparentlyUnblocked())
++rc;
}
}
}
}
int pc = (config & SMASK);
int tc = pc + (short)(c >>> TC_SHIFT);
int ac = pc + (int)(c >> AC_SHIFT);
if (ac < 0) // ignore transient negative
ac = 0;
String level;
if ((c & STOP_BIT) != 0)
level = (tc == 0) ? "Terminated" : "Terminating";
else
level = plock < 0 ? "Shutting down" : "Running";
return super.toString() +
"[" + level +
", parallelism = " + pc +
", size = " + tc +
", active = " + ac +
", running = " + rc +
", steals = " + st +
", tasks = " + qt +
", submissions = " + qs +
"]";
}
/**
* Possibly initiates an orderly shutdown in which previously
* submitted tasks are executed, but no new tasks will be
* accepted. Invocation has no effect on execution state if this
* is the {@link #commonPool()}, and no additional effect if
* already shut down. Tasks that are in the process of being
* submitted concurrently during the course of this method may or
* may not be rejected.
*
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public void shutdown() {
checkPermission();
tryTerminate(false, true);
}
/**
* Possibly attempts to cancel and/or stop all tasks, and reject
* all subsequently submitted tasks. Invocation has no effect on
* execution state if this is the {@link #commonPool()}, and no
* additional effect if already shut down. Otherwise, tasks that
* are in the process of being submitted or executed concurrently
* during the course of this method may or may not be
* rejected. This method cancels both existing and unexecuted
* tasks, in order to permit termination in the presence of task
* dependencies. So the method always returns an empty list
* (unlike the case for some other Executors).
*
* @return an empty list
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public List<Runnable> shutdownNow() {
checkPermission();
tryTerminate(true, true);
return Collections.emptyList();
}
/**
* Returns {@code true} if all tasks have completed following shut down.
*
* @return {@code true} if all tasks have completed following shut down
*/
public boolean isTerminated() {
long c = ctl;
return ((c & STOP_BIT) != 0L &&
(short)(c >>> TC_SHIFT) == -(config & SMASK));
}
/**
* Returns {@code true} if the process of termination has
* commenced but not yet completed. This method may be useful for
* debugging. A return of {@code true} reported a sufficient
* period after shutdown may indicate that submitted tasks have
* ignored or suppressed interruption, or are waiting for I/O,
* causing this executor not to properly terminate. (See the
* advisory notes for class {@link ForkJoinTask} stating that
* tasks should not normally entail blocking operations. But if
* they do, they must abort them on interrupt.)
*
* @return {@code true} if terminating but not yet terminated
*/
public boolean isTerminating() {
long c = ctl;
return ((c & STOP_BIT) != 0L &&
(short)(c >>> TC_SHIFT) != -(config & SMASK));
}
/**
* Returns {@code true} if this pool has been shut down.
*
* @return {@code true} if this pool has been shut down
*/
public boolean isShutdown() {
return plock < 0;
}
/**
* Blocks until all tasks have completed execution after a
* shutdown request, or the timeout occurs, or the current thread
* is interrupted, whichever happens first. Because the {@link
* #commonPool()} never terminates until program shutdown, when
* applied to the common pool, this method is equivalent to {@link
* #awaitQuiescence} but always returns {@code false}.
*
* @param timeout the maximum time to wait
* @param unit the time unit of the timeout argument
* @return {@code true} if this executor terminated and
* {@code false} if the timeout elapsed before termination
* @throws InterruptedException if interrupted while waiting
*/
public boolean awaitTermination(long timeout, TimeUnit unit)
throws InterruptedException {
if (Thread.interrupted())
throw new InterruptedException();
if (this == common) {
awaitQuiescence(timeout, unit);
return false;
}
long nanos = unit.toNanos(timeout);
if (isTerminated())
return true;
long startTime = System.nanoTime();
boolean terminated = false;
synchronized (this) {
for (long waitTime = nanos, millis = 0L;;) {
if ((terminated = isTerminated()) ||
waitTime <= 0L ||
(millis = TimeUnit.NANOSECONDS.toMillis(waitTime)) <= 0L)
break;
wait(millis);
waitTime = nanos - (System.nanoTime() - startTime);
}
}
return terminated;
}
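// Illustrative sketch (not part of the original source): the standard
// ExecutorService shutdown idiom for a non-common pool:
//
//   pool.shutdown();                                   // stop accepting new tasks
//   if (!pool.awaitTermination(30L, TimeUnit.SECONDS)) // wait for completion
//       pool.shutdownNow();                            // then cancel stragglers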
/**
* If called by a ForkJoinTask operating in this pool, equivalent
* in effect to {@link ForkJoinTask#helpQuiesce}. Otherwise,
* waits and/or attempts to assist performing tasks until this
* pool {@link #isQuiescent} or the indicated timeout elapses.
*
* @param timeout the maximum time to wait
* @param unit the time unit of the timeout argument
* @return {@code true} if quiescent; {@code false} if the
* timeout elapsed.
*/
public boolean awaitQuiescence(long timeout, TimeUnit unit) {
long nanos = unit.toNanos(timeout);
ForkJoinWorkerThread wt;
Thread thread = Thread.currentThread();
if ((thread instanceof ForkJoinWorkerThread) &&
(wt = (ForkJoinWorkerThread)thread).pool == this) {
helpQuiescePool(wt.workQueue);
return true;
}
long startTime = System.nanoTime();
WorkQueue[] ws;
int r = 0, m;
boolean found = true;
while (!isQuiescent() && (ws = workQueues) != null &&
(m = ws.length - 1) >= 0) {
if (!found) {
if ((System.nanoTime() - startTime) > nanos)
return false;
Thread.yield(); // cannot block
}
found = false;
for (int j = (m + 1) << 2; j >= 0; --j) {
ForkJoinTask<?> t; WorkQueue q; int b;
if ((q = ws[r++ & m]) != null && (b = q.base) - q.top < 0) {
found = true;
if ((t = q.pollAt(b)) != null) {
if (q.base - q.top < 0)
signalWork(q);
t.doExec();
}
break;
}
}
}
return true;
}
/**
* Waits and/or attempts to assist performing tasks indefinitely
* until the {@link #commonPool()} {@link #isQuiescent}.
*/
static void quiesceCommonPool() {
common.awaitQuiescence(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
}
/**
* Interface for extending managed parallelism for tasks running
* in {@link ForkJoinPool}s.
*
* <p>A {@code ManagedBlocker} provides two methods. Method
* {@code isReleasable} must return {@code true} if blocking is
* not necessary. Method {@code block} blocks the current thread
* if necessary (perhaps internally invoking {@code isReleasable}
* before actually blocking). These actions are performed by any
* thread invoking {@link ForkJoinPool#managedBlock}. The
* unusual methods in this API accommodate synchronizers that may,
* but don't usually, block for long periods. Similarly, they
* allow more efficient internal handling of cases in which
* additional workers may be, but usually are not, needed to
* ensure sufficient parallelism. Toward this end,
* implementations of method {@code isReleasable} must be amenable
* to repeated invocation.
*
* <p>For example, here is a ManagedBlocker based on a
* ReentrantLock:
* <pre> {@code
* class ManagedLocker implements ManagedBlocker {
* final ReentrantLock lock;
* boolean hasLock = false;
* ManagedLocker(ReentrantLock lock) { this.lock = lock; }
* public boolean block() {
* if (!hasLock)
* lock.lock();
* return true;
* }
* public boolean isReleasable() {
* return hasLock || (hasLock = lock.tryLock());
* }
* }}</pre>
*
* <p>Here is a class that possibly blocks waiting for an
* item on a given queue:
* <pre> {@code
* class QueueTaker<E> implements ManagedBlocker {
* final BlockingQueue<E> queue;
* volatile E item = null;
* QueueTaker(BlockingQueue<E> q) { this.queue = q; }
* public boolean block() throws InterruptedException {
* if (item == null)
* item = queue.take();
* return true;
* }
* public boolean isReleasable() {
* return item != null || (item = queue.poll()) != null;
* }
* public E getItem() { // call after pool.managedBlock completes
* return item;
* }
* }}</pre>
*/
public static interface ManagedBlocker {
/**
* Possibly blocks the current thread, for example waiting for
* a lock or condition.
*
* @return {@code true} if no additional blocking is necessary
* (i.e., if isReleasable would return true)
* @throws InterruptedException if interrupted while waiting
* (the method is not required to do so, but is allowed to)
*/
boolean block() throws InterruptedException;
/**
* Returns {@code true} if blocking is unnecessary.
*/
boolean isReleasable();
}
/**
* Blocks in accord with the given blocker. If the current thread
* is a {@link ForkJoinWorkerThread}, this method possibly
* arranges for a spare thread to be activated if necessary to
* ensure sufficient parallelism while the current thread is blocked.
*
* <p>If the caller is not a {@link ForkJoinTask}, this method is
* behaviorally equivalent to
* <pre> {@code
* while (!blocker.isReleasable())
* if (blocker.block())
* return;
* }</pre>
*
* If the caller is a {@code ForkJoinTask}, then the pool may
* first be expanded to ensure parallelism, and later adjusted.
*
* @param blocker the blocker
* @throws InterruptedException if blocker.block did so
*/
public static void managedBlock(ManagedBlocker blocker)
throws InterruptedException {
Thread t = Thread.currentThread();
if (t instanceof ForkJoinWorkerThread) {
ForkJoinPool p = ((ForkJoinWorkerThread)t).pool;
while (!blocker.isReleasable()) { // variant of helpSignal
WorkQueue[] ws; WorkQueue q; int m, u;
if ((ws = p.workQueues) != null && (m = ws.length - 1) >= 0) {
for (int i = 0; i <= m; ++i) {
if (blocker.isReleasable())
return;
if ((q = ws[i]) != null && q.base - q.top < 0) {
p.signalWork(q);
if ((u = (int)(p.ctl >>> 32)) >= 0 ||
(u >> UAC_SHIFT) >= 0)
break;
}
}
}
if (p.tryCompensate()) {
try {
do {} while (!blocker.isReleasable() &&
!blocker.block());
} finally {
p.incrementActiveCount();
}
break;
}
}
}
else {
do {} while (!blocker.isReleasable() &&
!blocker.block());
}
}
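// Illustrative sketch (not part of the original source): driving the
// QueueTaker example from the ManagedBlocker javadoc above; "queue" is a
// hypothetical BlockingQueue<String>:
//
//   QueueTaker<String> taker = new QueueTaker<String>(queue);
//   ForkJoinPool.managedBlock(taker); // may activate a spare worker while blocked
//   String item = taker.getItem();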
// AbstractExecutorService overrides. These rely on undocumented
// fact that ForkJoinTask.adapt returns ForkJoinTasks that also
// implement RunnableFuture.
protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) {
return new ForkJoinTask.AdaptedRunnable<T>(runnable, value);
}
protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable) {
return new ForkJoinTask.AdaptedCallable<T>(callable);
}
// Unsafe mechanics
private static final sun.misc.Unsafe U;
private static final long CTL;
private static final long PARKBLOCKER;
private static final int ABASE;
private static final int ASHIFT;
private static final long STEALCOUNT;
private static final long PLOCK;
private static final long INDEXSEED;
private static final long QLOCK;
static {
// initialize field offsets for CAS etc
try {
U = getUnsafe();
Class<?> k = ForkJoinPool.class;
CTL = U.objectFieldOffset
(k.getDeclaredField("ctl"));
STEALCOUNT = U.objectFieldOffset
(k.getDeclaredField("stealCount"));
PLOCK = U.objectFieldOffset
(k.getDeclaredField("plock"));
INDEXSEED = U.objectFieldOffset
(k.getDeclaredField("indexSeed"));
Class<?> tk = Thread.class;
PARKBLOCKER = U.objectFieldOffset
(tk.getDeclaredField("parkBlocker"));
Class<?> wk = WorkQueue.class;
QLOCK = U.objectFieldOffset
(wk.getDeclaredField("qlock"));
Class<?> ak = ForkJoinTask[].class;
ABASE = U.arrayBaseOffset(ak);
int scale = U.arrayIndexScale(ak);
if ((scale & (scale - 1)) != 0)
throw new Error("data type scale not a power of two");
ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
} catch (Exception e) {
throw new Error(e);
}
submitters = new ThreadLocal<Submitter>();
ForkJoinWorkerThreadFactory fac = defaultForkJoinWorkerThreadFactory =
new DefaultForkJoinWorkerThreadFactory();
modifyThreadPermission = new RuntimePermission("modifyThread");
/*
* Establish common pool parameters. For extra caution,
* computations to set up common pool state are here; the
* constructor just assigns these values to fields.
*/
int par = 0;
Thread.UncaughtExceptionHandler handler = null;
try { // TBD: limit or report ignored exceptions?
String pp = System.getProperty
("java.util.concurrent.ForkJoinPool.common.parallelism");
String hp = System.getProperty
("java.util.concurrent.ForkJoinPool.common.exceptionHandler");
String fp = System.getProperty
("java.util.concurrent.ForkJoinPool.common.threadFactory");
if (fp != null)
fac = ((ForkJoinWorkerThreadFactory)ClassLoader.
getSystemClassLoader().loadClass(fp).newInstance());
if (hp != null)
handler = ((Thread.UncaughtExceptionHandler)ClassLoader.
getSystemClassLoader().loadClass(hp).newInstance());
if (pp != null)
par = Integer.parseInt(pp);
} catch (Exception ignore) {
}
if (par <= 0)
par = Runtime.getRuntime().availableProcessors();
if (par > MAX_CAP)
par = MAX_CAP;
commonParallelism = par;
long np = (long)(-par); // precompute initial ctl value
long ct = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
common = new ForkJoinPool(par, ct, fac, handler);
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
private static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException tryReflectionInstead) {}
try {
return java.security.AccessController.doPrivileged
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
}
| 0true
|
src_main_java_jsr166y_ForkJoinPool.java
|
695 |
public static interface Listener {
/**
* Callback before the bulk is executed.
*/
void beforeBulk(long executionId, BulkRequest request);
/**
* Callback after a successful execution of a bulk request.
*/
void afterBulk(long executionId, BulkRequest request, BulkResponse response);
/**
* Callback after a failed execution of a bulk request.
*/
void afterBulk(long executionId, BulkRequest request, Throwable failure);
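// Illustrative sketch (not part of the original interface): a minimal
// listener; the request/response accessors shown are assumptions about the
// surrounding Elasticsearch API:
//
//   new BulkProcessor.Listener() {
//       public void beforeBulk(long id, BulkRequest req) { /* e.g. log req.numberOfActions() */ }
//       public void afterBulk(long id, BulkRequest req, BulkResponse resp) { /* e.g. check resp.hasFailures() */ }
//       public void afterBulk(long id, BulkRequest req, Throwable failure) { /* e.g. log the failure */ }
//   };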
}
| 0true
|
src_main_java_org_elasticsearch_action_bulk_BulkProcessor.java
|
1,373 |
public class OTransactionNoTx extends OTransactionAbstract {
public OTransactionNoTx(final ODatabaseRecordTx iDatabase) {
super(iDatabase);
}
public void begin() {
}
public void commit() {
}
public void rollback() {
}
public void close() {
}
public ORecordInternal<?> loadRecord(final ORID iRid, final ORecordInternal<?> iRecord, final String iFetchPlan,
boolean ignoreCache, boolean loadTombstone) {
if (iRid.isNew())
return null;
return database.executeReadRecord((ORecordId) iRid, iRecord, iFetchPlan, ignoreCache, loadTombstone);
}
/**
* Saves the record immediately, as there is no transaction to defer the write to.
*
* @param iForceCreate           if true, forces creation of the record
* @param iRecordCreatedCallback callback invoked after the record has been created
* @param iRecordUpdatedCallback callback invoked after the record has been updated
*/
public void saveRecord(final ORecordInternal<?> iRecord, final String iClusterName, final OPERATION_MODE iMode,
boolean iForceCreate, final ORecordCallback<? extends Number> iRecordCreatedCallback,
ORecordCallback<ORecordVersion> iRecordUpdatedCallback) {
try {
database.executeSaveRecord(iRecord, iClusterName, iRecord.getRecordVersion(), iRecord.getRecordType(), true, iMode,
iForceCreate, iRecordCreatedCallback, null);
} catch (Exception e) {
// REMOVE IT FROM THE CACHE TO AVOID DIRTY RECORDS
final ORecordId rid = (ORecordId) iRecord.getIdentity();
if (rid.isValid())
database.getLevel1Cache().freeRecord(rid);
if (e instanceof RuntimeException)
throw (RuntimeException) e;
throw new OException(e);
}
}
@Override
public boolean updateReplica(ORecordInternal<?> iRecord) {
try {
return database.executeUpdateReplica(iRecord);
} catch (Exception e) {
// REMOVE IT FROM THE CACHE TO AVOID DIRTY RECORDS
final ORecordId rid = (ORecordId) iRecord.getIdentity();
database.getLevel1Cache().freeRecord(rid);
if (e instanceof RuntimeException)
throw (RuntimeException) e;
throw new OException(e);
}
}
/**
* Deletes the record.
*/
public void deleteRecord(final ORecordInternal<?> iRecord, final OPERATION_MODE iMode) {
if (!iRecord.getIdentity().isPersistent())
return;
try {
database.executeDeleteRecord(iRecord, iRecord.getRecordVersion(), true, true, iMode, false);
} catch (Exception e) {
// REMOVE IT FROM THE CACHE TO AVOID DIRTY RECORDS
final ORecordId rid = (ORecordId) iRecord.getIdentity();
if (rid.isValid())
database.getLevel1Cache().freeRecord(rid);
if (e instanceof RuntimeException)
throw (RuntimeException) e;
throw new OException(e);
}
}
public Collection<ORecordOperation> getCurrentRecordEntries() {
return null;
}
public Collection<ORecordOperation> getAllRecordEntries() {
return null;
}
public List<ORecordOperation> getRecordEntriesByClass(String iClassName) {
return null;
}
public List<ORecordOperation> getNewRecordEntriesByClusterIds(int[] iIds) {
return null;
}
public void clearRecordEntries() {
}
public int getRecordEntriesSize() {
return 0;
}
public ORecordInternal<?> getRecord(final ORID rid) {
return null;
}
public ORecordOperation getRecordEntry(final ORID rid) {
return null;
}
public boolean isUsingLog() {
return false;
}
public void setUsingLog(final boolean useLog) {
}
public ODocument getIndexChanges() {
return null;
}
public OTransactionIndexChangesPerKey getIndexEntry(final String iIndexName, final Object iKey) {
return null;
}
public void addIndexEntry(final OIndex<?> delegate, final String indexName, final OPERATION status, final Object key,
final OIdentifiable value) {
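// with no transaction in progress, apply the index change to the delegate immediately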
switch (status) {
case CLEAR:
delegate.clear();
break;
case PUT:
delegate.put(key, value);
break;
case REMOVE:
assert key != null;
delegate.remove(key, value);
break;
}
}
public void clearIndexEntries() {
}
public OTransactionIndexChanges getIndexChanges(final String iName) {
return null;
}
public int getId() {
return 0;
}
public List<String> getInvolvedIndexes() {
return null;
}
public void updateIdentityAfterCommit(ORID oldRid, ORID newRid) {
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_tx_OTransactionNoTx.java
|
408 |
public class ClientAtomicLongProxy extends ClientProxy implements IAtomicLong {
private final String name;
private volatile Data key;
public ClientAtomicLongProxy(String instanceName, String serviceName, String objectId) {
super(instanceName, serviceName, objectId);
this.name = objectId;
}
@Override
public <R> R apply(IFunction<Long, R> function) {
isNotNull(function, "function");
return invoke(new ApplyRequest(name, toData(function)));
}
@Override
public void alter(IFunction<Long, Long> function) {
isNotNull(function, "function");
invoke(new AlterRequest(name, toData(function)));
}
@Override
public long alterAndGet(IFunction<Long, Long> function) {
isNotNull(function, "function");
return (Long) invoke(new AlterAndGetRequest(name, toData(function)));
}
@Override
public long getAndAlter(IFunction<Long, Long> function) {
isNotNull(function, "function");
return (Long) invoke(new GetAndAlterRequest(name, toData(function)));
}
@Override
public long addAndGet(long delta) {
AddAndGetRequest request = new AddAndGetRequest(name, delta);
Long result = invoke(request);
return result;
}
@Override
public boolean compareAndSet(long expect, long update) {
CompareAndSetRequest request = new CompareAndSetRequest(name, expect, update);
Boolean result = invoke(request);
return result;
}
@Override
public long decrementAndGet() {
return addAndGet(-1);
}
@Override
public long get() {
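// a read is implemented as get-and-add of zero, reusing the same request path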
return getAndAdd(0);
}
@Override
public long getAndAdd(long delta) {
GetAndAddRequest request = new GetAndAddRequest(name, delta);
Long result = invoke(request);
return result;
}
@Override
public long getAndSet(long newValue) {
GetAndSetRequest request = new GetAndSetRequest(name, newValue);
Long result = invoke(request);
return result;
}
@Override
public long incrementAndGet() {
return addAndGet(1);
}
@Override
public long getAndIncrement() {
return getAndAdd(1);
}
@Override
public void set(long newValue) {
SetRequest request = new SetRequest(name, newValue);
invoke(request);
}
@Override
protected void onDestroy() {
}
protected <T> T invoke(ClientRequest req) {
return super.invoke(req, getKey());
}
private Data getKey() {
if (key == null) {
key = toData(name);
}
return key;
}
@Override
public String toString() {
return "IAtomicLong{" + "name='" + name + '\'' + '}';
}
}
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_proxy_ClientAtomicLongProxy.java
|
492 |
public final class ClientExecutionServiceImpl implements ClientExecutionService {
private static final ILogger LOGGER = Logger.getLogger(ClientExecutionService.class);
private final ExecutorService executor;
private final ExecutorService internalExecutor;
private final ScheduledExecutorService scheduledExecutor;
public ClientExecutionServiceImpl(String name, ThreadGroup threadGroup, ClassLoader classLoader, int poolSize) {
if (poolSize <= 0) {
poolSize = Runtime.getRuntime().availableProcessors();
}
internalExecutor = new ThreadPoolExecutor(2, 2, 0L, TimeUnit.MILLISECONDS,
new LinkedBlockingQueue<Runnable>(),
new PoolExecutorThreadFactory(threadGroup, name + ".internal-", classLoader),
new RejectedExecutionHandler() {
public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
String message = "Internal executor rejected task: " + r + ", because client is shutting down...";
LOGGER.finest(message);
throw new RejectedExecutionException(message);
}
});
executor = new ThreadPoolExecutor(poolSize, poolSize, 0L, TimeUnit.MILLISECONDS,
new LinkedBlockingQueue<Runnable>(),
new PoolExecutorThreadFactory(threadGroup, name + ".cached-", classLoader),
new RejectedExecutionHandler() {
public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
String message = "Internal executor rejected task: " + r + ", because client is shutting down...";
LOGGER.finest(message);
throw new RejectedExecutionException(message);
}
});
scheduledExecutor = Executors.newSingleThreadScheduledExecutor(
new SingleExecutorThreadFactory(threadGroup, classLoader, name + ".scheduled"));
}
@Override
public void executeInternal(Runnable command) {
internalExecutor.execute(command);
}
@Override
public <T> ICompletableFuture<T> submitInternal(final Callable<T> command) {
CompletableFutureTask futureTask = new CompletableFutureTask(command, internalExecutor);
internalExecutor.submit(futureTask);
return futureTask;
}
@Override
public void execute(Runnable command) {
executor.execute(command);
}
@Override
public ICompletableFuture<?> submit(Runnable task) {
CompletableFutureTask futureTask = new CompletableFutureTask(task, null, getAsyncExecutor());
executor.submit(futureTask);
return futureTask;
}
@Override
public <T> ICompletableFuture<T> submit(Callable<T> task) {
CompletableFutureTask<T> futureTask = new CompletableFutureTask<T>(task, getAsyncExecutor());
executor.submit(futureTask);
return futureTask;
}
@Override
public ScheduledFuture<?> schedule(final Runnable command, long delay, TimeUnit unit) {
return scheduledExecutor.schedule(new Runnable() {
public void run() {
executeInternal(command);
}
}, delay, unit);
}
@Override
public ScheduledFuture<?> scheduleAtFixedRate(final Runnable command, long initialDelay, long period, TimeUnit unit) {
return scheduledExecutor.scheduleAtFixedRate(new Runnable() {
public void run() {
executeInternal(command);
}
}, initialDelay, period, unit);
}
@Override
public ScheduledFuture<?> scheduleWithFixedDelay(final Runnable command, long initialDelay, long period, TimeUnit unit) {
return scheduledExecutor.scheduleWithFixedDelay(new Runnable() {
public void run() {
executeInternal(command);
}
}, initialDelay, period, unit);
}
@Override
public ExecutorService getAsyncExecutor() {
return executor;
}
public void shutdown() {
internalExecutor.shutdownNow();
scheduledExecutor.shutdownNow();
executor.shutdownNow();
}
}
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_spi_impl_ClientExecutionServiceImpl.java
|
310 |
public enum ResourceType {
FILESYSTEM,CLASSPATH
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_extensibility_context_MergeFileSystemAndClassPathXMLApplicationContext.java
|
419 |
public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSnapshotRequest> {
private String snapshot;
private String repository;
private String[] indices = Strings.EMPTY_ARRAY;
private IndicesOptions indicesOptions = IndicesOptions.strict();
private String renamePattern;
private String renameReplacement;
private boolean waitForCompletion;
private boolean includeGlobalState = true;
private Settings settings = EMPTY_SETTINGS;
RestoreSnapshotRequest() {
}
/**
* Constructs a new put repository request with the provided repository and snapshot names.
*
* @param repository repository name
* @param snapshot snapshot name
*/
public RestoreSnapshotRequest(String repository, String snapshot) {
this.snapshot = snapshot;
this.repository = repository;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (snapshot == null) {
validationException = addValidationError("name is missing", validationException);
}
if (repository == null) {
validationException = addValidationError("repository is missing", validationException);
}
if (indices == null) {
validationException = addValidationError("indices are missing", validationException);
}
if (indicesOptions == null) {
validationException = addValidationError("indicesOptions is missing", validationException);
}
if (settings == null) {
validationException = addValidationError("settings are missing", validationException);
}
return validationException;
}
/**
* Sets the name of the snapshot.
*
* @param snapshot snapshot name
* @return this request
*/
public RestoreSnapshotRequest snapshot(String snapshot) {
this.snapshot = snapshot;
return this;
}
/**
* Returns the name of the snapshot.
*
* @return snapshot name
*/
public String snapshot() {
return this.snapshot;
}
/**
* Sets repository name
*
* @param repository repository name
* @return this request
*/
public RestoreSnapshotRequest repository(String repository) {
this.repository = repository;
return this;
}
/**
* Returns repository name
*
* @return repository name
*/
public String repository() {
return this.repository;
}
/**
* Sets the list of indices that should be restored from snapshot
* <p/>
* The list of indices supports multi-index syntax. For example: "+test*", "-test42" will restore all indices with
* prefix "test" except index "test42". Aliases are not supported. An empty list or {"_all"} will restore all open
* indices in the snapshot.
*
* @param indices list of indices
* @return this request
*/
public RestoreSnapshotRequest indices(String... indices) {
this.indices = indices;
return this;
}
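// Illustrative sketch (not part of the original source): using the
// multi-index syntax described above:
//
//   request.indices("+test*", "-test42"); // all "test"-prefixed indices except "test42"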
/**
* Sets the list of indices that should be restored from snapshot
* <p/>
* The list of indices supports multi-index syntax. For example: "+test*", "-test42" will restore all indices with
* prefix "test" except index "test42". Aliases are not supported. An empty list or {"_all"} will restore all open
* indices in the snapshot.
*
* @param indices list of indices
* @return this request
*/
public RestoreSnapshotRequest indices(List<String> indices) {
this.indices = indices.toArray(new String[indices.size()]);
return this;
}
/**
* Returns list of indices that should be restored from snapshot
*
* @return list of indices that should be restored from the snapshot
*/
public String[] indices() {
return indices;
}
/**
* Specifies what type of requested indices to ignore and how to deal with wildcard expressions.
* For example indices that don't exist.
*
* @return the desired behaviour regarding indices to ignore and wildcard indices expression
*/
public IndicesOptions indicesOptions() {
return indicesOptions;
}
/**
* Specifies what type of requested indices to ignore and how to deal with wildcard expressions.
* For example indices that don't exist.
*
* @param indicesOptions the desired behaviour regarding indices to ignore and wildcard indices expressions
* @return this request
*/
public RestoreSnapshotRequest indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
/**
* Sets rename pattern that should be applied to restored indices.
* <p/>
* Indices that match the rename pattern will be renamed according to {@link #renameReplacement(String)}. The
* rename pattern is applied according to the {@link java.util.regex.Matcher#appendReplacement(StringBuffer, String)}
* The request will fail if two or more indices will be renamed into the same name.
*
* @param renamePattern rename pattern
* @return this request
*/
public RestoreSnapshotRequest renamePattern(String renamePattern) {
this.renamePattern = renamePattern;
return this;
}
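// Illustrative sketch (not part of the original source): the pattern and
// replacement follow java.util.regex.Matcher#appendReplacement semantics,
// so "index_logs" could be restored as "restored_logs" with:
//
//   request.renamePattern("index_(.+)")
//          .renameReplacement("restored_$1");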
/**
* Returns rename pattern
*
* @return rename pattern
*/
public String renamePattern() {
return renamePattern;
}
/**
* Sets rename replacement
* <p/>
* See {@link #renamePattern(String)} for more information.
*
* @param renameReplacement rename replacement
* @return this request
*/
public RestoreSnapshotRequest renameReplacement(String renameReplacement) {
this.renameReplacement = renameReplacement;
return this;
}
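// Illustrative sketch (pattern and replacement values assumed): rename "index_1" to "restored_index_1":
//   request.renamePattern("index_(.+)").renameReplacement("restored_index_$1");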
/**
* Returns rename replacement
*
* @return rename replacement
*/
public String renameReplacement() {
return renameReplacement;
}
/**
* If this parameter is set to true the operation will wait for the completion of the restore process before returning.
*
* @param waitForCompletion if true the operation will wait for completion
* @return this request
*/
public RestoreSnapshotRequest waitForCompletion(boolean waitForCompletion) {
this.waitForCompletion = waitForCompletion;
return this;
}
/**
* Returns wait for completion setting
*
* @return true if the operation will wait for completion
*/
public boolean waitForCompletion() {
return waitForCompletion;
}
/**
* Sets repository-specific restore settings.
* <p/>
* See repository documentation for more information.
*
* @param settings repository-specific snapshot settings
* @return this request
*/
public RestoreSnapshotRequest settings(Settings settings) {
this.settings = settings;
return this;
}
/**
* Sets repository-specific restore settings.
* <p/>
* See repository documentation for more information.
*
* @param settings repository-specific snapshot settings
* @return this request
*/
public RestoreSnapshotRequest settings(Settings.Builder settings) {
this.settings = settings.build();
return this;
}
/**
* Sets repository-specific restore settings in JSON, YAML or properties format
* <p/>
* See repository documentation for more information.
*
* @param source repository-specific snapshot settings
* @return this request
*/
public RestoreSnapshotRequest settings(String source) {
this.settings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
return this;
}
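// Hedged example with a hypothetical setting key; valid keys depend on the repository type:
//   request.settings("{\"hypothetical_repo_setting\": \"value\"}");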
/**
* Sets repository-specific restore settings
* <p/>
* See repository documentation for more information.
*
* @param source repository-specific snapshot settings
* @return this request
*/
public RestoreSnapshotRequest settings(Map<String, Object> source) {
try {
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
builder.map(source);
settings(builder.string());
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
}
return this;
}
/**
* Returns repository-specific restore settings
*
* @return restore settings
*/
public Settings settings() {
return this.settings;
}
/**
* If set to true the restore procedure will restore global cluster state.
* <p/>
* The global cluster state includes persistent settings and index template definitions.
*
* @param includeGlobalState true if global state should be restored from the snapshot
* @return this request
*/
public RestoreSnapshotRequest includeGlobalState(boolean includeGlobalState) {
this.includeGlobalState = includeGlobalState;
return this;
}
/**
* Returns true if global state should be restored from this snapshot
*
* @return true if global state should be restored
*/
public boolean includeGlobalState() {
return includeGlobalState;
}
/**
* Parses restore definition
*
* @param source restore definition
* @return this request
*/
public RestoreSnapshotRequest source(XContentBuilder source) {
try {
return source(source.bytes());
} catch (Exception e) {
throw new ElasticsearchIllegalArgumentException("Failed to build json for repository request", e);
}
}
/**
* Parses restore definition
*
* @param source restore definition
* @return this request
*/
public RestoreSnapshotRequest source(Map source) {
boolean ignoreUnavailable = IndicesOptions.lenient().ignoreUnavailable();
boolean allowNoIndices = IndicesOptions.lenient().allowNoIndices();
boolean expandWildcardsOpen = IndicesOptions.lenient().expandWildcardsOpen();
boolean expandWildcardsClosed = IndicesOptions.lenient().expandWildcardsClosed();
for (Map.Entry<String, Object> entry : ((Map<String, Object>) source).entrySet()) {
String name = entry.getKey();
if (name.equals("indices")) {
if (entry.getValue() instanceof String) {
indices(Strings.splitStringByCommaToArray((String) entry.getValue()));
} else if (entry.getValue() instanceof ArrayList) {
indices((ArrayList<String>) entry.getValue());
} else {
throw new ElasticsearchIllegalArgumentException("malformed indices section, should be an array of strings");
}
} else if (name.equals("ignore_unavailable") || name.equals("ignoreUnavailable")) {
ignoreUnavailable = nodeBooleanValue(entry.getValue());
} else if (name.equals("allow_no_indices") || name.equals("allowNoIndices")) {
allowNoIndices = nodeBooleanValue(entry.getValue());
} else if (name.equals("expand_wildcards_open") || name.equals("expandWildcardsOpen")) {
expandWildcardsOpen = nodeBooleanValue(entry.getValue());
} else if (name.equals("expand_wildcards_closed") || name.equals("expandWildcardsClosed")) {
expandWildcardsClosed = nodeBooleanValue(entry.getValue());
} else if (name.equals("settings")) {
if (!(entry.getValue() instanceof Map)) {
throw new ElasticsearchIllegalArgumentException("malformed settings section, should indices an inner object");
}
settings((Map<String, Object>) entry.getValue());
} else if (name.equals("include_global_state")) {
includeGlobalState = nodeBooleanValue(entry.getValue());
} else if (name.equals("rename_pattern")) {
if (entry.getValue() instanceof String) {
renamePattern((String) entry.getValue());
} else {
throw new ElasticsearchIllegalArgumentException("malformed rename_pattern");
}
} else if (name.equals("rename_replacement")) {
if (entry.getValue() instanceof String) {
renameReplacement((String) entry.getValue());
} else {
throw new ElasticsearchIllegalArgumentException("malformed rename_replacement");
}
} else {
throw new ElasticsearchIllegalArgumentException("Unknown parameter " + name);
}
}
indicesOptions(IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed));
return this;
}
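// Illustrative restore definition accepted by source(...); the field names follow the parser above:
//   {"indices": "index_1,index_2", "ignore_unavailable": true,
//    "rename_pattern": "index_(.+)", "rename_replacement": "restored_index_$1"}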
/**
* Parses restore definition
* <p/>
* JSON, YAML and properties formats are supported
*
* @param source restore definition
* @return this request
*/
public RestoreSnapshotRequest source(String source) {
if (hasLength(source)) {
try {
return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
} catch (Exception e) {
throw new ElasticsearchIllegalArgumentException("failed to parse repository source [" + source + "]", e);
}
}
return this;
}
/**
* Parses restore definition
* <p/>
* JSON, YAML and properties formats are supported
*
* @param source restore definition
* @return this request
*/
public RestoreSnapshotRequest source(byte[] source) {
return source(source, 0, source.length);
}
/**
* Parses restore definition
* <p/>
* JSON, YAML and properties formats are supported
*
* @param source restore definition
* @param offset offset
* @param length length
* @return this request
*/
public RestoreSnapshotRequest source(byte[] source, int offset, int length) {
if (length > 0) {
try {
return source(XContentFactory.xContent(source, offset, length).createParser(source, offset, length).mapOrderedAndClose());
} catch (IOException e) {
throw new ElasticsearchIllegalArgumentException("failed to parse repository source", e);
}
}
return this;
}
/**
* Parses restore definition
* <p/>
* JSON, YAML and properties formats are supported
*
* @param source restore definition
* @return this request
*/
public RestoreSnapshotRequest source(BytesReference source) {
try {
return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
} catch (IOException e) {
throw new ElasticsearchIllegalArgumentException("failed to parse template source", e);
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
snapshot = in.readString();
repository = in.readString();
indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
renamePattern = in.readOptionalString();
renameReplacement = in.readOptionalString();
waitForCompletion = in.readBoolean();
includeGlobalState = in.readBoolean();
settings = readSettingsFromStream(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(snapshot);
out.writeString(repository);
out.writeStringArray(indices);
indicesOptions.writeIndicesOptions(out);
out.writeOptionalString(renamePattern);
out.writeOptionalString(renameReplacement);
out.writeBoolean(waitForCompletion);
out.writeBoolean(includeGlobalState);
writeSettingsToStream(settings, out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_snapshots_restore_RestoreSnapshotRequest.java
|
5,838 |
public class ContextIndexSearcher extends IndexSearcher {
public static enum Stage {
NA,
MAIN_QUERY
}
/** The wrapped {@link IndexSearcher}. The reason why we sometimes prefer delegating to this searcher instead of <tt>super</tt> is that
* this instance may have more assertions, for example if it comes from MockInternalEngine which wraps the IndexSearcher into an
* AssertingIndexSearcher. */
private final IndexSearcher in;
private final SearchContext searchContext;
private CachedDfSource dfSource;
private List<Collector> queryCollectors;
private Stage currentState = Stage.NA;
private boolean enableMainDocIdSetCollector;
private DocIdSetCollector mainDocIdSetCollector;
public ContextIndexSearcher(SearchContext searchContext, Engine.Searcher searcher) {
super(searcher.reader());
in = searcher.searcher();
this.searchContext = searchContext;
setSimilarity(searcher.searcher().getSimilarity());
}
public void release() {
if (mainDocIdSetCollector != null) {
mainDocIdSetCollector.release();
}
}
public void dfSource(CachedDfSource dfSource) {
this.dfSource = dfSource;
}
/**
* Adds a query level collector that runs at {@link Stage#MAIN_QUERY}. Note, supports
* {@link org.elasticsearch.common.lucene.search.XCollector} allowing for a callback
* when collection is done.
*/
public void addMainQueryCollector(Collector collector) {
if (queryCollectors == null) {
queryCollectors = new ArrayList<Collector>();
}
queryCollectors.add(collector);
}
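// Sketch (collector choice assumed): register an auxiliary collector for the main query phase,
// here a plain Lucene TotalHitCountCollector:
//   searcher.addMainQueryCollector(new TotalHitCountCollector());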
public DocIdSetCollector mainDocIdSetCollector() {
return this.mainDocIdSetCollector;
}
public void enableMainDocIdSetCollector() {
this.enableMainDocIdSetCollector = true;
}
public void inStage(Stage stage) {
this.currentState = stage;
}
public void finishStage(Stage stage) {
assert currentState == stage : "Expected stage " + stage + " but was stage " + currentState;
this.currentState = Stage.NA;
}
@Override
public Query rewrite(Query original) throws IOException {
if (original == searchContext.query() || original == searchContext.parsedQuery().query()) {
// optimize in case its the top level search query and we already rewrote it...
if (searchContext.queryRewritten()) {
return searchContext.query();
}
Query rewriteQuery = in.rewrite(original);
searchContext.updateRewriteQuery(rewriteQuery);
return rewriteQuery;
} else {
return in.rewrite(original);
}
}
@Override
public Weight createNormalizedWeight(Query query) throws IOException {
try {
// if it's the main query and we have dfs data, only then use the dfs source
if (dfSource != null && (query == searchContext.query() || query == searchContext.parsedQuery().query())) {
return dfSource.createNormalizedWeight(query);
}
return in.createNormalizedWeight(query);
} catch (Throwable t) {
searchContext.clearReleasables();
throw new RuntimeException(t);
}
}
@Override
public void search(List<AtomicReaderContext> leaves, Weight weight, Collector collector) throws IOException {
if (searchContext.timeoutInMillis() != -1) {
// TODO: change to use our own counter that uses the scheduler in ThreadPool
collector = new TimeLimitingCollector(collector, TimeLimitingCollector.getGlobalCounter(), searchContext.timeoutInMillis());
}
if (currentState == Stage.MAIN_QUERY) {
if (enableMainDocIdSetCollector) {
// TODO should we create a cache of segment->docIdSets so we won't create one each time?
collector = this.mainDocIdSetCollector = new DocIdSetCollector(searchContext.docSetCache(), collector);
}
if (searchContext.parsedPostFilter() != null) {
// this will only get applied to the actual search collector and not
// to any scoped collectors; it is applied to the main collector only,
// since that is the only place the post filter should take effect
collector = new FilteredCollector(collector, searchContext.parsedPostFilter().filter());
}
if (queryCollectors != null && !queryCollectors.isEmpty()) {
collector = new MultiCollector(collector, queryCollectors.toArray(new Collector[queryCollectors.size()]));
}
// apply the minimum score after multi collector so we filter facets as well
if (searchContext.minimumScore() != null) {
collector = new MinimumScoreCollector(collector, searchContext.minimumScore());
}
}
// we only compute the doc id set once since within a context, we execute the same query always...
try {
if (searchContext.timeoutInMillis() != -1) {
try {
super.search(leaves, weight, collector);
} catch (TimeLimitingCollector.TimeExceededException e) {
searchContext.queryResult().searchTimedOut(true);
}
} else {
super.search(leaves, weight, collector);
}
if (currentState == Stage.MAIN_QUERY) {
if (enableMainDocIdSetCollector) {
enableMainDocIdSetCollector = false;
mainDocIdSetCollector.postCollection();
}
if (queryCollectors != null && !queryCollectors.isEmpty()) {
for (Collector queryCollector : queryCollectors) {
if (queryCollector instanceof XCollector) {
((XCollector) queryCollector).postCollection();
}
}
}
}
} finally {
searchContext.clearReleasables();
}
}
@Override
public Explanation explain(Query query, int doc) throws IOException {
try {
if (searchContext.aliasFilter() == null) {
return super.explain(query, doc);
}
XFilteredQuery filteredQuery = new XFilteredQuery(query, searchContext.aliasFilter());
return super.explain(filteredQuery, doc);
} finally {
searchContext.clearReleasables();
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_internal_ContextIndexSearcher.java
|
102 |
static class Traverser<K,V> {
Node<K,V>[] tab; // current table; updated if resized
Node<K,V> next; // the next entry to use
TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
int index; // index of bin to use next
int baseIndex; // current index of initial table
int baseLimit; // index bound for initial table
final int baseSize; // initial table size
Traverser(Node<K,V>[] tab, int size, int index, int limit) {
this.tab = tab;
this.baseSize = size;
this.baseIndex = this.index = index;
this.baseLimit = limit;
this.next = null;
}
/**
* Advances if possible, returning next valid node, or null if none.
*/
final Node<K,V> advance() {
Node<K,V> e;
if ((e = next) != null)
e = e.next;
for (;;) {
Node<K,V>[] t; int i, n; // must use locals in checks
if (e != null)
return next = e;
if (baseIndex >= baseLimit || (t = tab) == null ||
(n = t.length) <= (i = index) || i < 0)
return next = null;
if ((e = tabAt(t, i)) != null && e.hash < 0) {
if (e instanceof ForwardingNode) {
tab = ((ForwardingNode<K,V>)e).nextTable;
e = null;
pushState(t, i, n);
continue;
}
else if (e instanceof TreeBin)
e = ((TreeBin<K,V>)e).first;
else
e = null;
}
if (stack != null)
recoverState(n);
else if ((index = i + baseSize) >= n)
index = ++baseIndex; // visit upper slots if present
}
}
/**
* Saves traversal state upon encountering a forwarding node.
*/
private void pushState(Node<K,V>[] t, int i, int n) {
TableStack<K,V> s = spare; // reuse if possible
if (s != null)
spare = s.next;
else
s = new TableStack<K,V>();
s.tab = t;
s.length = n;
s.index = i;
s.next = stack;
stack = s;
}
/**
* Possibly pops traversal state.
*
* @param n length of current table
*/
private void recoverState(int n) {
TableStack<K,V> s; int len;
while ((s = stack) != null && (index += (len = s.length)) >= n) {
n = len;
index = s.index;
tab = s.tab;
s.tab = null;
TableStack<K,V> next = s.next;
s.next = spare; // save for reuse
stack = next;
spare = s;
}
if (s == null && (index += baseSize) >= n)
index = ++baseIndex;
}
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
84 |
public interface ClientEngine {
int getClientEndpointCount();
InternalPartitionService getPartitionService();
ClusterService getClusterService();
SerializationService getSerializationService();
EventService getEventService();
TransactionManagerService getTransactionManagerService();
ProxyService getProxyService();
Config getConfig();
ILogger getLogger(Class clazz);
ILogger getLogger(String className);
Object toObject(Data data);
Data toData(Object obj);
Address getMasterAddress();
Address getThisAddress();
MemberImpl getLocalMember();
SecurityContext getSecurityContext();
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_client_ClientEngine.java
|
183 |
public class StaticAssetView implements View {
private static final Log LOG = LogFactory.getLog(StaticAssetView.class);
protected boolean browserAssetCachingEnabled = true;
protected long cacheSeconds = 60 * 60 * 24;
@Override
public String getContentType() {
return null;
}
@Override
public void render(Map<String, ?> model, HttpServletRequest request, HttpServletResponse response) throws Exception {
String cacheFilePath = (String) model.get("cacheFilePath");
BufferedInputStream bis = new BufferedInputStream(new FileInputStream(cacheFilePath));
try {
String mimeType = (String) model.get("mimeType");
response.setContentType(mimeType);
if (!browserAssetCachingEnabled) {
response.setHeader("Cache-Control","no-cache");
response.setHeader("Pragma","no-cache");
response.setDateHeader ("Expires", 0);
} else {
response.setHeader("Cache-Control","public");
response.setHeader("Pragma","cache");
if (!StringUtils.isEmpty(request.getHeader("If-Modified-Since"))) {
long lastModified = request.getDateHeader("If-Modified-Since");
Calendar last = Calendar.getInstance();
last.setTime(new Date(lastModified));
Calendar check = Calendar.getInstance();
check.add(Calendar.SECOND, -2 * new Long(cacheSeconds).intValue());
if (check.compareTo(last) < 0) {
response.setStatus(HttpServletResponse.SC_NOT_MODIFIED);
return;
}
} else {
Calendar check = Calendar.getInstance();
check.add(Calendar.SECOND, -1 * new Long(cacheSeconds).intValue());
response.setDateHeader ("Last-Modified", check.getTimeInMillis());
}
Calendar cal = Calendar.getInstance();
cal.add(Calendar.SECOND, new Long(cacheSeconds).intValue());
response.setDateHeader ("Expires", cal.getTimeInMillis());
}
OutputStream os = response.getOutputStream();
boolean eof = false;
while (!eof) {
int temp = bis.read();
if (temp < 0) {
eof = true;
} else {
os.write(temp);
}
}
os.flush();
} catch (Exception e) {
if (e.getCause() instanceof SocketException) {
if (LOG.isDebugEnabled()) {
LOG.debug("Unable to stream asset", e);
}
} else {
LOG.error("Unable to stream asset", e);
throw e;
}
} finally {
try {
bis.close();
} catch (Throwable e) {
//do nothing
}
}
}
public boolean isBrowserAssetCachingEnabled() {
return browserAssetCachingEnabled;
}
public void setBrowserAssetCachingEnabled(boolean browserAssetCachingEnabled) {
this.browserAssetCachingEnabled = browserAssetCachingEnabled;
}
public long getCacheSeconds() {
return cacheSeconds;
}
public void setCacheSeconds(long cacheSeconds) {
this.cacheSeconds = cacheSeconds;
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_web_file_StaticAssetView.java
|
753 |
public class MultiGetRequest extends ActionRequest<MultiGetRequest> {
/**
* A single get item.
*/
public static class Item implements Streamable {
private String index;
private String type;
private String id;
private String routing;
private String[] fields;
private long version = Versions.MATCH_ANY;
private VersionType versionType = VersionType.INTERNAL;
private FetchSourceContext fetchSourceContext;
Item() {
}
/**
* Constructs a single get item.
*
* @param index The index name
* @param type The type (can be null)
* @param id The id
*/
public Item(String index, @Nullable String type, String id) {
this.index = index;
this.type = type;
this.id = id;
}
public String index() {
return this.index;
}
public Item index(String index) {
this.index = index;
return this;
}
public String type() {
return this.type;
}
public String id() {
return this.id;
}
/**
* The routing associated with this document.
*/
public Item routing(String routing) {
this.routing = routing;
return this;
}
public String routing() {
return this.routing;
}
public Item parent(String parent) {
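// The parent id only serves to derive the routing value when routing was not set explicitly.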
if (routing == null) {
this.routing = parent;
}
return this;
}
public Item fields(String... fields) {
this.fields = fields;
return this;
}
public String[] fields() {
return this.fields;
}
public long version() {
return version;
}
public Item version(long version) {
this.version = version;
return this;
}
public VersionType versionType() {
return versionType;
}
public Item versionType(VersionType versionType) {
this.versionType = versionType;
return this;
}
public FetchSourceContext fetchSourceContext() {
return this.fetchSourceContext;
}
/**
* Allows setting the {@link FetchSourceContext} for this request, controlling if and how _source should be returned.
*/
public Item fetchSourceContext(FetchSourceContext fetchSourceContext) {
this.fetchSourceContext = fetchSourceContext;
return this;
}
public static Item readItem(StreamInput in) throws IOException {
Item item = new Item();
item.readFrom(in);
return item;
}
@Override
public void readFrom(StreamInput in) throws IOException {
index = in.readSharedString();
type = in.readOptionalSharedString();
id = in.readString();
routing = in.readOptionalString();
int size = in.readVInt();
if (size > 0) {
fields = new String[size];
for (int i = 0; i < size; i++) {
fields[i] = in.readString();
}
}
version = in.readVLong();
versionType = VersionType.fromValue(in.readByte());
fetchSourceContext = FetchSourceContext.optionalReadFromStream(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeSharedString(index);
out.writeOptionalSharedString(type);
out.writeString(id);
out.writeOptionalString(routing);
if (fields == null) {
out.writeVInt(0);
} else {
out.writeVInt(fields.length);
for (String field : fields) {
out.writeString(field);
}
}
out.writeVLong(version);
out.writeByte(versionType.getValue());
FetchSourceContext.optionalWriteToStream(fetchSourceContext, out);
}
}
private boolean listenerThreaded = false;
String preference;
Boolean realtime;
boolean refresh;
List<Item> items = new ArrayList<Item>();
public MultiGetRequest add(Item item) {
items.add(item);
return this;
}
public MultiGetRequest add(String index, @Nullable String type, String id) {
items.add(new Item(index, type, id));
return this;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (items.isEmpty()) {
validationException = ValidateActions.addValidationError("no documents to get", validationException);
} else {
for (int i = 0; i < items.size(); i++) {
Item item = items.get(i);
if (item.index() == null) {
validationException = ValidateActions.addValidationError("index is missing for doc " + i, validationException);
}
if (item.id() == null) {
validationException = ValidateActions.addValidationError("id is missing for doc " + i, validationException);
}
}
}
return validationException;
}
/**
* Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
* <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
* a custom value, which guarantees that the same order will be used across different requests.
*/
public MultiGetRequest preference(String preference) {
this.preference = preference;
return this;
}
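// Sketch (value per the Javadoc above): prefer execution on local shards:
//   request.preference("_local");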
public String preference() {
return this.preference;
}
public boolean realtime() {
return this.realtime == null ? true : this.realtime;
}
public MultiGetRequest realtime(Boolean realtime) {
this.realtime = realtime;
return this;
}
public boolean refresh() {
return this.refresh;
}
public MultiGetRequest refresh(boolean refresh) {
this.refresh = refresh;
return this;
}
public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, byte[] data, int from, int length) throws Exception {
return add(defaultIndex, defaultType, defaultFields, defaultFetchSource, new BytesArray(data, from, length), true);
}
public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, BytesReference data) throws Exception {
return add(defaultIndex, defaultType, defaultFields, defaultFetchSource, data, true);
}
public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, BytesReference data, boolean allowExplicitIndex) throws Exception {
return add(defaultIndex, defaultType, defaultFields, defaultFetchSource, null, data, allowExplicitIndex);
}
public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting, BytesReference data, boolean allowExplicitIndex) throws Exception {
XContentParser parser = XContentFactory.xContent(data).createParser(data);
try {
XContentParser.Token token;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if ("docs".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token != XContentParser.Token.START_OBJECT) {
throw new ElasticsearchIllegalArgumentException("docs array element should include an object");
}
String index = defaultIndex;
String type = defaultType;
String id = null;
String routing = defaultRouting;
String parent = null;
List<String> fields = null;
long version = Versions.MATCH_ANY;
VersionType versionType = VersionType.INTERNAL;
FetchSourceContext fetchSourceContext = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if ("_index".equals(currentFieldName)) {
if (!allowExplicitIndex) {
throw new ElasticsearchIllegalArgumentException("explicit index in multi get is not allowed");
}
index = parser.text();
} else if ("_type".equals(currentFieldName)) {
type = parser.text();
} else if ("_id".equals(currentFieldName)) {
id = parser.text();
} else if ("_routing".equals(currentFieldName) || "routing".equals(currentFieldName)) {
routing = parser.text();
} else if ("_parent".equals(currentFieldName) || "parent".equals(currentFieldName)) {
parent = parser.text();
} else if ("fields".equals(currentFieldName)) {
fields = new ArrayList<String>();
fields.add(parser.text());
} else if ("_version".equals(currentFieldName) || "version".equals(currentFieldName)) {
version = parser.longValue();
} else if ("_version_type".equals(currentFieldName) || "_versionType".equals(currentFieldName) || "version_type".equals(currentFieldName) || "versionType".equals(currentFieldName)) {
versionType = VersionType.fromString(parser.text());
} else if ("_source".equals(currentFieldName)) {
if (parser.isBooleanValue()) {
fetchSourceContext = new FetchSourceContext(parser.booleanValue());
} else if (token == XContentParser.Token.VALUE_STRING) {
fetchSourceContext = new FetchSourceContext(new String[]{parser.text()});
} else {
throw new ElasticsearchParseException("illegal type for _source: [" + token + "]");
}
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("fields".equals(currentFieldName)) {
fields = new ArrayList<String>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
fields.add(parser.text());
}
} else if ("_source".equals(currentFieldName)) {
ArrayList<String> includes = new ArrayList<String>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
includes.add(parser.text());
}
fetchSourceContext = new FetchSourceContext(includes.toArray(Strings.EMPTY_ARRAY));
}
} else if (token == XContentParser.Token.START_OBJECT) {
if ("_source".equals(currentFieldName)) {
List<String> currentList = null, includes = null, excludes = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
if ("includes".equals(currentFieldName) || "include".equals(currentFieldName)) {
currentList = includes != null ? includes : (includes = new ArrayList<String>(2));
} else if ("excludes".equals(currentFieldName) || "exclude".equals(currentFieldName)) {
currentList = excludes != null ? excludes : (excludes = new ArrayList<String>(2));
} else {
throw new ElasticsearchParseException("Source definition may not contain " + parser.text());
}
} else if (token == XContentParser.Token.START_ARRAY) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
currentList.add(parser.text());
}
} else if (token.isValue()) {
currentList.add(parser.text());
} else {
throw new ElasticsearchParseException("unexpected token while parsing source settings");
}
}
fetchSourceContext = new FetchSourceContext(
includes == null ? Strings.EMPTY_ARRAY : includes.toArray(new String[includes.size()]),
excludes == null ? Strings.EMPTY_ARRAY : excludes.toArray(new String[excludes.size()]));
}
}
}
String[] aFields;
if (fields != null) {
aFields = fields.toArray(new String[fields.size()]);
} else {
aFields = defaultFields;
}
add(new Item(index, type, id).routing(routing).fields(aFields).parent(parent).version(version).versionType(versionType)
.fetchSourceContext(fetchSourceContext == null ? defaultFetchSource : fetchSourceContext));
}
} else if ("ids".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (!token.isValue()) {
throw new ElasticsearchIllegalArgumentException("ids array element should only contain ids");
}
add(new Item(defaultIndex, defaultType, parser.text()).fields(defaultFields).fetchSourceContext(defaultFetchSource).routing(defaultRouting));
}
}
}
}
} finally {
parser.close();
}
return this;
}
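// Illustrative request body handled by the parser above; field names follow the code:
//   {"docs": [{"_index": "test", "_type": "doc", "_id": "1", "fields": ["field1"]}],
//    "ids": ["2", "3"]}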
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
preference = in.readOptionalString();
refresh = in.readBoolean();
byte realtime = in.readByte();
if (realtime == 0) {
this.realtime = false;
} else if (realtime == 1) {
this.realtime = true;
}
int size = in.readVInt();
items = new ArrayList<Item>(size);
for (int i = 0; i < size; i++) {
items.add(Item.readItem(in));
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeOptionalString(preference);
out.writeBoolean(refresh);
if (realtime == null) {
out.writeByte((byte) -1);
} else if (!realtime) {
out.writeByte((byte) 0);
} else {
out.writeByte((byte) 1);
}
out.writeVInt(items.size());
for (Item item : items) {
item.writeTo(out);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_get_MultiGetRequest.java
|
64 |
soa.visit(new Visitor() {
@Override
public void visit(Tree.SimpleType that) {
super.visit(that);
determineSatisfiedTypesTypeParams(typeDec, that, stTypeParams);
}
@Override
public void visit(Tree.StaticMemberOrTypeExpression that) {
super.visit(that);
determineSatisfiedTypesTypeParams(typeDec, that, stTypeParams);
}
});
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_AddSatisfiesProposal.java
|
67 |
class AssertExistsDeclarationProposal extends CorrectionProposal {
private AssertExistsDeclarationProposal(Declaration dec,
String existsOrNonempty, int offset, TextChange change) {
super("Change to 'assert (" + existsOrNonempty + " " + dec.getName() + ")'",
change, new Region(offset, 0));
}
private static void addSplitDeclarationProposal(IDocument doc,
Tree.AttributeDeclaration decNode, Tree.CompilationUnit cu,
IFile file, Collection<ICompletionProposal> proposals) {
Value dec = decNode.getDeclarationModel();
Tree.SpecifierOrInitializerExpression sie =
decNode.getSpecifierOrInitializerExpression();
if (dec==null || dec.isParameter() || dec.isToplevel() ||
sie==null || sie.getExpression()==null) {
return;
}
ProducedType siet = sie.getExpression().getTypeModel();
String existsOrNonempty;
String desc;
if (isTypeUnknown(siet)) {
return;
}
else if (cu.getUnit().isOptionalType(siet)) {
existsOrNonempty = "exists";
desc = "Assert Exists";
}
else if (cu.getUnit().isPossiblyEmptyType(siet)) {
existsOrNonempty = "nonempty";
desc = "Assert Nonempty";
}
else {
return;
}
Tree.Identifier id = decNode.getIdentifier();
if (id==null || id.getToken()==null) {
return;
}
// int idStartOffset = id.getStartIndex();
int idEndOffset = id.getStopIndex()+1;
int semiOffset = decNode.getStopIndex();
TextChange change = new TextFileChange(desc, file);
change.setEdit(new MultiTextEdit());
Type type = decNode.getType();
Integer typeOffset = type.getStartIndex();
Integer typeLen = type.getStopIndex()-typeOffset+1;
change.addEdit(new ReplaceEdit(typeOffset, typeLen,
"assert (" + existsOrNonempty));
change.addEdit(new InsertEdit(semiOffset, ")"));
proposals.add(new AssertExistsDeclarationProposal(dec,
existsOrNonempty,
idEndOffset + 8 + existsOrNonempty.length() - typeLen,
change));
}
static void addAssertExistsDeclarationProposals(
Collection<ICompletionProposal> proposals, IDocument doc,
IFile file, Tree.CompilationUnit cu, Tree.Declaration decNode) {
if (decNode==null) return;
Declaration dec = decNode.getDeclarationModel();
if (dec!=null) {
if (decNode instanceof Tree.AttributeDeclaration) {
Tree.AttributeDeclaration attDecNode =
(Tree.AttributeDeclaration) decNode;
Tree.SpecifierOrInitializerExpression sie =
attDecNode.getSpecifierOrInitializerExpression();
if (sie!=null || dec.isParameter()) {
addSplitDeclarationProposal(doc,
attDecNode, cu, file, proposals);
}
}
}
}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_AssertExistsDeclarationProposal.java
|
24 |
private SortedSet<Edge> outEdges = new ConcurrentSkipListSet<Edge>(new Comparator<Edge>() {
@Override
public int compare(Edge e1, Edge e2) {
return e1.getEnd().compareTo(e2.getEnd());
}
});
| 0true
|
titan-test_src_main_java_com_thinkaurelius_titan_TestByteBuffer.java
|