Unnamed: 0 (int64, 0-6.45k) | func (string, length 29-253k) | target (class label, 2 classes) | project (string, length 36-167)
---|---|---|---|
440 | public interface CustomizeStoreKCVSManager extends KeyColumnValueStoreManager {
/**
* Opens a database against this {@link KeyColumnValueStoreManager} with the given
* TTL in seconds.
*
* @param name Name of database
* @param ttlInSeconds TTL for the entries in this {@link KeyColumnValueStore}
* @return Database Handle
* @throws com.thinkaurelius.titan.diskstorage.BackendException
* @see KeyColumnValueStoreManager#openDatabase(String)
*
*/
public KeyColumnValueStore openDatabase(String name, int ttlInSeconds) throws BackendException;
} | 0true
| titan-core_src_main_java_com_thinkaurelius_titan_diskstorage_keycolumnvalue_CustomizeStoreKCVSManager.java |
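A minimal caller sketch for the interface above. The store name is illustrative, not one Titan itself uses, and BackendException propagates as declared:

```java
import com.thinkaurelius.titan.diskstorage.BackendException;
import com.thinkaurelius.titan.diskstorage.keycolumnvalue.KeyColumnValueStore;

// Open a store whose entries expire after one hour (TTL is in seconds).
static KeyColumnValueStore openExpiringStore(CustomizeStoreKCVSManager manager) throws BackendException {
    return manager.openDatabase("expiring-edgestore", 3600); // illustrative store name
}
```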
75 | public class OSharedResourceAdaptiveExternal extends OSharedResourceAdaptive implements OSharedResource {
public OSharedResourceAdaptiveExternal(final boolean iConcurrent, final int iTimeout, final boolean ignoreThreadInterruption) {
super(iConcurrent, iTimeout, ignoreThreadInterruption);
}
@Override
public void acquireExclusiveLock() {
super.acquireExclusiveLock();
}
public boolean tryAcquireExclusiveLock() {
return super.tryAcquireExclusiveLock();
}
@Override
public void acquireSharedLock() {
super.acquireSharedLock();
}
public boolean tryAcquireSharedLock() {
return super.tryAcquireSharedLock();
}
@Override
public void releaseExclusiveLock() {
super.releaseExclusiveLock();
}
@Override
public void releaseSharedLock() {
super.releaseSharedLock();
}
} | 0true
| commons_src_main_java_com_orientechnologies_common_concur_resource_OSharedResourceAdaptiveExternal.java |
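The adapter above exposes the classic shared/exclusive lock pattern; a minimal usage sketch pairing each acquire with a release in a finally block, so the lock is freed even when the guarded work throws:

```java
OSharedResourceAdaptiveExternal lock =
    new OSharedResourceAdaptiveExternal(true /* concurrent */, 0 /* timeout */, false);

lock.acquireSharedLock();
try {
    // read-only work against the shared resource
} finally {
    lock.releaseSharedLock();
}
```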
1,011 | public interface OStringSerializer {
public StringBuilder toStream(StringBuilder iOutput, Object iSource);
public Object fromStream(String iSource);
public String getName();
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_serialization_serializer_string_OStringSerializer.java |
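A minimal sketch of an implementation of the interface above; the Integer payload and the registry name are illustrative assumptions:

```java
public class OIntegerStringSerializer implements OStringSerializer {
    public StringBuilder toStream(StringBuilder iOutput, Object iSource) {
        return iOutput.append(iSource); // Integer#toString round-trips through fromStream
    }
    public Object fromStream(String iSource) {
        return Integer.valueOf(iSource);
    }
    public String getName() {
        return "int"; // illustrative name
    }
}
```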
1,015 | @Service("blNullOrderFactory")
public class NullOrderFactoryImpl implements NullOrderFactory {
protected static final Order NULL_ORDER = new NullOrderImpl();
@Override
public Order getNullOrder() {
return NULL_ORDER;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_domain_NullOrderFactoryImpl.java |
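The factory above is a Null Object pattern: callers substitute the shared NULL_ORDER for absent carts instead of handling null. A hedged sketch; the cart lookup call is illustrative:

```java
Order order = cartService.findCartForCustomer(customer); // illustrative lookup
if (order == null) {
    order = nullOrderFactory.getNullOrder(); // safe, shared placeholder
}
```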
1,496 | public class OrderLookupTag extends BodyTagSupport {
private static final long serialVersionUID = 1L;
private Long orderId;
private String orderName;
private String orderVar;
private String totalQuantityVar;
@Override
public int doStartTag() throws JspException {
WebApplicationContext applicationContext = WebApplicationContextUtils.getWebApplicationContext(pageContext.getServletContext());
Customer customer = CustomerState.getCustomer((HttpServletRequest) pageContext.getRequest());
LegacyCartService cartService = (LegacyCartService) applicationContext.getBean("blOrderService");
Order order = null;
if (orderName != null && orderId != null) {
throw new IllegalArgumentException("Only orderName or orderId attribute may be specified on orderLookup tag");
} else if (orderId != null) {
order = cartService.findOrderById(orderId);
} else if (orderName != null) {
order = cartService.findNamedOrderForCustomer(orderName, customer);
} else if (customer != null){
order = cartService.findCartForCustomer(customer);
}
if (orderVar != null) {
pageContext.setAttribute(orderVar, order);
}
if (totalQuantityVar != null) {
int orderItemsCount = 0;
if (order != null && order.getOrderItems() != null) {
for (OrderItem orderItem : order.getOrderItems()) {
orderItemsCount += orderItem.getQuantity();
}
}
pageContext.setAttribute(totalQuantityVar, orderItemsCount);
}
return EVAL_PAGE;
}
public String getOrderVar() {
return orderVar;
}
public void setOrderVar(String orderVar) {
this.orderVar = orderVar;
}
public String getTotalQuantityVar() {
return totalQuantityVar;
}
public void setTotalQuantityVar(String totalQuantityVar) {
this.totalQuantityVar = totalQuantityVar;
}
public String getOrderName() {
return orderName;
}
public void setOrderName(String orderName) {
this.orderName = orderName;
}
public Long getOrderId() {
return orderId;
}
public void setOrderId(Long orderId) {
this.orderId = orderId;
}
} | 0true
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_layout_tags_OrderLookupTag.java |
85 | public enum Contain implements TitanPredicate {
/**
* Whether an element is in a collection
*/
IN {
@Override
public boolean evaluate(Object value, Object condition) {
Preconditions.checkArgument(isValidCondition(condition), "Invalid condition provided: %s", condition);
Collection col = (Collection) condition;
return col.contains(value);
}
@Override
public TitanPredicate negate() {
return NOT_IN;
}
},
/**
* Whether an element is not in a collection
*/
NOT_IN {
@Override
public boolean evaluate(Object value, Object condition) {
Preconditions.checkArgument(isValidCondition(condition), "Invalid condition provided: %s", condition);
Collection col = (Collection) condition;
return !col.contains(value);
}
@Override
public TitanPredicate negate() {
return IN;
}
};
private static final Logger log = LoggerFactory.getLogger(Contain.class);
@Override
public boolean isValidValueType(Class<?> clazz) {
return true;
}
@Override
public boolean isValidCondition(Object condition) {
return condition != null && (condition instanceof Collection) && !((Collection) condition).isEmpty();
}
@Override
public boolean hasNegation() {
return true;
}
@Override
public boolean isQNF() {
return false;
}
} | 0true
| titan-core_src_main_java_com_thinkaurelius_titan_core_attribute_Contain.java |
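Usage sketch for the predicate above. Note that isValidCondition rejects null and empty collections, so evaluate throws IllegalArgumentException for those:

```java
import java.util.Arrays;
import java.util.List;

List<String> allowed = Arrays.asList("a", "b", "c");
boolean in  = Contain.IN.evaluate("b", allowed);      // true
boolean out = Contain.NOT_IN.evaluate("d", allowed);  // true
assert Contain.IN.negate() == Contain.NOT_IN;
// Contain.IN.evaluate("b", Collections.emptyList()) throws IllegalArgumentException
```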
388 | public class ClusterUpdateSettingsResponse extends AcknowledgedResponse {
Settings transientSettings;
Settings persistentSettings;
ClusterUpdateSettingsResponse() {
this.persistentSettings = ImmutableSettings.EMPTY;
this.transientSettings = ImmutableSettings.EMPTY;
}
ClusterUpdateSettingsResponse(boolean acknowledged, Settings transientSettings, Settings persistentSettings) {
super(acknowledged);
this.persistentSettings = persistentSettings;
this.transientSettings = transientSettings;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
transientSettings = ImmutableSettings.readSettingsFromStream(in);
persistentSettings = ImmutableSettings.readSettingsFromStream(in);
readAcknowledged(in);
}
public Settings getTransientSettings() {
return transientSettings;
}
public Settings getPersistentSettings() {
return persistentSettings;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
ImmutableSettings.writeSettingsToStream(transientSettings, out);
ImmutableSettings.writeSettingsToStream(persistentSettings, out);
writeAcknowledged(out);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_cluster_settings_ClusterUpdateSettingsResponse.java |
245 | assertTrueEventually(new AssertTask() {
public void run() throws Exception {
assertTrue(map.containsKey(member.getUuid()));
}
}); | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_executor_ClientExecutorServiceSubmitTest.java |
76 | @SuppressWarnings("serial")
static final class MapReduceKeysToLongTask<K,V>
extends BulkTask<K,V,Long> {
final ObjectToLong<? super K> transformer;
final LongByLongToLong reducer;
final long basis;
long result;
MapReduceKeysToLongTask<K,V> rights, nextRight;
MapReduceKeysToLongTask
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
MapReduceKeysToLongTask<K,V> nextRight,
ObjectToLong<? super K> transformer,
long basis,
LongByLongToLong reducer) {
super(p, b, i, f, t); this.nextRight = nextRight;
this.transformer = transformer;
this.basis = basis; this.reducer = reducer;
}
public final Long getRawResult() { return result; }
public final void compute() {
final ObjectToLong<? super K> transformer;
final LongByLongToLong reducer;
if ((transformer = this.transformer) != null &&
(reducer = this.reducer) != null) {
long r = this.basis;
for (int i = baseIndex, f, h; batch > 0 &&
(h = ((f = baseLimit) + i) >>> 1) > i;) {
addToPendingCount(1);
(rights = new MapReduceKeysToLongTask<K,V>
(this, batch >>>= 1, baseLimit = h, f, tab,
rights, transformer, r, reducer)).fork();
}
for (Node<K,V> p; (p = advance()) != null; )
r = reducer.apply(r, transformer.apply(p.key));
result = r;
CountedCompleter<?> c;
for (c = firstComplete(); c != null; c = c.nextComplete()) {
@SuppressWarnings("unchecked") MapReduceKeysToLongTask<K,V>
t = (MapReduceKeysToLongTask<K,V>)c,
s = t.rights;
while (s != null) {
t.result = reducer.apply(t.result, s.result);
s = t.rights = s.nextRight;
}
}
}
}
} | 0true
| src_main_java_jsr166e_ConcurrentHashMapV8.java |
3,550 | public static class Builder extends AbstractFieldMapper.Builder<Builder, BinaryFieldMapper> {
private Boolean compress = null;
private long compressThreshold = Defaults.COMPRESS_THRESHOLD;
public Builder(String name) {
super(name, new FieldType(Defaults.FIELD_TYPE));
builder = this;
}
public Builder compress(boolean compress) {
this.compress = compress;
return this;
}
public Builder compressThreshold(long compressThreshold) {
this.compressThreshold = compressThreshold;
return this;
}
@Override
public BinaryFieldMapper build(BuilderContext context) {
return new BinaryFieldMapper(buildNames(context), fieldType, compress, compressThreshold, postingsProvider,
docValuesProvider, multiFieldsBuilder.build(this, context), copyTo);
}
} | 0true
| src_main_java_org_elasticsearch_index_mapper_core_BinaryFieldMapper.java |
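Builder usage sketch for the mapper above; the field name is illustrative and the BuilderContext is assumed to come from the surrounding mapping-parse flow:

```java
// Assumes a BuilderContext `context` supplied by the mapping parser.
BinaryFieldMapper mapper = new BinaryFieldMapper.Builder("payload") // illustrative field name
    .compress(true)
    .compressThreshold(1024) // compress only values larger than 1 KB
    .build(context);
```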
3,229 | public static class Doubles extends ScriptDocValues {
private final DoubleValues values;
private final SlicedDoubleList list;
public Doubles(DoubleValues values) {
this.values = values;
this.list = new SlicedDoubleList(values.isMultiValued() ? 10 : 1);
}
public DoubleValues getInternalValues() {
return this.values;
}
@Override
public boolean isEmpty() {
return values.setDocument(docId) == 0;
}
public double getValue() {
int numValues = values.setDocument(docId);
if (numValues == 0) {
return 0d;
}
return values.nextValue();
}
public List<Double> getValues() {
if (!listLoaded) {
int numValues = values.setDocument(docId);
list.offset = 0;
list.grow(numValues);
list.length = numValues;
for (int i = 0; i < numValues; i++) {
list.values[i] = values.nextValue();
}
listLoaded = true;
}
return list;
}
} | 0true
| src_main_java_org_elasticsearch_index_fielddata_ScriptDocValues.java |
270 | public interface OCommandPredicate {
/**
* Evaluates the predicate.
*
* @param iRecord
* Target record
* @param iCurrentResult TODO
* @param iContext
* Context of execution
* @return The result of predicate
*/
public Object evaluate(final ORecord<?> iRecord, ODocument iCurrentResult, final OCommandContext iContext);
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_command_OCommandPredicate.java |
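A minimal sketch of an implementation of the predicate interface above; the field name and expected value are illustrative:

```java
public class OStatusEqualsPredicate implements OCommandPredicate {
    public Object evaluate(final ORecord<?> iRecord, ODocument iCurrentResult, final OCommandContext iContext) {
        // true when the record is a document whose "status" field equals "active" (both illustrative)
        return (iRecord instanceof ODocument) && "active".equals(((ODocument) iRecord).field("status"));
    }
}
```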
79 | @SuppressWarnings("serial")
static final class MapReduceMappingsToIntTask<K,V>
extends BulkTask<K,V,Integer> {
final ObjectByObjectToInt<? super K, ? super V> transformer;
final IntByIntToInt reducer;
final int basis;
int result;
MapReduceMappingsToIntTask<K,V> rights, nextRight;
MapReduceMappingsToIntTask
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
MapReduceMappingsToIntTask<K,V> nextRight,
ObjectByObjectToInt<? super K, ? super V> transformer,
int basis,
IntByIntToInt reducer) {
super(p, b, i, f, t); this.nextRight = nextRight;
this.transformer = transformer;
this.basis = basis; this.reducer = reducer;
}
public final Integer getRawResult() { return result; }
public final void compute() {
final ObjectByObjectToInt<? super K, ? super V> transformer;
final IntByIntToInt reducer;
if ((transformer = this.transformer) != null &&
(reducer = this.reducer) != null) {
int r = this.basis;
for (int i = baseIndex, f, h; batch > 0 &&
(h = ((f = baseLimit) + i) >>> 1) > i;) {
addToPendingCount(1);
(rights = new MapReduceMappingsToIntTask<K,V>
(this, batch >>>= 1, baseLimit = h, f, tab,
rights, transformer, r, reducer)).fork();
}
for (Node<K,V> p; (p = advance()) != null; )
r = reducer.apply(r, transformer.apply(p.key, p.val));
result = r;
CountedCompleter<?> c;
for (c = firstComplete(); c != null; c = c.nextComplete()) {
@SuppressWarnings("unchecked") MapReduceMappingsToIntTask<K,V>
t = (MapReduceMappingsToIntTask<K,V>)c,
s = t.rights;
while (s != null) {
t.result = reducer.apply(t.result, s.result);
s = t.rights = s.nextRight;
}
}
}
}
} | 0true
| src_main_java_jsr166e_ConcurrentHashMapV8.java |
2,082 | public class PartitionWideEntryWithPredicateBackupOperation extends PartitionWideEntryBackupOperation {
Predicate predicate;
public PartitionWideEntryWithPredicateBackupOperation() {
}
public PartitionWideEntryWithPredicateBackupOperation(String name, EntryBackupProcessor entryProcessor, Predicate predicate) {
super(name, entryProcessor);
this.predicate = predicate;
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
predicate = in.readObject();
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeObject(predicate);
}
protected Predicate getPredicate() {
return predicate;
}
@Override
public String toString() {
return "PartitionWideEntryWithPredicateBackupOperation{}";
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_map_operation_PartitionWideEntryWithPredicateBackupOperation.java |
559 | public final class ListenerUtil {
private ListenerUtil() {
}
public static String listen(ClientContext context, ClientRequest request, Object key, EventHandler handler) {
//TODO callback
final Future future;
try {
final ClientInvocationServiceImpl invocationService = getClientInvocationService(context);
if (key == null) {
future = invocationService.invokeOnRandomTarget(request, handler);
} else {
future = invocationService.invokeOnKeyOwner(request, key, handler);
}
String registrationId = context.getSerializationService().toObject(future.get());
invocationService.registerListener(registrationId, request.getCallId());
return registrationId;
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
}
public static boolean stopListening(ClientContext context,
BaseClientRemoveListenerRequest request, String registrationId) {
try {
ClientInvocationServiceImpl invocationService = getClientInvocationService(context);
registrationId = invocationService.deRegisterListener(registrationId);
if (registrationId == null) {
return false;
}
request.setRegistrationId(registrationId);
final Future<Boolean> future = invocationService.invokeOnRandomTarget(request);
return (Boolean) context.getSerializationService().toObject(future.get());
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
}
private static ClientInvocationServiceImpl getClientInvocationService(ClientContext context) {
return (ClientInvocationServiceImpl) context.getInvocationService();
}
} | 0true
| hazelcast-client_src_main_java_com_hazelcast_client_util_ListenerUtil.java |
661 | public class ProductDaoTest extends BaseTest {
@Resource
private ProductDao productDao;
@Resource
private CatalogService catalogService;
private List<Product> savedProducts = new ArrayList<Product>();
private static RelatedProduct getRelatedUpSaleProduct(Product prod, Product prodToRelate, List<RelatedProduct> upSales){
RelatedProduct rp1 = new UpSaleProductImpl();
rp1.setProduct(prod);
rp1.setPromotionMessage("brand new coffee");
rp1.setRelatedProduct(prodToRelate);
upSales.add(rp1);
return rp1;
}
private static RelatedProduct getRelatedCrossProduct(Product prod, Product prodToRelate, List<RelatedProduct> upSales){
RelatedProduct rp1 = new CrossSaleProductImpl();
rp1.setProduct(prod);
rp1.setPromotionMessage("brand new coffee");
rp1.setRelatedProduct(prodToRelate);
upSales.add(rp1);
return rp1;
}
@Test(groups="createProducts", dataProvider="setupProducts", dataProviderClass=ProductDataProvider.class)
@Rollback(false)
@Transactional
public void createProducts(Product product) {
product = catalogService.saveProduct(product);
assert(product.getId() != null);
savedProducts.add(product);
}
@Test(groups="createUpSaleValues", dependsOnGroups="createProducts")
@Rollback(false)
@Transactional
public void createUpSaleValues(){
Product prod1 = savedProducts.get(0);
List<RelatedProduct> upSales = new ArrayList<RelatedProduct>();
getRelatedUpSaleProduct(prod1, savedProducts.get(2), upSales);
getRelatedUpSaleProduct(prod1, savedProducts.get(3), upSales);
getRelatedUpSaleProduct(prod1, savedProducts.get(4), upSales);
prod1.setUpSaleProducts(upSales);
prod1 = catalogService.saveProduct(prod1);
assert(prod1.getId() != null);
Product prod2 = savedProducts.get(1);
List<RelatedProduct> upSales2 = new ArrayList<RelatedProduct>();
getRelatedUpSaleProduct(prod2, savedProducts.get(5), upSales2);
getRelatedUpSaleProduct(prod2, savedProducts.get(6), upSales2);
prod2.setUpSaleProducts(upSales2);
prod2 = catalogService.saveProduct(prod2);
assert(prod2.getId() != null);
}
@Test(groups="testReadProductsWithUpSaleValues", dependsOnGroups="createUpSaleValues")
@Transactional
public void testReadProductsWithUpSaleValues() {
Product result = productDao.readProductById(savedProducts.get(0).getId());
List<RelatedProduct> related = result.getUpSaleProducts();
assert(related != null);
assert(!related.isEmpty());
assert(related.size() == 2 || related.size() == 3);
for(RelatedProduct rp : related){
assert(rp instanceof UpSaleProductImpl);
}
}
@Test(groups="createCrossSaleValues", dependsOnGroups="testReadProductsWithUpSaleValues")
@Rollback(false)
@Transactional
public void createCrossSaleValues(){
Product prod1 = savedProducts.get(0);
List<RelatedProduct> crossSale = new ArrayList<RelatedProduct>();
getRelatedCrossProduct(prod1, savedProducts.get(2), crossSale);
getRelatedCrossProduct(prod1, savedProducts.get(3), crossSale);
getRelatedCrossProduct(prod1, savedProducts.get(4), crossSale);
prod1.setCrossSaleProducts(crossSale);
prod1 = catalogService.saveProduct(prod1);
assert(prod1.getId() != null);
Product prod2 = savedProducts.get(1);
List<RelatedProduct> crossSale2 = new ArrayList<RelatedProduct>();
getRelatedCrossProduct(prod2, savedProducts.get(5), crossSale2);
getRelatedCrossProduct(prod2, savedProducts.get(6), crossSale2);
prod2.setCrossSaleProducts(crossSale2);
prod2 = catalogService.saveProduct(prod2);
assert(prod2.getId() != null);
}
@Test(groups="testReadProductsWithCrossSaleValues", dependsOnGroups="createCrossSaleValues")
@Transactional
public void testReadProductsWithCrossSaleValues() {
Product result = productDao.readProductById(savedProducts.get(1).getId());
List<RelatedProduct> related = result.getCrossSaleProducts();
assert(related != null);
assert(!related.isEmpty());
assert(related.size() == 2 || related.size() == 3);
for(RelatedProduct rp : related){
assert(rp instanceof CrossSaleProductImpl);
}
}
@Test(dataProvider="basicProduct", dataProviderClass=ProductDataProvider.class)
@Transactional
public void testReadProductsById(Product product) {
product = catalogService.saveProduct(product);
Product result = productDao.readProductById(product.getId());
assert product.equals(result);
}
@Test(dataProvider="basicProduct", dataProviderClass=ProductDataProvider.class)
@Transactional
public void testReadProductsByName(Product product) {
String name = product.getName();
product = catalogService.saveProduct(product);
List<Product> result = productDao.readProductsByName(name);
assert result.contains(product);
}
@Test(dataProvider="basicProduct", dataProviderClass=ProductDataProvider.class)
@Transactional
public void testFeaturedProduct(Product product) {
product = catalogService.saveProduct(product);
Long productId = product.getId();
product.setFeaturedProduct(true);
catalogService.saveProduct(product);
Product testProduct = productDao.readProductById(productId);
assert (testProduct.isFeaturedProduct() == true);
}
} | 0true
| integration_src_test_java_org_broadleafcommerce_core_catalog_dao_ProductDaoTest.java |
3,755 | public abstract class AbstractMergePolicyProvider<MP extends MergePolicy> extends AbstractIndexShardComponent implements MergePolicyProvider<MP> {
public static final String INDEX_COMPOUND_FORMAT = "index.compound_format";
protected volatile double noCFSRatio;
protected AbstractMergePolicyProvider(Store store) {
super(store.shardId(), store.indexSettings());
this.noCFSRatio = parseNoCFSRatio(indexSettings.get(INDEX_COMPOUND_FORMAT, Boolean.toString(store.suggestUseCompoundFile())));
}
public static double parseNoCFSRatio(String noCFSRatio) {
noCFSRatio = noCFSRatio.trim();
if (noCFSRatio.equalsIgnoreCase("true")) {
return 1.0d;
} else if (noCFSRatio.equalsIgnoreCase("false")) {
return 0.0;
} else {
try {
double value = Double.parseDouble(noCFSRatio);
if (value < 0.0 || value > 1.0) {
throw new ElasticsearchIllegalArgumentException("NoCFSRatio must be in the interval [0..1] but was: [" + value + "]");
}
return value;
} catch (NumberFormatException ex) {
throw new ElasticsearchIllegalArgumentException("Expected a boolean or a value in the interval [0..1] but was: [" + noCFSRatio + "]", ex);
}
}
}
public static String formatNoCFSRatio(double ratio) {
if (ratio == 1.0) {
return Boolean.TRUE.toString();
} else if (ratio == 0.0) {
return Boolean.FALSE.toString();
} else {
return Double.toString(ratio);
}
}
} | 0true
| src_main_java_org_elasticsearch_index_merge_policy_AbstractMergePolicyProvider.java |
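The static helpers above accept either a boolean string or a ratio in [0..1], and formatNoCFSRatio round-trips the boundary values; a short sketch:

```java
double always = AbstractMergePolicyProvider.parseNoCFSRatio("true");  // 1.0
double ratio  = AbstractMergePolicyProvider.parseNoCFSRatio("0.25");  // 0.25
String never  = AbstractMergePolicyProvider.formatNoCFSRatio(0.0);    // "false"
// parseNoCFSRatio("1.5") throws ElasticsearchIllegalArgumentException
```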
2,796 | public abstract class AbstractIndexAnalyzerProvider<T extends Analyzer> extends AbstractIndexComponent implements AnalyzerProvider<T> {
private final String name;
protected final Version version;
/**
* Constructs a new analyzer component, with the index name and its settings and the analyzer name.
*
* @param index The index name
* @param indexSettings The index settings
* @param name The analyzer name
*/
public AbstractIndexAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, String name, Settings settings) {
super(index, indexSettings);
this.name = name;
this.version = Analysis.parseAnalysisVersion(indexSettings, settings, logger);
}
/**
* Constructs a new analyzer component, with the index name and its settings and the analyzer name.
*
* @param index The index name
* @param indexSettings The index settings
* @param prefixSettings A settings prefix (like "com.mycompany") to simplify extracting the component settings
* @param name The analyzer name
*/
public AbstractIndexAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, String prefixSettings, String name, Settings settings) {
super(index, indexSettings, prefixSettings);
this.name = name;
this.version = Analysis.parseAnalysisVersion(indexSettings, settings, logger);
}
/**
* Returns the injected name of the analyzer.
*/
@Override
public final String name() {
return this.name;
}
@Override
public final AnalyzerScope scope() {
return AnalyzerScope.INDEX;
}
} | 0true
| src_main_java_org_elasticsearch_index_analysis_AbstractIndexAnalyzerProvider.java |
1,790 | imap.addEntryListener(new EntryAdapter<Integer, Integer>() {
@Override
public void entryAdded(EntryEvent<Integer, Integer> event) {
events1.add(event);
}
}, true); | 0true
| hazelcast_src_test_java_com_hazelcast_map_IssuesTest.java |
2,733 | public static class NodesLocalGatewayMetaState extends NodesOperationResponse<NodeLocalGatewayMetaState> {
private FailedNodeException[] failures;
NodesLocalGatewayMetaState() {
}
public NodesLocalGatewayMetaState(ClusterName clusterName, NodeLocalGatewayMetaState[] nodes, FailedNodeException[] failures) {
super(clusterName, nodes);
this.failures = failures;
}
public FailedNodeException[] failures() {
return failures;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
nodes = new NodeLocalGatewayMetaState[in.readVInt()];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = new NodeLocalGatewayMetaState();
nodes[i].readFrom(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(nodes.length);
for (NodeLocalGatewayMetaState response : nodes) {
response.writeTo(out);
}
}
} | 0true
| src_main_java_org_elasticsearch_gateway_local_state_meta_TransportNodesListGatewayMetaState.java |
754 | public class ListAddAllOperation extends CollectionAddAllOperation {
private int index = -1;
public ListAddAllOperation() {
}
public ListAddAllOperation(String name, int index, List<Data> valueList) {
super(name, valueList);
this.index = index;
}
@Override
public int getId() {
return CollectionDataSerializerHook.LIST_ADD_ALL;
}
@Override
public void run() throws Exception {
if (!hasEnoughCapacity(valueList.size())) {
response = false;
return;
}
valueMap = getOrCreateListContainer().addAll(index, valueList);
response = !valueMap.isEmpty();
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeInt(index);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
index = in.readInt();
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_collection_list_ListAddAllOperation.java |
8 | private class OutgoingMessageHolder implements MessageHolder
{
private Deque<Message<? extends MessageType>> outgoingMessages = new ArrayDeque<Message<? extends MessageType>>();
@Override
public synchronized void offer( Message<? extends MessageType> message )
{
outgoingMessages.addFirst( message );
}
public synchronized Message<? extends MessageType> nextOutgoingMessage()
{
return outgoingMessages.pollFirst();
}
} | 1no label
| enterprise_cluster_src_main_java_org_neo4j_cluster_StateMachines.java |
274 | public interface OCommandRequestAsynch {
public OCommandResultListener getResultListener();
public void setResultListener(OCommandResultListener iListener);
public boolean isAsynchronous();
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_command_OCommandRequestAsynch.java |
388 | public class SupportLevelFilter extends Filter {
@Override
public int decide(LoggingEvent event) {
if(SupportLevel.SUPPORT.equals(event.getLevel())) {
return Filter.DENY;
}
return Filter.ACCEPT;
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_logging_SupportLevelFilter.java |
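A minimal attachment sketch for the filter above, assuming the log4j 1.x appender API; the console appender and layout choice are illustrative:

```java
import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.SimpleLayout;

// Deny SUPPORT-level events on this appender; everything else is accepted.
ConsoleAppender appender = new ConsoleAppender(new SimpleLayout());
appender.addFilter(new SupportLevelFilter());
```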
1,819 | class DeferredLookups implements Lookups {
private final InjectorImpl injector;
private final List<Element> lookups = Lists.newArrayList();
public DeferredLookups(InjectorImpl injector) {
this.injector = injector;
}
/**
* Initialize the specified lookups, either immediately or when the injector is created.
*/
public void initialize(Errors errors) {
injector.lookups = injector;
new LookupProcessor(errors).process(injector, lookups);
}
public <T> Provider<T> getProvider(Key<T> key) {
ProviderLookup<T> lookup = new ProviderLookup<T>(key, key);
lookups.add(lookup);
return lookup.getProvider();
}
public <T> MembersInjector<T> getMembersInjector(TypeLiteral<T> type) {
MembersInjectorLookup<T> lookup = new MembersInjectorLookup<T>(type, type);
lookups.add(lookup);
return lookup.getMembersInjector();
}
} | 0true
| src_main_java_org_elasticsearch_common_inject_DeferredLookups.java |
2,947 | public class StemmerTokenFilterFactory extends AbstractTokenFilterFactory {
private String language;
@Inject
public StemmerTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
super(index, indexSettings, name, settings);
this.language = Strings.capitalize(settings.get("language", settings.get("name", "porter")));
}
@Override
public TokenStream create(TokenStream tokenStream) {
if ("arabic".equalsIgnoreCase(language)) {
return new ArabicStemFilter(tokenStream);
} else if ("armenian".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new ArmenianStemmer());
} else if ("basque".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new BasqueStemmer());
} else if ("brazilian".equalsIgnoreCase(language)) {
return new BrazilianStemFilter(tokenStream);
} else if ("bulgarian".equalsIgnoreCase(language)) {
return new BulgarianStemFilter(tokenStream);
} else if ("catalan".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new CatalanStemmer());
} else if ("czech".equalsIgnoreCase(language)) {
return new CzechStemFilter(tokenStream);
} else if ("danish".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new DanishStemmer());
} else if ("dutch".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new DutchStemmer());
} else if ("english".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new EnglishStemmer());
} else if ("finnish".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new FinnishStemmer());
} else if ("french".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new FrenchStemmer());
} else if ("german".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new GermanStemmer());
} else if ("german2".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new German2Stemmer());
} else if ("hungarian".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new HungarianStemmer());
} else if ("italian".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new ItalianStemmer());
} else if ("kp".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new KpStemmer());
} else if ("kstem".equalsIgnoreCase(language)) {
return new KStemFilter(tokenStream);
} else if ("lovins".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new LovinsStemmer());
} else if ("latvian".equalsIgnoreCase(language)) {
return new LatvianStemFilter(tokenStream);
} else if ("norwegian".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new NorwegianStemmer());
} else if ("minimal_norwegian".equalsIgnoreCase(language) || "minimalNorwegian".equals(language)) {
return new NorwegianMinimalStemFilter(tokenStream);
} else if ("porter".equalsIgnoreCase(language)) {
return new PorterStemFilter(tokenStream);
} else if ("porter2".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new PorterStemmer());
} else if ("portuguese".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new PortugueseStemmer());
} else if ("romanian".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new RomanianStemmer());
} else if ("russian".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new RussianStemmer());
} else if ("spanish".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new SpanishStemmer());
} else if ("swedish".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new SwedishStemmer());
} else if ("turkish".equalsIgnoreCase(language)) {
return new SnowballFilter(tokenStream, new TurkishStemmer());
} else if ("minimal_english".equalsIgnoreCase(language) || "minimalEnglish".equalsIgnoreCase(language)) {
return new EnglishMinimalStemFilter(tokenStream);
} else if ("possessive_english".equalsIgnoreCase(language) || "possessiveEnglish".equalsIgnoreCase(language)) {
return new EnglishPossessiveFilter(version, tokenStream);
} else if ("light_finish".equalsIgnoreCase(language) || "lightFinish".equalsIgnoreCase(language)) {
// leaving this for backward compatibility
return new FinnishLightStemFilter(tokenStream);
} else if ("light_finnish".equalsIgnoreCase(language) || "lightFinnish".equalsIgnoreCase(language)) {
return new FinnishLightStemFilter(tokenStream);
} else if ("light_french".equalsIgnoreCase(language) || "lightFrench".equalsIgnoreCase(language)) {
return new FrenchLightStemFilter(tokenStream);
} else if ("minimal_french".equalsIgnoreCase(language) || "minimalFrench".equalsIgnoreCase(language)) {
return new FrenchMinimalStemFilter(tokenStream);
} else if ("light_german".equalsIgnoreCase(language) || "lightGerman".equalsIgnoreCase(language)) {
return new GermanLightStemFilter(tokenStream);
} else if ("minimal_german".equalsIgnoreCase(language) || "minimalGerman".equalsIgnoreCase(language)) {
return new GermanMinimalStemFilter(tokenStream);
} else if ("hindi".equalsIgnoreCase(language)) {
return new HindiStemFilter(tokenStream);
} else if ("light_hungarian".equalsIgnoreCase(language) || "lightHungarian".equalsIgnoreCase(language)) {
return new HungarianLightStemFilter(tokenStream);
} else if ("indonesian".equalsIgnoreCase(language)) {
return new IndonesianStemFilter(tokenStream);
} else if ("light_italian".equalsIgnoreCase(language) || "lightItalian".equalsIgnoreCase(language)) {
return new ItalianLightStemFilter(tokenStream);
} else if ("light_portuguese".equalsIgnoreCase(language) || "lightPortuguese".equalsIgnoreCase(language)) {
return new PortugueseLightStemFilter(tokenStream);
} else if ("minimal_portuguese".equalsIgnoreCase(language) || "minimalPortuguese".equalsIgnoreCase(language)) {
return new PortugueseMinimalStemFilter(tokenStream);
} else if ("portuguese".equalsIgnoreCase(language)) {
return new PortugueseStemFilter(tokenStream);
} else if ("light_russian".equalsIgnoreCase(language) || "lightRussian".equalsIgnoreCase(language)) {
return new RussianLightStemFilter(tokenStream);
} else if ("light_spanish".equalsIgnoreCase(language) || "lightSpanish".equalsIgnoreCase(language)) {
return new SpanishLightStemFilter(tokenStream);
} else if ("light_swedish".equalsIgnoreCase(language) || "lightSwedish".equalsIgnoreCase(language)) {
return new SwedishLightStemFilter(tokenStream);
} else if ("greek".equalsIgnoreCase(language)) {
return new GreekStemFilter(tokenStream);
}
return new SnowballFilter(tokenStream, language);
}
} | 1no label
| src_main_java_org_elasticsearch_index_analysis_StemmerTokenFilterFactory.java |
1,048 | @SuppressWarnings("unchecked")
public abstract class OCommandExecutorSQLResultsetAbstract extends OCommandExecutorSQLAbstract implements Iterator<OIdentifiable>,
Iterable<OIdentifiable> {
protected static final String KEYWORD_FROM_2FIND = " " + KEYWORD_FROM + " ";
protected static final String KEYWORD_LET_2FIND = " " + KEYWORD_LET + " ";
protected OSQLAsynchQuery<ORecordSchemaAware<?>> request;
protected OSQLTarget parsedTarget;
protected OSQLFilter compiledFilter;
protected Map<String, Object> let = null;
protected Iterator<? extends OIdentifiable> target;
protected Iterable<OIdentifiable> tempResult;
protected int resultCount;
protected int skip = 0;
/**
* Compile the filter conditions only the first time.
*/
public OCommandExecutorSQLResultsetAbstract parse(final OCommandRequest iRequest) {
final OCommandRequestText textRequest = (OCommandRequestText) iRequest;
init(textRequest);
if (iRequest instanceof OSQLSynchQuery) {
request = (OSQLSynchQuery<ORecordSchemaAware<?>>) iRequest;
} else if (iRequest instanceof OSQLAsynchQuery)
request = (OSQLAsynchQuery<ORecordSchemaAware<?>>) iRequest;
else {
// BUILD A QUERY OBJECT FROM THE COMMAND REQUEST
request = new OSQLSynchQuery<ORecordSchemaAware<?>>(textRequest.getText());
if (textRequest.getResultListener() != null)
request.setResultListener(textRequest.getResultListener());
}
return this;
}
@Override
public boolean isReplicated() {
return true;
}
@Override
public boolean isIdempotent() {
return true;
}
/**
* Assign the right TARGET if found.
*
* @param iArgs
* Parameters to bind
* @return true if the target has been recognized, otherwise false
*/
protected boolean assignTarget(final Map<Object, Object> iArgs) {
parameters = iArgs;
if (parsedTarget == null)
return true;
if (iArgs != null && iArgs.size() > 0 && compiledFilter != null)
compiledFilter.bindParameters(iArgs);
if (target == null)
if (parsedTarget.getTargetClasses() != null)
searchInClasses();
else if (parsedTarget.getTargetClusters() != null)
searchInClusters();
else if (parsedTarget.getTargetRecords() != null)
target = parsedTarget.getTargetRecords().iterator();
else if (parsedTarget.getTargetVariable() != null) {
final Object var = getContext().getVariable(parsedTarget.getTargetVariable());
if (var == null) {
target = Collections.EMPTY_LIST.iterator();
return true;
} else if (var instanceof OIdentifiable) {
final ArrayList<OIdentifiable> list = new ArrayList<OIdentifiable>();
list.add((OIdentifiable) var);
target = list.iterator();
} else if (var instanceof Iterable<?>)
target = ((Iterable<? extends OIdentifiable>) var).iterator();
} else
return false;
return true;
}
protected Object getResult() {
if (tempResult != null) {
for (Object d : tempResult)
if (d != null) {
if (!(d instanceof OIdentifiable))
// NON-DOCUMENT AS RESULT, COMES FROM EXPAND? CREATE A DOCUMENT AT THE FLY
d = new ODocument().field("value", d);
request.getResultListener().result(d);
}
}
if (request instanceof OSQLSynchQuery)
return ((OSQLSynchQuery<ORecordSchemaAware<?>>) request).getResult();
return null;
}
protected boolean handleResult(final OIdentifiable iRecord, boolean iCloneIt) {
if (iRecord != null) {
resultCount++;
OIdentifiable recordCopy = iRecord instanceof ORecord<?> ? ((ORecord<?>) iRecord).copy() : iRecord.getIdentity().copy();
if (recordCopy != null)
// CALL THE LISTENER NOW
if (request.getResultListener() != null)
request.getResultListener().result(recordCopy);
if (limit > -1 && resultCount >= limit)
// BREAK THE EXECUTION
return false;
}
return true;
}
protected void parseLet() {
let = new LinkedHashMap<String, Object>();
boolean stop = false;
while (!stop) {
// PARSE THE KEY
parserNextWord(false);
final String letName = parserGetLastWord();
parserOptionalKeyword("=");
parserNextWord(false, " =><,\r\n");
// PARSE THE VALUE
String letValueAsString = parserGetLastWord();
final Object letValue;
// TRY TO PARSE AS FUNCTION
final Object func = OSQLHelper.getFunction(parsedTarget, letValueAsString);
if (func != null)
letValue = func;
else if (letValueAsString.startsWith("(")) {
letValue = new OSQLSynchQuery<Object>(letValueAsString.substring(1, letValueAsString.length() - 1));
} else
letValue = letValueAsString;
let.put(letName, letValue);
stop = parserGetLastSeparator() == ' ';
}
}
/**
* Parses the limit keyword if found.
*
* @param w
*
* @return the limit found as integer, or -1 if no limit is found (-1 means no limit)
* @throws OCommandSQLParsingException
* if no valid limit has been found
*/
protected int parseLimit(final String w) throws OCommandSQLParsingException {
if (!w.equals(KEYWORD_LIMIT))
return -1;
parserNextWord(true);
final String word = parserGetLastWord();
try {
limit = Integer.parseInt(word);
} catch (Exception e) {
throwParsingException("Invalid LIMIT value setted to '" + word + "' but it should be a valid integer. Example: LIMIT 10");
}
if (limit == 0)
throwParsingException("Invalid LIMIT value setted to ZERO. Use -1 to ignore the limit or use a positive number. Example: LIMIT 10");
return limit;
}
/**
* Parses the skip keyword if found.
*
* @param w
*
* @return the skip found as integer, or -1 if no skip is found (-1 means no skip)
* @throws OCommandSQLParsingException
* if no valid skip has been found
*/
protected int parseSkip(final String w) throws OCommandSQLParsingException {
if (!w.equals(KEYWORD_SKIP))
return -1;
parserNextWord(true);
final String word = parserGetLastWord();
try {
skip = Integer.parseInt(word);
} catch (Exception e) {
throwParsingException("Invalid SKIP value setted to '" + word
+ "' but it should be a valid positive integer. Example: SKIP 10");
}
if (skip < 0)
throwParsingException("Invalid SKIP value setted to the negative number '" + word
+ "'. Only positive numbers are valid. Example: SKIP 10");
return skip;
}
protected boolean filter(final ORecordInternal<?> iRecord) {
context.setVariable("current", iRecord);
if (iRecord instanceof ORecordSchemaAware<?>) {
// CHECK THE TARGET CLASS
final ORecordSchemaAware<?> recordSchemaAware = (ORecordSchemaAware<?>) iRecord;
Map<OClass, String> targetClasses = parsedTarget.getTargetClasses();
// check only classes that specified in query will go to result set
if ((targetClasses != null) && (!targetClasses.isEmpty())) {
for (OClass targetClass : targetClasses.keySet()) {
if (!targetClass.isSuperClassOf(recordSchemaAware.getSchemaClass()))
return false;
}
context.updateMetric("documentAnalyzedCompatibleClass", +1);
}
}
return evaluateRecord(iRecord);
}
protected boolean evaluateRecord(final ORecord<?> iRecord) {
assignLetClauses(iRecord);
if (compiledFilter == null)
return true;
return (Boolean) compiledFilter.evaluate(iRecord, null, context);
}
protected void assignLetClauses(final ORecord<?> iRecord) {
if (let != null && !let.isEmpty()) {
// BIND CONTEXT VARIABLES
for (Entry<String, Object> entry : let.entrySet()) {
String varName = entry.getKey();
if (varName.startsWith("$"))
varName = varName.substring(1);
final Object letValue = entry.getValue();
Object varValue;
if (letValue instanceof OSQLSynchQuery<?>) {
final OSQLSynchQuery<Object> subQuery = (OSQLSynchQuery<Object>) letValue;
subQuery.reset();
subQuery.resetPagination();
subQuery.getContext().setParent(context);
subQuery.getContext().setVariable("current", iRecord);
varValue = ODatabaseRecordThreadLocal.INSTANCE.get().query(subQuery);
} else if (letValue instanceof OSQLFunctionRuntime) {
final OSQLFunctionRuntime f = (OSQLFunctionRuntime) letValue;
if (f.getFunction().aggregateResults()) {
f.execute(iRecord, null, context);
varValue = f.getFunction().getResult();
} else
varValue = f.execute(iRecord, null, context);
} else
varValue = ODocumentHelper.getFieldValue(iRecord, ((String) letValue).trim(), context);
context.setVariable(varName, varValue);
}
}
}
protected void searchInClasses() {
final OClass cls = parsedTarget.getTargetClasses().keySet().iterator().next();
final ODatabaseRecord database = getDatabase();
database.checkSecurity(ODatabaseSecurityResources.CLASS, ORole.PERMISSION_READ, cls.getName().toLowerCase());
// NO INDEXES: SCAN THE ENTIRE CLUSTER
final ORID[] range = getRange();
target = new ORecordIteratorClass<ORecordInternal<?>>(database, (ODatabaseRecordAbstract) database, cls.getName(), true,
request.isUseCache(), false).setRange(range[0], range[1]);
}
protected void searchInClusters() {
final ODatabaseRecord database = getDatabase();
final Set<Integer> clusterIds = new HashSet<Integer>();
for (String clusterName : parsedTarget.getTargetClusters().keySet()) {
if (clusterName == null || clusterName.length() == 0)
throw new OCommandExecutionException("No cluster or schema class selected in query");
database.checkSecurity(ODatabaseSecurityResources.CLUSTER, ORole.PERMISSION_READ, clusterName.toLowerCase());
if (Character.isDigit(clusterName.charAt(0))) {
// GET THE CLUSTER NUMBER
for (int clusterId : OStringSerializerHelper.splitIntArray(clusterName)) {
if (clusterId == -1)
throw new OCommandExecutionException("Cluster '" + clusterName + "' not found");
clusterIds.add(clusterId);
}
} else {
// GET THE CLUSTER NUMBER BY THE CLASS NAME
final int clusterId = database.getClusterIdByName(clusterName.toLowerCase());
if (clusterId == -1)
throw new OCommandExecutionException("Cluster '" + clusterName + "' not found");
clusterIds.add(clusterId);
}
}
// CREATE CLUSTER AS ARRAY OF INT
final int[] clIds = new int[clusterIds.size()];
int i = 0;
for (int c : clusterIds)
clIds[i++] = c;
final ORID[] range = getRange();
target = new ORecordIteratorClusters<ORecordInternal<?>>(database, database, clIds, request.isUseCache(), false).setRange(
range[0], range[1]);
}
protected void applyLimitAndSkip() {
if (tempResult != null && (limit > 0 || skip > 0)) {
final List<OIdentifiable> newList = new ArrayList<OIdentifiable>();
// APPLY LIMIT
if (tempResult instanceof List<?>) {
final List<OIdentifiable> t = (List<OIdentifiable>) tempResult;
final int start = Math.min(skip, t.size());
final int tot = Math.min(limit + start, t.size());
for (int i = start; i < tot; ++i)
newList.add(t.get(i));
t.clear();
tempResult = newList;
}
}
}
/**
* Optimizes the condition tree.
*
* @return
*/
protected void optimize() {
if (compiledFilter != null)
optimizeBranch(null, compiledFilter.getRootCondition());
}
/**
* Check function arguments and pre calculate it if possible
*
* @param function
* @return optimized function, same function if no change
*/
protected Object optimizeFunction(OSQLFunctionRuntime function) {
// boolean precalculate = true;
// for (int i = 0; i < function.configuredParameters.length; ++i) {
// if (function.configuredParameters[i] instanceof OSQLFilterItemField) {
// precalculate = false;
// } else if (function.configuredParameters[i] instanceof OSQLFunctionRuntime) {
// final Object res = optimizeFunction((OSQLFunctionRuntime) function.configuredParameters[i]);
// function.configuredParameters[i] = res;
// if (res instanceof OSQLFunctionRuntime || res instanceof OSQLFilterItemField) {
// // function might have been optimized but result is still not static
// precalculate = false;
// }
// }
// }
//
// if (precalculate) {
// // all fields are static, we can calculate it only once.
// return function.execute(null, null, null); // we can pass nulls here, they wont be used
// } else {
return function;
// }
}
protected void optimizeBranch(final OSQLFilterCondition iParentCondition, OSQLFilterCondition iCondition) {
if (iCondition == null)
return;
Object left = iCondition.getLeft();
if (left instanceof OSQLFilterCondition) {
// ANALYSE LEFT RECURSIVELY
optimizeBranch(iCondition, (OSQLFilterCondition) left);
} else if (left instanceof OSQLFunctionRuntime) {
left = optimizeFunction((OSQLFunctionRuntime) left);
iCondition.setLeft(left);
}
Object right = iCondition.getRight();
if (right instanceof OSQLFilterCondition) {
// ANALYSE RIGHT RECURSIVELY
optimizeBranch(iCondition, (OSQLFilterCondition) right);
} else if (right instanceof OSQLFunctionRuntime) {
right = optimizeFunction((OSQLFunctionRuntime) right);
iCondition.setRight(right);
}
final OQueryOperator oper = iCondition.getOperator();
Object result = null;
if (left instanceof OSQLFilterItemField && right instanceof OSQLFilterItemField) {
if (((OSQLFilterItemField) left).getRoot().equals(((OSQLFilterItemField) right).getRoot())) {
if (oper instanceof OQueryOperatorEquals)
result = Boolean.TRUE;
else if (oper instanceof OQueryOperatorNotEquals)
result = Boolean.FALSE;
}
}
if (result != null) {
if (iParentCondition != null)
if (iCondition == iParentCondition.getLeft())
// REPLACE LEFT
iCondition.setLeft(result);
else
// REPLACE RIGHT
iCondition.setRight(result);
else {
// REPLACE ROOT CONDITION
if (result instanceof Boolean && ((Boolean) result))
compiledFilter.setRootCondition(null);
}
}
}
protected ORID[] getRange() {
final ORID beginRange;
final ORID endRange;
final OSQLFilterCondition rootCondition = compiledFilter == null ? null : compiledFilter.getRootCondition();
if (compiledFilter == null || rootCondition == null) {
if (request instanceof OSQLSynchQuery)
beginRange = ((OSQLSynchQuery<ORecordSchemaAware<?>>) request).getNextPageRID();
else
beginRange = null;
endRange = null;
} else {
final ORID conditionBeginRange = rootCondition.getBeginRidRange();
final ORID conditionEndRange = rootCondition.getEndRidRange();
final ORID nextPageRid;
if (request instanceof OSQLSynchQuery)
nextPageRid = ((OSQLSynchQuery<ORecordSchemaAware<?>>) request).getNextPageRID();
else
nextPageRid = null;
if (conditionBeginRange != null && nextPageRid != null)
beginRange = conditionBeginRange.compareTo(nextPageRid) > 0 ? conditionBeginRange : nextPageRid;
else if (conditionBeginRange != null)
beginRange = conditionBeginRange;
else
beginRange = nextPageRid;
endRange = conditionEndRange;
}
return new ORID[] { beginRange, endRange };
}
} | 1no label
| core_src_main_java_com_orientechnologies_orient_core_sql_OCommandExecutorSQLResultsetAbstract.java |
108 | static class StaticMemberListener implements MembershipListener, InitialMembershipListener {
final CountDownLatch latch;
StaticMemberListener(CountDownLatch latch) {
this.latch = latch;
}
public void init(InitialMembershipEvent event) {
latch.countDown();
}
public void memberAdded(MembershipEvent membershipEvent) {
}
public void memberRemoved(MembershipEvent membershipEvent) {
}
public void memberAttributeChanged(MemberAttributeEvent memberAttributeEvent) {
}
} | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_ClientIssueTest.java |
9 | {
private Long highest;
@Override
public boolean reached( File file, long version, LogLoader source )
{
// Here we know that the log version exists (checked in AbstractPruneStrategy#prune)
long tx = source.getFirstCommittedTxId( version );
if ( highest == null )
{
highest = source.getLastCommittedTxId();
return false;
}
return highest - tx >= maxTransactionCount;
}
}; | 1no label
| community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_LogPruneStrategies.java |
3,417 | public static final SnapshotLock NO_SNAPSHOT_LOCK = new SnapshotLock() {
@Override
public void release() {
}
}; | 0true
| src_main_java_org_elasticsearch_index_gateway_IndexShardGateway.java |
770 | @Deprecated
public interface SkuAvailability extends Serializable {
/**
* Returns the id of this SkuAvailability
*/
public Long getId();
/**
* Sets the id of this SkuAvailability record
*/
public void setId(Long id);
/**
* Returns the id of this SKU associated with SkuAvailability record
*/
public Long getSkuId();
/**
* Sets the id of this SKU
*/
public void setSkuId(Long id);
/**
* Returns the USPSLocation id of this skuAvailability. SKU availability records may or may not be location specific and
* using null locations are a common implementation model.
*
*/
public Long getLocationId();
/**
* Sets the USPSLocation id of this skuAvailability. SKU availability records may or may not be location specific and
* using null locations are a common implementation model.
*/
public void setLocationId(Long id);
/**
* Returns an implementation specific availability status. This property can return null.
*/
public AvailabilityStatusType getAvailabilityStatus();
/**
* Sets the availability status.
*/
public void setAvailabilityStatus(AvailabilityStatusType status);
/**
* Returns the data the SKU will be available.
* This property may return null which has an implementation specific meaning.
*/
public Date getAvailabilityDate();
/**
* Sets the date the SKU will be available. Setting to null is allowed and has an
* implementation specific meaning.
*/
public void setAvailabilityDate(Date availabilityDate);
/**
* Returns the number of this items that are currently in stock and available for sell.
* Returning null has an implementation specific meaning.
*/
public Integer getQuantityOnHand();
/**
* Sets the quantity on hand. Setting to null is allowed and has an
* implementation specific meaning.
*/
public void setQuantityOnHand(Integer quantityOnHand);
/**
* Returns the reserve quantity. Nulls will be treated the same as 0.
* Implementations may want to manage a reserve quantity at each location so that the
* available quantity for purchases is the quantityOnHand - reserveQuantity.
*/
public Integer getReserveQuantity();
/**
* Sets the reserve quantity.
* Implementations may want to manage a reserve quantity at each location so that the
* available quantity for purchases is the quantityOnHand - reserveQuantity.
*/
public void setReserveQuantity(Integer reserveQuantity);
/**
* Returns the getQuantityOnHand() - getReserveQuantity().
* Preferred implementation is to return null if getQuantityOnHand() is null and to treat
* a null in getReserveQuantity() as ZERO.
*/
public Integer getAvailableQuantity();
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_inventory_domain_SkuAvailability.java |
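A sketch of the getAvailableQuantity() contract documented above: a null quantityOnHand yields null, and a null reserveQuantity is treated as zero:

```java
public Integer getAvailableQuantity() {
    if (getQuantityOnHand() == null) {
        return null; // unknown stock level propagates as null
    }
    int reserve = getReserveQuantity() == null ? 0 : getReserveQuantity();
    return getQuantityOnHand() - reserve;
}
```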
714 | constructors[COLLECTION_GET_ALL] = new ConstructorFunction<Integer, Portable>() {
public Portable createNew(Integer arg) {
return new CollectionGetAllRequest();
}
}; | 0true
| hazelcast_src_main_java_com_hazelcast_collection_CollectionPortableHook.java |
339 | public class NodeReplace extends NodeReplaceInsert {
@Override
protected boolean checkNode(List<Node> usedNodes, Node[] primaryNodes, Node node) {
if (replaceNode(primaryNodes, node, usedNodes)) {
return true;
}
//check if this same node already exists
if (exactNodeExists(primaryNodes, node, usedNodes)) {
return true;
}
return false;
}
protected boolean replaceNode(Node[] primaryNodes, Node testNode, List<Node> usedNodes) {
boolean foundItem = false;
for (int j=0;j<primaryNodes.length;j++){
if (primaryNodes[j].getNodeName().equals(testNode.getNodeName())) {
Node newNode = primaryNodes[j].getOwnerDocument().importNode(testNode.cloneNode(true), true);
primaryNodes[j].getParentNode().replaceChild(newNode, primaryNodes[j]);
usedNodes.add(testNode);
foundItem = true;
}
}
return foundItem;
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_extensibility_context_merge_handlers_NodeReplace.java |
104 | public class OMurmurHash3 {
static class State {
long h1;
long h2;
long k1;
long k2;
long c1;
long c2;
}
static long getblock(byte[] key, int i) {
return (((long) key[i + 0] & 0x00000000000000FFL)) | (((long) key[i + 1] & 0x00000000000000FFL) << 8)
| (((long) key[i + 2] & 0x00000000000000FFL) << 16) | (((long) key[i + 3] & 0x00000000000000FFL) << 24)
| (((long) key[i + 4] & 0x00000000000000FFL) << 32) | (((long) key[i + 5] & 0x00000000000000FFL) << 40)
| (((long) key[i + 6] & 0x00000000000000FFL) << 48) | (((long) key[i + 7] & 0x00000000000000FFL) << 56);
}
static void bmix(State state) {
state.k1 *= state.c1;
state.k1 = (state.k1 << 23) | (state.k1 >>> 64 - 23);
state.k1 *= state.c2;
state.h1 ^= state.k1;
state.h1 += state.h2;
state.h2 = (state.h2 << 41) | (state.h2 >>> 64 - 41);
state.k2 *= state.c2;
state.k2 = (state.k2 << 23) | (state.k2 >>> 64 - 23);
state.k2 *= state.c1;
state.h2 ^= state.k2;
state.h2 += state.h1;
state.h1 = state.h1 * 3 + 0x52dce729;
state.h2 = state.h2 * 3 + 0x38495ab5;
state.c1 = state.c1 * 5 + 0x7b7d159c;
state.c2 = state.c2 * 5 + 0x6bce6396;
}
static long fmix(long k) {
k ^= k >>> 33;
k *= 0xff51afd7ed558ccdL;
k ^= k >>> 33;
k *= 0xc4ceb9fe1a85ec53L;
k ^= k >>> 33;
return k;
}
public static long murmurHash3_x64_64(final byte[] key, final int seed) {
State state = new State();
state.h1 = 0x9368e53c2f6af274L ^ seed;
state.h2 = 0x586dcd208f7cd3fdL ^ seed;
state.c1 = 0x87c37b91114253d5L;
state.c2 = 0x4cf5ad432745937fL;
for (int i = 0; i < key.length / 16; i++) {
state.k1 = getblock(key, i * 2 * 8);
state.k2 = getblock(key, (i * 2 + 1) * 8);
bmix(state);
}
state.k1 = 0;
state.k2 = 0;
int tail = (key.length >>> 4) << 4;
switch (key.length & 15) {
case 15:
state.k2 ^= (long) key[tail + 14] << 48;
case 14:
state.k2 ^= (long) key[tail + 13] << 40;
case 13:
state.k2 ^= (long) key[tail + 12] << 32;
case 12:
state.k2 ^= (long) key[tail + 11] << 24;
case 11:
state.k2 ^= (long) key[tail + 10] << 16;
case 10:
state.k2 ^= (long) key[tail + 9] << 8;
case 9:
state.k2 ^= (long) key[tail + 8];
case 8:
state.k1 ^= (long) key[tail + 7] << 56;
case 7:
state.k1 ^= (long) key[tail + 6] << 48;
case 6:
state.k1 ^= (long) key[tail + 5] << 40;
case 5:
state.k1 ^= (long) key[tail + 4] << 32;
case 4:
state.k1 ^= (long) key[tail + 3] << 24;
case 3:
state.k1 ^= (long) key[tail + 2] << 16;
case 2:
state.k1 ^= (long) key[tail + 1] << 8;
case 1:
state.k1 ^= (long) key[tail + 0];
bmix(state);
}
state.h2 ^= key.length;
state.h1 += state.h2;
state.h2 += state.h1;
state.h1 = fmix(state.h1);
state.h2 = fmix(state.h2);
state.h1 += state.h2;
state.h2 += state.h1;
return state.h1;
}
} | 0true
| commons_src_main_java_com_orientechnologies_common_hash_OMurmurHash3.java |
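Usage sketch for the hash above; the key bytes and seed are illustrative, and equal (key, seed) pairs always yield the same 64-bit hash:

```java
import java.nio.charset.StandardCharsets;

byte[] key = "orientdb".getBytes(StandardCharsets.UTF_8); // illustrative key
long hash = OMurmurHash3.murmurHash3_x64_64(key, 0x9747b28c); // illustrative seed
```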
2,621 | public static interface MembershipListener {
ClusterState onJoin(DiscoveryNode node);
void onLeave(DiscoveryNode node);
} | 0true
| src_main_java_org_elasticsearch_discovery_zen_membership_MembershipAction.java |
342 | public class NodesShutdownAction extends ClusterAction<NodesShutdownRequest, NodesShutdownResponse, NodesShutdownRequestBuilder> {
public static final NodesShutdownAction INSTANCE = new NodesShutdownAction();
public static final String NAME = "cluster/nodes/shutdown";
private NodesShutdownAction() {
super(NAME);
}
@Override
public NodesShutdownResponse newResponse() {
return new NodesShutdownResponse();
}
@Override
public NodesShutdownRequestBuilder newRequestBuilder(ClusterAdminClient client) {
return new NodesShutdownRequestBuilder(client);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_cluster_node_shutdown_NodesShutdownAction.java |
112 | public class ForkJoinPool extends AbstractExecutorService {
/*
* Implementation Overview
*
* This class and its nested classes provide the main
* functionality and control for a set of worker threads:
* Submissions from non-FJ threads enter into submission queues.
* Workers take these tasks and typically split them into subtasks
* that may be stolen by other workers. Preference rules give
* first priority to processing tasks from their own queues (LIFO
* or FIFO, depending on mode), then to randomized FIFO steals of
* tasks in other queues.
*
* WorkQueues
* ==========
*
* Most operations occur within work-stealing queues (in nested
* class WorkQueue). These are special forms of Deques that
* support only three of the four possible end-operations -- push,
* pop, and poll (aka steal), under the further constraints that
* push and pop are called only from the owning thread (or, as
* extended here, under a lock), while poll may be called from
* other threads. (If you are unfamiliar with them, you probably
* want to read Herlihy and Shavit's book "The Art of
* Multiprocessor programming", chapter 16 describing these in
* more detail before proceeding.) The main work-stealing queue
* design is roughly similar to those in the papers "Dynamic
* Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005
* (http://research.sun.com/scalable/pubs/index.html) and
* "Idempotent work stealing" by Michael, Saraswat, and Vechev,
* PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186).
* See also "Correct and Efficient Work-Stealing for Weak Memory
* Models" by Le, Pop, Cohen, and Nardelli, PPoPP 2013
* (http://www.di.ens.fr/~zappa/readings/ppopp13.pdf) for an
* analysis of memory ordering (atomic, volatile etc) issues. The
* main differences ultimately stem from GC requirements that we
* null out taken slots as soon as we can, to maintain as small a
* footprint as possible even in programs generating huge numbers
* of tasks. To accomplish this, we shift the CAS arbitrating pop
* vs poll (steal) from being on the indices ("base" and "top") to
* the slots themselves. So, both a successful pop and poll
* mainly entail a CAS of a slot from non-null to null. Because
* we rely on CASes of references, we do not need tag bits on base
* or top. They are simple ints as used in any circular
* array-based queue (see for example ArrayDeque). Updates to the
* indices must still be ordered in a way that guarantees that top
* == base means the queue is empty, but otherwise may err on the
* side of possibly making the queue appear nonempty when a push,
* pop, or poll have not fully committed. Note that this means
* that the poll operation, considered individually, is not
* wait-free. One thief cannot successfully continue until another
* in-progress one (or, if previously empty, a push) completes.
* However, in the aggregate, we ensure at least probabilistic
* non-blockingness. If an attempted steal fails, a thief always
* chooses a different random victim target to try next. So, in
* order for one thief to progress, it suffices for any
* in-progress poll or new push on any empty queue to
* complete. (This is why we normally use method pollAt and its
* variants that try once at the apparent base index, else
* consider alternative actions, rather than method poll.)
*
* This approach also enables support of a user mode in which local
* task processing is in FIFO, not LIFO order, simply by using
* poll rather than pop. This can be useful in message-passing
* frameworks in which tasks are never joined. However, neither
* mode considers affinities, loads, cache localities, etc, so
* these modes rarely provide the best possible performance on a
* given machine, but they portably provide good throughput by
* averaging over these factors. (Further, even if we did try to
* use such
* information, we do not usually have a basis for exploiting it.
* For example, some sets of tasks profit from cache affinities,
* but others are harmed by cache pollution effects.)
*
* WorkQueues are also used in a similar way for tasks submitted
* to the pool. We cannot mix these tasks in the same queues used
* for work-stealing (this would contaminate lifo/fifo
* processing). Instead, we randomly associate submission queues
* with submitting threads, using a form of hashing. The
* Submitter probe value serves as a hash code for
* choosing existing queues, and may be randomly repositioned upon
* contention with other submitters. In essence, submitters act
* like workers except that they are restricted to executing local
* tasks that they submitted (or in the case of CountedCompleters,
* others with the same root task). However, because most
* shared/external queue operations are more expensive than
* internal, and because, at steady state, external submitters
* will compete for CPU with workers, ForkJoinTask.join and
* related methods disable them from repeatedly helping to process
* tasks if all workers are active. Insertion of tasks in shared
* mode requires a lock (mainly to protect in the case of
* resizing) but we use only a simple spinlock (using bits in
* field qlock), because submitters encountering a busy queue move
* on to try or create other queues -- they block only when
* creating and registering new queues.
*
* Management
* ==========
*
* The main throughput advantages of work-stealing stem from
* decentralized control -- workers mostly take tasks from
* themselves or each other. We cannot negate this in the
* implementation of other management responsibilities. The main
* tactic for avoiding bottlenecks is packing nearly all
* essentially atomic control state into two volatile variables
* that are by far most often read (not written) as status and
* consistency checks.
*
* Field "ctl" contains 64 bits holding all the information needed
* to atomically decide to add, inactivate, enqueue (on an event
* queue), dequeue, and/or re-activate workers. To enable this
* packing, we restrict maximum parallelism to (1<<15)-1 (which is
* far in excess of normal operating range) to allow ids, counts,
* and their negations (used for thresholding) to fit into 16bit
* fields.
*
* Field "plock" is a form of sequence lock with a saturating
* shutdown bit (similarly for per-queue "qlocks"), mainly
* protecting updates to the workQueues array, as well as to
* enable shutdown. When used as a lock, it is normally only very
* briefly held, so is nearly always available after at most a
* brief spin, but we use a monitor-based backup strategy to
* block when needed.
*
* Recording WorkQueues. WorkQueues are recorded in the
* "workQueues" array that is created upon first use and expanded
* if necessary. Updates to the array while recording new workers
* and unrecording terminated ones are protected from each other
* by a lock but the array is otherwise concurrently readable, and
* accessed directly. To simplify index-based operations, the
* array size is always a power of two, and all readers must
* tolerate null slots. Worker queues are at odd indices. Shared
* (submission) queues are at even indices, up to a maximum of 64
* slots, to limit growth even if array needs to expand to add
* more workers. Grouping them together in this way simplifies and
* speeds up task scanning.
*
* All worker thread creation is on-demand, triggered by task
* submissions, replacement of terminated workers, and/or
* compensation for blocked workers. However, all other support
* code is set up to work with other policies. To ensure that we
* do not hold on to worker references that would prevent GC, ALL
* accesses to workQueues are via indices into the workQueues
* array (which is one source of some of the messy code
* constructions here). In essence, the workQueues array serves as
* a weak reference mechanism. Thus for example the wait queue
* field of ctl stores indices, not references. Access to the
* workQueues in associated methods (for example signalWork) must
* both index-check and null-check the IDs. All such accesses
* ignore bad IDs by returning out early from what they are doing,
* since this can only be associated with termination, in which
* case it is OK to give up. All uses of the workQueues array
* also check that it is non-null (even if previously
* non-null). This allows nulling during termination, which is
* currently not necessary, but remains an option for
* resource-revocation-based shutdown schemes. It also helps
* reduce JIT issuance of uncommon-trap code, which tends to
* unnecessarily complicate control flow in some methods.
*
* Event Queuing. Unlike HPC work-stealing frameworks, we cannot
* let workers spin indefinitely scanning for tasks when none can
* be found immediately, and we cannot start/resume workers unless
* there appear to be tasks available. On the other hand, we must
* quickly prod them into action when new tasks are submitted or
* generated. In many usages, ramp-up time to activate workers is
* the main limiting factor in overall performance (this is
* compounded at program start-up by JIT compilation and
* allocation). So we try to streamline this as much as possible.
* We park/unpark workers after placing in an event wait queue
* when they cannot find work. This "queue" is actually a simple
* Treiber stack, headed by the "id" field of ctl, plus a 15bit
* counter value (that reflects the number of times a worker has
* been inactivated) to avoid ABA effects (we need only as many
* version numbers as worker threads). Successors are held in
* field WorkQueue.nextWait. Queuing deals with several intrinsic
* races, mainly that a task-producing thread can miss seeing (and
* signalling) another thread that gave up looking for work but
* has not yet entered the wait queue. We solve this by requiring
* a full sweep of all workers (via repeated calls to method
* scan()) both before and after a newly waiting worker is added
* to the wait queue. Because enqueued workers may actually be
* rescanning rather than waiting, we set and clear the "parker"
* field of WorkQueues to reduce unnecessary calls to unpark.
* (This requires a secondary recheck to avoid missed signals.)
* Note the unusual conventions about Thread.interrupts
* surrounding parking and other blocking: Because interrupts are
* used solely to alert threads to check termination, which is
* checked anyway upon blocking, we clear status (using
* Thread.interrupted) before any call to park, so that park does
* not immediately return due to status being set via some other
* unrelated call to interrupt in user code.
*
* Signalling. We create or wake up workers only when there
* appears to be at least one task they might be able to find and
* execute. When a submission is added or another worker adds a
* task to a queue that has fewer than two tasks, they signal
* waiting workers (or trigger creation of new ones if fewer than
* the given parallelism level -- signalWork). These primary
* signals are buttressed by others whenever other threads remove
* a task from a queue and notice that there are other tasks there
* as well. So in general, pools will be over-signalled. On most
* platforms, signalling (unpark) overhead time is noticeably
* long, and the time between signalling a thread and it actually
* making progress can be very noticeably long, so it is worth
* offloading these delays from critical paths as much as
* possible. Additionally, workers spin-down gradually, by staying
* alive so long as they see the ctl state changing. Similar
* stability-sensing techniques are also used before blocking in
* awaitJoin and helpComplete.
*
* Trimming workers. To release resources after periods of lack of
* use, a worker starting to wait when the pool is quiescent will
* time out and terminate if the pool has remained quiescent for a
* given period -- a short period if there are more threads than
* parallelism, longer as the number of threads decreases. This
* will slowly propagate, eventually terminating all workers after
* periods of non-use.
*
* Shutdown and Termination. A call to shutdownNow atomically sets
* a plock bit and then (non-atomically) sets each worker's
* qlock status, cancels all unprocessed tasks, and wakes up
* all waiting workers. Detecting whether termination should
* commence after a non-abrupt shutdown() call requires more work
* and bookkeeping. We need consensus about quiescence (i.e., that
* there is no more work). The active count provides a primary
* indication but non-abrupt shutdown still requires a rechecking
* scan for any workers that are inactive but not queued.
*
* Joining Tasks
* =============
*
* Any of several actions may be taken when one worker is waiting
* to join a task stolen (or always held) by another. Because we
* are multiplexing many tasks on to a pool of workers, we can't
* just let them block (as in Thread.join). We also cannot just
* reassign the joiner's run-time stack with another and replace
* it later, which would be a form of "continuation", that even if
* possible is not necessarily a good idea since we sometimes need
* both an unblocked task and its continuation to progress.
* Instead we combine two tactics:
*
* Helping: Arranging for the joiner to execute some task that it
* would be running if the steal had not occurred.
*
* Compensating: Unless there are already enough live threads,
* method tryCompensate() may create or re-activate a spare
* thread to compensate for blocked joiners until they unblock.
*
* A third form (implemented in tryRemoveAndExec) amounts to
* helping a hypothetical compensator: If we can readily tell that
* a possible action of a compensator is to steal and execute the
* task being joined, the joining thread can do so directly,
* without the need for a compensation thread (although at the
* expense of larger run-time stacks, but the tradeoff is
* typically worthwhile).
*
* The ManagedBlocker extension API can't use helping so relies
* only on compensation in method awaitBlocker.
*
* The algorithm in tryHelpStealer entails a form of "linear"
* helping: Each worker records (in field currentSteal) the most
* recent task it stole from some other worker. Plus, it records
* (in field currentJoin) the task it is currently actively
* joining. Method tryHelpStealer uses these markers to try to
* find a worker to help (i.e., steal back a task from and execute
* it) that could hasten completion of the actively joined task.
* In essence, the joiner executes a task that would be on its own
* local deque had the to-be-joined task not been stolen. This may
* be seen as a conservative variant of the approach in Wagner &
* Calder "Leapfrogging: a portable technique for implementing
* efficient futures" SIGPLAN Notices, 1993
* (http://portal.acm.org/citation.cfm?id=155354). It differs in
* that: (1) We only maintain dependency links across workers upon
* steals, rather than use per-task bookkeeping. This sometimes
* requires a linear scan of workQueues array to locate stealers,
* but often doesn't because stealers leave hints (that may become
* stale/wrong) of where to locate them. It is only a hint
* because a worker might have had multiple steals and the hint
* records only one of them (usually the most current). Hinting
* isolates cost to when it is needed, rather than adding to
* per-task overhead. (2) It is "shallow", ignoring nesting and
* potentially cyclic mutual steals. (3) It is intentionally
* racy: field currentJoin is updated only while actively joining,
* which means that we miss links in the chain during long-lived
* tasks, GC stalls etc (which is OK since blocking in such cases
* is usually a good idea). (4) We bound the number of attempts
* to find work (see MAX_HELP) and fall back to suspending the
* worker and if necessary replacing it with another.
*
* Helping actions for CountedCompleters are much simpler: Method
* helpComplete can take and execute any task with the same root
* as the task being waited on. However, this still entails some
* traversal of completer chains, so is less efficient than using
* CountedCompleters without explicit joins.
*
* It is impossible to keep exactly the target parallelism number
* of threads running at any given time. Determining the
* existence of conservatively safe helping targets, the
* availability of already-created spares, and the apparent need
* to create new spares are all racy, so we rely on multiple
* retries of each. Compensation in the apparent absence of
* helping opportunities is challenging to control on JVMs, where
* GC and other activities can stall progress of tasks that in
* turn stall out many other dependent tasks, without us being
* able to determine whether they will ever require compensation.
* Even though work-stealing otherwise encounters little
* degradation in the presence of more threads than cores,
* aggressively adding new threads in such cases entails risk of
* unwanted positive feedback control loops in which more threads
* cause more dependent stalls (as well as delayed progress of
* unblocked threads to the point that we know they are available)
* leading to more situations requiring more threads, and so
* on. This aspect of control can be seen as an (analytically
* intractable) game with an opponent that may choose the worst
* (for us) active thread to stall at any time. We take several
* precautions to bound losses (and thus bound gains), mainly in
* methods tryCompensate and awaitJoin.
*
* Common Pool
* ===========
*
* The static common pool always exists after static
* initialization. Since it (or any other created pool) need
* never be used, we minimize initial construction overhead and
* footprint to the setup of about a dozen fields, with no nested
* allocation. Most bootstrapping occurs within method
* fullExternalPush during the first submission to the pool.
*
* When external threads submit to the common pool, they can
* perform subtask processing (see externalHelpJoin and related
* methods). This caller-helps policy makes it sensible to set
* common pool parallelism level to one (or more) less than the
* total number of available cores, or even zero for pure
* caller-runs. We do not need to record whether external
* submissions are to the common pool -- if not, externalHelpJoin
* returns quickly (at the most helping to signal some common pool
* workers). These submitters would otherwise be blocked waiting
* for completion, so the extra effort (with liberally sprinkled
* task status checks) in inapplicable cases amounts to an odd
* form of limited spin-wait before blocking in ForkJoinTask.join.
*
* Style notes
* ===========
*
* There is a lot of representation-level coupling among classes
* ForkJoinPool, ForkJoinWorkerThread, and ForkJoinTask. The
* fields of WorkQueue maintain data structures managed by
* ForkJoinPool, so are directly accessed. There is little point
* trying to reduce this, since any associated future changes in
* representations will need to be accompanied by algorithmic
* changes anyway. Several methods intrinsically sprawl because
* they must accumulate sets of consistent reads of volatiles held
* in local variables. Methods signalWork() and scan() are the
* main bottlenecks, so are especially heavily
* micro-optimized/mangled. There are lots of inline assignments
* (of form "while ((local = field) != 0)") which are usually the
* simplest way to ensure the required read orderings (which are
* sometimes critical). This leads to a "C"-like style of listing
* declarations of these locals at the heads of methods or blocks.
* There are several occurrences of the unusual "do {} while
* (!cas...)" which is the simplest way to force an update of a
* CAS'ed variable. There are also other coding oddities (including
* several unnecessary-looking hoisted null checks) that help
* some methods perform reasonably even when interpreted (not
* compiled).
*
* The order of declarations in this file is:
* (1) Static utility functions
* (2) Nested (static) classes
* (3) Static fields
* (4) Fields, along with constants used when unpacking some of them
* (5) Internal control methods
* (6) Callbacks and other support for ForkJoinTask methods
* (7) Exported methods
* (8) Static block initializing statics in minimally dependent order
*/
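/*
* Expository sketch (added; not part of the original file): a typical
* divide-and-conquer task whose fork() and join() calls exercise the
* push/pop/steal and helping mechanics described above. The 1024
* threshold is an arbitrary illustrative choice.
*
* class Sum extends RecursiveTask<Long> {
* final long[] a; final int lo, hi;
* Sum(long[] a, int lo, int hi) { this.a = a; this.lo = lo; this.hi = hi; }
* protected Long compute() {
* if (hi - lo < 1024) {
* long s = 0;
* for (int i = lo; i < hi; ++i) s += a[i];
* return s;
* }
* int mid = (lo + hi) >>> 1;
* Sum left = new Sum(a, lo, mid);
* left.fork(); // push onto owner's deque; stealable by others
* long right = new Sum(a, mid, hi).compute();
* return right + left.join(); // may help or compensate, per above
* }
* }
*/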
// Static utilities
/**
* If there is a security manager, makes sure caller has
* permission to modify threads.
*/
private static void checkPermission() {
SecurityManager security = System.getSecurityManager();
if (security != null)
security.checkPermission(modifyThreadPermission);
}
// Nested classes
/**
* Factory for creating new {@link ForkJoinWorkerThread}s.
* A {@code ForkJoinWorkerThreadFactory} must be defined and used
* for {@code ForkJoinWorkerThread} subclasses that extend base
* functionality or initialize threads with different contexts.
*/
public static interface ForkJoinWorkerThreadFactory {
/**
* Returns a new worker thread operating in the given pool.
*
* @param pool the pool this thread works in
* @return the new worker thread
* @throws NullPointerException if the pool is null
*/
public ForkJoinWorkerThread newThread(ForkJoinPool pool);
}
/**
* Default ForkJoinWorkerThreadFactory implementation; creates a
* new ForkJoinWorkerThread.
*/
static final class DefaultForkJoinWorkerThreadFactory
implements ForkJoinWorkerThreadFactory {
public final ForkJoinWorkerThread newThread(ForkJoinPool pool) {
return new ForkJoinWorkerThread(pool);
}
}
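/*
* Illustrative sketch (added; not in the original source): a custom
* factory typically delegates construction and adjusts per-thread
* state. The "fjp-worker-" prefix below is an arbitrary example.
*
* static final class NamedForkJoinWorkerThreadFactory
* implements ForkJoinWorkerThreadFactory {
* public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
* ForkJoinWorkerThread t = new ForkJoinWorkerThread(pool);
* t.setName("fjp-worker-" + t.getName()); // decorate default name
* return t;
* }
* }
*/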
/**
* Class for artificial tasks that are used to replace the target
* of local joins if they are removed from an interior queue slot
* in WorkQueue.tryRemoveAndExec. We don't need the proxy to
* actually do anything beyond having a unique identity.
*/
static final class EmptyTask extends ForkJoinTask<Void> {
private static final long serialVersionUID = -7721805057305804111L;
EmptyTask() { status = ForkJoinTask.NORMAL; } // force done
public final Void getRawResult() { return null; }
public final void setRawResult(Void x) {}
public final boolean exec() { return true; }
}
/**
* Queues supporting work-stealing as well as external task
* submission. See above for main rationale and algorithms.
* Implementation relies heavily on "Unsafe" intrinsics
* and selective use of "volatile":
*
* Field "base" is the index (mod array.length) of the least valid
* queue slot, which is always the next position to steal (poll)
* from if nonempty. Reads and writes require volatile orderings
* but not CAS, because updates are only performed after slot
* CASes.
*
* Field "top" is the index (mod array.length) of the next queue
* slot to push to or pop from. It is written only by owner thread
* for push, or under lock for external/shared push, and accessed
* by other threads only after reading (volatile) base. Both top
* and base are allowed to wrap around on overflow, but (top -
* base) (or more commonly -(base - top) to force volatile read of
* base before top) still estimates size. The lock ("qlock") is
* forced to -1 on termination, causing all further lock attempts
* to fail. (Note: we don't need CAS for termination state because
* upon pool shutdown, all shared-queues will stop being used
* anyway.) Nearly all lock bodies are set up so that exceptions
* within lock bodies are "impossible" (modulo JVM errors that
* would cause failure anyway.)
*
* The array slots are read and written using the emulation of
* volatiles/atomics provided by Unsafe. Insertions must in
* general use putOrderedObject as a form of releasing store to
* ensure that all writes to the task object are ordered before
* its publication in the queue. All removals entail a CAS to
* null. The array is always a power of two. To ensure safety of
* Unsafe array operations, all accesses perform explicit null
* checks and implicit bounds checks via power-of-two masking.
*
* In addition to basic queuing support, this class contains
* fields described elsewhere to control execution. It turns out
* to work better memory-layout-wise to include them in this class
* rather than a separate class.
*
* Performance on most platforms is very sensitive to placement of
* instances of both WorkQueues and their arrays -- we absolutely
* do not want multiple WorkQueue instances or multiple queue
* arrays sharing cache lines. (It would be best for queue objects
* and their arrays to share, but there is nothing available to
* help arrange that). The @Contended annotation alerts JVMs to
* try to keep instances apart.
*/
static final class WorkQueue {
/**
* Capacity of work-stealing queue array upon initialization.
* Must be a power of two; at least 4, but should be larger to
* reduce or eliminate cacheline sharing among queues.
* Currently, it is much larger, as a partial workaround for
* the fact that JVMs often place arrays in locations that
* share GC bookkeeping (especially cardmarks) such that
* per-write accesses encounter serious memory contention.
*/
static final int INITIAL_QUEUE_CAPACITY = 1 << 13;
/**
* Maximum size for queue arrays. Must be a power of two less
* than or equal to 1 << (31 - width of array entry) to ensure
* lack of wraparound of index calculations, but defined to a
* value a bit less than this to help users trap runaway
* programs before saturating systems.
*/
static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M
// Heuristic padding to ameliorate unfortunate memory placements
volatile long pad00, pad01, pad02, pad03, pad04, pad05, pad06;
volatile int eventCount; // encoded inactivation count; < 0 if inactive
int nextWait; // encoded record of next event waiter
int nsteals; // number of steals
int hint; // steal index hint
short poolIndex; // index of this queue in pool
final short mode; // 0: lifo, > 0: fifo, < 0: shared
volatile int qlock; // 1: locked, -1: terminate; else 0
volatile int base; // index of next slot for poll
int top; // index of next slot for push
ForkJoinTask<?>[] array; // the elements (initially unallocated)
final ForkJoinPool pool; // the containing pool (may be null)
final ForkJoinWorkerThread owner; // owning thread or null if shared
volatile Thread parker; // == owner during call to park; else null
volatile ForkJoinTask<?> currentJoin; // task being joined in awaitJoin
ForkJoinTask<?> currentSteal; // current non-local task being executed
volatile Object pad10, pad11, pad12, pad13, pad14, pad15, pad16, pad17;
volatile Object pad18, pad19, pad1a, pad1b, pad1c, pad1d;
WorkQueue(ForkJoinPool pool, ForkJoinWorkerThread owner, int mode,
int seed) {
this.pool = pool;
this.owner = owner;
this.mode = (short)mode;
this.hint = seed; // store initial seed for runWorker
// Place indices in the center of array (that is not yet allocated)
base = top = INITIAL_QUEUE_CAPACITY >>> 1;
}
/**
* Returns the approximate number of tasks in the queue.
*/
final int queueSize() {
int n = base - top; // non-owner callers must read base first
return (n >= 0) ? 0 : -n; // ignore transient negative
}
/**
* Provides a more accurate estimate of whether this queue has
* any tasks than does queueSize, by checking whether a
* near-empty queue has at least one unclaimed task.
*/
final boolean isEmpty() {
ForkJoinTask<?>[] a; int m, s;
int n = base - (s = top);
return (n >= 0 ||
(n == -1 &&
((a = array) == null ||
(m = a.length - 1) < 0 ||
U.getObject
(a, (long)((m & (s - 1)) << ASHIFT) + ABASE) == null)));
}
/**
* Pushes a task. Call only by owner in unshared queues. (The
* shared-queue version is embedded in method externalPush.)
*
* @param task the task. Caller must ensure non-null.
* @throws RejectedExecutionException if array cannot be resized
*/
final void push(ForkJoinTask<?> task) {
ForkJoinTask<?>[] a; ForkJoinPool p;
int s = top, n;
if ((a = array) != null) { // ignore if queue removed
int m = a.length - 1;
U.putOrderedObject(a, ((m & s) << ASHIFT) + ABASE, task);
if ((n = (top = s + 1) - base) <= 2)
(p = pool).signalWork(p.workQueues, this);
else if (n >= m)
growArray();
}
}
/**
* Initializes or doubles the capacity of array. Call either
* by owner or with lock held -- it is OK for base, but not
* top, to move while resizings are in progress.
*/
final ForkJoinTask<?>[] growArray() {
ForkJoinTask<?>[] oldA = array;
int size = oldA != null ? oldA.length << 1 : INITIAL_QUEUE_CAPACITY;
if (size > MAXIMUM_QUEUE_CAPACITY)
throw new RejectedExecutionException("Queue capacity exceeded");
int oldMask, t, b;
ForkJoinTask<?>[] a = array = new ForkJoinTask<?>[size];
if (oldA != null && (oldMask = oldA.length - 1) >= 0 &&
(t = top) - (b = base) > 0) {
int mask = size - 1;
do {
ForkJoinTask<?> x;
int oldj = ((b & oldMask) << ASHIFT) + ABASE;
int j = ((b & mask) << ASHIFT) + ABASE;
x = (ForkJoinTask<?>)U.getObjectVolatile(oldA, oldj);
if (x != null &&
U.compareAndSwapObject(oldA, oldj, x, null))
U.putObjectVolatile(a, j, x);
} while (++b != t);
}
return a;
}
/**
* Takes next task, if one exists, in LIFO order. Call only
* by owner in unshared queues.
*/
final ForkJoinTask<?> pop() {
ForkJoinTask<?>[] a; ForkJoinTask<?> t; int m;
if ((a = array) != null && (m = a.length - 1) >= 0) {
for (int s; (s = top - 1) - base >= 0;) {
long j = ((m & s) << ASHIFT) + ABASE;
if ((t = (ForkJoinTask<?>)U.getObject(a, j)) == null)
break;
if (U.compareAndSwapObject(a, j, t, null)) {
top = s;
return t;
}
}
}
return null;
}
/**
* Takes a task in FIFO order if b is base of queue and a task
* can be claimed without contention. Specialized versions
* appear in ForkJoinPool methods scan and tryHelpStealer.
*/
final ForkJoinTask<?> pollAt(int b) {
ForkJoinTask<?> t; ForkJoinTask<?>[] a;
if ((a = array) != null) {
int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) != null &&
base == b && U.compareAndSwapObject(a, j, t, null)) {
U.putOrderedInt(this, QBASE, b + 1);
return t;
}
}
return null;
}
/**
* Takes next task, if one exists, in FIFO order.
*/
final ForkJoinTask<?> poll() {
ForkJoinTask<?>[] a; int b; ForkJoinTask<?> t;
while ((b = base) - top < 0 && (a = array) != null) {
int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
if (t != null) {
if (U.compareAndSwapObject(a, j, t, null)) {
U.putOrderedInt(this, QBASE, b + 1);
return t;
}
}
else if (base == b) {
if (b + 1 == top)
break;
Thread.yield(); // wait for lagging update (very rare)
}
}
return null;
}
/**
* Takes next task, if one exists, in order specified by mode.
*/
final ForkJoinTask<?> nextLocalTask() {
return mode == 0 ? pop() : poll();
}
/**
* Returns next task, if one exists, in order specified by mode.
*/
final ForkJoinTask<?> peek() {
ForkJoinTask<?>[] a = array; int m;
if (a == null || (m = a.length - 1) < 0)
return null;
int i = mode == 0 ? top - 1 : base;
int j = ((i & m) << ASHIFT) + ABASE;
return (ForkJoinTask<?>)U.getObjectVolatile(a, j);
}
/**
* Pops the given task only if it is at the current top.
* (A shared version is available only via FJP.tryExternalUnpush)
*/
final boolean tryUnpush(ForkJoinTask<?> t) {
ForkJoinTask<?>[] a; int s;
if ((a = array) != null && (s = top) != base &&
U.compareAndSwapObject
(a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) {
top = s;
return true;
}
return false;
}
/**
* Removes and cancels all known tasks, ignoring any exceptions.
*/
final void cancelAll() {
ForkJoinTask.cancelIgnoringExceptions(currentJoin);
ForkJoinTask.cancelIgnoringExceptions(currentSteal);
for (ForkJoinTask<?> t; (t = poll()) != null; )
ForkJoinTask.cancelIgnoringExceptions(t);
}
// Specialized execution methods
/**
* Polls and runs tasks until empty.
*/
final void pollAndExecAll() {
for (ForkJoinTask<?> t; (t = poll()) != null;)
t.doExec();
}
/**
* Executes a top-level task and any local tasks remaining
* after execution.
*/
final void runTask(ForkJoinTask<?> task) {
if ((currentSteal = task) != null) {
task.doExec();
ForkJoinTask<?>[] a = array;
int md = mode;
++nsteals;
currentSteal = null;
if (md != 0)
pollAndExecAll();
else if (a != null) {
int s, m = a.length - 1;
while ((s = top - 1) - base >= 0) {
long i = ((m & s) << ASHIFT) + ABASE;
ForkJoinTask<?> t = (ForkJoinTask<?>)U.getObject(a, i);
if (t == null)
break;
if (U.compareAndSwapObject(a, i, t, null)) {
top = s;
t.doExec();
}
}
}
}
}
/**
* If present, removes from queue and executes the given task,
* or any other cancelled task. Returns true on any CAS
* or consistency check failure so caller can retry.
*
* @return false if no progress can be made, else true
*/
final boolean tryRemoveAndExec(ForkJoinTask<?> task) {
boolean stat;
ForkJoinTask<?>[] a; int m, s, b, n;
if (task != null && (a = array) != null && (m = a.length - 1) >= 0 &&
(n = (s = top) - (b = base)) > 0) {
boolean removed = false, empty = true;
stat = true;
for (ForkJoinTask<?> t;;) { // traverse from s to b
long j = ((--s & m) << ASHIFT) + ABASE;
t = (ForkJoinTask<?>)U.getObject(a, j);
if (t == null) // inconsistent length
break;
else if (t == task) {
if (s + 1 == top) { // pop
if (!U.compareAndSwapObject(a, j, task, null))
break;
top = s;
removed = true;
}
else if (base == b) // replace with proxy
removed = U.compareAndSwapObject(a, j, task,
new EmptyTask());
break;
}
else if (t.status >= 0)
empty = false;
else if (s + 1 == top) { // pop and throw away
if (U.compareAndSwapObject(a, j, t, null))
top = s;
break;
}
if (--n == 0) {
if (!empty && base == b)
stat = false;
break;
}
}
if (removed)
task.doExec();
}
else
stat = false;
return stat;
}
/**
* Tries to poll for and execute the given task or any other
* task in its CountedCompleter computation.
*/
final boolean pollAndExecCC(CountedCompleter<?> root) {
ForkJoinTask<?>[] a; int b; Object o; CountedCompleter<?> t, r;
if ((b = base) - top < 0 && (a = array) != null) {
long j = (((a.length - 1) & b) << ASHIFT) + ABASE;
if ((o = U.getObjectVolatile(a, j)) == null)
return true; // retry
if (o instanceof CountedCompleter) {
for (t = (CountedCompleter<?>)o, r = t;;) {
if (r == root) {
if (base == b &&
U.compareAndSwapObject(a, j, t, null)) {
U.putOrderedInt(this, QBASE, b + 1);
t.doExec();
}
return true;
}
else if ((r = r.completer) == null)
break; // not part of root computation
}
}
}
return false;
}
/**
* Tries to pop and execute the given task or any other task
* in its CountedCompleter computation.
*/
final boolean externalPopAndExecCC(CountedCompleter<?> root) {
ForkJoinTask<?>[] a; int s; Object o; CountedCompleter<?> t, r;
if (base - (s = top) < 0 && (a = array) != null) {
long j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE;
if ((o = U.getObject(a, j)) instanceof CountedCompleter) {
for (t = (CountedCompleter<?>)o, r = t;;) {
if (r == root) {
if (U.compareAndSwapInt(this, QLOCK, 0, 1)) {
if (top == s && array == a &&
U.compareAndSwapObject(a, j, t, null)) {
top = s - 1;
qlock = 0;
t.doExec();
}
else
qlock = 0;
}
return true;
}
else if ((r = r.completer) == null)
break;
}
}
}
return false;
}
/**
* Internal (owner-called) version of externalPopAndExecCC; pops
* without taking the queue lock.
*/
final boolean internalPopAndExecCC(CountedCompleter<?> root) {
ForkJoinTask<?>[] a; int s; Object o; CountedCompleter<?> t, r;
if (base - (s = top) < 0 && (a = array) != null) {
long j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE;
if ((o = U.getObject(a, j)) instanceof CountedCompleter) {
for (t = (CountedCompleter<?>)o, r = t;;) {
if (r == root) {
if (U.compareAndSwapObject(a, j, t, null)) {
top = s - 1;
t.doExec();
}
return true;
}
else if ((r = r.completer) == null)
break;
}
}
}
return false;
}
/**
* Returns true if owned and not known to be blocked.
*/
final boolean isApparentlyUnblocked() {
Thread wt; Thread.State s;
return (eventCount >= 0 &&
(wt = owner) != null &&
(s = wt.getState()) != Thread.State.BLOCKED &&
s != Thread.State.WAITING &&
s != Thread.State.TIMED_WAITING);
}
// Unsafe mechanics
private static final sun.misc.Unsafe U;
private static final long QBASE;
private static final long QLOCK;
private static final int ABASE;
private static final int ASHIFT;
static {
try {
U = getUnsafe();
Class<?> k = WorkQueue.class;
Class<?> ak = ForkJoinTask[].class;
QBASE = U.objectFieldOffset
(k.getDeclaredField("base"));
QLOCK = U.objectFieldOffset
(k.getDeclaredField("qlock"));
ABASE = U.arrayBaseOffset(ak);
int scale = U.arrayIndexScale(ak);
if ((scale & (scale - 1)) != 0)
throw new Error("data type scale not a power of two");
ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
} catch (Exception e) {
throw new Error(e);
}
}
}
// static fields (initialized in static initializer below)
/**
* Per-thread submission bookkeeping. Shared across all pools
* to reduce ThreadLocal pollution and because random motion
* to avoid contention in one pool is likely to hold for others.
* Lazily initialized on first submission (but null-checked
* in other contexts to avoid unnecessary initialization).
*/
static final ThreadLocal<Submitter> submitters;
/**
* Creates a new ForkJoinWorkerThread. This factory is used unless
* overridden in ForkJoinPool constructors.
*/
public static final ForkJoinWorkerThreadFactory
defaultForkJoinWorkerThreadFactory;
/**
* Permission required for callers of methods that may start or
* kill threads.
*/
private static final RuntimePermission modifyThreadPermission;
/**
* Common (static) pool. Non-null for public use unless a static
* construction exception, but internal usages null-check on use
* to paranoically avoid potential initialization circularities
* as well as to simplify generated code.
*/
static final ForkJoinPool common;
/**
* Common pool parallelism. To allow simpler use and management
* when common pool threads are disabled, we allow the underlying
* common.parallelism field to be zero, but in that case still report
* parallelism as 1 to reflect resulting caller-runs mechanics.
*/
static final int commonParallelism;
/**
* Sequence number for creating workerNamePrefix.
*/
private static int poolNumberSequence;
/**
* Returns the next sequence number. We don't expect this to
* ever contend, so use simple builtin sync.
*/
private static final synchronized int nextPoolId() {
return ++poolNumberSequence;
}
// static constants
/**
* Initial timeout value (in nanoseconds) for the thread
* triggering quiescence to park waiting for new work. On timeout,
* the thread will instead try to shrink the number of
* workers. The value should be large enough to avoid overly
* aggressive shrinkage during most transient stalls (long GCs
* etc).
*/
private static final long IDLE_TIMEOUT = 2000L * 1000L * 1000L; // 2sec
/**
* Timeout value when there are more threads than parallelism level
*/
private static final long FAST_IDLE_TIMEOUT = 200L * 1000L * 1000L;
/**
* Tolerance for idle timeouts, to cope with timer undershoots
*/
private static final long TIMEOUT_SLOP = 2000000L;
/**
* The maximum stolen->joining link depth allowed in method
* tryHelpStealer. Must be a power of two. Depths for legitimate
* chains are unbounded, but we use a fixed constant to avoid
* (otherwise unchecked) cycles and to bound staleness of
* traversal parameters at the expense of sometimes blocking when
* we could be helping.
*/
private static final int MAX_HELP = 64;
/**
* Increment for seed generators. See class ThreadLocal for
* explanation.
*/
private static final int SEED_INCREMENT = 0x61c88647;
/*
* Bits and masks for control variables
*
* Field ctl is a long packed with:
* AC: Number of active running workers minus target parallelism (16 bits)
* TC: Number of total workers minus target parallelism (16 bits)
* ST: true if pool is terminating (1 bit)
* EC: the wait count of top waiting thread (15 bits)
* ID: poolIndex of top of Treiber stack of waiters (16 bits)
*
* When convenient, we can extract the upper 32 bits of counts and
* the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e =
* (int)ctl. The ec field is never accessed alone, but always
* together with id and st. The offsets of counts by the target
* parallelism and the positionings of fields makes it possible to
* perform the most common checks via sign tests of fields: When
* ac is negative, there are not enough active workers, when tc is
* negative, there are not enough total workers, and when e is
* negative, the pool is terminating. To deal with these possibly
* negative fields, we use casts in and out of "short" and/or
* signed shifts to maintain signedness.
*
* When a thread is queued (inactivated), its eventCount field is
* set negative, which is the only way to tell if a worker is
* prevented from executing tasks, even though it must continue to
* scan for them to avoid queuing races. Note however that
* eventCount updates lag releases so usage requires care.
*
* Field plock is an int packed with:
* SHUTDOWN: true if shutdown is enabled (1 bit)
* SEQ: a sequence lock, with PL_LOCK bit set if locked (30 bits)
* SIGNAL: set when threads may be waiting on the lock (1 bit)
*
* The sequence number enables simple consistency checks:
* Staleness of read-only operations on the workQueues array can
* be checked by comparing plock before vs after the reads.
*/
// bit positions/shifts for fields
private static final int AC_SHIFT = 48;
private static final int TC_SHIFT = 32;
private static final int ST_SHIFT = 31;
private static final int EC_SHIFT = 16;
// bounds
private static final int SMASK = 0xffff; // short bits
private static final int MAX_CAP = 0x7fff; // max #workers - 1
private static final int EVENMASK = 0xfffe; // even short bits
private static final int SQMASK = 0x007e; // max 64 (even) slots
private static final int SHORT_SIGN = 1 << 15;
private static final int INT_SIGN = 1 << 31;
// masks
private static final long STOP_BIT = 0x0001L << ST_SHIFT;
private static final long AC_MASK = ((long)SMASK) << AC_SHIFT;
private static final long TC_MASK = ((long)SMASK) << TC_SHIFT;
// units for incrementing and decrementing
private static final long TC_UNIT = 1L << TC_SHIFT;
private static final long AC_UNIT = 1L << AC_SHIFT;
// masks and units for dealing with u = (int)(ctl >>> 32)
private static final int UAC_SHIFT = AC_SHIFT - 32;
private static final int UTC_SHIFT = TC_SHIFT - 32;
private static final int UAC_MASK = SMASK << UAC_SHIFT;
private static final int UTC_MASK = SMASK << UTC_SHIFT;
private static final int UAC_UNIT = 1 << UAC_SHIFT;
private static final int UTC_UNIT = 1 << UTC_SHIFT;
// masks and units for dealing with e = (int)ctl
private static final int E_MASK = 0x7fffffff; // no STOP_BIT
private static final int E_SEQ = 1 << EC_SHIFT;
// plock bits
private static final int SHUTDOWN = 1 << 31;
private static final int PL_LOCK = 2;
private static final int PL_SIGNAL = 1;
private static final int PL_SPINS = 1 << 8;
// access mode for WorkQueue
static final int LIFO_QUEUE = 0;
static final int FIFO_QUEUE = 1;
static final int SHARED_QUEUE = -1;
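/*
* Worked decoding example (added for exposition; not original code).
* Given a ctl snapshot c, the packed fields are recovered as:
*
* int ac = (int)(c >> AC_SHIFT); // active workers minus parallelism
* int tc = (short)(c >>> TC_SHIFT); // total workers minus parallelism
* int e = (int)c; // wait-queue state; < 0 => terminating
*
* A pool with all target workers created and active has ac == 0 and
* tc == 0, so the negative-sign tests described above fire exactly
* when there is a deficit of active or total workers.
*/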
// Heuristic padding to ameliorate unfortunate memory placements
volatile long pad00, pad01, pad02, pad03, pad04, pad05, pad06;
// Instance fields
volatile long stealCount; // collects worker counts
volatile long ctl; // main pool control
volatile int plock; // shutdown status and seqLock
volatile int indexSeed; // worker/submitter index seed
final short parallelism; // parallelism level
final short mode; // LIFO/FIFO
WorkQueue[] workQueues; // main registry
final ForkJoinWorkerThreadFactory factory;
final UncaughtExceptionHandler ueh; // per-worker UEH
final String workerNamePrefix; // to create worker name string
volatile Object pad10, pad11, pad12, pad13, pad14, pad15, pad16, pad17;
volatile Object pad18, pad19, pad1a, pad1b;
/**
* Acquires the plock lock to protect worker array and related
* updates. This method is called only if an initial CAS on plock
* fails. This acts as a spinlock for normal cases, but falls back
* to builtin monitor to block when (rarely) needed. This would be
* a terrible idea for a highly contended lock, but works fine as
* a more conservative alternative to a pure spinlock.
*/
private int acquirePlock() {
int spins = PL_SPINS, ps, nps;
for (;;) {
if (((ps = plock) & PL_LOCK) == 0 &&
U.compareAndSwapInt(this, PLOCK, ps, nps = ps + PL_LOCK))
return nps;
else if (spins >= 0) {
if (ThreadLocalRandom.current().nextInt() >= 0)
--spins;
}
else if (U.compareAndSwapInt(this, PLOCK, ps, ps | PL_SIGNAL)) {
synchronized (this) {
if ((plock & PL_SIGNAL) != 0) {
try {
wait();
} catch (InterruptedException ie) {
try {
Thread.currentThread().interrupt();
} catch (SecurityException ignore) {
}
}
}
else
notifyAll();
}
}
}
}
/**
* Unlocks and signals any thread waiting for plock. Called only
* when CAS of seq value for unlock fails.
*/
private void releasePlock(int ps) {
plock = ps;
synchronized (this) { notifyAll(); }
}
/**
* Tries to create and start one worker if fewer than target
* parallelism level exist. Adjusts counts etc on failure.
*/
private void tryAddWorker() {
long c; int u, e;
while ((u = (int)((c = ctl) >>> 32)) < 0 &&
(u & SHORT_SIGN) != 0 && (e = (int)c) >= 0) {
long nc = ((long)(((u + UTC_UNIT) & UTC_MASK) |
((u + UAC_UNIT) & UAC_MASK)) << 32) | (long)e;
if (U.compareAndSwapLong(this, CTL, c, nc)) {
ForkJoinWorkerThreadFactory fac;
Throwable ex = null;
ForkJoinWorkerThread wt = null;
try {
if ((fac = factory) != null &&
(wt = fac.newThread(this)) != null) {
wt.start();
break;
}
} catch (Throwable rex) {
ex = rex;
}
deregisterWorker(wt, ex);
break;
}
}
}
// Registering and deregistering workers
/**
* Callback from ForkJoinWorkerThread to establish and record its
* WorkQueue. To avoid scanning bias due to packing entries in
* front of the workQueues array, we treat the array as a simple
* power-of-two hash table using per-thread seed as hash,
* expanding as needed.
*
* @param wt the worker thread
* @return the worker's queue
*/
final WorkQueue registerWorker(ForkJoinWorkerThread wt) {
UncaughtExceptionHandler handler; WorkQueue[] ws; int s, ps;
wt.setDaemon(true);
if ((handler = ueh) != null)
wt.setUncaughtExceptionHandler(handler);
do {} while (!U.compareAndSwapInt(this, INDEXSEED, s = indexSeed,
s += SEED_INCREMENT) ||
s == 0); // skip 0
WorkQueue w = new WorkQueue(this, wt, mode, s);
if (((ps = plock) & PL_LOCK) != 0 ||
!U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
ps = acquirePlock();
int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
try {
if ((ws = workQueues) != null) { // skip if shutting down
int n = ws.length, m = n - 1;
int r = (s << 1) | 1; // use odd-numbered indices
if (ws[r &= m] != null) { // collision
int probes = 0; // step by approx half size
int step = (n <= 4) ? 2 : ((n >>> 1) & EVENMASK) + 2;
while (ws[r = (r + step) & m] != null) {
if (++probes >= n) {
workQueues = ws = Arrays.copyOf(ws, n <<= 1);
m = n - 1;
probes = 0;
}
}
}
w.poolIndex = (short)r;
w.eventCount = r; // volatile write orders
ws[r] = w;
}
} finally {
if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
releasePlock(nps);
}
wt.setName(workerNamePrefix.concat(Integer.toString(w.poolIndex >>> 1)));
return w;
}
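// Expository note (added): r = (s << 1) | 1 above forces an odd initial
// index, and the probe step ((n >>> 1) & EVENMASK) + 2 is always even,
// so every probe lands on another odd slot, preserving the
// odd-worker/even-submitter layout described in the class comments.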
/**
* Final callback from terminating worker, as well as upon failure
* to construct or start a worker. Removes record of worker from
* array, and adjusts counts. If pool is shutting down, tries to
* complete termination.
*
* @param wt the worker thread, or null if construction failed
* @param ex the exception causing failure, or null if none
*/
final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) {
WorkQueue w = null;
if (wt != null && (w = wt.workQueue) != null) {
int ps; long sc;
w.qlock = -1; // ensure set
do {} while (!U.compareAndSwapLong(this, STEALCOUNT,
sc = stealCount,
sc + w.nsteals));
if (((ps = plock) & PL_LOCK) != 0 ||
!U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
ps = acquirePlock();
int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
try {
int idx = w.poolIndex;
WorkQueue[] ws = workQueues;
if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w)
ws[idx] = null;
} finally {
if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
releasePlock(nps);
}
}
long c; // adjust ctl counts
do {} while (!U.compareAndSwapLong
(this, CTL, c = ctl, (((c - AC_UNIT) & AC_MASK) |
((c - TC_UNIT) & TC_MASK) |
(c & ~(AC_MASK|TC_MASK)))));
if (!tryTerminate(false, false) && w != null && w.array != null) {
w.cancelAll(); // cancel remaining tasks
WorkQueue[] ws; WorkQueue v; Thread p; int u, i, e;
while ((u = (int)((c = ctl) >>> 32)) < 0 && (e = (int)c) >= 0) {
if (e > 0) { // activate or create replacement
if ((ws = workQueues) == null ||
(i = e & SMASK) >= ws.length ||
(v = ws[i]) == null)
break;
long nc = (((long)(v.nextWait & E_MASK)) |
((long)(u + UAC_UNIT) << 32));
if (v.eventCount != (e | INT_SIGN))
break;
if (U.compareAndSwapLong(this, CTL, c, nc)) {
v.eventCount = (e + E_SEQ) & E_MASK;
if ((p = v.parker) != null)
U.unpark(p);
break;
}
}
else {
if ((short)u < 0)
tryAddWorker();
break;
}
}
}
if (ex == null) // help clean refs on way out
ForkJoinTask.helpExpungeStaleExceptions();
else // rethrow
ForkJoinTask.rethrow(ex);
}
// Submissions
/**
* Per-thread records for threads that submit to pools. Currently
* holds only pseudo-random seed / index that is used to choose
* submission queues in method externalPush. In the future, this may
* also incorporate a means to implement different task rejection
* and resubmission policies.
*
* Seeds for submitters and workers/workQueues work in basically
* the same way but are initialized and updated using slightly
* different mechanics. Both are initialized using the same
* approach as in class ThreadLocal, where successive values are
* unlikely to collide with previous values. Seeds are then
* randomly modified upon collisions using xorshifts, which
* requires a non-zero seed.
*/
static final class Submitter {
int seed;
Submitter(int s) { seed = s; }
}
/**
* Unless shutting down, adds the given task to a submission queue
* at submitter's current queue index (modulo submission
* range). Only the most common path is directly handled in this
* method. All others are relayed to fullExternalPush.
*
* @param task the task. Caller must ensure non-null.
*/
final void externalPush(ForkJoinTask<?> task) {
Submitter z = submitters.get();
WorkQueue q; int r, m, s, n, am; ForkJoinTask<?>[] a;
int ps = plock;
WorkQueue[] ws = workQueues;
if (z != null && ps > 0 && ws != null && (m = (ws.length - 1)) >= 0 &&
(q = ws[m & (r = z.seed) & SQMASK]) != null && r != 0 &&
U.compareAndSwapInt(q, QLOCK, 0, 1)) { // lock
if ((a = q.array) != null &&
(am = a.length - 1) > (n = (s = q.top) - q.base)) {
int j = ((am & s) << ASHIFT) + ABASE;
U.putOrderedObject(a, j, task);
q.top = s + 1; // push on to deque
q.qlock = 0;
if (n <= 1)
signalWork(ws, q);
return;
}
q.qlock = 0;
}
fullExternalPush(task);
}
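// Worked example (added for exposition): with ws.length == 16 (m == 15)
// and a submitter seed r == 0xa5, the chosen slot is
// m & r & SQMASK == 15 & 0xa5 & 0x7e == 4. SQMASK always clears the low
// bit, so external submissions land on even indices, disjoint from the
// odd worker indices assigned in registerWorker.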
/**
* Full version of externalPush. This method is called, among
* other times, upon the first submission of the first task to the
* pool, so must perform secondary initialization. It also
* detects first submission by an external thread by looking up
* its ThreadLocal, and creates a new shared queue if the one at
* its index is empty or contended. The plock lock body must be
* exception-free (so no try/finally) so we optimistically
* allocate new queues outside the lock and throw them away if
* (very rarely) not needed.
*
* Secondary initialization occurs when plock is zero, to create
* workQueue array and set plock to a valid value. This lock body
* must also be exception-free. Because the plock seq value can
* eventually wrap around zero, this method harmlessly fails to
* reinitialize if workQueues exists, while still advancing plock.
*/
private void fullExternalPush(ForkJoinTask<?> task) {
int r = 0; // random index seed
for (Submitter z = submitters.get();;) {
WorkQueue[] ws; WorkQueue q; int ps, m, k;
if (z == null) {
if (U.compareAndSwapInt(this, INDEXSEED, r = indexSeed,
r += SEED_INCREMENT) && r != 0)
submitters.set(z = new Submitter(r));
}
else if (r == 0) { // move to a different index
r = z.seed;
r ^= r << 13; // same xorshift as WorkQueues
r ^= r >>> 17;
z.seed = r ^= (r << 5);
}
if ((ps = plock) < 0)
throw new RejectedExecutionException();
else if (ps == 0 || (ws = workQueues) == null ||
(m = ws.length - 1) < 0) { // initialize workQueues
int p = parallelism; // find power of two table size
int n = (p > 1) ? p - 1 : 1; // ensure at least 2 slots
n |= n >>> 1; n |= n >>> 2; n |= n >>> 4;
n |= n >>> 8; n |= n >>> 16; n = (n + 1) << 1;
WorkQueue[] nws = ((ws = workQueues) == null || ws.length == 0 ?
new WorkQueue[n] : null);
if (((ps = plock) & PL_LOCK) != 0 ||
!U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
ps = acquirePlock();
if (((ws = workQueues) == null || ws.length == 0) && nws != null)
workQueues = nws;
int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
releasePlock(nps);
}
else if ((q = ws[k = r & m & SQMASK]) != null) {
if (q.qlock == 0 && U.compareAndSwapInt(q, QLOCK, 0, 1)) {
ForkJoinTask<?>[] a = q.array;
int s = q.top;
boolean submitted = false;
try { // locked version of push
if ((a != null && a.length > s + 1 - q.base) ||
(a = q.growArray()) != null) { // must presize
int j = (((a.length - 1) & s) << ASHIFT) + ABASE;
U.putOrderedObject(a, j, task);
q.top = s + 1;
submitted = true;
}
} finally {
q.qlock = 0; // unlock
}
if (submitted) {
signalWork(ws, q);
return;
}
}
r = 0; // move on failure
}
else if (((ps = plock) & PL_LOCK) == 0) { // create new queue
q = new WorkQueue(this, null, SHARED_QUEUE, r);
q.poolIndex = (short)k;
if (((ps = plock) & PL_LOCK) != 0 ||
!U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
ps = acquirePlock();
if ((ws = workQueues) != null && k < ws.length && ws[k] == null)
ws[k] = q;
int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
releasePlock(nps);
}
else
r = 0;
}
}
// Maintaining ctl counts
/**
* Increments active count; mainly called upon return from blocking.
*/
final void incrementActiveCount() {
long c;
do {} while (!U.compareAndSwapLong
(this, CTL, c = ctl, ((c & ~AC_MASK) |
((c & AC_MASK) + AC_UNIT))));
}
/**
* Tries to create or activate a worker if too few are active.
*
* @param ws the worker array to use to find signallees
* @param q if non-null, the queue holding tasks to be processed
*/
final void signalWork(WorkQueue[] ws, WorkQueue q) {
for (;;) {
long c; int e, u, i; WorkQueue w; Thread p;
if ((u = (int)((c = ctl) >>> 32)) >= 0)
break;
if ((e = (int)c) <= 0) {
if ((short)u < 0)
tryAddWorker();
break;
}
if (ws == null || ws.length <= (i = e & SMASK) ||
(w = ws[i]) == null)
break;
long nc = (((long)(w.nextWait & E_MASK)) |
((long)(u + UAC_UNIT)) << 32);
int ne = (e + E_SEQ) & E_MASK;
if (w.eventCount == (e | INT_SIGN) &&
U.compareAndSwapLong(this, CTL, c, nc)) {
w.eventCount = ne;
if ((p = w.parker) != null)
U.unpark(p);
break;
}
if (q != null && q.base >= q.top)
break;
}
}
// Scanning for tasks
/**
* Top-level runloop for workers, called by ForkJoinWorkerThread.run.
*/
final void runWorker(WorkQueue w) {
w.growArray(); // allocate queue
for (int r = w.hint; scan(w, r) == 0; ) {
r ^= r << 13; r ^= r >>> 17; r ^= r << 5; // xorshift
}
}
/**
* Scans for and, if found, runs one task, else possibly
* inactivates the worker. This method operates on single reads of
* volatile state and is designed to be re-invoked continuously,
* in part because it returns upon detecting inconsistencies,
* contention, or state changes that indicate possible success on
* re-invocation.
*
* The scan searches for tasks across queues starting at a random
* index, checking each at least twice. The scan terminates upon
* either finding a non-empty queue, or completing the sweep. If
* the worker is not inactivated, it takes and runs a task from
* this queue. Otherwise, if not activated, it tries to activate
* itself or some other worker by signalling. On failure to find a
* task, returns (for retry) if pool state may have changed during
* an empty scan, or tries to inactivate if active, else possibly
* blocks or terminates via method awaitWork.
*
* @param w the worker (via its WorkQueue)
* @param r a random seed
* @return worker qlock status if would have waited, else 0
*/
private final int scan(WorkQueue w, int r) {
WorkQueue[] ws; int m;
long c = ctl; // for consistency check
if ((ws = workQueues) != null && (m = ws.length - 1) >= 0 && w != null) {
for (int j = m + m + 1, ec = w.eventCount;;) {
WorkQueue q; int b, e; ForkJoinTask<?>[] a; ForkJoinTask<?> t;
if ((q = ws[(r - j) & m]) != null &&
(b = q.base) - q.top < 0 && (a = q.array) != null) {
long i = (((a.length - 1) & b) << ASHIFT) + ABASE;
if ((t = ((ForkJoinTask<?>)
U.getObjectVolatile(a, i))) != null) {
if (ec < 0)
helpRelease(c, ws, w, q, b);
else if (q.base == b &&
U.compareAndSwapObject(a, i, t, null)) {
U.putOrderedInt(q, QBASE, b + 1);
if ((b + 1) - q.top < 0)
signalWork(ws, q);
w.runTask(t);
}
}
break;
}
else if (--j < 0) {
if ((ec | (e = (int)c)) < 0) // inactive or terminating
return awaitWork(w, c, ec);
else if (ctl == c) { // try to inactivate and enqueue
long nc = (long)ec | ((c - AC_UNIT) & (AC_MASK|TC_MASK));
w.nextWait = e;
w.eventCount = ec | INT_SIGN;
if (!U.compareAndSwapLong(this, CTL, c, nc))
w.eventCount = ec; // back out
}
break;
}
}
}
return 0;
}
/**
* A continuation of scan(), possibly blocking or terminating
* worker w. Returns without blocking if pool state has apparently
* changed since last invocation. Also, if inactivating w has
* caused the pool to become quiescent, checks for pool
* termination, and, so long as this is not the only worker, waits
* for event for up to a given duration. On timeout, if ctl has
* not changed, terminates the worker, which will in turn wake up
* another worker to possibly repeat this process.
*
* @param w the calling worker
* @param c the ctl value on entry to scan
* @param ec the worker's eventCount on entry to scan
*/
private final int awaitWork(WorkQueue w, long c, int ec) {
int stat, ns; long parkTime, deadline;
if ((stat = w.qlock) >= 0 && w.eventCount == ec && ctl == c &&
!Thread.interrupted()) {
int e = (int)c;
int u = (int)(c >>> 32);
int d = (u >> UAC_SHIFT) + parallelism; // active count
if (e < 0 || (d <= 0 && tryTerminate(false, false)))
stat = w.qlock = -1; // pool is terminating
else if ((ns = w.nsteals) != 0) { // collect steals and retry
long sc;
w.nsteals = 0;
do {} while (!U.compareAndSwapLong(this, STEALCOUNT,
sc = stealCount, sc + ns));
}
else {
long pc = ((d > 0 || ec != (e | INT_SIGN)) ? 0L :
((long)(w.nextWait & E_MASK)) | // ctl to restore
((long)(u + UAC_UNIT)) << 32);
if (pc != 0L) { // timed wait if last waiter
int dc = -(short)(c >>> TC_SHIFT);
parkTime = (dc < 0 ? FAST_IDLE_TIMEOUT:
(dc + 1) * IDLE_TIMEOUT);
deadline = System.nanoTime() + parkTime - TIMEOUT_SLOP;
}
else
parkTime = deadline = 0L;
if (w.eventCount == ec && ctl == c) {
Thread wt = Thread.currentThread();
U.putObject(wt, PARKBLOCKER, this);
w.parker = wt; // emulate LockSupport.park
if (w.eventCount == ec && ctl == c)
U.park(false, parkTime); // must recheck before park
w.parker = null;
U.putObject(wt, PARKBLOCKER, null);
if (parkTime != 0L && ctl == c &&
deadline - System.nanoTime() <= 0L &&
U.compareAndSwapLong(this, CTL, c, pc))
stat = w.qlock = -1; // shrink pool
}
}
}
return stat;
}
/**
* Possibly releases (signals) a worker. Called only from scan()
* when a worker with apparently inactive status finds a non-empty
* queue. This requires revalidating all of the associated state
* from caller.
*/
private final void helpRelease(long c, WorkQueue[] ws, WorkQueue w,
WorkQueue q, int b) {
WorkQueue v; int e, i; Thread p;
if (w != null && w.eventCount < 0 && (e = (int)c) > 0 &&
ws != null && ws.length > (i = e & SMASK) &&
(v = ws[i]) != null && ctl == c) {
long nc = (((long)(v.nextWait & E_MASK)) |
((long)((int)(c >>> 32) + UAC_UNIT)) << 32);
int ne = (e + E_SEQ) & E_MASK;
if (q != null && q.base == b && w.eventCount < 0 &&
v.eventCount == (e | INT_SIGN) &&
U.compareAndSwapLong(this, CTL, c, nc)) {
v.eventCount = ne;
if ((p = v.parker) != null)
U.unpark(p);
}
}
}
/**
* Tries to locate and execute tasks for a stealer of the given
     * task, or in turn one of its stealers. Traces currentSteal ->
* currentJoin links looking for a thread working on a descendant
* of the given task and with a non-empty queue to steal back and
* execute tasks from. The first call to this method upon a
* waiting join will often entail scanning/search, (which is OK
* because the joiner has nothing better to do), but this method
* leaves hints in workers to speed up subsequent calls. The
* implementation is very branchy to cope with potential
* inconsistencies or loops encountering chains that are stale,
* unknown, or so long that they are likely cyclic.
*
* @param joiner the joining worker
* @param task the task to join
* @return 0 if no progress can be made, negative if task
* known complete, else positive
*/
private int tryHelpStealer(WorkQueue joiner, ForkJoinTask<?> task) {
int stat = 0, steps = 0; // bound to avoid cycles
if (task != null && joiner != null &&
joiner.base - joiner.top >= 0) { // hoist checks
restart: for (;;) {
ForkJoinTask<?> subtask = task; // current target
for (WorkQueue j = joiner, v;;) { // v is stealer of subtask
WorkQueue[] ws; int m, s, h;
if ((s = task.status) < 0) {
stat = s;
break restart;
}
if ((ws = workQueues) == null || (m = ws.length - 1) <= 0)
break restart; // shutting down
if ((v = ws[h = (j.hint | 1) & m]) == null ||
v.currentSteal != subtask) {
for (int origin = h;;) { // find stealer
if (((h = (h + 2) & m) & 15) == 1 &&
(subtask.status < 0 || j.currentJoin != subtask))
continue restart; // occasional staleness check
if ((v = ws[h]) != null &&
v.currentSteal == subtask) {
j.hint = h; // save hint
break;
}
if (h == origin)
break restart; // cannot find stealer
}
}
for (;;) { // help stealer or descend to its stealer
                    ForkJoinTask<?>[] a; int b;
if (subtask.status < 0) // surround probes with
continue restart; // consistency checks
if ((b = v.base) - v.top < 0 && (a = v.array) != null) {
int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
ForkJoinTask<?> t =
(ForkJoinTask<?>)U.getObjectVolatile(a, i);
if (subtask.status < 0 || j.currentJoin != subtask ||
v.currentSteal != subtask)
continue restart; // stale
stat = 1; // apparent progress
if (v.base == b) {
if (t == null)
break restart;
if (U.compareAndSwapObject(a, i, t, null)) {
U.putOrderedInt(v, QBASE, b + 1);
ForkJoinTask<?> ps = joiner.currentSteal;
int jt = joiner.top;
do {
joiner.currentSteal = t;
t.doExec(); // clear local tasks too
} while (task.status >= 0 &&
joiner.top != jt &&
(t = joiner.pop()) != null);
joiner.currentSteal = ps;
break restart;
}
}
}
else { // empty -- try to descend
ForkJoinTask<?> next = v.currentJoin;
if (subtask.status < 0 || j.currentJoin != subtask ||
v.currentSteal != subtask)
continue restart; // stale
else if (next == null || ++steps == MAX_HELP)
break restart; // dead-end or maybe cyclic
else {
subtask = next;
j = v;
break;
}
}
}
}
}
}
return stat;
}
/**
* Analog of tryHelpStealer for CountedCompleters. Tries to steal
* and run tasks within the target's computation.
*
     * @param joiner the joining worker
     * @param task the task to join
     * @return task status on exit
*/
private int helpComplete(WorkQueue joiner, CountedCompleter<?> task) {
WorkQueue[] ws; int m;
int s = 0;
if ((ws = workQueues) != null && (m = ws.length - 1) >= 0 &&
joiner != null && task != null) {
int j = joiner.poolIndex;
int scans = m + m + 1;
long c = 0L; // for stability check
for (int k = scans; ; j += 2) {
WorkQueue q;
if ((s = task.status) < 0)
break;
else if (joiner.internalPopAndExecCC(task))
k = scans;
else if ((s = task.status) < 0)
break;
else if ((q = ws[j & m]) != null && q.pollAndExecCC(task))
k = scans;
else if (--k < 0) {
if (c == (c = ctl))
break;
k = scans;
}
}
}
return s;
}
/**
* Tries to decrement active count (sometimes implicitly) and
* possibly release or create a compensating worker in preparation
* for blocking. Fails on contention or termination. Otherwise,
* adds a new thread if no idle workers are available and pool
* may become starved.
*
* @param c the assumed ctl value
*/
final boolean tryCompensate(long c) {
WorkQueue[] ws = workQueues;
int pc = parallelism, e = (int)c, m, tc;
if (ws != null && (m = ws.length - 1) >= 0 && e >= 0 && ctl == c) {
WorkQueue w = ws[e & m];
if (e != 0 && w != null) {
Thread p;
long nc = ((long)(w.nextWait & E_MASK) |
(c & (AC_MASK|TC_MASK)));
int ne = (e + E_SEQ) & E_MASK;
if (w.eventCount == (e | INT_SIGN) &&
U.compareAndSwapLong(this, CTL, c, nc)) {
w.eventCount = ne;
if ((p = w.parker) != null)
U.unpark(p);
return true; // replace with idle worker
}
}
else if ((tc = (short)(c >>> TC_SHIFT)) >= 0 &&
(int)(c >> AC_SHIFT) + pc > 1) {
long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK);
if (U.compareAndSwapLong(this, CTL, c, nc))
return true; // no compensation
}
else if (tc + pc < MAX_CAP) {
long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK);
if (U.compareAndSwapLong(this, CTL, c, nc)) {
ForkJoinWorkerThreadFactory fac;
Throwable ex = null;
ForkJoinWorkerThread wt = null;
try {
if ((fac = factory) != null &&
(wt = fac.newThread(this)) != null) {
wt.start();
return true;
}
} catch (Throwable rex) {
ex = rex;
}
deregisterWorker(wt, ex); // clean up and return false
}
}
}
return false;
}
/**
* Helps and/or blocks until the given task is done.
*
* @param joiner the joining worker
* @param task the task
* @return task status on exit
*/
final int awaitJoin(WorkQueue joiner, ForkJoinTask<?> task) {
int s = 0;
if (task != null && (s = task.status) >= 0 && joiner != null) {
ForkJoinTask<?> prevJoin = joiner.currentJoin;
joiner.currentJoin = task;
do {} while (joiner.tryRemoveAndExec(task) && // process local tasks
(s = task.status) >= 0);
if (s >= 0 && (task instanceof CountedCompleter))
s = helpComplete(joiner, (CountedCompleter<?>)task);
long cc = 0; // for stability checks
while (s >= 0 && (s = task.status) >= 0) {
if ((s = tryHelpStealer(joiner, task)) == 0 &&
(s = task.status) >= 0) {
if (!tryCompensate(cc))
cc = ctl;
else {
if (task.trySetSignal() && (s = task.status) >= 0) {
synchronized (task) {
if (task.status >= 0) {
try { // see ForkJoinTask
task.wait(); // for explanation
} catch (InterruptedException ie) {
}
}
else
task.notifyAll();
}
}
long c; // reactivate
do {} while (!U.compareAndSwapLong
(this, CTL, c = ctl,
((c & ~AC_MASK) |
((c & AC_MASK) + AC_UNIT))));
}
}
}
joiner.currentJoin = prevJoin;
}
return s;
}
/**
* Stripped-down variant of awaitJoin used by timed joins. Tries
* to help join only while there is continuous progress. (Caller
* will then enter a timed wait.)
*
* @param joiner the joining worker
* @param task the task
*/
final void helpJoinOnce(WorkQueue joiner, ForkJoinTask<?> task) {
int s;
if (joiner != null && task != null && (s = task.status) >= 0) {
ForkJoinTask<?> prevJoin = joiner.currentJoin;
joiner.currentJoin = task;
do {} while (joiner.tryRemoveAndExec(task) && // process local tasks
(s = task.status) >= 0);
if (s >= 0) {
if (task instanceof CountedCompleter)
helpComplete(joiner, (CountedCompleter<?>)task);
do {} while (task.status >= 0 &&
tryHelpStealer(joiner, task) > 0);
}
joiner.currentJoin = prevJoin;
}
}
/**
* Returns a (probably) non-empty steal queue, if one is found
* during a scan, else null. This method must be retried by
* caller if, by the time it tries to use the queue, it is empty.
*/
private WorkQueue findNonEmptyStealQueue() {
int r = ThreadLocalRandom.current().nextInt();
for (;;) {
int ps = plock, m; WorkQueue[] ws; WorkQueue q;
if ((ws = workQueues) != null && (m = ws.length - 1) >= 0) {
for (int j = (m + 1) << 2; j >= 0; --j) {
if ((q = ws[(((r - j) << 1) | 1) & m]) != null &&
q.base - q.top < 0)
return q;
}
}
if (plock == ps)
return null;
}
}
/**
* Runs tasks until {@code isQuiescent()}. We piggyback on
* active count ctl maintenance, but rather than blocking
* when tasks cannot be found, we rescan until all others cannot
* find tasks either.
*/
final void helpQuiescePool(WorkQueue w) {
ForkJoinTask<?> ps = w.currentSteal;
for (boolean active = true;;) {
long c; WorkQueue q; ForkJoinTask<?> t; int b;
while ((t = w.nextLocalTask()) != null)
t.doExec();
if ((q = findNonEmptyStealQueue()) != null) {
if (!active) { // re-establish active count
active = true;
do {} while (!U.compareAndSwapLong
(this, CTL, c = ctl,
((c & ~AC_MASK) |
((c & AC_MASK) + AC_UNIT))));
}
if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null) {
(w.currentSteal = t).doExec();
w.currentSteal = ps;
}
}
else if (active) { // decrement active count without queuing
long nc = ((c = ctl) & ~AC_MASK) | ((c & AC_MASK) - AC_UNIT);
if ((int)(nc >> AC_SHIFT) + parallelism == 0)
break; // bypass decrement-then-increment
if (U.compareAndSwapLong(this, CTL, c, nc))
active = false;
}
else if ((int)((c = ctl) >> AC_SHIFT) + parallelism <= 0 &&
U.compareAndSwapLong
(this, CTL, c, ((c & ~AC_MASK) |
((c & AC_MASK) + AC_UNIT))))
break;
}
}
/**
* Gets and removes a local or stolen task for the given worker.
*
* @return a task, if available
*/
final ForkJoinTask<?> nextTaskFor(WorkQueue w) {
for (ForkJoinTask<?> t;;) {
WorkQueue q; int b;
if ((t = w.nextLocalTask()) != null)
return t;
if ((q = findNonEmptyStealQueue()) == null)
return null;
if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null)
return t;
}
}
/**
* Returns a cheap heuristic guide for task partitioning when
* programmers, frameworks, tools, or languages have little or no
* idea about task granularity. In essence by offering this
* method, we ask users only about tradeoffs in overhead vs
* expected throughput and its variance, rather than how finely to
* partition tasks.
*
* In a steady state strict (tree-structured) computation, each
* thread makes available for stealing enough tasks for other
* threads to remain active. Inductively, if all threads play by
* the same rules, each thread should make available only a
* constant number of tasks.
*
* The minimum useful constant is just 1. But using a value of 1
* would require immediate replenishment upon each steal to
* maintain enough tasks, which is infeasible. Further,
* partitionings/granularities of offered tasks should minimize
* steal rates, which in general means that threads nearer the top
* of computation tree should generate more than those nearer the
* bottom. In perfect steady state, each thread is at
* approximately the same level of computation tree. However,
* producing extra tasks amortizes the uncertainty of progress and
* diffusion assumptions.
*
* So, users will want to use values larger (but not much larger)
* than 1 to both smooth over transient shortages and hedge
* against uneven progress; as traded off against the cost of
* extra task overhead. We leave the user to pick a threshold
* value to compare with the results of this call to guide
* decisions, but recommend values such as 3.
*
* When all threads are active, it is on average OK to estimate
* surplus strictly locally. In steady-state, if one thread is
* maintaining say 2 surplus tasks, then so are others. So we can
* just use estimated queue length. However, this strategy alone
* leads to serious mis-estimates in some non-steady-state
* conditions (ramp-up, ramp-down, other stalls). We can detect
* many of these by further considering the number of "idle"
* threads, that are known to have zero queued tasks, so
* compensate by a factor of (#idle/#active) threads.
*
* Note: The approximation of #busy workers as #active workers is
* not very good under current signalling scheme, and should be
* improved.
*/
static int getSurplusQueuedTaskCount() {
Thread t; ForkJoinWorkerThread wt; ForkJoinPool pool; WorkQueue q;
if (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)) {
int p = (pool = (wt = (ForkJoinWorkerThread)t).pool).parallelism;
int n = (q = wt.workQueue).top - q.base;
int a = (int)(pool.ctl >> AC_SHIFT) + p;
return n - (a > (p >>>= 1) ? 0 :
a > (p >>>= 1) ? 1 :
a > (p >>>= 1) ? 2 :
a > (p >>>= 1) ? 4 :
8);
}
return 0;
}
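    /*
     * Illustrative sketch (editor addition, not part of the original source):
     * the threshold guidance above is usually consumed via the public relay
     * ForkJoinTask.getSurplusQueuedTaskCount() inside a recursive task, e.g.:
     *
     *  class IncrementTask extends RecursiveAction {
     *    final long[] a; final int lo, hi;
     *    IncrementTask(long[] a, int lo, int hi) {
     *      this.a = a; this.lo = lo; this.hi = hi;
     *    }
     *    protected void compute() {
     *      if (hi - lo > 1024 && getSurplusQueuedTaskCount() <= 3) {
     *        int mid = (lo + hi) >>> 1;             // split while surplus is small
     *        invokeAll(new IncrementTask(a, lo, mid),
     *                  new IncrementTask(a, mid, hi));
     *      } else {
     *        for (int i = lo; i < hi; ++i) a[i]++;  // leaf work
     *      }
     *    }
     *  }
     */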
// Termination
/**
* Possibly initiates and/or completes termination. The caller
* triggering termination runs three passes through workQueues:
* (0) Setting termination status, followed by wakeups of queued
* workers; (1) cancelling all tasks; (2) interrupting lagging
* threads (likely in external tasks, but possibly also blocked in
* joins). Each pass repeats previous steps because of potential
* lagging thread creation.
*
* @param now if true, unconditionally terminate, else only
* if no work and no active workers
* @param enable if true, enable shutdown when next possible
* @return true if now terminating or terminated
*/
private boolean tryTerminate(boolean now, boolean enable) {
int ps;
if (this == common) // cannot shut down
return false;
if ((ps = plock) >= 0) { // enable by setting plock
if (!enable)
return false;
if ((ps & PL_LOCK) != 0 ||
!U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
ps = acquirePlock();
int nps = ((ps + PL_LOCK) & ~SHUTDOWN) | SHUTDOWN;
if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
releasePlock(nps);
}
for (long c;;) {
if (((c = ctl) & STOP_BIT) != 0) { // already terminating
if ((short)(c >>> TC_SHIFT) + parallelism <= 0) {
synchronized (this) {
notifyAll(); // signal when 0 workers
}
}
return true;
}
if (!now) { // check if idle & no tasks
WorkQueue[] ws; WorkQueue w;
if ((int)(c >> AC_SHIFT) + parallelism > 0)
return false;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; ++i) {
if ((w = ws[i]) != null &&
(!w.isEmpty() ||
((i & 1) != 0 && w.eventCount >= 0))) {
signalWork(ws, w);
return false;
}
}
}
}
if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) {
for (int pass = 0; pass < 3; ++pass) {
WorkQueue[] ws; WorkQueue w; Thread wt;
if ((ws = workQueues) != null) {
int n = ws.length;
for (int i = 0; i < n; ++i) {
if ((w = ws[i]) != null) {
w.qlock = -1;
if (pass > 0) {
w.cancelAll();
if (pass > 1 && (wt = w.owner) != null) {
if (!wt.isInterrupted()) {
try {
wt.interrupt();
} catch (Throwable ignore) {
}
}
U.unpark(wt);
}
}
}
}
// Wake up workers parked on event queue
int i, e; long cc; Thread p;
while ((e = (int)(cc = ctl) & E_MASK) != 0 &&
(i = e & SMASK) < n && i >= 0 &&
(w = ws[i]) != null) {
long nc = ((long)(w.nextWait & E_MASK) |
((cc + AC_UNIT) & AC_MASK) |
(cc & (TC_MASK|STOP_BIT)));
if (w.eventCount == (e | INT_SIGN) &&
U.compareAndSwapLong(this, CTL, cc, nc)) {
w.eventCount = (e + E_SEQ) & E_MASK;
w.qlock = -1;
if ((p = w.parker) != null)
U.unpark(p);
}
}
}
}
}
}
}
// external operations on common pool
/**
* Returns common pool queue for a thread that has submitted at
* least one task.
*/
static WorkQueue commonSubmitterQueue() {
Submitter z; ForkJoinPool p; WorkQueue[] ws; int m, r;
return ((z = submitters.get()) != null &&
(p = common) != null &&
(ws = p.workQueues) != null &&
(m = ws.length - 1) >= 0) ?
ws[m & z.seed & SQMASK] : null;
}
/**
* Tries to pop the given task from submitter's queue in common pool.
*/
final boolean tryExternalUnpush(ForkJoinTask<?> task) {
WorkQueue joiner; ForkJoinTask<?>[] a; int m, s;
Submitter z = submitters.get();
WorkQueue[] ws = workQueues;
boolean popped = false;
if (z != null && ws != null && (m = ws.length - 1) >= 0 &&
(joiner = ws[z.seed & m & SQMASK]) != null &&
joiner.base != (s = joiner.top) &&
(a = joiner.array) != null) {
long j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE;
if (U.getObject(a, j) == task &&
U.compareAndSwapInt(joiner, QLOCK, 0, 1)) {
if (joiner.top == s && joiner.array == a &&
U.compareAndSwapObject(a, j, task, null)) {
joiner.top = s - 1;
popped = true;
}
joiner.qlock = 0;
}
}
return popped;
}
final int externalHelpComplete(CountedCompleter<?> task) {
WorkQueue joiner; int m, j;
Submitter z = submitters.get();
WorkQueue[] ws = workQueues;
int s = 0;
if (z != null && ws != null && (m = ws.length - 1) >= 0 &&
(joiner = ws[(j = z.seed) & m & SQMASK]) != null && task != null) {
int scans = m + m + 1;
long c = 0L; // for stability check
j |= 1; // poll odd queues
for (int k = scans; ; j += 2) {
WorkQueue q;
if ((s = task.status) < 0)
break;
else if (joiner.externalPopAndExecCC(task))
k = scans;
else if ((s = task.status) < 0)
break;
else if ((q = ws[j & m]) != null && q.pollAndExecCC(task))
k = scans;
else if (--k < 0) {
if (c == (c = ctl))
break;
k = scans;
}
}
}
return s;
}
// Exported methods
// Constructors
/**
* Creates a {@code ForkJoinPool} with parallelism equal to {@link
* java.lang.Runtime#availableProcessors}, using the {@linkplain
* #defaultForkJoinWorkerThreadFactory default thread factory},
* no UncaughtExceptionHandler, and non-async LIFO processing mode.
*
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public ForkJoinPool() {
this(Math.min(MAX_CAP, Runtime.getRuntime().availableProcessors()),
defaultForkJoinWorkerThreadFactory, null, false);
}
/**
* Creates a {@code ForkJoinPool} with the indicated parallelism
* level, the {@linkplain
* #defaultForkJoinWorkerThreadFactory default thread factory},
* no UncaughtExceptionHandler, and non-async LIFO processing mode.
*
* @param parallelism the parallelism level
* @throws IllegalArgumentException if parallelism less than or
* equal to zero, or greater than implementation limit
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public ForkJoinPool(int parallelism) {
this(parallelism, defaultForkJoinWorkerThreadFactory, null, false);
}
/**
* Creates a {@code ForkJoinPool} with the given parameters.
*
* @param parallelism the parallelism level. For default value,
* use {@link java.lang.Runtime#availableProcessors}.
* @param factory the factory for creating new threads. For default value,
* use {@link #defaultForkJoinWorkerThreadFactory}.
* @param handler the handler for internal worker threads that
* terminate due to unrecoverable errors encountered while executing
* tasks. For default value, use {@code null}.
* @param asyncMode if true,
* establishes local first-in-first-out scheduling mode for forked
* tasks that are never joined. This mode may be more appropriate
* than default locally stack-based mode in applications in which
* worker threads only process event-style asynchronous tasks.
* For default value, use {@code false}.
* @throws IllegalArgumentException if parallelism less than or
* equal to zero, or greater than implementation limit
* @throws NullPointerException if the factory is null
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public ForkJoinPool(int parallelism,
ForkJoinWorkerThreadFactory factory,
UncaughtExceptionHandler handler,
boolean asyncMode) {
this(checkParallelism(parallelism),
checkFactory(factory),
handler,
(asyncMode ? FIFO_QUEUE : LIFO_QUEUE),
"ForkJoinPool-" + nextPoolId() + "-worker-");
checkPermission();
}
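    // Illustrative (editor addition): an async-mode pool for event-style
    // tasks that are never joined might be constructed as, e.g.
    //   ForkJoinPool pool = new ForkJoinPool(
    //       4, ForkJoinPool.defaultForkJoinWorkerThreadFactory,
    //       null, true);  // asyncMode == true selects FIFO_QUEUE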
private static int checkParallelism(int parallelism) {
if (parallelism <= 0 || parallelism > MAX_CAP)
throw new IllegalArgumentException();
return parallelism;
}
private static ForkJoinWorkerThreadFactory checkFactory
(ForkJoinWorkerThreadFactory factory) {
if (factory == null)
throw new NullPointerException();
return factory;
}
/**
* Creates a {@code ForkJoinPool} with the given parameters, without
* any security checks or parameter validation. Invoked directly by
* makeCommonPool.
*/
private ForkJoinPool(int parallelism,
ForkJoinWorkerThreadFactory factory,
UncaughtExceptionHandler handler,
int mode,
String workerNamePrefix) {
this.workerNamePrefix = workerNamePrefix;
this.factory = factory;
this.ueh = handler;
this.mode = (short)mode;
this.parallelism = (short)parallelism;
long np = (long)(-parallelism); // offset ctl counts
this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
}
/**
* Returns the common pool instance. This pool is statically
* constructed; its run state is unaffected by attempts to {@link
* #shutdown} or {@link #shutdownNow}. However this pool and any
* ongoing processing are automatically terminated upon program
* {@link System#exit}. Any program that relies on asynchronous
* task processing to complete before program termination should
* invoke {@code commonPool().}{@link #awaitQuiescence awaitQuiescence},
* before exit.
*
* @return the common pool instance
* @since 1.8
*/
public static ForkJoinPool commonPool() {
// assert common != null : "static init error";
return common;
}
// Execution methods
/**
* Performs the given task, returning its result upon completion.
* If the computation encounters an unchecked Exception or Error,
* it is rethrown as the outcome of this invocation. Rethrown
* exceptions behave in the same way as regular exceptions, but,
* when possible, contain stack traces (as displayed for example
* using {@code ex.printStackTrace()}) of both the current thread
* as well as the thread actually encountering the exception;
* minimally only the latter.
*
* @param task the task
* @param <T> the type of the task's result
* @return the task's result
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public <T> T invoke(ForkJoinTask<T> task) {
if (task == null)
throw new NullPointerException();
externalPush(task);
return task.join();
}
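    // Illustrative (editor addition): as the body above shows, invoke is
    // externalPush followed by join, so for a caller outside the pool:
    //   Long sum = pool.invoke(sumTask);  // blocks until the task completes
    // where pool and sumTask (a hypothetical RecursiveTask<Long>) are assumed.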
/**
* Arranges for (asynchronous) execution of the given task.
*
* @param task the task
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public void execute(ForkJoinTask<?> task) {
if (task == null)
throw new NullPointerException();
externalPush(task);
}
// AbstractExecutorService methods
/**
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public void execute(Runnable task) {
if (task == null)
throw new NullPointerException();
ForkJoinTask<?> job;
if (task instanceof ForkJoinTask<?>) // avoid re-wrap
job = (ForkJoinTask<?>) task;
else
job = new ForkJoinTask.RunnableExecuteAction(task);
externalPush(job);
}
/**
* Submits a ForkJoinTask for execution.
*
* @param task the task to submit
* @param <T> the type of the task's result
* @return the task
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public <T> ForkJoinTask<T> submit(ForkJoinTask<T> task) {
if (task == null)
throw new NullPointerException();
externalPush(task);
return task;
}
/**
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public <T> ForkJoinTask<T> submit(Callable<T> task) {
ForkJoinTask<T> job = new ForkJoinTask.AdaptedCallable<T>(task);
externalPush(job);
return job;
}
/**
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public <T> ForkJoinTask<T> submit(Runnable task, T result) {
ForkJoinTask<T> job = new ForkJoinTask.AdaptedRunnable<T>(task, result);
externalPush(job);
return job;
}
/**
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public ForkJoinTask<?> submit(Runnable task) {
if (task == null)
throw new NullPointerException();
ForkJoinTask<?> job;
if (task instanceof ForkJoinTask<?>) // avoid re-wrap
job = (ForkJoinTask<?>) task;
else
job = new ForkJoinTask.AdaptedRunnableAction(task);
externalPush(job);
return job;
}
/**
* @throws NullPointerException {@inheritDoc}
* @throws RejectedExecutionException {@inheritDoc}
*/
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) {
// In previous versions of this class, this method constructed
// a task to run ForkJoinTask.invokeAll, but now external
// invocation of multiple tasks is at least as efficient.
ArrayList<Future<T>> futures = new ArrayList<Future<T>>(tasks.size());
boolean done = false;
try {
for (Callable<T> t : tasks) {
ForkJoinTask<T> f = new ForkJoinTask.AdaptedCallable<T>(t);
futures.add(f);
externalPush(f);
}
for (int i = 0, size = futures.size(); i < size; i++)
((ForkJoinTask<?>)futures.get(i)).quietlyJoin();
done = true;
return futures;
} finally {
if (!done)
for (int i = 0, size = futures.size(); i < size; i++)
futures.get(i).cancel(false);
}
}
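    // Note (editor addition): each callable above is wrapped in an
    // AdaptedCallable, pushed externally, then quietlyJoin'ed in submission
    // order; the futures are cancelled only if a wrap or push throws before
    // all joins complete.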
/**
* Returns the factory used for constructing new workers.
*
* @return the factory used for constructing new workers
*/
public ForkJoinWorkerThreadFactory getFactory() {
return factory;
}
/**
* Returns the handler for internal worker threads that terminate
* due to unrecoverable errors encountered while executing tasks.
*
* @return the handler, or {@code null} if none
*/
public UncaughtExceptionHandler getUncaughtExceptionHandler() {
return ueh;
}
/**
* Returns the targeted parallelism level of this pool.
*
* @return the targeted parallelism level of this pool
*/
public int getParallelism() {
int par;
return ((par = parallelism) > 0) ? par : 1;
}
/**
* Returns the targeted parallelism level of the common pool.
*
* @return the targeted parallelism level of the common pool
* @since 1.8
*/
public static int getCommonPoolParallelism() {
return commonParallelism;
}
/**
* Returns the number of worker threads that have started but not
* yet terminated. The result returned by this method may differ
* from {@link #getParallelism} when threads are created to
* maintain parallelism when others are cooperatively blocked.
*
* @return the number of worker threads
*/
public int getPoolSize() {
return parallelism + (short)(ctl >>> TC_SHIFT);
}
/**
* Returns {@code true} if this pool uses local first-in-first-out
* scheduling mode for forked tasks that are never joined.
*
* @return {@code true} if this pool uses async mode
*/
public boolean getAsyncMode() {
return mode == FIFO_QUEUE;
}
/**
* Returns an estimate of the number of worker threads that are
* not blocked waiting to join tasks or for other managed
* synchronization. This method may overestimate the
* number of running threads.
*
* @return the number of worker threads
*/
public int getRunningThreadCount() {
int rc = 0;
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 1; i < ws.length; i += 2) {
if ((w = ws[i]) != null && w.isApparentlyUnblocked())
++rc;
}
}
return rc;
}
/**
* Returns an estimate of the number of threads that are currently
* stealing or executing tasks. This method may overestimate the
* number of active threads.
*
* @return the number of active threads
*/
public int getActiveThreadCount() {
int r = parallelism + (int)(ctl >> AC_SHIFT);
return (r <= 0) ? 0 : r; // suppress momentarily negative values
}
/**
* Returns {@code true} if all worker threads are currently idle.
* An idle worker is one that cannot obtain a task to execute
* because none are available to steal from other threads, and
* there are no pending submissions to the pool. This method is
* conservative; it might not return {@code true} immediately upon
* idleness of all threads, but will eventually become true if
* threads remain inactive.
*
* @return {@code true} if all threads are currently idle
*/
public boolean isQuiescent() {
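        // editor note: the AC field of ctl is stored offset by -parallelism,
        // so this condition reads as "number of active workers <= 0"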
return parallelism + (int)(ctl >> AC_SHIFT) <= 0;
}
/**
* Returns an estimate of the total number of tasks stolen from
* one thread's work queue by another. The reported value
* underestimates the actual total number of steals when the pool
* is not quiescent. This value may be useful for monitoring and
* tuning fork/join programs: in general, steal counts should be
* high enough to keep threads busy, but low enough to avoid
* overhead and contention across threads.
*
* @return the number of steals
*/
public long getStealCount() {
long count = stealCount;
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 1; i < ws.length; i += 2) {
if ((w = ws[i]) != null)
count += w.nsteals;
}
}
return count;
}
/**
* Returns an estimate of the total number of tasks currently held
* in queues by worker threads (but not including tasks submitted
* to the pool that have not begun executing). This value is only
* an approximation, obtained by iterating across all threads in
* the pool. This method may be useful for tuning task
* granularities.
*
* @return the number of queued tasks
*/
public long getQueuedTaskCount() {
long count = 0;
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 1; i < ws.length; i += 2) {
if ((w = ws[i]) != null)
count += w.queueSize();
}
}
return count;
}
/**
* Returns an estimate of the number of tasks submitted to this
* pool that have not yet begun executing. This method may take
* time proportional to the number of submissions.
*
* @return the number of queued submissions
*/
public int getQueuedSubmissionCount() {
int count = 0;
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; i += 2) {
if ((w = ws[i]) != null)
count += w.queueSize();
}
}
return count;
}
/**
* Returns {@code true} if there are any tasks submitted to this
* pool that have not yet begun executing.
*
* @return {@code true} if there are any queued submissions
*/
public boolean hasQueuedSubmissions() {
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; i += 2) {
if ((w = ws[i]) != null && !w.isEmpty())
return true;
}
}
return false;
}
/**
* Removes and returns the next unexecuted submission if one is
* available. This method may be useful in extensions to this
* class that re-assign work in systems with multiple pools.
*
* @return the next submission, or {@code null} if none
*/
protected ForkJoinTask<?> pollSubmission() {
WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; i += 2) {
if ((w = ws[i]) != null && (t = w.poll()) != null)
return t;
}
}
return null;
}
/**
* Removes all available unexecuted submitted and forked tasks
* from scheduling queues and adds them to the given collection,
* without altering their execution status. These may include
* artificially generated or wrapped tasks. This method is
* designed to be invoked only when the pool is known to be
* quiescent. Invocations at other times may not remove all
* tasks. A failure encountered while attempting to add elements
* to collection {@code c} may result in elements being in
* neither, either or both collections when the associated
* exception is thrown. The behavior of this operation is
* undefined if the specified collection is modified while the
* operation is in progress.
*
* @param c the collection to transfer elements into
* @return the number of elements transferred
*/
protected int drainTasksTo(Collection<? super ForkJoinTask<?>> c) {
int count = 0;
WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; ++i) {
if ((w = ws[i]) != null) {
while ((t = w.poll()) != null) {
c.add(t);
++count;
}
}
}
}
return count;
}
/**
* Returns a string identifying this pool, as well as its state,
* including indications of run state, parallelism level, and
* worker and task counts.
*
* @return a string identifying this pool, as well as its state
*/
public String toString() {
// Use a single pass through workQueues to collect counts
long qt = 0L, qs = 0L; int rc = 0;
long st = stealCount;
long c = ctl;
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; ++i) {
if ((w = ws[i]) != null) {
int size = w.queueSize();
if ((i & 1) == 0)
qs += size;
else {
qt += size;
st += w.nsteals;
if (w.isApparentlyUnblocked())
++rc;
}
}
}
}
int pc = parallelism;
int tc = pc + (short)(c >>> TC_SHIFT);
int ac = pc + (int)(c >> AC_SHIFT);
if (ac < 0) // ignore transient negative
ac = 0;
String level;
if ((c & STOP_BIT) != 0)
level = (tc == 0) ? "Terminated" : "Terminating";
else
level = plock < 0 ? "Shutting down" : "Running";
return super.toString() +
"[" + level +
", parallelism = " + pc +
", size = " + tc +
", active = " + ac +
", running = " + rc +
", steals = " + st +
", tasks = " + qt +
", submissions = " + qs +
"]";
}
/**
* Possibly initiates an orderly shutdown in which previously
* submitted tasks are executed, but no new tasks will be
* accepted. Invocation has no effect on execution state if this
* is the {@link #commonPool()}, and no additional effect if
* already shut down. Tasks that are in the process of being
* submitted concurrently during the course of this method may or
* may not be rejected.
*
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public void shutdown() {
checkPermission();
tryTerminate(false, true);
}
/**
* Possibly attempts to cancel and/or stop all tasks, and reject
* all subsequently submitted tasks. Invocation has no effect on
* execution state if this is the {@link #commonPool()}, and no
* additional effect if already shut down. Otherwise, tasks that
* are in the process of being submitted or executed concurrently
* during the course of this method may or may not be
* rejected. This method cancels both existing and unexecuted
* tasks, in order to permit termination in the presence of task
* dependencies. So the method always returns an empty list
* (unlike the case for some other Executors).
*
* @return an empty list
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public List<Runnable> shutdownNow() {
checkPermission();
tryTerminate(true, true);
return Collections.emptyList();
}
/**
* Returns {@code true} if all tasks have completed following shut down.
*
* @return {@code true} if all tasks have completed following shut down
*/
public boolean isTerminated() {
long c = ctl;
return ((c & STOP_BIT) != 0L &&
(short)(c >>> TC_SHIFT) + parallelism <= 0);
}
/**
* Returns {@code true} if the process of termination has
* commenced but not yet completed. This method may be useful for
* debugging. A return of {@code true} reported a sufficient
* period after shutdown may indicate that submitted tasks have
* ignored or suppressed interruption, or are waiting for I/O,
* causing this executor not to properly terminate. (See the
* advisory notes for class {@link ForkJoinTask} stating that
* tasks should not normally entail blocking operations. But if
* they do, they must abort them on interrupt.)
*
* @return {@code true} if terminating but not yet terminated
*/
public boolean isTerminating() {
long c = ctl;
return ((c & STOP_BIT) != 0L &&
(short)(c >>> TC_SHIFT) + parallelism > 0);
}
/**
* Returns {@code true} if this pool has been shut down.
*
* @return {@code true} if this pool has been shut down
*/
public boolean isShutdown() {
return plock < 0;
}
/**
* Blocks until all tasks have completed execution after a
* shutdown request, or the timeout occurs, or the current thread
* is interrupted, whichever happens first. Because the {@link
* #commonPool()} never terminates until program shutdown, when
* applied to the common pool, this method is equivalent to {@link
* #awaitQuiescence(long, TimeUnit)} but always returns {@code false}.
*
* @param timeout the maximum time to wait
* @param unit the time unit of the timeout argument
* @return {@code true} if this executor terminated and
* {@code false} if the timeout elapsed before termination
* @throws InterruptedException if interrupted while waiting
*/
public boolean awaitTermination(long timeout, TimeUnit unit)
throws InterruptedException {
if (Thread.interrupted())
throw new InterruptedException();
if (this == common) {
awaitQuiescence(timeout, unit);
return false;
}
long nanos = unit.toNanos(timeout);
if (isTerminated())
return true;
if (nanos <= 0L)
return false;
long deadline = System.nanoTime() + nanos;
synchronized (this) {
for (;;) {
if (isTerminated())
return true;
if (nanos <= 0L)
return false;
long millis = TimeUnit.NANOSECONDS.toMillis(nanos);
wait(millis > 0L ? millis : 1L);
nanos = deadline - System.nanoTime();
}
}
}
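    // Illustrative (editor addition): for non-common pools the standard
    // ExecutorService shutdown idiom applies, e.g.
    //   pool.shutdown();
    //   if (!pool.awaitTermination(60, TimeUnit.SECONDS))
    //       pool.shutdownNow();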
/**
* If called by a ForkJoinTask operating in this pool, equivalent
* in effect to {@link ForkJoinTask#helpQuiesce}. Otherwise,
* waits and/or attempts to assist performing tasks until this
* pool {@link #isQuiescent} or the indicated timeout elapses.
*
* @param timeout the maximum time to wait
* @param unit the time unit of the timeout argument
* @return {@code true} if quiescent; {@code false} if the
* timeout elapsed.
*/
public boolean awaitQuiescence(long timeout, TimeUnit unit) {
long nanos = unit.toNanos(timeout);
ForkJoinWorkerThread wt;
Thread thread = Thread.currentThread();
if ((thread instanceof ForkJoinWorkerThread) &&
(wt = (ForkJoinWorkerThread)thread).pool == this) {
helpQuiescePool(wt.workQueue);
return true;
}
long startTime = System.nanoTime();
WorkQueue[] ws;
int r = 0, m;
boolean found = true;
while (!isQuiescent() && (ws = workQueues) != null &&
(m = ws.length - 1) >= 0) {
if (!found) {
if ((System.nanoTime() - startTime) > nanos)
return false;
Thread.yield(); // cannot block
}
found = false;
for (int j = (m + 1) << 2; j >= 0; --j) {
ForkJoinTask<?> t; WorkQueue q; int b;
if ((q = ws[r++ & m]) != null && (b = q.base) - q.top < 0) {
found = true;
if ((t = q.pollAt(b)) != null)
t.doExec();
break;
}
}
}
return true;
}
/**
* Waits and/or attempts to assist performing tasks indefinitely
* until the {@link #commonPool()} {@link #isQuiescent}.
*/
static void quiesceCommonPool() {
common.awaitQuiescence(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
}
/**
* Interface for extending managed parallelism for tasks running
* in {@link ForkJoinPool}s.
*
* <p>A {@code ManagedBlocker} provides two methods. Method
* {@code isReleasable} must return {@code true} if blocking is
* not necessary. Method {@code block} blocks the current thread
* if necessary (perhaps internally invoking {@code isReleasable}
* before actually blocking). These actions are performed by any
* thread invoking {@link ForkJoinPool#managedBlock(ManagedBlocker)}.
* The unusual methods in this API accommodate synchronizers that
* may, but don't usually, block for long periods. Similarly, they
* allow more efficient internal handling of cases in which
* additional workers may be, but usually are not, needed to
* ensure sufficient parallelism. Toward this end,
* implementations of method {@code isReleasable} must be amenable
* to repeated invocation.
*
* <p>For example, here is a ManagedBlocker based on a
* ReentrantLock:
* <pre> {@code
* class ManagedLocker implements ManagedBlocker {
* final ReentrantLock lock;
* boolean hasLock = false;
* ManagedLocker(ReentrantLock lock) { this.lock = lock; }
* public boolean block() {
* if (!hasLock)
* lock.lock();
* return true;
* }
* public boolean isReleasable() {
* return hasLock || (hasLock = lock.tryLock());
* }
* }}</pre>
*
* <p>Here is a class that possibly blocks waiting for an
* item on a given queue:
* <pre> {@code
* class QueueTaker<E> implements ManagedBlocker {
* final BlockingQueue<E> queue;
* volatile E item = null;
* QueueTaker(BlockingQueue<E> q) { this.queue = q; }
* public boolean block() throws InterruptedException {
* if (item == null)
* item = queue.take();
* return true;
* }
* public boolean isReleasable() {
* return item != null || (item = queue.poll()) != null;
* }
* public E getItem() { // call after pool.managedBlock completes
* return item;
* }
* }}</pre>
*/
public static interface ManagedBlocker {
/**
* Possibly blocks the current thread, for example waiting for
* a lock or condition.
*
* @return {@code true} if no additional blocking is necessary
* (i.e., if isReleasable would return true)
* @throws InterruptedException if interrupted while waiting
* (the method is not required to do so, but is allowed to)
*/
boolean block() throws InterruptedException;
/**
* Returns {@code true} if blocking is unnecessary.
* @return {@code true} if blocking is unnecessary
*/
boolean isReleasable();
}
/**
* Blocks in accord with the given blocker. If the current thread
* is a {@link ForkJoinWorkerThread}, this method possibly
* arranges for a spare thread to be activated if necessary to
* ensure sufficient parallelism while the current thread is blocked.
*
* <p>If the caller is not a {@link ForkJoinTask}, this method is
* behaviorally equivalent to
* <pre> {@code
* while (!blocker.isReleasable())
* if (blocker.block())
* return;
* }</pre>
*
* If the caller is a {@code ForkJoinTask}, then the pool may
* first be expanded to ensure parallelism, and later adjusted.
*
* @param blocker the blocker
* @throws InterruptedException if blocker.block did so
*/
public static void managedBlock(ManagedBlocker blocker)
throws InterruptedException {
Thread t = Thread.currentThread();
if (t instanceof ForkJoinWorkerThread) {
ForkJoinPool p = ((ForkJoinWorkerThread)t).pool;
while (!blocker.isReleasable()) {
if (p.tryCompensate(p.ctl)) {
try {
do {} while (!blocker.isReleasable() &&
!blocker.block());
} finally {
p.incrementActiveCount();
}
break;
}
}
}
else {
do {} while (!blocker.isReleasable() &&
!blocker.block());
}
}
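    // Illustrative (editor addition): using the QueueTaker example from the
    // ManagedBlocker javadoc above (q is an assumed BlockingQueue<String>):
    //   QueueTaker<String> taker = new QueueTaker<String>(q);
    //   ForkJoinPool.managedBlock(taker);  // may add a spare worker while blocked
    //   String item = taker.getItem();     // available once block() returns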
// AbstractExecutorService overrides. These rely on undocumented
// fact that ForkJoinTask.adapt returns ForkJoinTasks that also
// implement RunnableFuture.
protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) {
return new ForkJoinTask.AdaptedRunnable<T>(runnable, value);
}
protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable) {
return new ForkJoinTask.AdaptedCallable<T>(callable);
}
// Unsafe mechanics
private static final sun.misc.Unsafe U;
private static final long CTL;
private static final long PARKBLOCKER;
private static final int ABASE;
private static final int ASHIFT;
private static final long STEALCOUNT;
private static final long PLOCK;
private static final long INDEXSEED;
private static final long QBASE;
private static final long QLOCK;
static {
// initialize field offsets for CAS etc
try {
U = getUnsafe();
Class<?> k = ForkJoinPool.class;
CTL = U.objectFieldOffset
(k.getDeclaredField("ctl"));
STEALCOUNT = U.objectFieldOffset
(k.getDeclaredField("stealCount"));
PLOCK = U.objectFieldOffset
(k.getDeclaredField("plock"));
INDEXSEED = U.objectFieldOffset
(k.getDeclaredField("indexSeed"));
Class<?> tk = Thread.class;
PARKBLOCKER = U.objectFieldOffset
(tk.getDeclaredField("parkBlocker"));
Class<?> wk = WorkQueue.class;
QBASE = U.objectFieldOffset
(wk.getDeclaredField("base"));
QLOCK = U.objectFieldOffset
(wk.getDeclaredField("qlock"));
Class<?> ak = ForkJoinTask[].class;
ABASE = U.arrayBaseOffset(ak);
int scale = U.arrayIndexScale(ak);
if ((scale & (scale - 1)) != 0)
throw new Error("data type scale not a power of two");
ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
} catch (Exception e) {
throw new Error(e);
}
submitters = new ThreadLocal<Submitter>();
defaultForkJoinWorkerThreadFactory =
new DefaultForkJoinWorkerThreadFactory();
modifyThreadPermission = new RuntimePermission("modifyThread");
common = java.security.AccessController.doPrivileged
(new java.security.PrivilegedAction<ForkJoinPool>() {
public ForkJoinPool run() { return makeCommonPool(); }});
int par = common.parallelism; // report 1 even if threads disabled
commonParallelism = par > 0 ? par : 1;
}
/**
* Creates and returns the common pool, respecting user settings
* specified via system properties.
*/
private static ForkJoinPool makeCommonPool() {
int parallelism = -1;
ForkJoinWorkerThreadFactory factory
= defaultForkJoinWorkerThreadFactory;
UncaughtExceptionHandler handler = null;
try { // ignore exceptions in accessing/parsing properties
String pp = System.getProperty
("java.util.concurrent.ForkJoinPool.common.parallelism");
String fp = System.getProperty
("java.util.concurrent.ForkJoinPool.common.threadFactory");
String hp = System.getProperty
("java.util.concurrent.ForkJoinPool.common.exceptionHandler");
if (pp != null)
parallelism = Integer.parseInt(pp);
if (fp != null)
factory = ((ForkJoinWorkerThreadFactory)ClassLoader.
getSystemClassLoader().loadClass(fp).newInstance());
if (hp != null)
handler = ((UncaughtExceptionHandler)ClassLoader.
getSystemClassLoader().loadClass(hp).newInstance());
} catch (Exception ignore) {
}
if (parallelism < 0 && // default 1 less than #cores
(parallelism = Runtime.getRuntime().availableProcessors() - 1) < 0)
parallelism = 0;
if (parallelism > MAX_CAP)
parallelism = MAX_CAP;
return new ForkJoinPool(parallelism, factory, handler, LIFO_QUEUE,
"ForkJoinPool.commonPool-worker-");
}
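    // Illustrative (editor addition): the three properties read above are
    // supplied before the common pool is first used, e.g. on the JVM
    // command line:
    //   -Djava.util.concurrent.ForkJoinPool.common.parallelism=8
    //   -Djava.util.concurrent.ForkJoinPool.common.threadFactory=com.example.MyFactory
    // (com.example.MyFactory stands in for a user-supplied factory class.)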
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
private static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException tryReflectionInstead) {}
try {
return java.security.AccessController.doPrivileged
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
} | 0true
| src_main_java_jsr166e_ForkJoinPool.java |
5,469 | public class SearchPhaseController extends AbstractComponent {
public static Comparator<AtomicArray.Entry<? extends QuerySearchResultProvider>> QUERY_RESULT_ORDERING = new Comparator<AtomicArray.Entry<? extends QuerySearchResultProvider>>() {
@Override
public int compare(AtomicArray.Entry<? extends QuerySearchResultProvider> o1, AtomicArray.Entry<? extends QuerySearchResultProvider> o2) {
int i = o1.value.shardTarget().index().compareTo(o2.value.shardTarget().index());
if (i == 0) {
i = o1.value.shardTarget().shardId() - o2.value.shardTarget().shardId();
}
return i;
}
};
public static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0];
private final CacheRecycler cacheRecycler;
private final boolean optimizeSingleShard;
@Inject
public SearchPhaseController(Settings settings, CacheRecycler cacheRecycler) {
super(settings);
this.cacheRecycler = cacheRecycler;
this.optimizeSingleShard = componentSettings.getAsBoolean("optimize_single_shard", true);
}
public boolean optimizeSingleShard() {
return optimizeSingleShard;
}
public AggregatedDfs aggregateDfs(AtomicArray<DfsSearchResult> results) {
ObjectObjectOpenHashMap<Term, TermStatistics> termStatistics = HppcMaps.newNoNullKeysMap();
ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics = HppcMaps.newNoNullKeysMap();
long aggMaxDoc = 0;
for (AtomicArray.Entry<DfsSearchResult> lEntry : results.asList()) {
final Term[] terms = lEntry.value.terms();
final TermStatistics[] stats = lEntry.value.termStatistics();
assert terms.length == stats.length;
for (int i = 0; i < terms.length; i++) {
assert terms[i] != null;
TermStatistics existing = termStatistics.get(terms[i]);
if (existing != null) {
assert terms[i].bytes().equals(existing.term());
                    // totalTermFreq is an optional statistic: if either one or both
                    // values are -1 (meaning not present), set the merged value to -1
termStatistics.put(terms[i], new TermStatistics(existing.term(),
existing.docFreq() + stats[i].docFreq(),
optionalSum(existing.totalTermFreq(), stats[i].totalTermFreq())));
} else {
termStatistics.put(terms[i], stats[i]);
}
}
final boolean[] states = lEntry.value.fieldStatistics().allocated;
final Object[] keys = lEntry.value.fieldStatistics().keys;
final Object[] values = lEntry.value.fieldStatistics().values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
String key = (String) keys[i];
CollectionStatistics value = (CollectionStatistics) values[i];
assert key != null;
CollectionStatistics existing = fieldStatistics.get(key);
if (existing != null) {
CollectionStatistics merged = new CollectionStatistics(
key, existing.maxDoc() + value.maxDoc(),
optionalSum(existing.docCount(), value.docCount()),
optionalSum(existing.sumTotalTermFreq(), value.sumTotalTermFreq()),
optionalSum(existing.sumDocFreq(), value.sumDocFreq())
);
fieldStatistics.put(key, merged);
} else {
fieldStatistics.put(key, value);
}
}
}
aggMaxDoc += lEntry.value.maxDoc();
}
return new AggregatedDfs(termStatistics, fieldStatistics, aggMaxDoc);
}
private static long optionalSum(long left, long right) {
return Math.min(left, right) == -1 ? -1 : left + right;
}
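    // Worked example (editor addition): -1 marks a statistic a shard did not
    // report, and it is sticky across the merge:
    //   optionalSum(5, 7)  == 12
    //   optionalSum(5, -1) == -1   // missing on one shard => missing overall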
public ScoreDoc[] sortDocs(AtomicArray<? extends QuerySearchResultProvider> resultsArr) {
List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> results = resultsArr.asList();
if (results.isEmpty()) {
return EMPTY_DOCS;
}
if (optimizeSingleShard) {
boolean canOptimize = false;
QuerySearchResult result = null;
int shardIndex = -1;
if (results.size() == 1) {
canOptimize = true;
result = results.get(0).value.queryResult();
shardIndex = results.get(0).index;
} else {
                // let's see if we only got hits from a single shard; if so, we can optimize...
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : results) {
if (entry.value.queryResult().topDocs().scoreDocs.length > 0) {
if (result != null) { // we already have one, can't really optimize
canOptimize = false;
break;
}
canOptimize = true;
result = entry.value.queryResult();
shardIndex = entry.index;
}
}
}
if (canOptimize) {
ScoreDoc[] scoreDocs = result.topDocs().scoreDocs;
if (scoreDocs.length < result.from()) {
return EMPTY_DOCS;
}
int resultDocsSize = result.size();
if ((scoreDocs.length - result.from()) < resultDocsSize) {
resultDocsSize = scoreDocs.length - result.from();
}
                // both sorted (TopFieldDocs) and plain results take the same
                // path here: copy the page and re-tag each doc with its shard
                ScoreDoc[] docs = new ScoreDoc[resultDocsSize];
                for (int i = 0; i < resultDocsSize; i++) {
                    ScoreDoc scoreDoc = scoreDocs[result.from() + i];
                    scoreDoc.shardIndex = shardIndex;
                    docs[i] = scoreDoc;
                }
                return docs;
}
}
@SuppressWarnings("unchecked")
AtomicArray.Entry<? extends QuerySearchResultProvider>[] sortedResults = results.toArray(new AtomicArray.Entry[results.size()]);
Arrays.sort(sortedResults, QUERY_RESULT_ORDERING);
QuerySearchResultProvider firstResult = sortedResults[0].value;
int totalNumDocs = 0;
int queueSize = firstResult.queryResult().from() + firstResult.queryResult().size();
if (firstResult.includeFetch()) {
// if we did both query and fetch on the same go, we have fetched all the docs from each shards already, use them...
// this is also important since we shortcut and fetch only docs from "from" and up to "size"
queueSize *= sortedResults.length;
}
        // we don't use TopDocs#merge here because, when paginating, it needs to ask for the "from + size" topN
        // hits, which ends up creating a "from + size" ScoreDoc[]; in our implementation we can get away with
        // creating just "size" ScoreDocs (popped in reverse order from the queue). It would be nice to improve
        // TopDocs#merge to allow for this, in which case we won't need this logic...
PriorityQueue queue;
if (firstResult.queryResult().topDocs() instanceof TopFieldDocs) {
            // sorting: if the field type is String, change CUSTOM to STRING so we handle nulls properly (since our CUSTOM String sorting might return null)
TopFieldDocs fieldDocs = (TopFieldDocs) firstResult.queryResult().topDocs();
for (int i = 0; i < fieldDocs.fields.length; i++) {
boolean allValuesAreNull = true;
boolean resolvedField = false;
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : sortedResults) {
for (ScoreDoc doc : entry.value.queryResult().topDocs().scoreDocs) {
FieldDoc fDoc = (FieldDoc) doc;
if (fDoc.fields[i] != null) {
allValuesAreNull = false;
if (fDoc.fields[i] instanceof String) {
fieldDocs.fields[i] = new SortField(fieldDocs.fields[i].getField(), SortField.Type.STRING, fieldDocs.fields[i].getReverse());
}
resolvedField = true;
break;
}
}
if (resolvedField) {
break;
}
}
if (!resolvedField && allValuesAreNull && fieldDocs.fields[i].getField() != null) {
                    // we did not manage to resolve a field (and it's not score or doc, which have no field), and all the values are null (which can only happen for STRING), so make it a STRING sort
fieldDocs.fields[i] = new SortField(fieldDocs.fields[i].getField(), SortField.Type.STRING, fieldDocs.fields[i].getReverse());
}
}
queue = new ShardFieldDocSortedHitQueue(fieldDocs.fields, queueSize);
// we need to accumulate for all and then filter the from
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : sortedResults) {
QuerySearchResult result = entry.value.queryResult();
ScoreDoc[] scoreDocs = result.topDocs().scoreDocs;
totalNumDocs += scoreDocs.length;
for (ScoreDoc doc : scoreDocs) {
doc.shardIndex = entry.index;
if (queue.insertWithOverflow(doc) == doc) {
// filled the queue, break
break;
}
}
}
} else {
queue = new ScoreDocQueue(queueSize); // we need to accumulate for all and then filter the from
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : sortedResults) {
QuerySearchResult result = entry.value.queryResult();
ScoreDoc[] scoreDocs = result.topDocs().scoreDocs;
totalNumDocs += scoreDocs.length;
for (ScoreDoc doc : scoreDocs) {
doc.shardIndex = entry.index;
if (queue.insertWithOverflow(doc) == doc) {
// filled the queue, break
break;
}
}
}
}
int resultDocsSize = firstResult.queryResult().size();
if (firstResult.includeFetch()) {
// if we did both query and fetch on the same go, we have fetched all the docs from each shards already, use them...
resultDocsSize *= sortedResults.length;
}
if (totalNumDocs < queueSize) {
resultDocsSize = totalNumDocs - firstResult.queryResult().from();
}
if (resultDocsSize <= 0) {
return EMPTY_DOCS;
}
// we only pop the first, this handles "from" nicely since the "from" are down the queue
// that we already fetched, so we are actually popping the "from" and up to "size"
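        // worked example (editor addition): with from=10 and size=10 the queue
        // was sized to keep the best 20 docs; popping 10 and filling the array
        // backwards yields ranks 10..19 in order (the requested page), while
        // the 10 best docs (the "from" prefix) simply remain in the queue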
ScoreDoc[] shardDocs = new ScoreDoc[resultDocsSize];
for (int i = resultDocsSize - 1; i >= 0; i--) // put docs in array
shardDocs[i] = (ScoreDoc) queue.pop();
return shardDocs;
}
/**
* Builds an array, with potential null elements, with docs to load.
*/
public void fillDocIdsToLoad(AtomicArray<IntArrayList> docsIdsToLoad, ScoreDoc[] shardDocs) {
for (ScoreDoc shardDoc : shardDocs) {
IntArrayList list = docsIdsToLoad.get(shardDoc.shardIndex);
if (list == null) {
                list = new IntArrayList(); // can't be shared! Unsafe is used on it later on
docsIdsToLoad.set(shardDoc.shardIndex, list);
}
list.add(shardDoc.doc);
}
}
public InternalSearchResponse merge(ScoreDoc[] sortedDocs, AtomicArray<? extends QuerySearchResultProvider> queryResultsArr, AtomicArray<? extends FetchSearchResultProvider> fetchResultsArr) {
List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> queryResults = queryResultsArr.asList();
List<? extends AtomicArray.Entry<? extends FetchSearchResultProvider>> fetchResults = fetchResultsArr.asList();
if (queryResults.isEmpty()) {
return InternalSearchResponse.EMPTY;
}
QuerySearchResult firstResult = queryResults.get(0).value.queryResult();
boolean sorted = false;
int sortScoreIndex = -1;
if (firstResult.topDocs() instanceof TopFieldDocs) {
sorted = true;
TopFieldDocs fieldDocs = (TopFieldDocs) firstResult.queryResult().topDocs();
for (int i = 0; i < fieldDocs.fields.length; i++) {
if (fieldDocs.fields[i].getType() == SortField.Type.SCORE) {
sortScoreIndex = i;
}
}
}
// merge facets
InternalFacets facets = null;
if (!queryResults.isEmpty()) {
// we rely on the fact that the order of facets is the same on all query results
if (firstResult.facets() != null && firstResult.facets().facets() != null && !firstResult.facets().facets().isEmpty()) {
List<Facet> aggregatedFacets = Lists.newArrayList();
List<Facet> namedFacets = Lists.newArrayList();
for (Facet facet : firstResult.facets()) {
// aggregate each facet name into a single list, and aggregate it
namedFacets.clear();
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
for (Facet facet1 : entry.value.queryResult().facets()) {
if (facet.getName().equals(facet1.getName())) {
namedFacets.add(facet1);
}
}
}
if (!namedFacets.isEmpty()) {
Facet aggregatedFacet = ((InternalFacet) namedFacets.get(0)).reduce(new InternalFacet.ReduceContext(cacheRecycler, namedFacets));
aggregatedFacets.add(aggregatedFacet);
}
}
facets = new InternalFacets(aggregatedFacets);
}
}
// count the total (we use the query result provider here, since we might not get any hits (we scrolled past them))
long totalHits = 0;
float maxScore = Float.NEGATIVE_INFINITY;
boolean timedOut = false;
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
QuerySearchResult result = entry.value.queryResult();
if (result.searchTimedOut()) {
timedOut = true;
}
totalHits += result.topDocs().totalHits;
if (!Float.isNaN(result.topDocs().getMaxScore())) {
maxScore = Math.max(maxScore, result.topDocs().getMaxScore());
}
}
if (Float.isInfinite(maxScore)) {
maxScore = Float.NaN;
}
// clean the fetch counter
for (AtomicArray.Entry<? extends FetchSearchResultProvider> entry : fetchResults) {
entry.value.fetchResult().initCounter();
}
// merge hits
List<InternalSearchHit> hits = new ArrayList<InternalSearchHit>();
if (!fetchResults.isEmpty()) {
for (ScoreDoc shardDoc : sortedDocs) {
FetchSearchResultProvider fetchResultProvider = fetchResultsArr.get(shardDoc.shardIndex);
if (fetchResultProvider == null) {
continue;
}
FetchSearchResult fetchResult = fetchResultProvider.fetchResult();
int index = fetchResult.counterGetAndIncrement();
if (index < fetchResult.hits().internalHits().length) {
InternalSearchHit searchHit = fetchResult.hits().internalHits()[index];
searchHit.score(shardDoc.score);
searchHit.shard(fetchResult.shardTarget());
if (sorted) {
FieldDoc fieldDoc = (FieldDoc) shardDoc;
searchHit.sortValues(fieldDoc.fields);
if (sortScoreIndex != -1) {
searchHit.score(((Number) fieldDoc.fields[sortScoreIndex]).floatValue());
}
}
hits.add(searchHit);
}
}
}
// merge suggest results
Suggest suggest = null;
if (!queryResults.isEmpty()) {
final Map<String, List<Suggest.Suggestion>> groupedSuggestions = new HashMap<String, List<Suggest.Suggestion>>();
boolean hasSuggestions = false;
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
Suggest shardResult = entry.value.queryResult().queryResult().suggest();
if (shardResult == null) {
continue;
}
hasSuggestions = true;
Suggest.group(groupedSuggestions, shardResult);
}
suggest = hasSuggestions ? new Suggest(Suggest.Fields.SUGGEST, Suggest.reduce(groupedSuggestions)) : null;
}
// merge aggregations
InternalAggregations aggregations = null;
if (!queryResults.isEmpty()) {
if (firstResult.aggregations() != null && firstResult.aggregations().asList() != null) {
List<InternalAggregations> aggregationsList = new ArrayList<InternalAggregations>(queryResults.size());
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
aggregationsList.add((InternalAggregations) entry.value.queryResult().aggregations());
}
aggregations = InternalAggregations.reduce(aggregationsList, cacheRecycler);
}
}
InternalSearchHits searchHits = new InternalSearchHits(hits.toArray(new InternalSearchHit[hits.size()]), totalHits, maxScore);
return new InternalSearchResponse(searchHits, facets, aggregations, suggest, timedOut);
}
} | 1no label
| src_main_java_org_elasticsearch_search_controller_SearchPhaseController.java |
2,165 | class FixedBitSetIterator extends FilteredDocIdSetIterator {
FixedBitSetIterator(DocIdSetIterator innerIter) {
super(innerIter);
}
@Override
protected boolean match(int doc) {
return matchDoc(doc);
}
} | 1no label
| src_main_java_org_elasticsearch_common_lucene_docset_MatchDocIdSet.java |
1,446 | public static class VertexMap extends Mapper<NullWritable, FaunusVertex, LongWritable, Holder<FaunusVertex>> {
private TitanGraph graph;
private boolean trackState;
private ModifiableHadoopConfiguration faunusConf;
private LoaderScriptWrapper loaderScript;
private final Holder<FaunusVertex> vertexHolder = new Holder<FaunusVertex>();
private final LongWritable longWritable = new LongWritable();
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
faunusConf = ModifiableHadoopConfiguration.of(DEFAULT_COMPAT.getContextConfiguration(context));
graph = TitanGraphOutputMapReduce.generateGraph(faunusConf);
trackState = context.getConfiguration().getBoolean(Tokens.TITAN_HADOOP_PIPELINE_TRACK_STATE, false);
// Check whether a script is defined in the config
if (faunusConf.has(OUTPUT_LOADER_SCRIPT_FILE)) {
Path scriptPath = new Path(faunusConf.get(OUTPUT_LOADER_SCRIPT_FILE));
FileSystem scriptFS = FileSystem.get(DEFAULT_COMPAT.getJobContextConfiguration(context));
loaderScript = new LoaderScriptWrapper(scriptFS, scriptPath);
}
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, LongWritable, Holder<FaunusVertex>>.Context context) throws IOException, InterruptedException {
try {
final TitanVertex titanVertex = this.getCreateOrDeleteVertex(value, context);
if (null != titanVertex) { // the vertex state was not "deleted" (if it was, we know its incident edges were deleted too)
// Propagate shell vertices with Titan ids
final FaunusVertex shellVertex = new FaunusVertex(faunusConf, value.getLongId());
shellVertex.setProperty(TITAN_ID, titanVertex.getLongId());
for (final TitanEdge edge : value.query().direction(OUT).titanEdges()) {
if (!trackState || edge.isNew()) { //Only need to propagate ids for new edges
this.longWritable.set(edge.getVertex(IN).getLongId());
context.write(this.longWritable, this.vertexHolder.set('s', shellVertex));
}
}
this.longWritable.set(value.getLongId());
// value.getPropertiesWithState().clear(); // no longer needed in reduce phase
value.setProperty(TITAN_ID, titanVertex.getLongId()); // need this for id resolution in edge-map phase
// value.removeEdges(Tokens.Action.DROP, OUT); // no longer needed in reduce phase
context.write(this.longWritable, this.vertexHolder.set('v', value));
}
} catch (final Exception e) {
graph.rollback();
DEFAULT_COMPAT.incrementContextCounter(context, Counters.FAILED_TRANSACTIONS, 1L);
throw new IOException(e.getMessage(), e);
}
}
@Override
public void cleanup(final Mapper<NullWritable, FaunusVertex, LongWritable, Holder<FaunusVertex>>.Context context) throws IOException, InterruptedException {
try {
graph.commit();
DEFAULT_COMPAT.incrementContextCounter(context, Counters.SUCCESSFUL_TRANSACTIONS, 1L);
} catch (Exception e) {
LOGGER.error("Could not commit transaction during Map.cleanup(): ", e);
graph.rollback();
DEFAULT_COMPAT.incrementContextCounter(context, Counters.FAILED_TRANSACTIONS, 1L);
throw new IOException(e.getMessage(), e);
}
graph.shutdown();
}
public TitanVertex getCreateOrDeleteVertex(final FaunusVertex faunusVertex, final Mapper<NullWritable, FaunusVertex, LongWritable, Holder<FaunusVertex>>.Context context) throws InterruptedException {
if (this.trackState && faunusVertex.isRemoved()) {
final Vertex titanVertex = graph.getVertex(faunusVertex.getLongId());
if (null == titanVertex)
DEFAULT_COMPAT.incrementContextCounter(context, Counters.NULL_VERTICES_IGNORED, 1L);
else {
titanVertex.remove();
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_REMOVED, 1L);
}
return null;
} else {
final TitanVertex titanVertex;
if (faunusVertex.isNew()) {
// Vertex is new to this faunus run, but might already exist in Titan
titanVertex = getTitanVertex(faunusVertex, context);
} else {
titanVertex = (TitanVertex) graph.getVertex(faunusVertex.getLongId());
if (titanVertex==null) {
DEFAULT_COMPAT.incrementContextCounter(context, Counters.NULL_VERTICES_IGNORED, 1L);
return null;
}
}
if (faunusVertex.isNew() || faunusVertex.isModified()) {
//Synchronize properties
for (final TitanProperty p : faunusVertex.query().queryAll().properties()) {
if (null != loaderScript && loaderScript.hasVPropMethod()) {
loaderScript.getVProp(p, titanVertex, graph, context);
} else {
getCreateOrDeleteRelation(graph, trackState, OUT, faunusVertex, titanVertex,
(StandardFaunusProperty) p, context);
}
}
}
return titanVertex;
}
}
private TitanVertex getTitanVertex(FaunusVertex faunusVertex, Mapper<NullWritable, FaunusVertex, LongWritable, Holder<FaunusVertex>>.Context context) {
if (null != loaderScript && loaderScript.hasVertexMethod()) {
return loaderScript.getVertex(faunusVertex, graph, context);
} else {
VertexLabel titanLabel = BaseVertexLabel.DEFAULT_VERTEXLABEL;
FaunusVertexLabel faunusLabel = faunusVertex.getVertexLabel();
if (!faunusLabel.isDefault()) titanLabel = graph.getVertexLabel(faunusLabel.getName());
TitanVertex tv = graph.addVertexWithLabel(titanLabel);
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_ADDED, 1L);
return tv;
}
}
} | 1no label
| titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_formats_util_TitanGraphOutputMapReduce.java |
2,268 | public class OsUtils {
/**
* The value of <tt>System.getProperty("os.name")</tt>.
*/
public static final String OS_NAME = System.getProperty("os.name");
/**
* True iff running on Linux.
*/
public static final boolean LINUX = OS_NAME.trim().toLowerCase(Locale.ROOT).startsWith("linux");
/**
* True iff running on Windows.
*/
public static final boolean WINDOWS = OS_NAME.trim().toLowerCase(Locale.ROOT).startsWith("windows");
/**
* True iff running on SunOS.
*/
public static final boolean SOLARIS = OS_NAME.trim().toLowerCase(Locale.ROOT).startsWith("sun");
/**
* True iff running on Mac.
*/
public static final boolean MAC = OS_NAME.trim().toLowerCase(Locale.ROOT).startsWith("mac");
/**
* True iff running on HP-UX.
*/
public static final boolean HP = OS_NAME.trim().toLowerCase(Locale.ROOT).startsWith("hp");
private OsUtils() {
}
} | 0true
| src_main_java_org_elasticsearch_common_os_OsUtils.java |
2,050 | @Component("blCustomerStateRefresher")
public class CustomerStateRefresher implements ApplicationListener<CustomerPersistedEvent> {
/**
* Removes the complete {@link Customer} stored in session and adds a new session variable for just the customer ID. This
* should occur once the session-based {@link Customer} (all anonymous Customers start out this way) has been persisted.
*
* <p>Also updates {@link CustomerState} with the persisted {@link Customer} so that it will always represent the most
* up-to-date version that is in the database</p>
*
* @param event the event carrying the freshly persisted {@link Customer}
*/
@Override
public void onApplicationEvent(final CustomerPersistedEvent event) {
Customer dbCustomer = event.getCustomer();
//if there is an active request, remove the session-based customer if it exists and update CustomerState
WebRequest request = BroadleafRequestContext.getBroadleafRequestContext().getWebRequest();
if (request != null) {
String customerAttribute = CustomerStateRequestProcessor.getAnonymousCustomerSessionAttributeName();
String customerIdAttribute = CustomerStateRequestProcessor.getAnonymousCustomerIdSessionAttributeName();
Customer sessionCustomer = (Customer) request.getAttribute(customerAttribute, WebRequest.SCOPE_GLOBAL_SESSION);
//invalidate the session-based customer if it's there and the ID is the same as the Customer that has been
//persisted
if (sessionCustomer != null && sessionCustomer.getId().equals(dbCustomer.getId())) {
request.removeAttribute(customerAttribute, WebRequest.SCOPE_GLOBAL_SESSION);
request.setAttribute(customerIdAttribute, dbCustomer.getId(), WebRequest.SCOPE_GLOBAL_SESSION);
}
//Update CustomerState if the persisted Customer ID is the same
if (CustomerState.getCustomer() != null && CustomerState.getCustomer().getId().equals(dbCustomer.getId())) {
CustomerState.setCustomer(event.getCustomer());
}
}
}
} | 1no label
| core_broadleaf-profile-web_src_main_java_org_broadleafcommerce_profile_web_core_CustomerStateRefresher.java |
1,095 | public abstract class AbstractOrderItemRequest {
protected Sku sku;
protected Category category;
protected Product product;
protected Order order;
protected int quantity;
protected Money salePriceOverride;
protected Money retailPriceOverride;
protected PersonalMessage personalMessage;
protected Map<String,String> itemAttributes = new HashMap<String,String>();
public Sku getSku() {
return sku;
}
public void setSku(Sku sku) {
this.sku = sku;
}
public Category getCategory() {
return category;
}
public void setCategory(Category category) {
this.category = category;
}
public Product getProduct() {
return product;
}
public void setProduct(Product product) {
this.product = product;
}
public void setOrder(Order order) {
this.order = order;
}
public Order getOrder() {
return order;
}
public int getQuantity() {
return quantity;
}
public void setQuantity(int quantity) {
this.quantity = quantity;
}
public Map<String, String> getItemAttributes() {
return itemAttributes;
}
public void setItemAttributes(Map<String, String> itemAttributes) {
this.itemAttributes = itemAttributes;
}
public Money getSalePriceOverride() {
return salePriceOverride;
}
public void setSalePriceOverride(Money salePriceOverride) {
this.salePriceOverride = salePriceOverride;
}
public Money getRetailPriceOverride() {
return retailPriceOverride;
}
public void setRetailPriceOverride(Money retailPriceOverride) {
this.retailPriceOverride = retailPriceOverride;
}
protected void copyProperties(AbstractOrderItemRequest newRequest) {
newRequest.setCategory(category);
newRequest.setItemAttributes(itemAttributes);
newRequest.setPersonalMessage(personalMessage);
newRequest.setProduct(product);
newRequest.setQuantity(quantity);
newRequest.setSku(sku);
newRequest.setOrder(order);
newRequest.setSalePriceOverride(salePriceOverride);
newRequest.setRetailPriceOverride(retailPriceOverride);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof AbstractOrderItemRequest)) return false;
AbstractOrderItemRequest that = (AbstractOrderItemRequest) o;
if (quantity != that.quantity) return false;
if (category != null ? !category.equals(that.category) : that.category != null) return false;
if (product != null ? !product.equals(that.product) : that.product != null) return false;
if (salePriceOverride != null ? !salePriceOverride.equals(that.salePriceOverride) : that.salePriceOverride != null)
return false;
if (sku != null ? !sku.equals(that.sku) : that.sku != null) return false;
if (order != null ? !order.equals(that.order) : that.order != null) return false;
return true;
}
@Override
public int hashCode() {
int result = sku != null ? sku.hashCode() : 0;
result = 31 * result + (category != null ? category.hashCode() : 0);
result = 31 * result + (product != null ? product.hashCode() : 0);
result = 31 * result + (order != null ? order.hashCode() : 0);
result = 31 * result + quantity;
result = 31 * result + (salePriceOverride != null ? salePriceOverride.hashCode() : 0);
return result;
}
public PersonalMessage getPersonalMessage() {
return personalMessage;
}
public void setPersonalMessage(PersonalMessage personalMessage) {
this.personalMessage = personalMessage;
}
} | 1no label
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_service_call_AbstractOrderItemRequest.java |
127 | public class ClientResponse implements IdentifiedDataSerializable {
private Data response;
private int callId;
private boolean isEvent;
private boolean isError;
public ClientResponse() {
}
public ClientResponse(Data response, boolean isError, int callId) {
this.response = response;
this.isError = isError;
this.callId = callId;
}
public ClientResponse(Data response, int callId, boolean isEvent) {
this.response = response;
this.callId = callId;
this.isEvent = isEvent;
}
public Data getResponse() {
return response;
}
public int getCallId() {
return callId;
}
public boolean isEvent() {
return isEvent;
}
public boolean isError() {
return isError;
}
@Override
public int getFactoryId() {
return ClientDataSerializerHook.ID;
}
@Override
public int getId() {
return ClientDataSerializerHook.CLIENT_RESPONSE;
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeInt(callId);
out.writeBoolean(isEvent);
out.writeBoolean(isError);
response.writeData(out);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
callId = in.readInt();
isEvent = in.readBoolean();
isError = in.readBoolean();
response = new Data();
response.readData(in);
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_client_ClientResponse.java |
729 | @Service("blCatalogService")
public class CatalogServiceImpl implements CatalogService {
@Resource(name="blCategoryDao")
protected CategoryDao categoryDao;
@Resource(name="blProductDao")
protected ProductDao productDao;
@Resource(name="blSkuDao")
protected SkuDao skuDao;
@Resource(name="blProductOptionDao")
protected ProductOptionDao productOptionDao;
@Override
public Product findProductById(Long productId) {
return productDao.readProductById(productId);
}
@Override
public List<Product> findProductsByName(String searchName) {
return productDao.readProductsByName(searchName);
}
@Override
public List<Product> findProductsByName(String searchName, int limit, int offset) {
return productDao.readProductsByName(searchName, limit, offset);
}
@Override
public List<Product> findActiveProductsByCategory(Category category) {
return productDao.readActiveProductsByCategory(category.getId());
}
@Override
public List<Product> findFilteredActiveProductsByCategory(Category category, ProductSearchCriteria searchCriteria) {
return productDao.readFilteredActiveProductsByCategory(category.getId(), searchCriteria);
}
@Override
public List<Product> findFilteredActiveProductsByQuery(String query, ProductSearchCriteria searchCriteria) {
return productDao.readFilteredActiveProductsByQuery(query, searchCriteria);
}
@Override
public List<Product> findActiveProductsByCategory(Category category, int limit, int offset) {
return productDao.readActiveProductsByCategory(category.getId(), limit, offset);
}
@Override
@Deprecated
public List<Product> findActiveProductsByCategory(Category category, Date currentDate) {
return productDao.readActiveProductsByCategory(category.getId(), currentDate);
}
@Override
@Deprecated
public List<Product> findFilteredActiveProductsByCategory(Category category, Date currentDate, ProductSearchCriteria searchCriteria) {
return productDao.readFilteredActiveProductsByCategory(category.getId(), currentDate, searchCriteria);
}
@Override
@Deprecated
public List<Product> findFilteredActiveProductsByQuery(String query, Date currentDate, ProductSearchCriteria searchCriteria) {
return productDao.readFilteredActiveProductsByQuery(query, currentDate, searchCriteria);
}
@Override
@Deprecated
public List<Product> findActiveProductsByCategory(Category category, Date currentDate, int limit, int offset) {
return productDao.readActiveProductsByCategory(category.getId(), currentDate, limit, offset);
}
@Override
public List<ProductBundle> findAutomaticProductBundles() {
List<ProductBundle> bundles = productDao.readAutomaticProductBundles();
Collections.sort(bundles, new ProductBundleComparator());
return bundles;
}
@Override
@Transactional("blTransactionManager")
public Product saveProduct(Product product) {
return productDao.save(product);
}
@Override
public Category findCategoryById(Long categoryId) {
return categoryDao.readCategoryById(categoryId);
}
@Override
@Deprecated
public Category findCategoryByName(String categoryName) {
return categoryDao.readCategoryByName(categoryName);
}
@Override
public List<Category> findCategoriesByName(String categoryName) {
return categoryDao.readCategoriesByName(categoryName);
}
@Override
public List<Category> findCategoriesByName(String categoryName, int limit, int offset) {
return categoryDao.readCategoriesByName(categoryName, limit, offset);
}
@Override
@Transactional("blTransactionManager")
public Category saveCategory(Category category) {
return categoryDao.save(category);
}
@Override
@Transactional("blTransactionManager")
public void removeCategory(Category category){
categoryDao.delete(category);
}
@Override
public List<Category> findAllCategories() {
return categoryDao.readAllCategories();
}
@Override
public List<Category> findAllCategories(int limit, int offset) {
return categoryDao.readAllCategories(limit, offset);
}
@Override
public List<Category> findAllParentCategories() {
return categoryDao.readAllParentCategories();
}
@Override
public List<Category> findAllSubCategories(Category category) {
return categoryDao.readAllSubCategories(category);
}
@Override
public List<Category> findAllSubCategories(Category category, int limit, int offset) {
return categoryDao.readAllSubCategories(category, limit, offset);
}
@Override
public List<Category> findActiveSubCategoriesByCategory(Category category) {
return categoryDao.readActiveSubCategoriesByCategory(category);
}
@Override
public List<Category> findActiveSubCategoriesByCategory(Category category, int limit, int offset) {
return categoryDao.readActiveSubCategoriesByCategory(category, limit, offset);
}
@Override
public List<Product> findAllProducts() {
return categoryDao.readAllProducts();
}
@Override
public List<Product> findAllProducts(int limit, int offset) {
return categoryDao.readAllProducts(limit, offset);
}
@Override
public List<Sku> findAllSkus() {
return skuDao.readAllSkus();
}
@Override
public Sku findSkuById(Long skuId) {
return skuDao.readSkuById(skuId);
}
@Override
@Transactional("blTransactionManager")
public Sku saveSku(Sku sku) {
return skuDao.save(sku);
}
@Override
@Transactional("blTransactionManager")
public SkuFee saveSkuFee(SkuFee fee) {
return skuDao.saveSkuFee(fee);
}
@Override
public List<Sku> findSkusByIds(List<Long> ids) {
return skuDao.readSkusById(ids);
}
public void setProductDao(ProductDao productDao) {
this.productDao = productDao;
}
public void setSkuDao(SkuDao skuDao) {
this.skuDao = skuDao;
}
@Override
public List<Product> findProductsForCategory(Category category) {
return productDao.readProductsByCategory(category.getId());
}
@Override
public List<Product> findProductsForCategory(Category category, int limit, int offset) {
return productDao.readProductsByCategory(category.getId(), limit, offset);
}
public void setCategoryDao(CategoryDao categoryDao) {
this.categoryDao = categoryDao;
}
@Override
@Deprecated
public Map<String, List<Long>> getChildCategoryURLMapByCategoryId(Long categoryId) {
Category category = findCategoryById(categoryId);
if (category != null) {
return category.getChildCategoryURLMap();
}
return null;
}
@Override
public Category createCategory() {
return categoryDao.create();
}
@Override
public Sku createSku() {
return skuDao.create();
}
@Override
public Product createProduct(ProductType productType) {
return productDao.create(productType);
}
@Override
public List<ProductOption> readAllProductOptions() {
return productOptionDao.readAllProductOptions();
}
@Transactional("blTransactionManager")
public ProductOption saveProductOption(ProductOption option) {
return productOptionDao.saveProductOption(option);
}
@Override
public ProductOption findProductOptionById(Long productOptionId) {
return productOptionDao.readProductOptionById(productOptionId);
}
@Override
public ProductOptionValue findProductOptionValueById(Long productOptionValueId) {
return productOptionDao.readProductOptionValueById(productOptionValueId);
}
@Override
public Category findCategoryByURI(String uri) {
return categoryDao.findCategoryByURI(uri);
}
@Override
public Product findProductByURI(String uri) {
List<Product> products = productDao.findProductByURI(uri);
if (products == null || products.size() == 0) {
return null;
} else if (products.size() == 1) {
return products.get(0);
} else {
// First check for a direct hit on the url
for(Product product : products) {
if (uri.equals(product.getUrl())) {
return product;
}
}
for(Product product : products) {
// Next check for a direct hit on the generated URL.
if (uri.equals(product.getGeneratedUrl())) {
return product;
}
}
// Otherwise, return the first product
return products.get(0);
}
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_catalog_service_CatalogServiceImpl.java |
2,808 | public class AnalysisModuleTests extends ElasticsearchTestCase {
private Injector injector;
public AnalysisService getAnalysisService(Settings settings) {
Index index = new Index("test");
Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
injector = new ModulesBuilder().add(
new IndexSettingsModule(index, settings),
new IndexNameModule(index),
new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
.createChildInjector(parentInjector);
return injector.getInstance(AnalysisService.class);
}
@Test
public void testSimpleConfigurationJson() {
Settings settings = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/test1.json").build();
testSimpleConfiguration(settings);
}
@Test
public void testSimpleConfigurationYaml() {
Settings settings = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/test1.yml").build();
testSimpleConfiguration(settings);
}
@Test
public void testDefaultFactoryTokenFilters() {
assertTokenFilter("keyword_repeat", KeywordRepeatFilter.class);
assertTokenFilter("persian_normalization", PersianNormalizationFilter.class);
assertTokenFilter("arabic_normalization", ArabicNormalizationFilter.class);
}
@Test
public void testVersionedAnalyzers() throws Exception {
Settings settings2 = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/test1.yml")
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0).build();
AnalysisService analysisService2 = getAnalysisService(settings2);
// IndicesAnalysisService always has the current version
IndicesAnalysisService indicesAnalysisService2 = injector.getInstance(IndicesAnalysisService.class);
assertThat(indicesAnalysisService2.analyzer("default"), is(instanceOf(NamedAnalyzer.class)));
NamedAnalyzer defaultNamedAnalyzer = (NamedAnalyzer) indicesAnalysisService2.analyzer("default");
assertThat(defaultNamedAnalyzer.analyzer(), is(instanceOf(StandardAnalyzer.class)));
assertLuceneAnalyzerVersion(Version.CURRENT.luceneVersion, defaultNamedAnalyzer.analyzer());
// analysis service has the expected version
assertThat(analysisService2.analyzer("standard").analyzer(), is(instanceOf(StandardAnalyzer.class)));
assertLuceneAnalyzerVersion(Version.V_0_90_0.luceneVersion, analysisService2.analyzer("standard").analyzer());
assertLuceneAnalyzerVersion(Version.V_0_90_0.luceneVersion, analysisService2.analyzer("thai").analyzer());
}
// ugly reflection-based hack to extract the Lucene version from an analyzer
private void assertLuceneAnalyzerVersion(org.apache.lucene.util.Version luceneVersion, Analyzer analyzer) throws Exception {
Field field = analyzer.getClass().getSuperclass().getDeclaredField("matchVersion");
boolean currentAccessible = field.isAccessible();
field.setAccessible(true);
Object obj = field.get(analyzer);
field.setAccessible(currentAccessible);
assertThat(obj, instanceOf(org.apache.lucene.util.Version.class));
org.apache.lucene.util.Version analyzerVersion = (org.apache.lucene.util.Version) obj;
assertThat(analyzerVersion, is(luceneVersion));
}
private void assertTokenFilter(String name, Class clazz) {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(ImmutableSettings.settingsBuilder().build());
TokenFilterFactory tokenFilter = analysisService.tokenFilter(name);
Tokenizer tokenizer = new WhitespaceTokenizer(Version.CURRENT.luceneVersion, new StringReader("foo bar"));
TokenStream stream = tokenFilter.create(tokenizer);
assertThat(stream, instanceOf(clazz));
}
private void testSimpleConfiguration(Settings settings) {
AnalysisService analysisService = getAnalysisService(settings);
Analyzer analyzer = analysisService.analyzer("custom1").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
CustomAnalyzer custom1 = (CustomAnalyzer) analyzer;
assertThat(custom1.tokenizerFactory(), instanceOf(StandardTokenizerFactory.class));
assertThat(custom1.tokenFilters().length, equalTo(2));
StopTokenFilterFactory stop1 = (StopTokenFilterFactory) custom1.tokenFilters()[0];
assertThat(stop1.stopWords().size(), equalTo(1));
//assertThat((Iterable<char[]>) stop1.stopWords(), hasItem("test-stop".toCharArray()));
analyzer = analysisService.analyzer("custom2").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
CustomAnalyzer custom2 = (CustomAnalyzer) analyzer;
// HtmlStripCharFilterFactory html = (HtmlStripCharFilterFactory) custom2.charFilters()[0];
// assertThat(html.readAheadLimit(), equalTo(HTMLStripCharFilter.DEFAULT_READ_AHEAD));
//
// html = (HtmlStripCharFilterFactory) custom2.charFilters()[1];
// assertThat(html.readAheadLimit(), equalTo(1024));
// verify characters mapping
analyzer = analysisService.analyzer("custom5").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
CustomAnalyzer custom5 = (CustomAnalyzer) analyzer;
assertThat(custom5.charFilters()[0], instanceOf(MappingCharFilterFactory.class));
// verify aliases
analyzer = analysisService.analyzer("alias1").analyzer();
assertThat(analyzer, instanceOf(StandardAnalyzer.class));
// check custom pattern replace filter
analyzer = analysisService.analyzer("custom3").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
CustomAnalyzer custom3 = (CustomAnalyzer) analyzer;
PatternReplaceCharFilterFactory patternReplaceCharFilterFactory = (PatternReplaceCharFilterFactory) custom3.charFilters()[0];
assertThat(patternReplaceCharFilterFactory.getPattern().pattern(), equalTo("sample(.*)"));
assertThat(patternReplaceCharFilterFactory.getReplacement(), equalTo("replacedSample $1"));
// check custom class name (my)
analyzer = analysisService.analyzer("custom4").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
CustomAnalyzer custom4 = (CustomAnalyzer) analyzer;
assertThat(custom4.tokenFilters()[0], instanceOf(MyFilterTokenFilterFactory.class));
// // verify Czech stemmer
// analyzer = analysisService.analyzer("czechAnalyzerWithStemmer").analyzer();
// assertThat(analyzer, instanceOf(CustomAnalyzer.class));
// CustomAnalyzer czechstemmeranalyzer = (CustomAnalyzer) analyzer;
// assertThat(czechstemmeranalyzer.tokenizerFactory(), instanceOf(StandardTokenizerFactory.class));
// assertThat(czechstemmeranalyzer.tokenFilters().length, equalTo(4));
// assertThat(czechstemmeranalyzer.tokenFilters()[3], instanceOf(CzechStemTokenFilterFactory.class));
//
// // check dictionary decompounder
// analyzer = analysisService.analyzer("decompoundingAnalyzer").analyzer();
// assertThat(analyzer, instanceOf(CustomAnalyzer.class));
// CustomAnalyzer dictionaryDecompounderAnalyze = (CustomAnalyzer) analyzer;
// assertThat(dictionaryDecompounderAnalyze.tokenizerFactory(), instanceOf(StandardTokenizerFactory.class));
// assertThat(dictionaryDecompounderAnalyze.tokenFilters().length, equalTo(1));
// assertThat(dictionaryDecompounderAnalyze.tokenFilters()[0], instanceOf(DictionaryCompoundWordTokenFilterFactory.class));
Set<?> wordList = Analysis.getWordSet(null, settings, "index.analysis.filter.dict_dec.word_list", Lucene.VERSION);
MatcherAssert.assertThat(wordList.size(), equalTo(6));
// MatcherAssert.assertThat(wordList, hasItems("donau", "dampf", "schiff", "spargel", "creme", "suppe"));
}
@Test
public void testWordListPath() throws Exception {
Environment env = new Environment(ImmutableSettings.Builder.EMPTY_SETTINGS);
String[] words = new String[]{"donau", "dampf", "schiff", "spargel", "creme", "suppe"};
File wordListFile = generateWordList(words);
Settings settings = settingsBuilder().loadFromSource("index: \n word_list_path: " + wordListFile.getAbsolutePath()).build();
Set<?> wordList = Analysis.getWordSet(env, settings, "index.word_list", Lucene.VERSION);
MatcherAssert.assertThat(wordList.size(), equalTo(6));
// MatcherAssert.assertThat(wordList, hasItems(words));
}
private File generateWordList(String[] words) throws Exception {
File wordListFile = File.createTempFile("wordlist", ".txt");
wordListFile.deleteOnExit();
BufferedWriter writer = null;
try {
writer = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(wordListFile), Charsets.UTF_8));
for (String word : words) {
writer.write(word);
writer.write('\n');
}
} finally {
if (writer != null) {
writer.close();
}
}
return wordListFile;
}
} | 0true
| src_test_java_org_elasticsearch_index_analysis_AnalysisModuleTests.java |
1,089 | public class OSQLPredicate extends OBaseParser implements OCommandPredicate {
protected Set<OProperty> properties = new HashSet<OProperty>();
protected OSQLFilterCondition rootCondition;
protected List<String> recordTransformed;
protected List<OSQLFilterItemParameter> parameterItems;
protected int braces;
protected OCommandContext context;
public OSQLPredicate() {
}
public OSQLPredicate(final String iText) {
text(iText);
}
protected void throwSyntaxErrorException(final String iText) {
final String syntax = getSyntax();
if (syntax.equals("?"))
throw new OCommandSQLParsingException(iText, parserText, parserGetPreviousPosition());
throw new OCommandSQLParsingException(iText + ". Use " + syntax, parserText, parserGetPreviousPosition());
}
public OSQLPredicate text(final String iText) {
if (iText == null)
throw new OCommandSQLParsingException("Query text is null");
try {
parserText = iText;
parserTextUpperCase = parserText.toUpperCase(Locale.ENGLISH);
parserSetCurrentPosition(0);
parserSkipWhiteSpaces();
rootCondition = (OSQLFilterCondition) extractConditions(null);
optimize();
} catch (OQueryParsingException e) {
if (e.getText() == null)
// QUERY EXCEPTION BUT WITHOUT TEXT: NEST IT
throw new OQueryParsingException("Error on parsing query", parserText, parserGetCurrentPosition(), e);
throw e;
} catch (Throwable t) {
throw new OQueryParsingException("Error on parsing query", parserText, parserGetCurrentPosition(), t);
}
return this;
}
public Object evaluate() {
return evaluate(null, null, null);
}
public Object evaluate(final OCommandContext iContext) {
return evaluate(null, null, iContext);
}
public Object evaluate(final ORecord<?> iRecord, ODocument iCurrentResult, final OCommandContext iContext) {
if (rootCondition == null)
return true;
return rootCondition.evaluate((ORecordSchemaAware<?>) iRecord, iCurrentResult, iContext);
}
private Object extractConditions(final OSQLFilterCondition iParentCondition) {
final int oldPosition = parserGetCurrentPosition();
parserNextWord(true, " )=><,\r\n");
final String word = parserGetLastWord();
if (word.length() > 0 && (word.equalsIgnoreCase("SELECT") || word.equalsIgnoreCase("TRAVERSE"))) {
// SUB QUERY
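// Illustrative example (assumption, not from the original source): for a predicate such as
// "city in (select from City where name = 'Rome')" the parenthesized text is extracted
// below and wrapped in an OSQLSynchQuery to be executed as a sub-query.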
final StringBuilder embedded = new StringBuilder();
OStringSerializerHelper.getEmbedded(parserText, oldPosition - 1, -1, embedded);
parserSetCurrentPosition(oldPosition + embedded.length() + 1);
return new OSQLSynchQuery<Object>(embedded.toString());
}
parserSetCurrentPosition(oldPosition);
OSQLFilterCondition currentCondition = extractCondition();
// CHECK IF THERE IS ANOTHER CONDITION ON RIGHT
while (parserSkipWhiteSpaces()) {
if (!parserIsEnded() && parserGetCurrentChar() == ')')
return currentCondition;
final OQueryOperator nextOperator = extractConditionOperator();
if (nextOperator == null)
return currentCondition;
if (nextOperator.precedence > currentCondition.getOperator().precedence) {
// SWAP ITEMS
final OSQLFilterCondition subCondition = new OSQLFilterCondition(currentCondition.right, nextOperator);
currentCondition.right = subCondition;
subCondition.right = extractConditionItem(false, 1);
} else {
final OSQLFilterCondition parentCondition = new OSQLFilterCondition(currentCondition, nextOperator);
parentCondition.right = extractConditions(parentCondition);
currentCondition = parentCondition;
}
}
// END OF TEXT
return currentCondition;
}
protected OSQLFilterCondition extractCondition() {
if (!parserSkipWhiteSpaces())
// END OF TEXT
return null;
// EXTRACT ITEMS
Object left = extractConditionItem(true, 1);
if (left != null && checkForEnd(left.toString()))
return null;
OQueryOperator oper;
final Object right;
if (left instanceof OQueryOperator && ((OQueryOperator) left).isUnary()) {
oper = (OQueryOperator) left;
left = extractConditionItem(false, 1);
right = null;
} else {
oper = extractConditionOperator();
if (oper instanceof OQueryOperatorNot)
// SPECIAL CASE: READ NEXT OPERATOR
oper = new OQueryOperatorNot(extractConditionOperator());
right = oper != null ? extractConditionItem(false, oper.expectedRightWords) : null;
}
// CREATE THE CONDITION OBJECT
return new OSQLFilterCondition(left, oper, right);
}
protected boolean checkForEnd(final String iWord) {
if (iWord != null
&& (iWord.equals(OCommandExecutorSQLSelect.KEYWORD_ORDER) || iWord.equals(OCommandExecutorSQLSelect.KEYWORD_LIMIT) || iWord
.equals(OCommandExecutorSQLSelect.KEYWORD_SKIP))) {
parserMoveCurrentPosition(iWord.length() * -1);
return true;
}
return false;
}
private OQueryOperator extractConditionOperator() {
if (!parserSkipWhiteSpaces())
// END OF PARSING: JUST RETURN
return null;
if (parserGetCurrentChar() == ')')
// FOUND ')': JUST RETURN
return null;
final OQueryOperator[] operators = OSQLEngine.getInstance().getRecordOperators();
final String[] candidateOperators = new String[operators.length];
for (int i = 0; i < candidateOperators.length; ++i)
candidateOperators[i] = operators[i].keyword;
final int operatorPos = parserNextChars(true, false, candidateOperators);
if (operatorPos == -1) {
parserGoBack();
return null;
}
final OQueryOperator op = operators[operatorPos];
if (op.expectsParameters) {
// PARSE PARAMETERS IF ANY
parserGoBack();
parserNextWord(true, " 0123456789'\"");
final String word = parserGetLastWord();
final List<String> params = new ArrayList<String>();
// CHECK FOR PARAMETERS
if (word.length() > op.keyword.length() && word.charAt(op.keyword.length()) == OStringSerializerHelper.EMBEDDED_BEGIN) {
int paramBeginPos = parserGetCurrentPosition() - (word.length() - op.keyword.length());
parserSetCurrentPosition(OStringSerializerHelper.getParameters(parserText, paramBeginPos, -1, params));
} else if (!word.equals(op.keyword))
throw new OQueryParsingException("Malformed usage of operator '" + op.toString() + "'. Parsed operator is: " + word);
try {
// CONFIGURE COULD INSTANTIATE A NEW OBJECT: ACT AS A FACTORY
return op.configure(params);
} catch (Exception e) {
throw new OQueryParsingException("Syntax error using the operator '" + op.toString() + "'. Syntax is: " + op.getSyntax());
}
} else
parserMoveCurrentPosition(+1);
return op;
}
private Object extractConditionItem(final boolean iAllowOperator, final int iExpectedWords) {
final Object[] result = new Object[iExpectedWords];
for (int i = 0; i < iExpectedWords; ++i) {
parserNextWord(false, " =><,\r\n");
String word = parserGetLastWord();
if (word.length() == 0)
break;
final String uWord = word.toUpperCase();
final int lastPosition = parserIsEnded() ? parserText.length() : parserGetCurrentPosition();
if (word.length() > 0 && word.charAt(0) == OStringSerializerHelper.EMBEDDED_BEGIN) {
braces++;
// SUB-CONDITION
parserSetCurrentPosition(lastPosition - word.length() + 1);
final Object subCondition = extractConditions(null);
if (!parserSkipWhiteSpaces() || parserGetCurrentChar() == ')') {
braces--;
parserMoveCurrentPosition(+1);
}
result[i] = subCondition;
} else if (word.charAt(0) == OStringSerializerHelper.LIST_BEGIN) {
// COLLECTION OF ELEMENTS
parserSetCurrentPosition(lastPosition - word.length());
final List<String> stringItems = new ArrayList<String>();
parserSetCurrentPosition(OStringSerializerHelper.getCollection(parserText, parserGetCurrentPosition(), stringItems));
result[i] = convertCollectionItems(stringItems);
parserMoveCurrentPosition(+1);
} else if (uWord.startsWith(OSQLFilterItemFieldAll.NAME + OStringSerializerHelper.EMBEDDED_BEGIN)) {
result[i] = new OSQLFilterItemFieldAll(this, word);
} else if (uWord.startsWith(OSQLFilterItemFieldAny.NAME + OStringSerializerHelper.EMBEDDED_BEGIN)) {
result[i] = new OSQLFilterItemFieldAny(this, word);
} else {
if (uWord.equals("NOT")) {
if (iAllowOperator)
return new OQueryOperatorNot();
else {
// GET THE NEXT VALUE
parserNextWord(false, " )=><,\r\n");
final String nextWord = parserGetLastWord();
if (nextWord.length() > 0) {
word += " " + nextWord;
if (word.endsWith(")"))
word = word.substring(0, word.length() - 1);
}
}
} else if (uWord.equals("AND"))
// SPECIAL CASE IN "BETWEEN X AND Y"
result[i] = word;
while (word.endsWith(")")) {
final int openParenthesis = word.indexOf('(');
if (openParenthesis == -1) {
// DISCARD END PARENTHESIS
word = word.substring(0, word.length() - 1);
parserMoveCurrentPosition(-1);
} else
break;
}
result[i] = OSQLHelper.parseValue(this, this, word, context);
}
}
return iExpectedWords == 1 ? result[0] : result;
}
private List<Object> convertCollectionItems(List<String> stringItems) {
List<Object> coll = new ArrayList<Object>();
for (String s : stringItems) {
coll.add(OSQLHelper.parseValue(this, this, s, context));
}
return coll;
}
public OSQLFilterCondition getRootCondition() {
return rootCondition;
}
@Override
public String toString() {
if (rootCondition != null)
return "Parsed: " + rootCondition.toString();
return "Unparsed: " + parserText;
}
/**
* Binds parameters.
*
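* Illustrative usage (not part of the original source): for a predicate parsed from
* "name = :name", passing a map with key "name" binds its value to the matching
* OSQLFilterItemParameter before evaluation.
*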
* @param iArgs the parameter values, keyed by position (Integer) or by parameter name
*/
public void bindParameters(final Map<Object, Object> iArgs) {
if (parameterItems == null || iArgs == null || iArgs.size() == 0)
return;
for (Entry<Object, Object> entry : iArgs.entrySet()) {
if (entry.getKey() instanceof Integer)
parameterItems.get(((Integer) entry.getKey())).setValue(entry.getValue());
else {
String paramName = entry.getKey().toString();
for (OSQLFilterItemParameter value : parameterItems) {
if (value.getName().equalsIgnoreCase(paramName)) {
value.setValue(entry.getValue());
break;
}
}
}
}
}
public OSQLFilterItemParameter addParameter(final String iName) {
final String name;
if (iName.charAt(0) == OStringSerializerHelper.PARAMETER_NAMED) {
name = iName.substring(1);
// CHECK THE PARAMETER NAME IS CORRECT
if (!OStringSerializerHelper.isAlphanumeric(name)) {
throw new OQueryParsingException("Parameter name '" + name + "' is invalid, only alphanumeric characters are allowed");
}
} else
name = iName;
final OSQLFilterItemParameter param = new OSQLFilterItemParameter(name);
if (parameterItems == null)
parameterItems = new ArrayList<OSQLFilterItemParameter>();
parameterItems.add(param);
return param;
}
public void setRootCondition(final OSQLFilterCondition iCondition) {
rootCondition = iCondition;
}
protected void optimize() {
if (rootCondition != null)
computePrefetchFieldList(rootCondition, new HashSet<String>());
}
protected Set<String> computePrefetchFieldList(final OSQLFilterCondition iCondition, final Set<String> iFields) {
Object left = iCondition.getLeft();
Object right = iCondition.getRight();
if (left instanceof OSQLFilterItemField) {
((OSQLFilterItemField) left).setPreLoadedFields(iFields);
iFields.add(((OSQLFilterItemField) left).getRoot());
} else if (left instanceof OSQLFilterCondition)
computePrefetchFieldList((OSQLFilterCondition) left, iFields);
if (right instanceof OSQLFilterItemField) {
((OSQLFilterItemField) right).setPreLoadedFields(iFields);
iFields.add(((OSQLFilterItemField) right).getRoot());
} else if (right instanceof OSQLFilterCondition)
computePrefetchFieldList((OSQLFilterCondition) right, iFields);
return iFields;
}
} | 1no label
| core_src_main_java_com_orientechnologies_orient_core_sql_filter_OSQLPredicate.java |
327 | public class MergeException extends Exception {
private static final long serialVersionUID = 1L;
public MergeException() {
super();
}
public MergeException(String arg0, Throwable arg1) {
super(arg0, arg1);
}
public MergeException(String arg0) {
super(arg0);
}
public MergeException(Throwable arg0) {
super(arg0);
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_extensibility_context_merge_exceptions_MergeException.java |
878 | private class OIterateBlockIterator implements Iterator<Object> {
private int p = 0;
public boolean hasNext() {
return p < objects.length;
}
public Object next() {
if (p < objects.length) {
final Object value = objects[p++];
context.setVariable(variableName, value);
return value;
} else {
throw new NoSuchElementException();
}
}
public void remove() {
throw new UnsupportedOperationException();
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_processor_block_OIterateBlock.java |
1,186 | public class OQueryOperatorMajor extends OQueryOperatorEqualityNotNulls {
public OQueryOperatorMajor() {
super(">", 5, false);
}
@Override
@SuppressWarnings("unchecked")
protected boolean evaluateExpression(final OIdentifiable iRecord, final OSQLFilterCondition iCondition, final Object iLeft,
final Object iRight, OCommandContext iContext) {
final Object right = OType.convert(iRight, iLeft.getClass());
if (right == null)
return false;
return ((Comparable<Object>) iLeft).compareTo(right) > 0;
}
@Override
public OIndexReuseType getIndexReuseType(final Object iLeft, final Object iRight) {
if (iRight == null || iLeft == null)
return OIndexReuseType.NO_INDEX;
return OIndexReuseType.INDEX_METHOD;
}
@Override
public Object executeIndexQuery(OCommandContext iContext, OIndex<?> index, INDEX_OPERATION_TYPE iOperationType,
List<Object> keyParams, IndexResultListener resultListener, int fetchLimit) {
final OIndexDefinition indexDefinition = index.getDefinition();
final OIndexInternal<?> internalIndex = index.getInternal();
if (!internalIndex.canBeUsedInEqualityOperators() || !internalIndex.hasRangeQuerySupport())
return null;
final Object result;
if (indexDefinition.getParamCount() == 1) {
final Object key;
if (indexDefinition instanceof OIndexDefinitionMultiValue)
key = ((OIndexDefinitionMultiValue) indexDefinition).createSingleValue(keyParams.get(0));
else
key = indexDefinition.createValue(keyParams);
if (key == null)
return null;
if (INDEX_OPERATION_TYPE.COUNT.equals(iOperationType))
result = index.count(key, false, null, false, fetchLimit);
else if (resultListener != null) {
index.getValuesMajor(key, false, resultListener);
result = resultListener.getResult();
} else
result = index.getValuesMajor(key, false);
} else {
// if we have a situation like "field1 = 1 AND field2 > 2"
// then we fetch a collection whose left (excluded) boundary is the smallest composite key in the
// index that contains keys with values field1=1 and field2=2, and whose right (included) boundary
// is the biggest composite key in the index that contains a key with value field1=1.
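// Illustrative example (not part of the original source): here keyOne would be the composite
// (1, 2) with an excluded left boundary and keyTwo the composite (1) with an included right
// boundary, so the scan covers keys strictly greater than (1, 2) that still start with field1 = 1.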
final OCompositeIndexDefinition compositeIndexDefinition = (OCompositeIndexDefinition) indexDefinition;
final Object keyOne = compositeIndexDefinition.createSingleValue(keyParams);
if (keyOne == null)
return null;
final Object keyTwo = compositeIndexDefinition.createSingleValue(keyParams.subList(0, keyParams.size() - 1));
if (keyTwo == null)
return null;
if (INDEX_OPERATION_TYPE.COUNT.equals(iOperationType))
result = index.count(keyOne, false, keyTwo, true, fetchLimit);
else if (resultListener != null) {
index.getValuesBetween(keyOne, false, keyTwo, true, resultListener);
result = resultListener.getResult();
} else
result = index.getValuesBetween(keyOne, false, keyTwo, true);
}
updateProfiler(iContext, index, keyParams, indexDefinition);
return result;
}
@Override
public ORID getBeginRidRange(final Object iLeft, final Object iRight) {
if (iLeft instanceof OSQLFilterItemField && ODocumentHelper.ATTRIBUTE_RID.equals(((OSQLFilterItemField) iLeft).getRoot()))
if (iRight instanceof ORID)
return new ORecordId(((ORID) iRight).next());
else {
if (iRight instanceof OSQLFilterItemParameter && ((OSQLFilterItemParameter) iRight).getValue(null, null) instanceof ORID)
return new ORecordId(((ORID) ((OSQLFilterItemParameter) iRight).getValue(null, null)).next());
}
return null;
}
@Override
public ORID getEndRidRange(Object iLeft, Object iRight) {
return null;
}
} | 1no label
| core_src_main_java_com_orientechnologies_orient_core_sql_operator_OQueryOperatorMajor.java |
192 | protected static abstract class Run {
/** The length of this run. */
protected int length;
public Run() {
init();
}
/**
* Returns <code>true</code> if this run consumes <code>ch</code>,
* <code>false</code> otherwise. If <code>true</code> is returned,
* the length of the receiver is adjusted accordingly.
*
* @param ch the character to test
* @return <code>true</code> if <code>ch</code> was consumed
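* <p>Illustrative example (not part of the original source): an identifier run consuming
* 'f', 'o', 'o' accepts each character and ends with <code>length == 3</code>.</p>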
*/
protected boolean consume(char ch) {
if (isValid(ch)) {
length++;
return true;
}
return false;
}
/**
* Whether this run accepts that character; does not update state. Called
* from the default implementation of <code>consume</code>.
*
* @param ch the character to test
* @return <code>true</code> if <code>ch</code> is accepted
*/
protected abstract boolean isValid(char ch);
/**
* Resets this run to the initial state.
*/
protected void init() {
length= 0;
}
} | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_editor_CeylonBreakIterator.java |
395 | public class ClusterSearchShardsRequestBuilder extends MasterNodeReadOperationRequestBuilder<ClusterSearchShardsRequest, ClusterSearchShardsResponse, ClusterSearchShardsRequestBuilder> {
public ClusterSearchShardsRequestBuilder(ClusterAdminClient clusterClient) {
super((InternalClusterAdminClient) clusterClient, new ClusterSearchShardsRequest());
}
/**
* Sets the indices the search will be executed on.
*/
public ClusterSearchShardsRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}
/**
* The document types to execute the search against. Defaults to executing the search
* against all types.
*/
public ClusterSearchShardsRequestBuilder setTypes(String... types) {
request.types(types);
return this;
}
/**
* A comma separated list of routing values to control the shards the search will be executed on.
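* For example (illustrative, not part of the original source): <tt>setRouting("user1,user2")</tt>.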
*/
public ClusterSearchShardsRequestBuilder setRouting(String routing) {
request.routing(routing);
return this;
}
/**
* The routing values to control the shards that the search will be executed on.
*/
public ClusterSearchShardsRequestBuilder setRouting(String... routing) {
request.routing(routing);
return this;
}
/**
* Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
* <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
* a custom value, which guarantees that the same order will be used across different requests.
*/
public ClusterSearchShardsRequestBuilder setPreference(String preference) {
request.preference(preference);
return this;
}
/**
* Specifies which categories of requested indices to ignore and how to handle wildcard indices
* expressions, for example indices that don't exist.
*/
public ClusterSearchShardsRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
request().indicesOptions(indicesOptions);
return this;
}
@Override
protected void doExecute(ActionListener<ClusterSearchShardsResponse> listener) {
((ClusterAdminClient) client).searchShards(request, listener);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_cluster_shards_ClusterSearchShardsRequestBuilder.java |
654 | watchDog = new OMemoryWatchDog.Listener() {
public void memoryUsageLow(final long iFreeMemory, final long iFreeMemoryPercentage) {
map.setOptimization(iFreeMemoryPercentage < 10 ? 2 : 1);
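// Illustrative mapping (not part of the original source): 8% free memory selects optimization
// level 2 (aggressive), 40% free selects level 1 (mild).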
}
}; | 0true
| core_src_main_java_com_orientechnologies_orient_core_index_engine_OMVRBTreeIndexEngine.java |
1,362 | @ClusterScope(scope=Scope.TEST, numNodes=0)
public class AwarenessAllocationTests extends ElasticsearchIntegrationTest {
private final ESLogger logger = Loggers.getLogger(AwarenessAllocationTests.class);
@Test
public void testSimpleAwareness() throws Exception {
Settings commonSettings = ImmutableSettings.settingsBuilder()
.put("cluster.routing.schedule", "10ms")
.put("cluster.routing.allocation.awareness.attributes", "rack_id")
.build();
logger.info("--> starting 2 nodes on the same rack");
cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.rack_id", "rack_1").build());
cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.rack_id", "rack_1").build());
createIndex("test1");
createIndex("test2");
ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
logger.info("--> starting 1 node on a different rack");
String node3 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.rack_id", "rack_2").build());
long start = System.currentTimeMillis();
ObjectIntOpenHashMap<String> counts;
// On slow machines the initial relocation might be delayed
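// Illustrative note (not part of the original source): the loop below re-checks cluster state
// every 100ms for up to 10s until node3 (the only rack_2 node) holds one copy of each of the
// 10 shards, as forced by rack awareness.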
do {
Thread.sleep(100);
logger.info("--> waiting for no relocation");
health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").setWaitForRelocatingShards(0).execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
logger.info("--> checking current state");
ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
//System.out.println(clusterState.routingTable().prettyPrint());
// verify that we have 10 shards on node3
counts = new ObjectIntOpenHashMap<String>();
for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
}
}
}
} while (counts.get(node3) != 10 && (System.currentTimeMillis() - start) < 10000);
assertThat(counts.get(node3), equalTo(10));
}
@Test
@Slow
public void testAwarenessZones() throws InterruptedException {
Settings commonSettings = ImmutableSettings.settingsBuilder()
.put("cluster.routing.allocation.awareness.force.zone.values", "a,b")
.put("cluster.routing.allocation.awareness.attributes", "zone")
.build();
logger.info("--> starting 6 nodes on different zones");
String A_0 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "a").build());
String B_0 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "b").build());
String B_1 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "b").build());
String A_1 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "a").build());
client().admin().indices().prepareCreate("test")
.setSettings(settingsBuilder().put("index.number_of_shards", 5)
.put("index.number_of_replicas", 1)).execute().actionGet();
ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForRelocatingShards(0).execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
ObjectIntOpenHashMap<String> counts = new ObjectIntOpenHashMap<String>();
for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
}
}
}
assertThat(counts.get(A_1), anyOf(equalTo(2),equalTo(3)));
assertThat(counts.get(B_1), anyOf(equalTo(2),equalTo(3)));
assertThat(counts.get(A_0), anyOf(equalTo(2),equalTo(3)));
assertThat(counts.get(B_0), anyOf(equalTo(2),equalTo(3)));
}
@Test
@Slow
public void testAwarenessZonesIncrementalNodes() throws InterruptedException {
Settings commonSettings = ImmutableSettings.settingsBuilder()
.put("cluster.routing.allocation.awareness.force.zone.values", "a,b")
.put("cluster.routing.allocation.awareness.attributes", "zone")
.build();
logger.info("--> starting 2 nodes on zones 'a' & 'b'");
String A_0 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "a").build());
String B_0 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "b").build());
client().admin().indices().prepareCreate("test")
.setSettings(settingsBuilder().put("index.number_of_shards", 5)
.put("index.number_of_replicas", 1)).execute().actionGet();
ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").setWaitForRelocatingShards(0).execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
ObjectIntOpenHashMap<String> counts = new ObjectIntOpenHashMap<String>();
for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
}
}
}
assertThat(counts.get(A_0), equalTo(5));
assertThat(counts.get(B_0), equalTo(5));
logger.info("--> starting another node in zone 'b'");
String B_1 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.zone", "b").build());
health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
client().admin().cluster().prepareReroute().get();
health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").setWaitForActiveShards(10).setWaitForRelocatingShards(0).execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
counts = new ObjectIntOpenHashMap<String>();
for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
}
}
}
assertThat(counts.get(A_0), equalTo(5));
assertThat(counts.get(B_0), equalTo(3));
assertThat(counts.get(B_1), equalTo(2));
String noZoneNode = cluster().startNode();
health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
client().admin().cluster().prepareReroute().get();
health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForActiveShards(10).setWaitForRelocatingShards(0).execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
counts = new ObjectIntOpenHashMap<String>();
for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
}
}
}
assertThat(counts.get(A_0), equalTo(5));
assertThat(counts.get(B_0), equalTo(3));
assertThat(counts.get(B_1), equalTo(2));
assertThat(counts.containsKey(noZoneNode), equalTo(false));
client().admin().cluster().prepareUpdateSettings().setTransientSettings(ImmutableSettings.settingsBuilder().put("cluster.routing.allocation.awareness.attributes", "").build()).get();
health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForActiveShards(10).setWaitForRelocatingShards(0).execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
counts = new ObjectIntOpenHashMap<String>();
for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
}
}
}
assertThat(counts.get(A_0), equalTo(3));
assertThat(counts.get(B_0), equalTo(3));
assertThat(counts.get(B_1), equalTo(2));
assertThat(counts.get(noZoneNode), equalTo(2));
}
} | 0true
| src_test_java_org_elasticsearch_cluster_allocation_AwarenessAllocationTests.java |
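The two awareness tests above revolve around a handful of cluster settings. As a reference, here is a minimal sketch of a node-settings factory enabling forced zone awareness; the helper name and the zone tag passed in are illustrative assumptions, not part of the test suite.
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;

public class AwarenessSettingsSketch {
    // Builds settings for a node that participates in forced zone awareness.
    public static Settings zoneAwareNodeSettings(String zone) {
        return ImmutableSettings.settingsBuilder()
                // balance shard copies across exactly these zones, even if one is empty
                .put("cluster.routing.allocation.awareness.force.zone.values", "a,b")
                // spread copies across distinct values of the "zone" attribute
                .put("cluster.routing.allocation.awareness.attributes", "zone")
                .put("node.zone", zone) // illustrative zone tag for this node
                .build();
    }
}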
3,622 | public class TokenCountFieldMapperIntegrationTests extends ElasticsearchIntegrationTest {
@ParametersFactory
public static Iterable<Object[]> buildParameters() {
List<Object[]> parameters = new ArrayList<Object[]>();
for (boolean storeCountedFields : new boolean[] { true, false }) {
for (boolean loadCountedFields : new boolean[] { true, false }) {
parameters.add(new Object[] { storeCountedFields, loadCountedFields });
}
}
return parameters;
}
private final boolean storeCountedFields;
private final boolean loadCountedFields;
public TokenCountFieldMapperIntegrationTests(@Name("storeCountedFields") boolean storeCountedFields,
@Name("loadCountedFields") boolean loadCountedFields) {
this.storeCountedFields = storeCountedFields;
this.loadCountedFields = loadCountedFields;
}
/**
* It is possible to get the token count in a search response.
*/
@Test
public void searchReturnsTokenCount() throws ElasticsearchException, IOException {
init();
assertSearchReturns(searchById("single"), "single");
assertSearchReturns(searchById("bulk1"), "bulk1");
assertSearchReturns(searchById("bulk2"), "bulk2");
assertSearchReturns(searchById("multi"), "multi");
assertSearchReturns(searchById("multibulk1"), "multibulk1");
assertSearchReturns(searchById("multibulk2"), "multibulk2");
}
/**
* It is possible to search by token count.
*/
@Test
public void searchByTokenCount() throws ElasticsearchException, IOException {
init();
assertSearchReturns(searchByNumericRange(4, 4).get(), "single");
assertSearchReturns(searchByNumericRange(10, 10).get(), "multibulk2");
assertSearchReturns(searchByNumericRange(7, 10).get(), "multi", "multibulk1", "multibulk2");
assertSearchReturns(searchByNumericRange(1, 10).get(), "single", "bulk1", "bulk2", "multi", "multibulk1", "multibulk2");
assertSearchReturns(searchByNumericRange(12, 12).get());
}
/**
 * It is possible to facet by token count.
*/
@Test
public void facetByTokenCount() throws ElasticsearchException, IOException {
init();
String facetField = randomFrom(ImmutableList.of(
"foo.token_count", "foo.token_count_unstored", "foo.token_count_with_doc_values"));
SearchResponse result = searchByNumericRange(1, 10)
.addFacet(new TermsFacetBuilder("facet").field(facetField)).get();
assertSearchReturns(result, "single", "bulk1", "bulk2", "multi", "multibulk1", "multibulk2");
assertThat(result.getFacets().facets().size(), equalTo(1));
TermsFacet facet = (TermsFacet) result.getFacets().facets().get(0);
assertThat(facet.getEntries().size(), equalTo(9));
}
private void init() throws ElasticsearchException, IOException {
prepareCreate("test").addMapping("test", jsonBuilder().startObject()
.startObject("test")
.startObject("properties")
.startObject("foo")
.field("type", "multi_field")
.startObject("fields")
.startObject("foo")
.field("type", "string")
.field("store", storeCountedFields)
.field("analyzer", "simple")
.endObject()
.startObject("token_count")
.field("type", "token_count")
.field("analyzer", "standard")
.field("store", true)
.endObject()
.startObject("token_count_unstored")
.field("type", "token_count")
.field("analyzer", "standard")
.endObject()
.startObject("token_count_with_doc_values")
.field("type", "token_count")
.field("analyzer", "standard")
.startObject("fielddata")
.field("format", LuceneTestCase.defaultCodecSupportsSortedSet() ? "doc_values" : null)
.endObject()
.endObject()
.endObject()
.endObject()
.endObject()
.endObject().endObject()).get();
ensureGreen();
assertTrue(prepareIndex("single", "I have four terms").get().isCreated());
BulkResponse bulk = client().prepareBulk()
.add(prepareIndex("bulk1", "bulk three terms"))
.add(prepareIndex("bulk2", "this has five bulk terms")).get();
assertFalse(bulk.buildFailureMessage(), bulk.hasFailures());
assertTrue(prepareIndex("multi", "two terms", "wow now I have seven lucky terms").get().isCreated());
bulk = client().prepareBulk()
.add(prepareIndex("multibulk1", "one", "oh wow now I have eight unlucky terms"))
.add(prepareIndex("multibulk2", "six is a bunch of terms", "ten! ten terms is just crazy! too many too count!")).get();
assertFalse(bulk.buildFailureMessage(), bulk.hasFailures());
assertThat(refresh().getFailedShards(), equalTo(0));
}
private IndexRequestBuilder prepareIndex(String id, String... texts) throws IOException {
return client().prepareIndex("test", "test", id).setSource("foo", texts);
}
private SearchResponse searchById(String id) {
return prepareSearch().setQuery(QueryBuilders.termQuery("_id", id)).get();
}
private SearchRequestBuilder searchByNumericRange(int low, int high) {
return prepareSearch().setQuery(QueryBuilders.rangeQuery(randomFrom(
ImmutableList.of("foo.token_count", "foo.token_count_unstored", "foo.token_count_with_doc_values")
)).gte(low).lte(high));
}
private SearchRequestBuilder prepareSearch() {
SearchRequestBuilder request = client().prepareSearch("test").setTypes("test");
request.addField("foo.token_count");
if (loadCountedFields) {
request.addField("foo");
}
return request;
}
private void assertSearchReturns(SearchResponse result, String... ids) {
assertThat(result.getHits().getTotalHits(), equalTo((long) ids.length));
assertThat(result.getHits().hits().length, equalTo(ids.length));
List<String> foundIds = new ArrayList<String>();
for (SearchHit hit : result.getHits()) {
foundIds.add(hit.id());
}
assertThat(foundIds, containsInAnyOrder(ids));
for (SearchHit hit : result.getHits()) {
String id = hit.id();
if (id.equals("single")) {
assertSearchHit(hit, 4);
} else if (id.equals("bulk1")) {
assertSearchHit(hit, 3);
} else if (id.equals("bulk2")) {
assertSearchHit(hit, 5);
} else if (id.equals("multi")) {
assertSearchHit(hit, 2, 7);
} else if (id.equals("multibulk1")) {
assertSearchHit(hit, 1, 8);
} else if (id.equals("multibulk2")) {
assertSearchHit(hit, 6, 10);
} else {
throw new ElasticsearchException("Unexpected response!");
}
}
}
private void assertSearchHit(SearchHit hit, int... termCounts) {
assertThat(hit.field("foo.token_count"), not(nullValue()));
assertThat(hit.field("foo.token_count").values().size(), equalTo(termCounts.length));
for (int i = 0; i < termCounts.length; i++) {
assertThat((Integer) hit.field("foo.token_count").values().get(i), equalTo(termCounts[i]));
}
if (loadCountedFields && storeCountedFields) {
assertThat(hit.field("foo").values().size(), equalTo(termCounts.length));
}
}
} | 0true
| src_test_java_org_elasticsearch_index_mapper_core_TokenCountFieldMapperIntegrationTests.java |
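For readers unfamiliar with the token_count field exercised above, a minimal standalone mapping sketch follows; the type name "doc" is an illustrative assumption, and the structure mirrors the multi_field layout built in init().
import java.io.IOException;
import org.elasticsearch.common.xcontent.XContentBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;

public class TokenCountMappingSketch {
    // "foo" stays an analyzed string; "foo.token_count" indexes the number of
    // tokens the standard analyzer produces for each value.
    public static XContentBuilder fooMapping() throws IOException {
        return jsonBuilder().startObject()
            .startObject("doc")
                .startObject("properties")
                    .startObject("foo")
                        .field("type", "multi_field")
                        .startObject("fields")
                            .startObject("foo").field("type", "string").endObject()
                            .startObject("token_count")
                                .field("type", "token_count")
                                .field("analyzer", "standard")
                            .endObject()
                        .endObject()
                    .endObject()
                .endObject()
            .endObject()
        .endObject();
    }
}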
223 | private static class OffsetsEnum implements Comparable<OffsetsEnum> {
DocsAndPositionsEnum dp;
int pos;
int id;
OffsetsEnum(DocsAndPositionsEnum dp, int id) throws IOException {
this.dp = dp;
this.id = id;
this.pos = 1;
}
@Override
public int compareTo(OffsetsEnum other) {
try {
int off = dp.startOffset();
int otherOff = other.dp.startOffset();
if (off == otherOff) {
return id - other.id;
} else {
return Long.signum(((long)off) - otherOff);
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
} | 0true
| src_main_java_org_apache_lucene_search_postingshighlight_XPostingsHighlighter.java |
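The compareTo above deliberately widens to long before subtracting: a plain int subtraction (off - otherOff) can overflow for extreme offsets and invert the ordering. A self-contained sketch of the same idiom:
public final class OverflowSafeCompareSketch {
    // Negative/zero/positive result with Comparator semantics, without the
    // int-subtraction overflow hazard; Long.signum collapses the long diff to an int.
    static int compareOffsets(int off, int otherOff) {
        return Long.signum(((long) off) - otherOff);
    }
    // On Java 7+ the same result is available as Integer.compare(off, otherOff).
}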
2,084 | public class FileSystemUtils {
private static ESLogger logger = ESLoggerFactory.getLogger(FileSystemUtils.class.getName());
private static final long mkdirsStallTimeout = TimeValue.timeValueMinutes(5).millis();
private static final Object mkdirsMutex = new Object();
private static volatile Thread mkdirsThread;
private static volatile long mkdirsStartTime;
public static boolean mkdirs(File dir) {
synchronized (mkdirsMutex) {
try {
mkdirsThread = Thread.currentThread();
mkdirsStartTime = System.currentTimeMillis();
return dir.mkdirs();
} finally {
mkdirsThread = null;
}
}
}
public static void checkMkdirsStall(long currentTime) {
Thread mkdirsThread1 = mkdirsThread;
long stallTime = currentTime - mkdirsStartTime;
if (mkdirsThread1 != null && (stallTime > mkdirsStallTimeout)) {
logger.error("mkdirs stalled for {} on {}, trying to interrupt", new TimeValue(stallTime), mkdirsThread1.getName());
mkdirsThread1.interrupt(); // try and interrupt it...
}
}
public static int maxOpenFiles(File testDir) {
boolean dirCreated = false;
if (!testDir.exists()) {
dirCreated = true;
testDir.mkdirs();
}
List<RandomAccessFile> files = new ArrayList<RandomAccessFile>();
try {
while (true) {
files.add(new RandomAccessFile(new File(testDir, "tmp" + files.size()), "rw"));
}
} catch (IOException ioe) {
int i = 0;
for (RandomAccessFile raf : files) {
try {
raf.close();
} catch (IOException e) {
// ignore
}
new File(testDir, "tmp" + i++).delete();
}
if (dirCreated) {
deleteRecursively(testDir);
}
}
return files.size();
}
public static boolean hasExtensions(File root, String... extensions) {
if (root != null && root.exists()) {
if (root.isDirectory()) {
File[] children = root.listFiles();
if (children != null) {
for (File child : children) {
if (child.isDirectory()) {
boolean has = hasExtensions(child, extensions);
if (has) {
return true;
}
} else {
for (String extension : extensions) {
if (child.getName().endsWith(extension)) {
return true;
}
}
}
}
}
}
}
return false;
}
/**
* Returns true if at least one of the files exists.
*/
public static boolean exists(File... files) {
for (File file : files) {
if (file.exists()) {
return true;
}
}
return false;
}
public static boolean deleteRecursively(File[] roots) {
boolean deleted = true;
for (File root : roots) {
deleted &= deleteRecursively(root);
}
return deleted;
}
public static boolean deleteRecursively(File root) {
return deleteRecursively(root, true);
}
private static boolean innerDeleteRecursively(File root) {
return deleteRecursively(root, true);
}
/**
* Delete the supplied {@link java.io.File} - for directories,
* recursively delete any nested directories or files as well.
*
* @param root the root <code>File</code> to delete
* @param deleteRoot whether or not to delete the root itself or just the content of the root.
* @return <code>true</code> if the <code>File</code> was deleted,
* otherwise <code>false</code>
*/
public static boolean deleteRecursively(File root, boolean deleteRoot) {
if (root != null && root.exists()) {
if (root.isDirectory()) {
File[] children = root.listFiles();
if (children != null) {
for (File aChildren : children) {
innerDeleteRecursively(aChildren);
}
}
}
if (deleteRoot) {
return root.delete();
} else {
return true;
}
}
return false;
}
public static void syncFile(File fileToSync) throws IOException {
boolean success = false;
int retryCount = 0;
IOException exc = null;
while (!success && retryCount < 5) {
retryCount++;
RandomAccessFile file = null;
try {
try {
file = new RandomAccessFile(fileToSync, "rw");
file.getFD().sync();
success = true;
} finally {
if (file != null)
file.close();
}
} catch (IOException ioe) {
if (exc == null)
exc = ioe;
try {
// Pause 5 msec
Thread.sleep(5);
} catch (InterruptedException ie) {
throw new InterruptedIOException(ie.getMessage());
}
}
}
}
public static void copyFile(File sourceFile, File destinationFile) throws IOException {
FileInputStream sourceIs = null;
FileChannel source = null;
FileOutputStream destinationOs = null;
FileChannel destination = null;
try {
sourceIs = new FileInputStream(sourceFile);
source = sourceIs.getChannel();
destinationOs = new FileOutputStream(destinationFile);
destination = destinationOs.getChannel();
destination.transferFrom(source, 0, source.size());
} finally {
if (source != null) {
source.close();
}
if (sourceIs != null) {
sourceIs.close();
}
if (destination != null) {
destination.close();
}
if (destinationOs != null) {
destinationOs.close();
}
}
}
private FileSystemUtils() {
}
} | 1no label
| src_main_java_org_elasticsearch_common_io_FileSystemUtils.java |
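A short usage sketch of the utilities above; the file paths are illustrative assumptions.
import java.io.File;
import java.io.IOException;
import org.elasticsearch.common.io.FileSystemUtils;

public class FileSystemUtilsUsageSketch {
    public static void main(String[] args) throws IOException {
        File src = new File("/tmp/source.dat"); // illustrative paths
        File dst = new File("/tmp/copy.dat");
        FileSystemUtils.copyFile(src, dst);     // channel-based copy
        FileSystemUtils.syncFile(dst);          // fsync with up to five retries
        FileSystemUtils.deleteRecursively(new File("/tmp/scratch"), true); // delete dir and contents
    }
}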
1,301 | public class ClusterState implements ToXContent {
public interface Custom {
interface Factory<T extends Custom> {
String type();
T readFrom(StreamInput in) throws IOException;
void writeTo(T customState, StreamOutput out) throws IOException;
void toXContent(T customState, XContentBuilder builder, ToXContent.Params params);
}
}
public static Map<String, Custom.Factory> customFactories = new HashMap<String, Custom.Factory>();
/**
 * Register a custom cluster state factory. Make sure to call it from a static block.
*/
public static void registerFactory(String type, Custom.Factory factory) {
customFactories.put(type, factory);
}
@Nullable
public static <T extends Custom> Custom.Factory<T> lookupFactory(String type) {
return customFactories.get(type);
}
public static <T extends Custom> Custom.Factory<T> lookupFactorySafe(String type) throws ElasticsearchIllegalArgumentException {
Custom.Factory<T> factory = customFactories.get(type);
if (factory == null) {
throw new ElasticsearchIllegalArgumentException("No custom state factory registered for type [" + type + "]");
}
return factory;
}
private final long version;
private final RoutingTable routingTable;
private final DiscoveryNodes nodes;
private final MetaData metaData;
private final ClusterBlocks blocks;
private final AllocationExplanation allocationExplanation;
private final ImmutableOpenMap<String, Custom> customs;
// built on demand
private volatile RoutingNodes routingNodes;
private SettingsFilter settingsFilter;
public ClusterState(long version, ClusterState state) {
this(version, state.metaData(), state.routingTable(), state.nodes(), state.blocks(), state.allocationExplanation(), state.customs());
}
public ClusterState(long version, MetaData metaData, RoutingTable routingTable, DiscoveryNodes nodes, ClusterBlocks blocks, AllocationExplanation allocationExplanation, ImmutableOpenMap<String, Custom> customs) {
this.version = version;
this.metaData = metaData;
this.routingTable = routingTable;
this.nodes = nodes;
this.blocks = blocks;
this.allocationExplanation = allocationExplanation;
this.customs = customs;
}
public long version() {
return this.version;
}
public long getVersion() {
return version();
}
public DiscoveryNodes nodes() {
return this.nodes;
}
public DiscoveryNodes getNodes() {
return nodes();
}
public MetaData metaData() {
return this.metaData;
}
public MetaData getMetaData() {
return metaData();
}
public RoutingTable routingTable() {
return routingTable;
}
public RoutingTable getRoutingTable() {
return routingTable();
}
public RoutingNodes routingNodes() {
return routingTable.routingNodes(this);
}
public RoutingNodes getRoutingNodes() {
return readOnlyRoutingNodes();
}
public ClusterBlocks blocks() {
return this.blocks;
}
public ClusterBlocks getBlocks() {
return blocks;
}
public AllocationExplanation allocationExplanation() {
return this.allocationExplanation;
}
public AllocationExplanation getAllocationExplanation() {
return allocationExplanation();
}
public ImmutableOpenMap<String, Custom> customs() {
return this.customs;
}
public ImmutableOpenMap<String, Custom> getCustoms() {
return this.customs;
}
/**
* Returns a built (on demand) routing nodes view of the routing table. <b>NOTE, the routing nodes
* are mutable, use them just for read operations</b>
*/
public RoutingNodes readOnlyRoutingNodes() {
if (routingNodes != null) {
return routingNodes;
}
routingNodes = routingTable.routingNodes(this);
return routingNodes;
}
public ClusterState settingsFilter(SettingsFilter settingsFilter) {
this.settingsFilter = settingsFilter;
return this;
}
public String prettyPrint() {
StringBuilder sb = new StringBuilder();
sb.append(nodes().prettyPrint());
sb.append(routingTable().prettyPrint());
sb.append(readOnlyRoutingNodes().prettyPrint());
return sb.toString();
}
@Override
public String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
builder.startObject();
toXContent(builder, EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (IOException e) {
return "{ \"error\" : \"" + e.getMessage() + "\"}";
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
Set<String> metrics = Strings.splitStringByCommaToSet(params.param("metric", "_all"));
boolean isAllMetricsOnly = metrics.size() == 1 && metrics.contains("_all");
if (isAllMetricsOnly || metrics.contains("nodes")) {
builder.field("master_node", nodes().masterNodeId());
}
if (isAllMetricsOnly || metrics.contains("blocks")) {
builder.startObject("blocks");
if (!blocks().global().isEmpty()) {
builder.startObject("global");
for (ClusterBlock block : blocks().global()) {
block.toXContent(builder, params);
}
builder.endObject();
}
if (!blocks().indices().isEmpty()) {
builder.startObject("indices");
for (Map.Entry<String, ImmutableSet<ClusterBlock>> entry : blocks().indices().entrySet()) {
builder.startObject(entry.getKey());
for (ClusterBlock block : entry.getValue()) {
block.toXContent(builder, params);
}
builder.endObject();
}
builder.endObject();
}
builder.endObject();
}
// nodes
if (isAllMetricsOnly || metrics.contains("nodes")) {
builder.startObject("nodes");
for (DiscoveryNode node : nodes()) {
builder.startObject(node.id(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("name", node.name());
builder.field("transport_address", node.address().toString());
builder.startObject("attributes");
for (Map.Entry<String, String> attr : node.attributes().entrySet()) {
builder.field(attr.getKey(), attr.getValue());
}
builder.endObject();
builder.endObject();
}
builder.endObject();
}
// meta data
if (isAllMetricsOnly || metrics.contains("metadata")) {
builder.startObject("metadata");
builder.startObject("templates");
for (ObjectCursor<IndexTemplateMetaData> cursor : metaData().templates().values()) {
IndexTemplateMetaData templateMetaData = cursor.value;
builder.startObject(templateMetaData.name(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("template", templateMetaData.template());
builder.field("order", templateMetaData.order());
builder.startObject("settings");
Settings settings = templateMetaData.settings();
if (settingsFilter != null) {
settings = settingsFilter.filterSettings(settings);
}
settings.toXContent(builder, params);
builder.endObject();
builder.startObject("mappings");
for (ObjectObjectCursor<String, CompressedString> cursor1 : templateMetaData.mappings()) {
byte[] mappingSource = cursor1.value.uncompressed();
XContentParser parser = XContentFactory.xContent(mappingSource).createParser(mappingSource);
Map<String, Object> mapping = parser.map();
if (mapping.size() == 1 && mapping.containsKey(cursor1.key)) {
// the type name is the root value, reduce it
mapping = (Map<String, Object>) mapping.get(cursor1.key);
}
builder.field(cursor1.key);
builder.map(mapping);
}
builder.endObject();
builder.endObject();
}
builder.endObject();
builder.startObject("indices");
for (IndexMetaData indexMetaData : metaData()) {
builder.startObject(indexMetaData.index(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("state", indexMetaData.state().toString().toLowerCase(Locale.ENGLISH));
builder.startObject("settings");
Settings settings = indexMetaData.settings();
if (settingsFilter != null) {
settings = settingsFilter.filterSettings(settings);
}
settings.toXContent(builder, params);
builder.endObject();
builder.startObject("mappings");
for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.mappings()) {
byte[] mappingSource = cursor.value.source().uncompressed();
XContentParser parser = XContentFactory.xContent(mappingSource).createParser(mappingSource);
Map<String, Object> mapping = parser.map();
if (mapping.size() == 1 && mapping.containsKey(cursor.key)) {
// the type name is the root value, reduce it
mapping = (Map<String, Object>) mapping.get(cursor.key);
}
builder.field(cursor.key);
builder.map(mapping);
}
builder.endObject();
builder.startArray("aliases");
for (ObjectCursor<String> cursor : indexMetaData.aliases().keys()) {
builder.value(cursor.value);
}
builder.endArray();
builder.endObject();
}
builder.endObject();
for (ObjectObjectCursor<String, MetaData.Custom> cursor : metaData.customs()) {
builder.startObject(cursor.key);
MetaData.lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params);
builder.endObject();
}
builder.endObject();
}
// routing table
if (isAllMetricsOnly || metrics.contains("routing_table")) {
builder.startObject("routing_table");
builder.startObject("indices");
for (IndexRoutingTable indexRoutingTable : routingTable()) {
builder.startObject(indexRoutingTable.index(), XContentBuilder.FieldCaseConversion.NONE);
builder.startObject("shards");
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
builder.startArray(Integer.toString(indexShardRoutingTable.shardId().id()));
for (ShardRouting shardRouting : indexShardRoutingTable) {
shardRouting.toXContent(builder, params);
}
builder.endArray();
}
builder.endObject();
builder.endObject();
}
builder.endObject();
builder.endObject();
}
// routing nodes
if (isAllMetricsOnly || metrics.contains("routing_table")) {
builder.startObject("routing_nodes");
builder.startArray("unassigned");
for (ShardRouting shardRouting : readOnlyRoutingNodes().unassigned()) {
shardRouting.toXContent(builder, params);
}
builder.endArray();
builder.startObject("nodes");
for (RoutingNode routingNode : readOnlyRoutingNodes()) {
builder.startArray(routingNode.nodeId(), XContentBuilder.FieldCaseConversion.NONE);
for (ShardRouting shardRouting : routingNode) {
shardRouting.toXContent(builder, params);
}
builder.endArray();
}
builder.endObject();
builder.endObject();
}
if (isAllMetricsOnly || metrics.contains("routing_table")) {
builder.startArray("allocations");
for (Map.Entry<ShardId, List<AllocationExplanation.NodeExplanation>> entry : allocationExplanation().explanations().entrySet()) {
builder.startObject();
builder.field("index", entry.getKey().index().name());
builder.field("shard", entry.getKey().id());
builder.startArray("explanations");
for (AllocationExplanation.NodeExplanation nodeExplanation : entry.getValue()) {
builder.field("desc", nodeExplanation.description());
if (nodeExplanation.node() != null) {
builder.startObject("node");
builder.field("id", nodeExplanation.node().id());
builder.field("name", nodeExplanation.node().name());
builder.endObject();
}
}
builder.endArray();
builder.endObject();
}
builder.endArray();
}
if (isAllMetricsOnly || metrics.contains("customs")) {
for (ObjectObjectCursor<String, Custom> cursor : customs) {
builder.startObject(cursor.key);
lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params);
builder.endObject();
}
}
return builder;
}
public static Builder builder() {
return new Builder();
}
public static Builder builder(ClusterState state) {
return new Builder(state);
}
public static class Builder {
private long version = 0;
private MetaData metaData = MetaData.EMPTY_META_DATA;
private RoutingTable routingTable = RoutingTable.EMPTY_ROUTING_TABLE;
private DiscoveryNodes nodes = DiscoveryNodes.EMPTY_NODES;
private ClusterBlocks blocks = ClusterBlocks.EMPTY_CLUSTER_BLOCK;
private AllocationExplanation allocationExplanation = AllocationExplanation.EMPTY;
private final ImmutableOpenMap.Builder<String, Custom> customs;
public Builder() {
customs = ImmutableOpenMap.builder();
}
public Builder(ClusterState state) {
this.version = state.version();
this.nodes = state.nodes();
this.routingTable = state.routingTable();
this.metaData = state.metaData();
this.blocks = state.blocks();
this.allocationExplanation = state.allocationExplanation();
this.customs = ImmutableOpenMap.builder(state.customs());
}
public Builder nodes(DiscoveryNodes.Builder nodesBuilder) {
return nodes(nodesBuilder.build());
}
public Builder nodes(DiscoveryNodes nodes) {
this.nodes = nodes;
return this;
}
public Builder routingTable(RoutingTable.Builder routingTable) {
return routingTable(routingTable.build());
}
public Builder routingResult(RoutingAllocation.Result routingResult) {
this.routingTable = routingResult.routingTable();
this.allocationExplanation = routingResult.explanation();
return this;
}
public Builder routingTable(RoutingTable routingTable) {
this.routingTable = routingTable;
return this;
}
public Builder metaData(MetaData.Builder metaDataBuilder) {
return metaData(metaDataBuilder.build());
}
public Builder metaData(MetaData metaData) {
this.metaData = metaData;
return this;
}
public Builder blocks(ClusterBlocks.Builder blocksBuilder) {
return blocks(blocksBuilder.build());
}
public Builder blocks(ClusterBlocks block) {
this.blocks = block;
return this;
}
public Builder allocationExplanation(AllocationExplanation allocationExplanation) {
this.allocationExplanation = allocationExplanation;
return this;
}
public Builder version(long version) {
this.version = version;
return this;
}
public Custom getCustom(String type) {
return customs.get(type);
}
public Builder putCustom(String type, Custom custom) {
customs.put(type, custom);
return this;
}
public Builder removeCustom(String type) {
customs.remove(type);
return this;
}
public ClusterState build() {
return new ClusterState(version, metaData, routingTable, nodes, blocks, allocationExplanation, customs.build());
}
public static byte[] toBytes(ClusterState state) throws IOException {
BytesStreamOutput os = new BytesStreamOutput();
writeTo(state, os);
return os.bytes().toBytes();
}
public static ClusterState fromBytes(byte[] data, DiscoveryNode localNode) throws IOException {
return readFrom(new BytesStreamInput(data, false), localNode);
}
public static void writeTo(ClusterState state, StreamOutput out) throws IOException {
out.writeLong(state.version());
MetaData.Builder.writeTo(state.metaData(), out);
RoutingTable.Builder.writeTo(state.routingTable(), out);
DiscoveryNodes.Builder.writeTo(state.nodes(), out);
ClusterBlocks.Builder.writeClusterBlocks(state.blocks(), out);
state.allocationExplanation().writeTo(out);
out.writeVInt(state.customs().size());
for (ObjectObjectCursor<String, Custom> cursor : state.customs()) {
out.writeString(cursor.key);
lookupFactorySafe(cursor.key).writeTo(cursor.value, out);
}
}
public static ClusterState readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException {
Builder builder = new Builder();
builder.version = in.readLong();
builder.metaData = MetaData.Builder.readFrom(in);
builder.routingTable = RoutingTable.Builder.readFrom(in);
builder.nodes = DiscoveryNodes.Builder.readFrom(in, localNode);
builder.blocks = ClusterBlocks.Builder.readClusterBlocks(in);
builder.allocationExplanation = AllocationExplanation.readAllocationExplanation(in);
int customSize = in.readVInt();
for (int i = 0; i < customSize; i++) {
String type = in.readString();
Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in);
builder.putCustom(type, customIndexMetaData);
}
return builder.build();
}
}
} | 1no label
| src_main_java_org_elasticsearch_cluster_ClusterState.java |
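The Builder at the bottom of the class is the intended construction path. A minimal sketch that swaps in a new node set while bumping the version; both arguments are assumed to exist in scope.
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNodes;

public class ClusterStateBuilderSketch {
    // Cluster states are immutable: derive a new one from the current state.
    static ClusterState withNewNodes(ClusterState current, DiscoveryNodes newNodes) {
        return ClusterState.builder(current)
                .nodes(newNodes)
                .version(current.version() + 1)
                .build();
    }
}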
82 | public static class Name {
public static final String File_Details = "StaticAssetImpl_FileDetails_Tab";
public static final String Advanced = "StaticAssetImpl_Advanced_Tab";
} | 0true
| admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_file_domain_StaticAssetImpl.java |
167 | return executeRead(new Callable<Iterable<RawQuery.Result<String>>>() {
@Override
public Iterable<RawQuery.Result<String>> call() throws Exception {
return indexTx.query(query);
}
@Override
public String toString() {
return "RawQuery";
}
}); | 0true
| titan-core_src_main_java_com_thinkaurelius_titan_diskstorage_BackendTransaction.java |
503 | public class OpenDeclarationAction extends Action {
private static final ImageDescriptor ICON = CeylonPlugin.getInstance().getImageRegistry()
.getDescriptor(CeylonResources.CEYLON_OPEN_DECLARATION);
private final IEditorPart editor;
public OpenDeclarationAction(IEditorPart editor) {
this("Open Ceylon Declaration...", editor);
}
public OpenDeclarationAction(String text, IEditorPart editor) {
super(text);
this.editor = editor;
setActionDefinitionId(PLUGIN_ID + ".action.openDeclaration");
setImageDescriptor(ICON);
}
@Override
public void run() {
Shell shell = getWorkbench().getActiveWorkbenchWindow().getShell();
OpenCeylonDeclarationDialog dialog =
new OpenCeylonDeclarationDialog(shell, editor);
dialog.setTitle("Open Ceylon Declaration");
dialog.setMessage("Select a Ceylon declaration to open:");
if (editor instanceof ITextEditor) {
dialog.setInitialPattern(getSelectionText((ITextEditor) editor));
}
dialog.open();
Object[] types = dialog.getResult();
if (types != null && types.length > 0) {
DeclarationWithProject dwp = (DeclarationWithProject) types[0];
gotoDeclaration(dwp.getDeclaration(), dwp.getProject(), editor);
}
}
} | 1no label
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_open_OpenDeclarationAction.java |
41 | @Component("blDynamicFieldPersistenceHandlerHelper")
public class DynamicFieldPersistenceHandlerHelper {
/**
* Builds all of the metadata for all of the dynamic properties within a {@link StructuredContentType}, gleaned from
* the {@link FieldGroup}s and {@link FieldDefinition}s.
*
* @param fieldGroups groups that the {@link Property}s are built from
 * @param inheritedType the type that each built {@link FieldMetadata} records as the origin of the
 * dynamic field (via {@link FieldMetadata#setAvailableToTypes(String[])} and {@link FieldMetadata#setInheritedFromType(String)})
 * @return the dynamic {@link Property} array, sorted by field order and including a hidden "id" property
*/
public Property[] buildDynamicPropertyList(List<FieldGroup> fieldGroups, Class<?> inheritedType) {
List<Property> propertiesList = new ArrayList<Property>();
int groupCount = 1;
int fieldCount = 0;
for (FieldGroup group : fieldGroups) {
List<FieldDefinition> definitions = group.getFieldDefinitions();
for (FieldDefinition definition : definitions) {
Property property = new Property();
property.setName(definition.getName());
BasicFieldMetadata fieldMetadata = new BasicFieldMetadata();
property.setMetadata(fieldMetadata);
fieldMetadata.setFieldType(definition.getFieldType());
fieldMetadata.setMutable(true);
fieldMetadata.setInheritedFromType(inheritedType.getName());
fieldMetadata.setAvailableToTypes(new String[] {inheritedType.getName()});
fieldMetadata.setForeignKeyCollection(false);
fieldMetadata.setMergedPropertyType(MergedPropertyType.PRIMARY);
fieldMetadata.setLength(definition.getMaxLength());
if (definition.getFieldEnumeration() != null && !CollectionUtils.isEmpty(definition.getFieldEnumeration().getEnumerationItems())) {
int count = definition.getFieldEnumeration().getEnumerationItems().size();
String[][] enumItems = new String[count][2];
for (int j = 0; j < count; j++) {
FieldEnumerationItem item = definition.getFieldEnumeration().getEnumerationItems().get(j);
enumItems[j][0] = item.getName();
enumItems[j][1] = item.getFriendlyName();
}
fieldMetadata.setEnumerationValues(enumItems);
}
fieldMetadata.setName(definition.getName());
fieldMetadata.setFriendlyName(definition.getFriendlyName());
fieldMetadata.setSecurityLevel(definition.getSecurityLevel()==null?"":definition.getSecurityLevel());
fieldMetadata.setOrder(fieldCount++);
fieldMetadata.setVisibility(definition.getHiddenFlag()?VisibilityEnum.HIDDEN_ALL:VisibilityEnum.VISIBLE_ALL);
fieldMetadata.setGroup(group.getName());
fieldMetadata.setGroupOrder(groupCount);
fieldMetadata.setTab("General");
fieldMetadata.setTabOrder(100);
fieldMetadata.setGroupCollapsed(group.getInitCollapsedFlag());
fieldMetadata.setExplicitFieldType(SupportedFieldType.UNKNOWN);
fieldMetadata.setLargeEntry(definition.getTextAreaFlag());
fieldMetadata.setProminent(false);
fieldMetadata.setColumnWidth(String.valueOf(definition.getColumnWidth()));
fieldMetadata.setBroadleafEnumeration("");
fieldMetadata.setReadOnly(false);
if (definition.getValidationRegEx() != null) {
Map<String, String> itemMap = new HashMap<String, String>();
itemMap.put("regularExpression", definition.getValidationRegEx());
itemMap.put(ConfigurationItem.ERROR_MESSAGE, definition.getValidationErrorMesageKey());
fieldMetadata.getValidationConfigurations().put("org.broadleafcommerce.openadmin.server.service.persistence.validation.RegexPropertyValidator", itemMap);
}
propertiesList.add(property);
}
groupCount++;
fieldCount = 0;
}
Property property = new Property();
property.setName("id");
BasicFieldMetadata fieldMetadata = new BasicFieldMetadata();
property.setMetadata(fieldMetadata);
fieldMetadata.setFieldType(SupportedFieldType.ID);
fieldMetadata.setSecondaryType(SupportedFieldType.INTEGER);
fieldMetadata.setMutable(true);
fieldMetadata.setInheritedFromType(inheritedType.getName());
fieldMetadata.setAvailableToTypes(new String[] {inheritedType.getName()});
fieldMetadata.setForeignKeyCollection(false);
fieldMetadata.setMergedPropertyType(MergedPropertyType.PRIMARY);
fieldMetadata.setName("id");
fieldMetadata.setFriendlyName("ID");
fieldMetadata.setSecurityLevel("");
fieldMetadata.setVisibility(VisibilityEnum.HIDDEN_ALL);
fieldMetadata.setExplicitFieldType(SupportedFieldType.UNKNOWN);
fieldMetadata.setLargeEntry(false);
fieldMetadata.setProminent(false);
fieldMetadata.setColumnWidth("*");
fieldMetadata.setBroadleafEnumeration("");
fieldMetadata.setReadOnly(true);
propertiesList.add(property);
Property[] properties = new Property[propertiesList.size()];
properties = propertiesList.toArray(properties);
Arrays.sort(properties, new Comparator<Property>() {
@Override
public int compare(Property o1, Property o2) {
/*
* First, compare properties based on order fields
*/
if (o1.getMetadata().getOrder() != null && o2.getMetadata().getOrder() != null) {
return o1.getMetadata().getOrder().compareTo(o2.getMetadata().getOrder());
} else if (o1.getMetadata().getOrder() != null && o2.getMetadata().getOrder() == null) {
/*
* Always favor fields that have an order identified
*/
return -1;
} else if (o1.getMetadata().getOrder() == null && o2.getMetadata().getOrder() != null) {
/*
* Always favor fields that have an order identified
*/
return 1;
} else if (o1.getMetadata().getFriendlyName() != null && o2.getMetadata().getFriendlyName() != null) {
return o1.getMetadata().getFriendlyName().compareTo(o2.getMetadata().getFriendlyName());
} else {
return o1.getName().compareTo(o2.getName());
}
}
});
return properties;
}
} | 0true
| admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_admin_server_handler_DynamicFieldPersistenceHandlerHelper.java |
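The comparator at the end of buildDynamicPropertyList implements a common fallback ordering: explicit order first, then friendly name, then raw name. The same idea in isolation, using an illustrative value type:
import java.util.Comparator;

public class FallbackOrderingSketch {
    static class Item { // illustrative stand-in for Property metadata
        Integer order;
        String friendlyName;
        String name;
    }

    // Items with an explicit order always sort ahead of those without one;
    // remaining ties fall back to friendly name, then to raw name.
    static final Comparator<Item> ORDERING = new Comparator<Item>() {
        @Override
        public int compare(Item a, Item b) {
            if (a.order != null && b.order != null) return a.order.compareTo(b.order);
            if (a.order != null) return -1;
            if (b.order != null) return 1;
            if (a.friendlyName != null && b.friendlyName != null) return a.friendlyName.compareTo(b.friendlyName);
            return a.name.compareTo(b.name);
        }
    };
}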
2,010 | binder.bind(providerMapKey).toProvider(new ProviderWithDependencies<Map<K, Provider<V>>>() {
private Map<K, Provider<V>> providerMap;
@SuppressWarnings("unused")
@Inject
void initialize() {
RealMapBinder.this.binder = null;
Map<K, Provider<V>> providerMapMutable = new LinkedHashMap<K, Provider<V>>();
for (Entry<K, Provider<V>> entry : entrySetProvider.get()) {
Multibinder.checkConfiguration(providerMapMutable.put(entry.getKey(), entry.getValue()) == null,
"Map injection failed due to duplicated key \"%s\"", entry.getKey());
}
providerMap = Collections.unmodifiableMap(providerMapMutable);
}
public Map<K, Provider<V>> get() {
return providerMap;
}
public Set<Dependency<?>> getDependencies() {
return dependencies;
}
}); | 0true
| src_main_java_org_elasticsearch_common_inject_multibindings_MapBinder.java |
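RealMapBinder above is internal machinery; application code reaches it through the public MapBinder API. A minimal module sketch contributing one entry (the key and value types are illustrative):
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.MapBinder;

public class HandlersModule extends AbstractModule {
    @Override
    protected void configure() {
        MapBinder<String, Runnable> handlers =
                MapBinder.newMapBinder(binder(), String.class, Runnable.class);
        // Each addBinding() contributes one map entry; duplicate keys fail at
        // injection time, as enforced by the initialize() check shown above.
        handlers.addBinding("noop").toInstance(new Runnable() {
            @Override
            public void run() { /* no-op */ }
        });
    }
}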
5,262 | public class RangeParser implements Aggregator.Parser {
@Override
public String type() {
return InternalRange.TYPE.name();
}
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
ValuesSourceConfig<NumericValuesSource> config = new ValuesSourceConfig<NumericValuesSource>(NumericValuesSource.class);
String field = null;
List<RangeAggregator.Range> ranges = null;
String script = null;
String scriptLang = null;
Map<String, Object> scriptParams = null;
boolean keyed = false;
boolean assumeSorted = false;
XContentParser.Token token;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.VALUE_STRING) {
if ("field".equals(currentFieldName)) {
field = parser.text();
} else if ("script".equals(currentFieldName)) {
script = parser.text();
} else if ("lang".equals(currentFieldName)) {
scriptLang = parser.text();
} else {
throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("ranges".equals(currentFieldName)) {
ranges = new ArrayList<RangeAggregator.Range>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
double from = Double.NEGATIVE_INFINITY;
String fromAsStr = null;
double to = Double.POSITIVE_INFINITY;
String toAsStr = null;
String key = null;
String toOrFromOrKey = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
toOrFromOrKey = parser.currentName();
} else if (token == XContentParser.Token.VALUE_NUMBER) {
if ("from".equals(toOrFromOrKey)) {
from = parser.doubleValue();
} else if ("to".equals(toOrFromOrKey)) {
to = parser.doubleValue();
}
} else if (token == XContentParser.Token.VALUE_STRING) {
if ("from".equals(toOrFromOrKey)) {
fromAsStr = parser.text();
} else if ("to".equals(toOrFromOrKey)) {
toAsStr = parser.text();
} else if ("key".equals(toOrFromOrKey)) {
key = parser.text();
}
}
}
ranges.add(new RangeAggregator.Range(key, from, fromAsStr, to, toAsStr));
}
} else {
throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
}
} else if (token == XContentParser.Token.START_OBJECT) {
if ("params".equals(currentFieldName)) {
scriptParams = parser.map();
} else {
throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
}
} else if (token == XContentParser.Token.VALUE_BOOLEAN) {
if ("keyed".equals(currentFieldName)) {
keyed = parser.booleanValue();
} else if ("script_values_sorted".equals(currentFieldName)) {
assumeSorted = parser.booleanValue();
} else {
throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
}
} else {
throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
}
}
if (ranges == null) {
throw new SearchParseException(context, "Missing [ranges] in ranges aggregator [" + aggregationName + "]");
}
if (script != null) {
config.script(context.scriptService().search(context.lookup(), scriptLang, script, scriptParams));
}
if (!assumeSorted) {
// we need values to be sorted and unique for efficiency
config.ensureSorted(true);
}
if (field == null) {
return new RangeAggregator.Factory(aggregationName, config, InternalRange.FACTORY, ranges, keyed);
}
FieldMapper<?> mapper = context.smartNameFieldMapper(field);
if (mapper == null) {
config.unmapped(true);
return new RangeAggregator.Factory(aggregationName, config, InternalRange.FACTORY, ranges, keyed);
}
IndexFieldData<?> indexFieldData = context.fieldData().getForField(mapper);
config.fieldContext(new FieldContext(field, indexFieldData));
return new RangeAggregator.Factory(aggregationName, config, InternalRange.FACTORY, ranges, keyed);
}
} | 1no label
| src_main_java_org_elasticsearch_search_aggregations_bucket_range_RangeParser.java |
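The parser above consumes the JSON form of a range aggregation; on the client side the equivalent request is usually assembled with the builders. A sketch with illustrative index, field, and bounds:
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.aggregations.AggregationBuilders;

public class RangeAggregationSketch {
    static SearchResponse priceBuckets(Client client) {
        return client.prepareSearch("products") // illustrative index
                .addAggregation(AggregationBuilders.range("price_ranges")
                        .field("price")
                        .addUnboundedTo(50)     // {"to": 50}
                        .addRange(50, 100)      // {"from": 50, "to": 100}
                        .addUnboundedFrom(100)) // {"from": 100}
                .get();
    }
}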
59 | public class TitanFactory {
private static final Logger log =
LoggerFactory.getLogger(TitanFactory.class);
/**
* Opens a {@link TitanGraph} database.
* <p/>
 * If the argument points to a configuration file, the configuration file is loaded to configure the Titan graph.
* If the string argument is a configuration short-cut, then the short-cut is parsed and used to configure the returned Titan graph.
* <p />
* A configuration short-cut is of the form:
* [STORAGE_BACKEND_NAME]:[DIRECTORY_OR_HOST]
*
* @param shortcutOrFile Configuration file name or configuration short-cut
* @return Titan graph database configured according to the provided configuration
* @see <a href="https://github.com/thinkaurelius/titan/wiki/Graph-Configuration">Graph Configuration Wiki</a>
*/
public static TitanGraph open(String shortcutOrFile) {
return open(getLocalConfiguration(shortcutOrFile));
}
/**
* Opens a {@link TitanGraph} database configured according to the provided configuration.
*
* @param configuration Configuration for the graph database
* @return Titan graph database
* @see <a href="https://github.com/thinkaurelius/titan/wiki/Graph-Configuration">Graph Configuration Wiki</a>
*/
public static TitanGraph open(Configuration configuration) {
return open(new CommonsConfiguration(configuration));
}
/**
* Opens a {@link TitanGraph} database configured according to the provided configuration.
*
* @param configuration Configuration for the graph database
* @return Titan graph database
*/
public static TitanGraph open(BasicConfiguration configuration) {
return open(configuration.getConfiguration());
}
/**
* Opens a {@link TitanGraph} database configured according to the provided configuration.
*
* @param configuration Configuration for the graph database
* @return Titan graph database
*/
public static TitanGraph open(ReadConfiguration configuration) {
return new StandardTitanGraph(new GraphDatabaseConfiguration(configuration));
}
/**
 * Returns a {@link Builder} that allows setting the configuration options for opening a Titan graph database.
* <p />
* In the builder, the configuration options for the graph can be set individually. Once all options are configured,
* the graph can be opened with {@link com.thinkaurelius.titan.core.TitanFactory.Builder#open()}.
*
 * @return a new {@link Builder} for configuring and opening a Titan graph
*/
public static Builder build() {
return new Builder();
}
//--------------------- BUILDER -------------------------------------------
public static class Builder extends UserModifiableConfiguration {
private Builder() {
super(GraphDatabaseConfiguration.buildConfiguration());
}
/**
* Configures the provided configuration path to the given value.
*
 * @param path the configuration path to set
 * @param value the value to assign to the path
 * @return this builder
*/
public Builder set(String path, Object value) {
super.set(path, value);
return this;
}
/**
* Opens a Titan graph with the previously configured options.
*
 * @return the opened Titan graph
*/
public TitanGraph open() {
return TitanFactory.open(super.getConfiguration());
}
}
/**
* Returns a {@link com.thinkaurelius.titan.core.log.LogProcessorFramework} for processing transaction log entries
* against the provided graph instance.
*
 * @param graph the graph whose transaction log entries should be processed
 * @return a log processor framework bound to the given graph
*/
public static LogProcessorFramework openTransactionLog(TitanGraph graph) {
return new StandardLogProcessorFramework((StandardTitanGraph)graph);
}
/**
* Returns a {@link TransactionRecovery} process for recovering partially failed transactions. The recovery process
* will start processing the write-ahead transaction log at the specified transaction time.
*
 * @param graph the graph to recover partially failed transactions for
 * @param sinceEpoch start time of the recovery window, measured from the epoch
 * @param unit time unit of {@code sinceEpoch}
 * @return the started transaction recovery process
*/
public static TransactionRecovery startTransactionRecovery(TitanGraph graph, long sinceEpoch, TimeUnit unit) {
return new StandardTransactionLogProcessor((StandardTitanGraph)graph, new StandardTimestamp(sinceEpoch,unit));
}
//###################################
// HELPER METHODS
//###################################
private static ReadConfiguration getLocalConfiguration(String shortcutOrFile) {
File file = new File(shortcutOrFile);
if (file.exists()) return getLocalConfiguration(file);
else {
int pos = shortcutOrFile.indexOf(':');
if (pos<0) pos = shortcutOrFile.length();
String backend = shortcutOrFile.substring(0,pos);
Preconditions.checkArgument(Backend.REGISTERED_STORAGE_MANAGERS_SHORTHAND.containsKey(backend.toLowerCase()), "Backend shorthand unknown: %s", backend);
String secondArg = null;
if (pos+1<shortcutOrFile.length()) secondArg = shortcutOrFile.substring(pos + 1).trim();
BaseConfiguration config = new BaseConfiguration();
ModifiableConfiguration writeConfig = new ModifiableConfiguration(ROOT_NS,new CommonsConfiguration(config), BasicConfiguration.Restriction.NONE);
writeConfig.set(STORAGE_BACKEND,backend);
ConfigOption option = Backend.REGISTERED_STORAGE_MANAGERS_SHORTHAND.get(backend.toLowerCase());
if (option==null) {
Preconditions.checkArgument(secondArg==null);
} else if (option==STORAGE_DIRECTORY || option==STORAGE_CONF_FILE) {
Preconditions.checkArgument(StringUtils.isNotBlank(secondArg),"Need to provide additional argument to initialize storage backend");
writeConfig.set(option,getAbsolutePath(secondArg));
} else if (option==STORAGE_HOSTS) {
Preconditions.checkArgument(StringUtils.isNotBlank(secondArg),"Need to provide additional argument to initialize storage backend");
writeConfig.set(option,new String[]{secondArg});
} else throw new IllegalArgumentException("Invalid configuration option for backend "+option);
return new CommonsConfiguration(config);
}
}
/**
* Load a properties file containing a Titan graph configuration.
* <p/>
* <ol>
* <li>Load the file contents into a {@link org.apache.commons.configuration.PropertiesConfiguration}</li>
* <li>For each key that points to a configuration object that is either a directory
* or local file, check
* whether the associated value is a non-null, non-absolute path. If so,
* then prepend the absolute path of the parent directory of the provided configuration {@code file}.
* This has the effect of making non-absolute backend
* paths relative to the config file's directory rather than the JVM's
* working directory.
* <li>Return the {@link ReadConfiguration} for the prepared configuration file</li>
* </ol>
* <p/>
*
* @param file A properties file to load
* @return A configuration derived from {@code file}
*/
@SuppressWarnings("unchecked")
private static ReadConfiguration getLocalConfiguration(File file) {
Preconditions.checkArgument(file != null && file.exists() && file.isFile() && file.canRead(),
"Need to specify a readable configuration file, but was given: %s", file.toString());
try {
PropertiesConfiguration configuration = new PropertiesConfiguration(file);
final File tmpParent = file.getParentFile();
final File configParent;
if (null == tmpParent) {
/*
* null usually means we were given a Titan config file path
* string like "foo.properties" that refers to the current
* working directory of the process.
*/
configParent = new File(System.getProperty("user.dir"));
} else {
configParent = tmpParent;
}
Preconditions.checkNotNull(configParent);
Preconditions.checkArgument(configParent.isDirectory());
// TODO this mangling logic is a relic from the hardcoded string days; it should be deleted and rewritten as a setting on ConfigOption
final Pattern p = Pattern.compile("(" +
Pattern.quote(STORAGE_NS.getName()) + "\\..*" +
"(" + Pattern.quote(STORAGE_DIRECTORY.getName()) + "|" +
Pattern.quote(STORAGE_CONF_FILE.getName()) + ")"
+ "|" +
Pattern.quote(INDEX_NS.getName()) + "\\..*" +
"(" + Pattern.quote(INDEX_DIRECTORY.getName()) + "|" +
Pattern.quote(INDEX_CONF_FILE.getName()) + ")"
+ ")");
final Iterator<String> keysToMangle = Iterators.filter(configuration.getKeys(), new Predicate<String>() {
@Override
public boolean apply(String key) {
if (null == key)
return false;
return p.matcher(key).matches();
}
});
while (keysToMangle.hasNext()) {
String k = keysToMangle.next();
Preconditions.checkNotNull(k);
String s = configuration.getString(k);
Preconditions.checkArgument(StringUtils.isNotBlank(s),"Invalid Configuration: key %s has null empty value",k);
configuration.setProperty(k,getAbsolutePath(configParent,s));
}
return new CommonsConfiguration(configuration);
} catch (ConfigurationException e) {
throw new IllegalArgumentException("Could not load configuration at: " + file, e);
}
}
private static final String getAbsolutePath(String file) {
return getAbsolutePath(new File(System.getProperty("user.dir")), file);
}
private static final String getAbsolutePath(final File configParent, String file) {
File storedir = new File(file);
if (!storedir.isAbsolute()) {
String newFile = configParent.getAbsolutePath() + File.separator + file;
log.debug("Overwrote relative path: was {}, now {}", file, newFile);
return newFile;
} else {
log.debug("Loaded absolute path for key: {}", file);
return file;
}
}
} | 1no label
| titan-core_src_main_java_com_thinkaurelius_titan_core_TitanFactory.java |
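A minimal sketch of the two opening styles described in the javadoc above; the storage directories are illustrative assumptions.
import com.thinkaurelius.titan.core.TitanFactory;
import com.thinkaurelius.titan.core.TitanGraph;

public class TitanOpenSketch {
    public static void main(String[] args) {
        // Shorthand form: "<backend>:<directory-or-host>"
        TitanGraph g1 = TitanFactory.open("berkeleyje:/tmp/titan");

        // Builder form: set options individually, then open.
        TitanGraph g2 = TitanFactory.build()
                .set("storage.backend", "berkeleyje")
                .set("storage.directory", "/tmp/titan2")
                .open();

        g1.shutdown();
        g2.shutdown();
    }
}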
874 | public class TransportSearchQueryThenFetchAction extends TransportSearchTypeAction {
@Inject
public TransportSearchQueryThenFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) {
super(settings, threadPool, clusterService, searchService, searchPhaseController);
}
@Override
protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
new AsyncAction(searchRequest, listener).start();
}
private class AsyncAction extends BaseAsyncAction<QuerySearchResult> {
final AtomicArray<FetchSearchResult> fetchResults;
final AtomicArray<IntArrayList> docIdsToLoad;
private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
super(request, listener);
fetchResults = new AtomicArray<FetchSearchResult>(firstResults.length());
docIdsToLoad = new AtomicArray<IntArrayList>(firstResults.length());
}
@Override
protected String firstPhaseName() {
return "query";
}
@Override
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchRequest request, SearchServiceListener<QuerySearchResult> listener) {
searchService.sendExecuteQuery(node, request, listener);
}
@Override
protected void moveToSecondPhase() {
sortedShardList = searchPhaseController.sortDocs(firstResults);
searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);
if (docIdsToLoad.asList().isEmpty()) {
finishHim();
return;
}
final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
int localOperations = 0;
for (AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
QuerySearchResult queryResult = firstResults.get(entry.index);
DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
if (node.id().equals(nodes.localNodeId())) {
localOperations++;
} else {
FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(request, queryResult.id(), entry.value);
executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
}
}
if (localOperations > 0) {
if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) {
threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
for (AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
QuerySearchResult queryResult = firstResults.get(entry.index);
DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
if (node.id().equals(nodes.localNodeId())) {
FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(request, queryResult.id(), entry.value);
executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
}
}
}
});
} else {
boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD;
for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
final QuerySearchResult queryResult = firstResults.get(entry.index);
final DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
if (node.id().equals(nodes.localNodeId())) {
final FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(request, queryResult.id(), entry.value);
try {
if (localAsync) {
threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
}
});
} else {
executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
}
} catch (Throwable t) {
onFetchFailure(t, fetchSearchRequest, entry.index, queryResult.shardTarget(), counter);
}
}
}
}
}
}
void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter, final FetchSearchRequest fetchSearchRequest, DiscoveryNode node) {
searchService.sendExecuteFetch(node, fetchSearchRequest, new SearchServiceListener<FetchSearchResult>() {
@Override
public void onResult(FetchSearchResult result) {
result.shardTarget(shardTarget);
fetchResults.set(shardIndex, result);
if (counter.decrementAndGet() == 0) {
finishHim();
}
}
@Override
public void onFailure(Throwable t) {
onFetchFailure(t, fetchSearchRequest, shardIndex, shardTarget, counter);
}
});
}
void onFetchFailure(Throwable t, FetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget, AtomicInteger counter) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute fetch phase", t, fetchSearchRequest.id());
}
this.addShardFailure(shardIndex, shardTarget, t);
successulOps.decrementAndGet();
if (counter.decrementAndGet() == 0) {
finishHim();
}
}
void finishHim() {
try {
innerFinishHim();
} catch (Throwable e) {
ReduceSearchPhaseException failure = new ReduceSearchPhaseException("fetch", "", e, buildShardFailures());
if (logger.isDebugEnabled()) {
logger.debug("failed to reduce search", failure);
}
listener.onFailure(failure);
} finally {
releaseIrrelevantSearchContexts(firstResults, docIdsToLoad);
}
}
void innerFinishHim() throws Exception {
InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults, fetchResults);
String scrollId = null;
if (request.scroll() != null) {
scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
}
listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get(), buildTookInMillis(), buildShardFailures()));
}
}
} | 1no label
| src_main_java_org_elasticsearch_action_search_type_TransportSearchQueryThenFetchAction.java |
2,784 | public class LocalNodeIdModule extends AbstractModule {
private final String localNodeId;
public LocalNodeIdModule(String localNodeId) {
this.localNodeId = localNodeId;
}
@Override
protected void configure() {
bind(String.class).annotatedWith(LocalNodeId.class).toInstance(localNodeId);
}
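// Illustrative effect (sketch; the consumer class is hypothetical): once this module
// is installed, Guice can inject the node id into any parameter annotated with
// @LocalNodeId, e.g. SomeComponent(@LocalNodeId String nodeId).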
} | 0true
| src_main_java_org_elasticsearch_index_LocalNodeIdModule.java |
3,320 | final class EmptyByteValuesWithOrdinals extends BytesValues.WithOrdinals {
EmptyByteValuesWithOrdinals(Ordinals.Docs ordinals) {
super(ordinals);
}
@Override
public BytesRef getValueByOrd(long ord) {
scratch.length = 0;
return scratch;
}
@Override
public int setDocument(int docId) {
return 0;
}
@Override
public BytesRef nextValue() {
throw new ElasticsearchIllegalStateException("Empty BytesValues has no next value");
}
@Override
public int currentValueHash() {
throw new ElasticsearchIllegalStateException("Empty BytesValues has no hash for the current value");
}
} | 0true
| src_main_java_org_elasticsearch_index_fielddata_plain_EmptyByteValuesWithOrdinals.java |
1,305 | public static class Echo extends HazelcastInstanceAwareObject implements Callable<String>, DataSerializable {
String input;
public Echo() {
}
public Echo(String input) {
this.input = input;
}
@Override
public String call() {
getHazelcastInstance().getCountDownLatch("latch").countDown();
return getHazelcastInstance().getCluster().getLocalMember().toString() + ":" + input;
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeUTF(input);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
input = in.readUTF();
}
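// Illustrative usage (sketch; "hz" is a hypothetical HazelcastInstance):
//   Future<String> f = hz.getExecutorService("default").submit(new Echo("hello"));
//   String reply = f.get(); // e.g. "Member [127.0.0.1]:5701:hello"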
} | 0true
| hazelcast_src_main_java_com_hazelcast_examples_TestApp.java |
3,659 | public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
AnalyzerMapper.Builder builder = analyzer();
for (Map.Entry<String, Object> entry : node.entrySet()) {
String fieldName = Strings.toUnderscoreCase(entry.getKey());
Object fieldNode = entry.getValue();
if (fieldName.equals("path")) {
builder.field(fieldNode.toString());
}
}
return builder;
}
} | 0true
| src_main_java_org_elasticsearch_index_mapper_internal_AnalyzerMapper.java |
513 | public abstract class StressTestSupport extends HazelcastTestSupport {
//todo: should be system property
public static final int RUNNING_TIME_SECONDS = 180;
//todo: should be system property
public static final int CLUSTER_SIZE = 6;
//todo: should be system property
public static final int KILL_DELAY_SECONDS = 10;
private final List<HazelcastInstance> instances = new CopyOnWriteArrayList<HazelcastInstance>();
private CountDownLatch startLatch;
private KillMemberThread killMemberThread;
private volatile boolean stopOnError = true;
private volatile boolean stopTest = false;
private boolean clusterChangeEnabled = true;
@Before
public void setUp() {
startLatch = new CountDownLatch(1);
for (int k = 0; k < CLUSTER_SIZE; k++) {
HazelcastInstance hz = newHazelcastInstance(createClusterConfig());
instances.add(hz);
}
}
public void setClusterChangeEnabled(boolean clusterChangeEnabled) {
this.clusterChangeEnabled = clusterChangeEnabled;
}
public Config createClusterConfig() {
return new Config();
}
@After
public void tearDown() {
for (HazelcastInstance hz : instances) {
try {
hz.shutdown();
} catch (Exception e) {
e.printStackTrace();
}
}
}
public final boolean startAndWaitForTestCompletion() {
System.out.println("Cluster change enabled:" + clusterChangeEnabled);
if (clusterChangeEnabled) {
killMemberThread = new KillMemberThread();
killMemberThread.start();
}
System.out.println("==================================================================");
System.out.println("Test started.");
System.out.println("==================================================================");
startLatch.countDown();
for (int k = 1; k <= RUNNING_TIME_SECONDS; k++) {
try {
Thread.sleep(TimeUnit.SECONDS.toMillis(1));
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
float percent = (k * 100.0f) / RUNNING_TIME_SECONDS;
System.out.printf("%.1f Running for %s of %s seconds\n", percent, k, RUNNING_TIME_SECONDS);
if (stopTest) {
System.err.println("==================================================================");
System.err.println("Test ended premature!");
System.err.println("==================================================================");
return false;
}
}
System.out.println("==================================================================");
System.out.println("Test completed.");
System.out.println("==================================================================");
stopTest();
return true;
}
protected final void setStopOnError(boolean stopOnError) {
this.stopOnError = stopOnError;
}
protected final void stopTest() {
stopTest = true;
}
protected final boolean isStopped() {
return stopTest;
}
public final void assertNoErrors(TestThread... threads) {
for (TestThread thread : threads) {
thread.assertNoError();
}
}
public final void joinAll(TestThread... threads) {
for (TestThread t : threads) {
try {
t.join(60000);
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted while joining thread:" + t);
}
if (t.isAlive()) {
System.err.println("Could not join Thread:" + t.getName() + ", it is still alive");
for (StackTraceElement e : t.getStackTrace()) {
System.err.println("\tat " + e);
}
throw new RuntimeException("Could not join thread:" + t + ", thread is still alive");
}
}
assertNoErrors(threads);
}
public final static AtomicLong ID_GENERATOR = new AtomicLong(1);
public abstract class TestThread extends Thread {
private volatile Throwable error;
protected final Random random = new Random();
public TestThread() {
setName(getClass().getName() + "" + ID_GENERATOR.getAndIncrement());
}
@Override
public final void run() {
try {
startLatch.await();
doRun();
} catch (Throwable t) {
if (stopOnError) {
stopTest();
}
t.printStackTrace();
this.error = t;
}
}
public final void assertNoError() {
assertNull(getName() + " encountered an error", error);
}
public abstract void doRun() throws Exception;
}
public class KillMemberThread extends TestThread {
@Override
public void doRun() throws Exception {
while (!stopTest) {
try {
Thread.sleep(TimeUnit.SECONDS.toMillis(KILL_DELAY_SECONDS));
} catch (InterruptedException e) {
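// interrupted while sleeping; fall through and kill the next member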
}
int index = random.nextInt(CLUSTER_SIZE);
HazelcastInstance instance = instances.remove(index);
instance.shutdown();
HazelcastInstance newInstance = newHazelcastInstance(createClusterConfig());
instances.add(newInstance);
}
}
}
} | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_stress_StressTestSupport.java |
1,713 | public static class Builder<VType> implements IntObjectMap<VType> {
private IntObjectOpenHashMap<VType> map;
public Builder() {
//noinspection unchecked
this(EMPTY);
}
public Builder(int size) {
this.map = new IntObjectOpenHashMap<VType>(size);
}
public Builder(ImmutableOpenIntMap<VType> map) {
this.map = map.map.clone();
}
/**
 * Builds a new instance of the immutable map. The builder must not be
 * reused afterwards, since its backing map is nulled out by this call.
 */
public ImmutableOpenIntMap<VType> build() {
IntObjectOpenHashMap<VType> map = this.map;
this.map = null; // nullify the map, so any operation post build will fail! (hackish, but safest)
return new ImmutableOpenIntMap<VType>(map);
}
/**
* Puts all the entries of the given map into the builder.
*/
public Builder<VType> putAll(Map<Integer, VType> map) {
for (Map.Entry<Integer, VType> entry : map.entrySet()) {
this.map.put(entry.getKey(), entry.getValue());
}
return this;
}
/**
* A put operation that can be used in the fluent pattern.
*/
public Builder<VType> fPut(int key, VType value) {
map.put(key, value);
return this;
}
@Override
public VType put(int key, VType value) {
return map.put(key, value);
}
@Override
public VType get(int key) {
return map.get(key);
}
@Override
public VType getOrDefault(int kType, VType vType) {
return map.getOrDefault(kType, vType);
}
/**
* A remove operation that can be used in the fluent pattern.
*/
public Builder<VType> fRemove(int key) {
map.remove(key);
return this;
}
@Override
public VType remove(int key) {
return map.remove(key);
}
@Override
public Iterator<IntObjectCursor<VType>> iterator() {
return map.iterator();
}
@Override
public boolean containsKey(int key) {
return map.containsKey(key);
}
@Override
public int size() {
return map.size();
}
@Override
public boolean isEmpty() {
return map.isEmpty();
}
@Override
public void clear() {
map.clear();
}
@Override
public int putAll(IntObjectAssociativeContainer<? extends VType> container) {
return map.putAll(container);
}
@Override
public int putAll(Iterable<? extends IntObjectCursor<? extends VType>> iterable) {
return map.putAll(iterable);
}
@Override
public int removeAll(IntContainer container) {
return map.removeAll(container);
}
@Override
public int removeAll(IntPredicate predicate) {
return map.removeAll(predicate);
}
@Override
public <T extends IntObjectProcedure<? super VType>> T forEach(T procedure) {
return map.forEach(procedure);
}
@Override
public IntCollection keys() {
return map.keys();
}
@Override
public ObjectContainer<VType> values() {
return map.values();
}
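// Illustrative usage (sketch, not part of the original file):
//   ImmutableOpenIntMap<String> m = new Builder<String>().fPut(1, "one").fPut(2, "two").build();
// Note that build() nulls the backing map, so the builder must not be reused afterwards.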
} | 0true
| src_main_java_org_elasticsearch_common_collect_ImmutableOpenIntMap.java |
1,255 | addOperation(operations, new Runnable() {
public void run() {
IMap map = hazelcast.getMap("myMap");
map.size();
}
}, 4); | 0true
| hazelcast_src_main_java_com_hazelcast_examples_AllTest.java |
734 | public class DeleteByQueryResponse extends ActionResponse implements Iterable<IndexDeleteByQueryResponse> {
private Map<String, IndexDeleteByQueryResponse> indices = newHashMap();
DeleteByQueryResponse() {
}
@Override
public Iterator<IndexDeleteByQueryResponse> iterator() {
return indices.values().iterator();
}
/**
* The responses from all the different indices.
*/
public Map<String, IndexDeleteByQueryResponse> getIndices() {
return indices;
}
/**
* The response of a specific index.
*/
public IndexDeleteByQueryResponse getIndex(String index) {
return indices.get(index);
}
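/**
 * Derives an overall REST status for the response: OK unless an index reports
 * shard failures, in which case the most severe failure status (preferring
 * 5xx codes) wins across all indices.
 */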
public RestStatus status() {
RestStatus status = RestStatus.OK;
for (IndexDeleteByQueryResponse indexResponse : indices.values()) {
if (indexResponse.getFailedShards() > 0) {
RestStatus indexStatus = indexResponse.getFailures()[0].status();
if (indexResponse.getFailures().length > 1) {
for (int i = 1; i < indexResponse.getFailures().length; i++) {
if (indexResponse.getFailures()[i].status().getStatus() >= 500) {
indexStatus = indexResponse.getFailures()[i].status();
}
}
}
if (status.getStatus() < indexStatus.getStatus()) {
status = indexStatus;
}
}
}
return status;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
for (int i = 0; i < size; i++) {
IndexDeleteByQueryResponse response = new IndexDeleteByQueryResponse();
response.readFrom(in);
indices.put(response.getIndex(), response);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(indices.size());
for (IndexDeleteByQueryResponse indexResponse : indices.values()) {
indexResponse.writeTo(out);
}
}
} | 0true
| src_main_java_org_elasticsearch_action_deletebyquery_DeleteByQueryResponse.java |
2,408 | public static class Entry {
public final int expectedInsertions;
public final double fpp;
Entry(int expectedInsertions, double fpp) {
this.expectedInsertions = expectedInsertions;
this.fpp = fpp;
}
} | 0true
| src_main_java_org_elasticsearch_common_util_BloomFilter.java |
945 | public class AbstractOrderDaoExtensionHandler extends AbstractExtensionHandler implements OrderDaoExtensionHandler {
public ExtensionResultStatusType attachAdditionalDataToNewCart(Customer customer, Order cart) {
return ExtensionResultStatusType.NOT_HANDLED;
}
public ExtensionResultStatusType applyAdditionalOrderLookupFilter(Customer customer, String name, List<Order> orders) {
return ExtensionResultStatusType.NOT_HANDLED;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_dao_AbstractOrderDaoExtensionHandler.java |
2,724 | static enum AutoImportDangledState {
NO() {
@Override
public boolean shouldImport() {
return false;
}
},
YES() {
@Override
public boolean shouldImport() {
return true;
}
},
CLOSED() {
@Override
public boolean shouldImport() {
return true;
}
};
public abstract boolean shouldImport();
public static AutoImportDangledState fromString(String value) {
if ("no".equalsIgnoreCase(value)) {
return NO;
} else if ("yes".equalsIgnoreCase(value)) {
return YES;
} else if ("closed".equalsIgnoreCase(value)) {
return CLOSED;
} else {
throw new ElasticsearchIllegalArgumentException("failed to parse [" + value + "], not a valid auto dangling import type");
}
}
} | 0true
| src_main_java_org_elasticsearch_gateway_local_state_meta_LocalGatewayMetaState.java |
1,504 | public class OrderStateAOP implements ApplicationContextAware {
private ApplicationContext applicationContext;
public Object processOrderRetrieval(ProceedingJoinPoint call) throws Throwable {
Object returnValue;
/*
* we retrieve the OrderState instance directly from the application
* context, as this bean has a request scope.
*/
OrderState orderState = (OrderState) applicationContext.getBean("blOrderState");
Customer customer = (Customer) call.getArgs()[0];
Order order = orderState.getOrder(customer);
if (order != null) {
returnValue = order;
} else {
returnValue = call.proceed();
returnValue = orderState.setOrder(customer, (Order) returnValue);
}
return returnValue;
}
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
this.applicationContext = applicationContext;
}
} | 0true
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_order_OrderStateAOP.java |
251 | public class BufferedChecksumIndexOutput extends BufferedIndexOutput {
private final IndexOutput delegate;
private final BufferedIndexOutput bufferedDelegate;
private final Checksum digest;
public BufferedChecksumIndexOutput(IndexOutput delegate, Checksum digest) {
super(delegate instanceof BufferedIndexOutput ? ((BufferedIndexOutput) delegate).getBufferSize() : BufferedIndexOutput.DEFAULT_BUFFER_SIZE);
if (delegate instanceof BufferedIndexOutput) {
bufferedDelegate = (BufferedIndexOutput) delegate;
this.delegate = delegate;
} else {
this.delegate = delegate;
bufferedDelegate = null;
}
this.digest = digest;
}
public Checksum digest() {
return digest;
}
public IndexOutput underlying() {
return this.delegate;
}
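// Illustrative usage (sketch; directory, file name and data are hypothetical):
//   IndexOutput out = new BufferedChecksumIndexOutput(
//       dir.createOutput("segment.dat", IOContext.DEFAULT), new CRC32());
//   out.writeBytes(data, data.length);
//   long checksum = ((BufferedChecksumIndexOutput) out).digest().getValue();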
// don't override it, base class method simply reads from input and writes to this output
// @Override public void copyBytes(IndexInput input, long numBytes) throws IOException {
// delegate.copyBytes(input, numBytes);
// }
@Override
public void close() throws IOException {
try {
super.close();
} finally {
delegate.close();
}
}
@Override
protected void flushBuffer(byte[] b, int offset, int len) throws IOException {
if (bufferedDelegate != null) {
bufferedDelegate.flushBuffer(b, offset, len);
} else {
delegate.writeBytes(b, offset, len);
}
digest.update(b, offset, len);
}
@Override
public void flush() throws IOException {
try {
super.flush();
} finally {
delegate.flush();
}
}
@Override
public void seek(long pos) throws IOException {
// seek might be called on files, which means that the checksum is not file checksum
// but a checksum of the bytes written to this stream, which is the same for each
// type of file in lucene
super.seek(pos);
delegate.seek(pos);
}
@Override
public long length() throws IOException {
return delegate.length();
}
@Override
public void setLength(long length) throws IOException {
delegate.setLength(length);
}
@Override
public String toString() {
return delegate.toString();
}
} | 0true
| src_main_java_org_apache_lucene_store_BufferedChecksumIndexOutput.java |
1,988 | return new Internal() {
public int value() {
return value;
}
public Class<? extends Annotation> annotationType() {
return Internal.class;
}
@Override
public String toString() {
return "@" + Internal.class.getName() + "(value=" + value + ")";
}
@Override
public boolean equals(Object o) {
return o instanceof Internal
&& ((Internal) o).value() == value();
}
@Override
public int hashCode() {
return (127 * "value".hashCode()) ^ value;
}
}; | 0true
| src_main_java_org_elasticsearch_common_inject_internal_UniqueAnnotations.java |
211 | public class CustomPassageFormatterTests {
@Test
public void testSimpleFormat() {
String content = "This is a really cool highlighter. Postings highlighter gives nice snippets back. No matches here.";
CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<em>", "</em>", new DefaultEncoder());
Passage[] passages = new Passage[3];
String match = "highlighter";
BytesRef matchBytesRef = new BytesRef(match);
Passage passage1 = new Passage();
int start = content.indexOf(match);
int end = start + match.length();
passage1.startOffset = 0;
passage1.endOffset = end + 2; // let's include the whitespace at the end to make sure we trim it
passage1.addMatch(start, end, matchBytesRef);
passages[0] = passage1;
Passage passage2 = new Passage();
start = content.lastIndexOf(match);
end = start + match.length();
passage2.startOffset = passage1.endOffset;
passage2.endOffset = end + 26;
passage2.addMatch(start, end, matchBytesRef);
passages[1] = passage2;
Passage passage3 = new Passage();
passage3.startOffset = passage2.endOffset;
passage3.endOffset = content.length();
passages[2] = passage3;
Snippet[] fragments = passageFormatter.format(passages, content);
assertThat(fragments, notNullValue());
assertThat(fragments.length, equalTo(3));
assertThat(fragments[0].getText(), equalTo("This is a really cool <em>highlighter</em>."));
assertThat(fragments[0].isHighlighted(), equalTo(true));
assertThat(fragments[1].getText(), equalTo("Postings <em>highlighter</em> gives nice snippets back."));
assertThat(fragments[1].isHighlighted(), equalTo(true));
assertThat(fragments[2].getText(), equalTo("No matches here."));
assertThat(fragments[2].isHighlighted(), equalTo(false));
}
@Test
public void testHtmlEncodeFormat() {
String content = "<b>This is a really cool highlighter.</b> Postings highlighter gives nice snippets back.";
CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<em>", "</em>", new SimpleHTMLEncoder());
Passage[] passages = new Passage[2];
String match = "highlighter";
BytesRef matchBytesRef = new BytesRef(match);
Passage passage1 = new Passage();
int start = content.indexOf(match);
int end = start + match.length();
passage1.startOffset = 0;
passage1.endOffset = end + 6; // let's include the whitespace at the end to make sure we trim it
passage1.addMatch(start, end, matchBytesRef);
passages[0] = passage1;
Passage passage2 = new Passage();
start = content.lastIndexOf(match);
end = start + match.length();
passage2.startOffset = passage1.endOffset;
passage2.endOffset = content.length();
passage2.addMatch(start, end, matchBytesRef);
passages[1] = passage2;
Snippet[] fragments = passageFormatter.format(passages, content);
assertThat(fragments, notNullValue());
assertThat(fragments.length, equalTo(2));
assertThat(fragments[0].getText(), equalTo("<b>This is a really cool <em>highlighter</em>.</b>"));
assertThat(fragments[1].getText(), equalTo("Postings <em>highlighter</em> gives nice snippets back."));
}
} | 0true
| src_test_java_org_apache_lucene_search_postingshighlight_CustomPassageFormatterTests.java |
846 | return new IAnswer<OrderItemPriceDetail>() {
@Override
public OrderItemPriceDetail answer() throws Throwable {
return new OrderItemPriceDetailImpl();
}
}; | 0true
| core_broadleaf-framework_src_test_java_org_broadleafcommerce_core_offer_service_OfferDataItemProvider.java |
55 | public class PaxosClusterMemberEvents implements ClusterMemberEvents, Lifecycle
{
private Cluster cluster;
private AtomicBroadcast atomicBroadcast;
private StringLogger logger;
protected AtomicBroadcastSerializer serializer;
protected Iterable<ClusterMemberListener> listeners = Listeners.newListeners();
private ClusterMembersSnapshot clusterMembersSnapshot;
private ClusterListener.Adapter clusterListener;
private Snapshot snapshot;
private AtomicBroadcastListener atomicBroadcastListener;
private ExecutorService executor;
private final Predicate<ClusterMembersSnapshot> snapshotValidator;
private final Heartbeat heartbeat;
private HeartbeatListenerImpl heartbeatListener;
private ObjectInputStreamFactory lenientObjectInputStream;
private ObjectOutputStreamFactory lenientObjectOutputStream;
public PaxosClusterMemberEvents( final Snapshot snapshot, Cluster cluster, Heartbeat heartbeat,
AtomicBroadcast atomicBroadcast, Logging logging,
Predicate<ClusterMembersSnapshot> validator,
Function2<Iterable<MemberIsAvailable>, MemberIsAvailable,
Iterable<MemberIsAvailable>> snapshotFilter,
ObjectInputStreamFactory lenientObjectInputStream,
ObjectOutputStreamFactory lenientObjectOutputStream)
{
this.snapshot = snapshot;
this.cluster = cluster;
this.heartbeat = heartbeat;
this.atomicBroadcast = atomicBroadcast;
this.lenientObjectInputStream = lenientObjectInputStream;
this.lenientObjectOutputStream = lenientObjectOutputStream;
this.logger = logging.getMessagesLog( getClass() );
clusterListener = new ClusterListenerImpl();
atomicBroadcastListener = new AtomicBroadcastListenerImpl();
this.snapshotValidator = validator;
clusterMembersSnapshot = new ClusterMembersSnapshot( snapshotFilter );
}
@Override
public void addClusterMemberListener( ClusterMemberListener listener )
{
listeners = Listeners.addListener( listener, listeners );
}
@Override
public void removeClusterMemberListener( ClusterMemberListener listener )
{
listeners = Listeners.removeListener( listener, listeners );
}
@Override
public void init()
throws Throwable
{
serializer = new AtomicBroadcastSerializer( lenientObjectInputStream, lenientObjectOutputStream );
cluster.addClusterListener( clusterListener );
atomicBroadcast.addAtomicBroadcastListener( atomicBroadcastListener );
snapshot.setSnapshotProvider( new HighAvailabilitySnapshotProvider() );
heartbeat.addHeartbeatListener( heartbeatListener = new HeartbeatListenerImpl() );
executor = Executors.newSingleThreadExecutor();
}
@Override
public void start()
throws Throwable
{
}
@Override
public void stop()
throws Throwable
{
}
@Override
public void shutdown()
throws Throwable
{
snapshot.setSnapshotProvider( null );
if ( executor != null )
{
executor.shutdown();
executor = null;
}
cluster.removeClusterListener( clusterListener );
atomicBroadcast.removeAtomicBroadcastListener( atomicBroadcastListener );
heartbeat.removeHeartbeatListener( heartbeatListener );
}
private class HighAvailabilitySnapshotProvider implements SnapshotProvider
{
@Override
public void getState( ObjectOutputStream output ) throws IOException
{
output.writeObject( clusterMembersSnapshot );
}
@Override
public void setState( ObjectInputStream input ) throws IOException, ClassNotFoundException
{
clusterMembersSnapshot = ClusterMembersSnapshot.class.cast(input.readObject());
if ( !snapshotValidator.accept( clusterMembersSnapshot ) )
{
executor.submit( new Runnable()
{
@Override
public void run()
{
cluster.leave();
}
} );
}
else
{
// Send current availability events to listeners
Listeners.notifyListeners( listeners, executor, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
for ( MemberIsAvailable memberIsAvailable : clusterMembersSnapshot.getCurrentAvailableMembers() )
{
listener.memberIsAvailable( memberIsAvailable.getRole(),
memberIsAvailable.getInstanceId(), memberIsAvailable.getRoleUri() );
}
}
} );
}
}
}
public static class UniqueRoleFilter
implements Function2<Iterable<MemberIsAvailable>, MemberIsAvailable, Iterable<MemberIsAvailable>>
{
private final String role;
private final Set<String> roles = new HashSet<String>();
public UniqueRoleFilter( String role )
{
this.role = role;
}
@Override
public Iterable<MemberIsAvailable> apply( Iterable<MemberIsAvailable> previousSnapshot, final MemberIsAvailable newMessage )
{
return Iterables.append( newMessage, Iterables.filter( new Predicate<MemberIsAvailable>()
{
@Override
public boolean accept( MemberIsAvailable item )
{
return not( in( newMessage.getInstanceId() ) ).accept( item.getInstanceId() );
}
}, previousSnapshot));
}
}
private static class UniqueInstanceFilter implements Predicate<MemberIsAvailable>
{
private final Set<InstanceId> instances = new HashSet<InstanceId>();
@Override
public boolean accept( MemberIsAvailable item )
{
return instances.add( item.getInstanceId() );
}
}
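/**
 * Serializable view of the members currently known to be available. It is updated
 * from broadcast events and shipped to joining instances via the snapshot provider.
 */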
public static class ClusterMembersSnapshot
implements Serializable
{
private final
Function2<Iterable<MemberIsAvailable>, MemberIsAvailable, Iterable<MemberIsAvailable>> nextSnapshotFunction;
private Iterable<MemberIsAvailable> availableMembers = new ArrayList<MemberIsAvailable>();
public ClusterMembersSnapshot( Function2<Iterable<MemberIsAvailable>, MemberIsAvailable, Iterable<MemberIsAvailable>> nextSnapshotFunction )
{
this.nextSnapshotFunction = nextSnapshotFunction;
}
public void availableMember( MemberIsAvailable memberIsAvailable )
{
availableMembers = toList( nextSnapshotFunction.apply( availableMembers, memberIsAvailable ) );
}
public void unavailableMember( final InstanceId member )
{
availableMembers = toList( filter( new Predicate<MemberIsAvailable>()
{
@Override
public boolean accept( MemberIsAvailable item )
{
return !item.getInstanceId().equals( member );
}
}, availableMembers ) );
}
public void unavailableMember( final URI member, final String role )
{
availableMembers = toList( filter(new Predicate<MemberIsAvailable>()
{
@Override
public boolean accept( MemberIsAvailable item )
{
return !(item.getClusterUri().equals( member ) && item.getRole().equals( role ));
}
}, availableMembers));
}
public Iterable<MemberIsAvailable> getCurrentAvailableMembers()
{
return availableMembers;
}
public Iterable<MemberIsAvailable> getCurrentAvailable( final InstanceId memberId )
{
return toList( Iterables.filter( new Predicate<MemberIsAvailable>()
{
@Override
public boolean accept( MemberIsAvailable item )
{
return item.getInstanceId().equals( memberId );
}
}, availableMembers) );
}
}
private class ClusterListenerImpl extends ClusterListener.Adapter
{
@Override
public void enteredCluster( ClusterConfiguration clusterConfiguration )
{
// Catch up with elections
for ( Map.Entry<String, InstanceId> memberRoles : clusterConfiguration.getRoles().entrySet() )
{
elected( memberRoles.getKey(), memberRoles.getValue(),
clusterConfiguration.getUriForId( memberRoles.getValue() ) );
}
}
@Override
public void elected( String role, final InstanceId instanceId, final URI electedMember )
{
if ( role.equals( ClusterConfiguration.COORDINATOR ) )
{
// Use the cluster coordinator as master for HA
Listeners.notifyListeners( listeners, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
listener.coordinatorIsElected( instanceId );
}
} );
}
}
@Override
public void leftCluster( final InstanceId member )
{
// Notify unavailability of members
Listeners.notifyListeners( listeners, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
for ( MemberIsAvailable memberIsAvailable : clusterMembersSnapshot.getCurrentAvailable( member ) )
{
listener.memberIsUnavailable( memberIsAvailable.getRole(), member );
}
}
} );
clusterMembersSnapshot.unavailableMember( member );
}
}
private class AtomicBroadcastListenerImpl implements AtomicBroadcastListener
{
@Override
public void receive( Payload payload )
{
try
{
final Object value = serializer.receive( payload );
if ( value instanceof MemberIsAvailable )
{
final MemberIsAvailable memberIsAvailable = (MemberIsAvailable) value;
// Update snapshot
clusterMembersSnapshot.availableMember( memberIsAvailable );
logger.info("Snapshot:"+clusterMembersSnapshot.getCurrentAvailableMembers());
Listeners.notifyListeners( listeners, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
listener.memberIsAvailable( memberIsAvailable.getRole(),
memberIsAvailable.getInstanceId(), memberIsAvailable.getRoleUri() );
}
} );
}
else if ( value instanceof MemberIsUnavailable )
{
final MemberIsUnavailable memberIsUnavailable = (MemberIsUnavailable) value;
// Update snapshot
clusterMembersSnapshot.unavailableMember( memberIsUnavailable.getClusterUri(),
memberIsUnavailable.getRole() );
Listeners.notifyListeners( listeners, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
listener.memberIsUnavailable( memberIsUnavailable.getRole(),
memberIsUnavailable.getInstanceId() );
}
} );
}
}
catch ( Throwable t )
{
logger.error( "Could not handle cluster member available message", t );
}
}
}
private class HeartbeatListenerImpl implements HeartbeatListener
{
@Override
public void failed( final InstanceId server )
{
Listeners.notifyListeners( listeners, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
listener.memberIsFailed( server );
}
} );
}
@Override
public void alive( final InstanceId server )
{
Listeners.notifyListeners( listeners, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
listener.memberIsAlive( server );
}
} );
}
}
} | 1no label
| enterprise_cluster_src_main_java_org_neo4j_cluster_member_paxos_PaxosClusterMemberEvents.java |
324 | public class MergeXmlConfigResource {
private static final Log LOG = LogFactory.getLog(MergeXmlConfigResource.class);
public Resource getMergedConfigResource(ResourceInputStream[] sources) throws BeansException {
Resource configResource = null;
ResourceInputStream merged = null;
try {
merged = merge(sources);
//read the final stream into a byte array
ByteArrayOutputStream baos = new ByteArrayOutputStream();
boolean eof = false;
while (!eof) {
int temp = merged.read();
if (temp == -1) {
eof = true;
} else {
baos.write(temp);
}
}
configResource = new ByteArrayResource(baos.toByteArray());
if (LOG.isDebugEnabled()) {
LOG.debug("Merged config: \n" + serialize(configResource));
}
} catch (MergeException e) {
throw new FatalBeanException("Unable to merge source and patch locations", e);
} catch (MergeManagerSetupException e) {
throw new FatalBeanException("Unable to merge source and patch locations", e);
} catch (IOException e) {
throw new FatalBeanException("Unable to merge source and patch locations", e);
} finally {
if (merged != null) {
try{ merged.close(); } catch (Throwable e) {
LOG.error("Unable to merge source and patch locations", e);
}
}
}
return configResource;
}
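/**
 * Folds the sources left to right: merges the first two streams, then merges the
 * running result with each subsequent stream, closing consumed streams as it goes.
 */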
protected ResourceInputStream merge(ResourceInputStream[] sources) throws MergeException, MergeManagerSetupException {
if (sources.length == 1) return sources[0];
ResourceInputStream response = null;
ResourceInputStream[] pair = new ResourceInputStream[2];
pair[0] = sources[0];
for (int j=1;j<sources.length;j++){
pair[1] = sources[j];
response = mergeItems(pair[0], pair[1]);
try{
pair[0].close();
} catch (Throwable e) {
LOG.error("Unable to merge source and patch locations", e);
}
try{
pair[1].close();
} catch (Throwable e) {
LOG.error("Unable to merge source and patch locations", e);
}
pair[0] = response;
}
return response;
}
protected ResourceInputStream mergeItems(ResourceInputStream sourceLocationFirst, ResourceInputStream sourceLocationSecond) throws MergeException, MergeManagerSetupException {
ResourceInputStream response = new MergeManager().merge(sourceLocationFirst, sourceLocationSecond);
return response;
}
public String serialize(Resource resource) {
String response = "";
try {
response = serialize(resource.getInputStream());
} catch (IOException e) {
LOG.error("Unable to merge source and patch locations", e);
}
return response;
}
public String serialize(InputStream in) {
InputStreamReader reader = null;
int temp;
StringBuilder item = new StringBuilder();
boolean eof = false;
try {
reader = new InputStreamReader(in);
while (!eof) {
temp = reader.read();
if (temp == -1) {
eof = true;
} else {
item.append((char) temp);
}
}
} catch (IOException e) {
LOG.error("Unable to merge source and patch locations", e);
} finally {
if (reader != null) {
try{ reader.close(); } catch (Throwable e) {
LOG.error("Unable to merge source and patch locations", e);
}
}
}
return item.toString();
}
protected byte[] buildArrayFromStream(InputStream source) throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
boolean eof = false;
try{
while (!eof) {
int temp = source.read();
if (temp == -1) {
eof = true;
} else {
baos.write(temp);
}
}
} finally {
try{ source.close(); } catch (Throwable e) {
LOG.error("Unable to merge source and patch locations", e);
}
}
return baos.toByteArray();
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_extensibility_context_merge_MergeXmlConfigResource.java |
193 | static final class Whitespace extends Run {
@Override
protected boolean isValid(char ch) {
return Character.isWhitespace(ch) && ch != '\n' && ch != '\r';
}
} | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_editor_CeylonBreakIterator.java |
469 | @Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name="BLC_SANDBOX")
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blSandBoxElements")
public class SandBoxImpl implements SandBox {
private static final Log LOG = LogFactory.getLog(SandBoxImpl.class);
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "SandBoxId")
@GenericGenerator(
name="SandBoxId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="SandBoxImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.common.sandbox.domain.SandBoxImpl")
}
)
@Column(name = "SANDBOX_ID")
@AdminPresentation(visibility = VisibilityEnum.HIDDEN_ALL)
protected Long id;
@Column(name = "SANDBOX_NAME")
@Index(name="SANDBOX_NAME_INDEX", columnNames={"SANDBOX_NAME"})
protected String name;
@Column(name="AUTHOR")
protected Long author;
@ManyToOne(targetEntity = SiteImpl.class)
@JoinTable(name = "BLC_SITE_SANDBOX", joinColumns = @JoinColumn(name = "SANDBOX_ID"), inverseJoinColumns = @JoinColumn(name = "SITE_ID"))
protected Site site;
@Column(name = "SANDBOX_TYPE")
@AdminPresentation(friendlyName = "SandBoxImpl_SandBox_Type", group = "SandBoxImpl_Description", fieldType= SupportedFieldType.BROADLEAF_ENUMERATION, broadleafEnumeration="org.broadleafcommerce.common.sandbox.domain.SandBoxType")
protected String sandboxType;
/* (non-Javadoc)
* @see org.broadleafcommerce.openadmin.domain.SandBox#getId()
*/
@Override
public Long getId() {
return id;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.openadmin.domain.SandBox#setId(java.lang.Long)
*/
@Override
public void setId(Long id) {
this.id = id;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.openadmin.domain.SandBox#getName()
*/
@Override
public String getName() {
return name;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.openadmin.domain.SandBox#setName(java.lang.String)
*/
@Override
public void setName(String name) {
this.name = name;
}
@Override
public SandBoxType getSandBoxType() {
return SandBoxType.getInstance(sandboxType);
}
@Override
public void setSandBoxType(final SandBoxType sandboxType) {
if (sandboxType != null) {
this.sandboxType = sandboxType.getType();
}
}
@Override
public Long getAuthor() {
return author;
}
@Override
public void setAuthor(Long author) {
this.author = author;
}
@Override
public Site getSite() {
return site;
}
@Override
public void setSite(Site site) {
this.site = site;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((author == null) ? 0 : author.hashCode());
result = prime * result + ((id == null) ? 0 : id.hashCode());
result = prime * result + ((name == null) ? 0 : name.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
SandBoxImpl other = (SandBoxImpl) obj;
if (author == null) {
if (other.author != null)
return false;
} else if (!author.equals(other.author))
return false;
if (id == null) {
if (other.id != null)
return false;
} else if (!id.equals(other.id))
return false;
if (name == null) {
if (other.name != null)
return false;
} else if (!name.equals(other.name))
return false;
return true;
}
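/**
 * Verifies that subclasses defined outside of Broadleaf override clone();
 * inheriting the Broadleaf implementation would yield incomplete copies of
 * any custom fields.
 */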
public void checkCloneable(SandBox sandBox) throws CloneNotSupportedException, SecurityException, NoSuchMethodException {
Method cloneMethod = sandBox.getClass().getMethod("clone", new Class[]{});
if (cloneMethod.getDeclaringClass().getName().startsWith("org.broadleafcommerce") && !sandBox.getClass().getName().startsWith("org.broadleafcommerce")) {
//subclass is not implementing the clone method
throw new CloneNotSupportedException("Custom extensions and implementations should implement clone.");
}
}
@Override
public SandBox clone() {
SandBox clone;
try {
clone = (SandBox) Class.forName(this.getClass().getName()).newInstance();
try {
checkCloneable(clone);
} catch (CloneNotSupportedException e) {
LOG.warn("Clone implementation missing in inheritance hierarchy outside of Broadleaf: " + clone.getClass().getName(), e);
}
clone.setId(id);
clone.setName(name);
clone.setAuthor(author);
clone.setSandBoxType(getSandBoxType());
if (site != null) {
clone.setSite(site.clone());
}
} catch (Exception e) {
throw new RuntimeException(e);
}
return clone;
}
} | 1no label
| common_src_main_java_org_broadleafcommerce_common_sandbox_domain_SandBoxImpl.java |
171 | public interface BaseTransactionConfigurable extends BaseTransaction {
/**
* Get the configuration for this transaction
*
* @return
*/
public BaseTransactionConfig getConfiguration();
} | 0true
| titan-core_src_main_java_com_thinkaurelius_titan_diskstorage_BaseTransactionConfigurable.java |
62 | public class OModificationOperationProhibitedException extends OException {
private static final long serialVersionUID = 1L;
public OModificationOperationProhibitedException() {
}
public OModificationOperationProhibitedException(String message) {
super(message);
}
public OModificationOperationProhibitedException(Throwable cause) {
super(cause);
}
public OModificationOperationProhibitedException(String message, Throwable cause) {
super(message, cause);
}
} | 0true
| commons_src_main_java_com_orientechnologies_common_concur_lock_OModificationOperationProhibitedException.java |
1,535 | public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator {
public static final String SETTING_THRESHOLD = "cluster.routing.allocation.balance.threshold";
public static final String SETTING_INDEX_BALANCE_FACTOR = "cluster.routing.allocation.balance.index";
public static final String SETTING_SHARD_BALANCE_FACTOR = "cluster.routing.allocation.balance.shard";
public static final String SETTING_PRIMARY_BALANCE_FACTOR = "cluster.routing.allocation.balance.primary";
private static final float DEFAULT_INDEX_BALANCE_FACTOR = 0.5f;
private static final float DEFAULT_SHARD_BALANCE_FACTOR = 0.45f;
private static final float DEFAULT_PRIMARY_BALANCE_FACTOR = 0.05f;
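/**
 * Re-reads the balance factors and the relocation threshold when dynamic
 * cluster settings change; the threshold must remain strictly positive.
 */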
class ApplySettings implements NodeSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
final float indexBalance = settings.getAsFloat(SETTING_INDEX_BALANCE_FACTOR, weightFunction.indexBalance);
final float shardBalance = settings.getAsFloat(SETTING_SHARD_BALANCE_FACTOR, weightFunction.shardBalance);
final float primaryBalance = settings.getAsFloat(SETTING_PRIMARY_BALANCE_FACTOR, weightFunction.primaryBalance);
float threshold = settings.getAsFloat(SETTING_THRESHOLD, BalancedShardsAllocator.this.threshold);
if (threshold <= 0.0f) {
throw new ElasticsearchIllegalArgumentException("threshold must be greater than 0.0f but was: " + threshold);
}
BalancedShardsAllocator.this.threshold = threshold;
BalancedShardsAllocator.this.weightFunction = new WeightFunction(indexBalance, shardBalance, primaryBalance);
}
}
private volatile WeightFunction weightFunction = new WeightFunction(DEFAULT_INDEX_BALANCE_FACTOR, DEFAULT_SHARD_BALANCE_FACTOR, DEFAULT_PRIMARY_BALANCE_FACTOR);
private volatile float threshold = 1.0f;
public BalancedShardsAllocator(Settings settings) {
this(settings, new NodeSettingsService(settings));
}
@Inject
public BalancedShardsAllocator(Settings settings, NodeSettingsService nodeSettingsService) {
super(settings);
ApplySettings applySettings = new ApplySettings();
applySettings.onRefreshSettings(settings);
nodeSettingsService.addListener(applySettings);
}
@Override
public void applyStartedShards(StartedRerouteAllocation allocation) { /* ONLY FOR GATEWAYS */ }
@Override
public void applyFailedShards(FailedRerouteAllocation allocation) { /* ONLY FOR GATEWAYS */ }
@Override
public boolean allocateUnassigned(RoutingAllocation allocation) {
return rebalance(allocation);
}
@Override
public boolean rebalance(RoutingAllocation allocation) {
final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
return balancer.balance();
}
@Override
public boolean move(MutableShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
return balancer.move(shardRouting, node);
}
/**
* Returns the currently configured delta threshold
*/
public float getThreshold() {
return threshold;
}
/**
* Returns the index related weight factor.
*/
public float getIndexBalance() {
return weightFunction.indexBalance;
}
/**
* Returns the primary related weight factor.
*/
public float getPrimaryBalance() {
return weightFunction.primaryBalance;
}
/**
* Returns the shard related weight factor.
*/
public float getShardBalance() {
return weightFunction.shardBalance;
}
/**
* This class is the primary weight function used to create balanced over nodes and shards in the cluster.
* Currently this function has 3 properties:
* <ul>
* <li><code>index balance</code> - balance property over shards per index</li>
* <li><code>shard balance</code> - balance property over shards per cluster</li>
* <li><code>primary balance</code> - balance property over primaries per cluster</li>
* </ul>
* <p>
* Each of these properties are expressed as factor such that the properties factor defines the relative importance of the property for the
* weight function. For example if the weight function should calculate the weights only based on a global (shard) balance the index and primary balance
* can be set to <tt>0.0</tt> and will in turn have no effect on the distribution.
* </p>
* The weight per index is calculated based on the following formula:
* <ul>
* <li>
* <code>weight<sub>index</sub>(node, index) = indexBalance * (node.numShards(index) - avgShardsPerNode(index))</code>
* </li>
* <li>
* <code>weight<sub>node</sub>(node, index) = shardBalance * (node.numShards() - avgShardsPerNode)</code>
* </li>
* <li>
* <code>weight<sub>primary</sub>(node, index) = primaryBalance * (node.numPrimaries() - avgPrimariesPerNode)</code>
* </li>
* </ul>
* <code>weight(node, index) = weight<sub>index</sub>(node, index) + weight<sub>node</sub>(node, index) + weight<sub>primary</sub>(node, index)</code>
*/
public static class WeightFunction {
private final float indexBalance;
private final float shardBalance;
private final float primaryBalance;
private final EnumMap<Operation, float[]> thetaMap = new EnumMap<BalancedShardsAllocator.Operation, float[]>(Operation.class);
public WeightFunction(float indexBalance, float shardBalance, float primaryBalance) {
float sum = indexBalance + shardBalance + primaryBalance;
if (sum <= 0.0f) {
throw new ElasticsearchIllegalArgumentException("Balance factors must sum to a value > 0 but was: " + sum);
}
final float[] defaultTheta = new float[]{shardBalance / sum, indexBalance / sum, primaryBalance / sum};
for (Operation operation : Operation.values()) {
switch (operation) {
case THRESHOLD_CHECK:
sum = indexBalance + shardBalance;
if (sum <= 0.0f) {
thetaMap.put(operation, defaultTheta);
} else {
thetaMap.put(operation, new float[]{shardBalance / sum, indexBalance / sum, 0});
}
break;
case BALANCE:
case ALLOCATE:
case MOVE:
thetaMap.put(operation, defaultTheta);
break;
default:
assert false;
}
}
this.indexBalance = indexBalance;
this.shardBalance = shardBalance;
this.primaryBalance = primaryBalance;
}
public float weight(Operation operation, Balancer balancer, ModelNode node, String index) {
final float weightShard = (node.numShards() - balancer.avgShardsPerNode());
final float weightIndex = (node.numShards(index) - balancer.avgShardsPerNode(index));
final float weightPrimary = (node.numPrimaries() - balancer.avgPrimariesPerNode());
final float[] theta = thetaMap.get(operation);
assert theta != null;
return theta[0] * weightShard + theta[1] * weightIndex + theta[2] * weightPrimary;
}
}
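// Worked example (illustrative numbers, not from the original source): with the
// default factors (index=0.5, shard=0.45, primary=0.05) the factors already sum
// to 1, so theta = {0.45, 0.5, 0.05}. For a node holding 4 shards total (cluster
// average 3), 2 shards of index "i" (index average 1) and 2 primaries (average
// 1.5), the weight is 0.45 * (4 - 3) + 0.5 * (2 - 1) + 0.05 * (2 - 1.5) = 0.975.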
/**
* An enum that donates the actual operation the {@link WeightFunction} is
* applied to.
*/
public static enum Operation {
/**
* Provided during balance operations.
*/
BALANCE,
/**
* Provided during initial allocation operation for unassigned shards.
*/
ALLOCATE,
/**
* Provided during move operation.
*/
MOVE,
/**
* Provided when the weight delta is checked against the configured threshold.
* This can be used to ignore tie-breaking weight factors that should not
* solely trigger a relocation unless the delta is above the threshold.
*/
THRESHOLD_CHECK
}
/**
* A {@link Balancer}
*/
public static class Balancer {
private final ESLogger logger;
private final Map<String, ModelNode> nodes = new HashMap<String, ModelNode>();
private final HashSet<String> indices = new HashSet<String>();
private final RoutingAllocation allocation;
private final RoutingNodes routingNodes;
private final WeightFunction weight;
private final float threshold;
private final MetaData metaData;
private final Predicate<MutableShardRouting> assignedFilter = new Predicate<MutableShardRouting>() {
@Override
public boolean apply(MutableShardRouting input) {
return input.assignedToNode();
}
};
public Balancer(ESLogger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) {
this.logger = logger;
this.allocation = allocation;
this.weight = weight;
this.threshold = threshold;
this.routingNodes = allocation.routingNodes();
for (RoutingNode node : routingNodes) {
nodes.put(node.nodeId(), new ModelNode(node.nodeId()));
}
metaData = routingNodes.metaData();
}
/**
* Returns an array view on the nodes in the balancer. Nodes should not be removed from this list.
*/
private ModelNode[] nodesArray() {
return nodes.values().toArray(new ModelNode[nodes.size()]);
}
/**
* Returns the average of shards per node for the given index
*/
public float avgShardsPerNode(String index) {
return ((float) metaData.index(index).totalNumberOfShards()) / nodes.size();
}
/**
* Returns the global average of shards per node
*/
public float avgShardsPerNode() {
return ((float) metaData.totalNumberOfShards()) / nodes.size();
}
/**
* Returns the global average of primaries per node
*/
public float avgPrimariesPerNode() {
return ((float) metaData.numberOfShards()) / nodes.size();
}
/**
* Returns the average of primaries per node for the given index
*/
public float avgPrimariesPerNode(String index) {
return ((float) metaData.index(index).numberOfShards()) / nodes.size();
}
/**
* Returns a new {@link NodeSorter} that sorts the nodes based on their
* current weight with respect to the index passed to the sorter. The
* returned sorter is not sorted. Use {@link NodeSorter#reset(org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.Operation, String)}
* to sort based on an index.
*/
private NodeSorter newNodeSorter() {
return new NodeSorter(nodesArray(), weight, this);
}
private boolean initialize(RoutingNodes routing, RoutingNodes.UnassignedShards unassigned) {
if (logger.isTraceEnabled()) {
logger.trace("Start distributing Shards");
}
indices.addAll(allocation.routingTable().indicesRouting().keySet());
buildModelFromAssigned(routing.shards(assignedFilter));
return allocateUnassigned(unassigned, routing.ignoredUnassigned());
}
private static boolean lessThan(float delta, float threshold) {
/* deltas close to the threshold are "rounded" to the threshold manually
to prevent floating point problems if the delta is very close to the
threshold, i.e. 1.000000002, which can trigger unnecessary balance actions */
return delta <= threshold + 0.001f;
}
/**
* Balances the nodes on the cluster model according to the weight
* function. The configured threshold is the minimum delta between the
* weight of the maximum node and the minimum node according to the
* {@link WeightFunction}. This weight is calculated per index to
* distribute shards evenly per index. The balancer tries to relocate
* shards only if the delta exceeds the threshold. If the default case
* the threshold is set to <tt>1.0</tt> to enforce gaining relocation
* only, or in other words relocations that move the weight delta closer
* to <tt>0.0</tt>
*
* @return <code>true</code> if the current configuration has been
* changed, otherwise <code>false</code>
*/
public boolean balance() {
if (this.nodes.isEmpty()) {
/* with no nodes this is pointless */
return false;
}
if (logger.isTraceEnabled()) {
logger.trace("Start balancing cluster");
}
final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned().transactionBegin();
boolean changed = initialize(routingNodes, unassigned);
if (!changed) {
NodeSorter sorter = newNodeSorter();
if (nodes.size() > 1) { /* skip if we only have one node */
for (String index : buildWeightOrderedIndices(Operation.BALANCE, sorter)) {
sorter.reset(Operation.BALANCE, index);
final float[] weights = sorter.weights;
final ModelNode[] modelNodes = sorter.modelNodes;
int lowIdx = 0;
int highIdx = weights.length - 1;
while (true) {
final ModelNode minNode = modelNodes[lowIdx];
final ModelNode maxNode = modelNodes[highIdx];
advance_range:
if (maxNode.numShards(index) > 0) {
float delta = weights[highIdx] - weights[lowIdx];
delta = lessThan(delta, threshold) ? delta : sorter.weight(Operation.THRESHOLD_CHECK, maxNode) - sorter.weight(Operation.THRESHOLD_CHECK, minNode);
if (lessThan(delta, threshold)) {
if (lowIdx > 0 && highIdx-1 > 0 // is there a chance for a higher delta?
&& (weights[highIdx-1] - weights[0] > threshold) // check if we need to break at all
) {
/* This is a special case where allocation from the "heaviest" to the "lighter" nodes is not possible
 * due to allocation decider restrictions like zone awareness: if one zone has, for instance,
 * fewer nodes than another zone, one zone may be horribly overloaded from a balanced perspective, but we
 * can't move shards to the "lighter" nodes since otherwise the zone would go over capacity.
 *
 * This break jumps straight to the condition below where we start moving from the high index towards
 * the low index to shrink the window we are considering for balance from the other direction.
 * (check shrinking the window from MAX to MIN)
 * See #3580
 */
break advance_range;
}
if (logger.isTraceEnabled()) {
logger.trace("Stop balancing index [{}] min_node [{}] weight: [{}] max_node [{}] weight: [{}] delta: [{}]",
index, maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta);
}
break;
}
if (logger.isTraceEnabled()) {
logger.trace("Balancing from node [{}] weight: [{}] to node [{}] weight: [{}] delta: [{}]",
maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta);
}
/* pass the delta to the replication function to prevent relocations that only swap the weights of the two nodes.
* a relocation must bring us closer to the balance if we only achieve the same delta the relocation is useless */
if (tryRelocateShard(Operation.BALANCE, minNode, maxNode, index, delta)) {
/*
* TODO: we could be a bit smarter here; we don't necessarily need to fully sort.
* We could just find the place to insert linearly, but the win might be minor
* compared to the added complexity.
*/
weights[lowIdx] = sorter.weight(Operation.BALANCE, modelNodes[lowIdx]);
weights[highIdx] = sorter.weight(Operation.BALANCE, modelNodes[highIdx]);
sorter.sort(0, weights.length);
lowIdx = 0;
highIdx = weights.length - 1;
changed = true;
continue;
}
}
if (lowIdx < highIdx - 1) {
/* Shrinking the window from MIN to MAX:
 * we can't move any shard from the min node, so let's move on to the next node
 * and see if the threshold still holds. We either don't have any shard of this
 * index on this node, or allocation deciders prevent any relocation. */
lowIdx++;
} else if (lowIdx > 0) {
/* Shrinking the window from MAX to MIN:
 * now we go max to min since obviously we can't move anything to the max node,
 * so let's pick the next highest */
lowIdx = 0;
highIdx--;
} else {
/* we are done here, we either can't relocate anymore or we are balanced */
break;
}
}
}
}
}
routingNodes.unassigned().transactionEnd(unassigned);
return changed;
}
/**
 * This builds an initial index ordering where the indices are returned
 * most unbalanced first. We need this in order to prevent over
 * allocations on added nodes from one index when the weight parameters
 * for global balance overrule the index balance at an intermediate
 * state. For example this can happen if we have 3 nodes and 3 indices
 * with 3 shards and 1 replica each. At the first stage all three nodes hold
 * 2 shards for each index. Now we add another node and the first index
 * is balanced by moving shards from two of the nodes over to the new node,
 * since it has no shards yet and the global balance for that node is way
 * below average. To re-balance we need to move shards back eventually, likely
 * to the nodes we relocated them from.
 */
private String[] buildWeightOrderedIndices(Operation operation, NodeSorter sorter) {
final String[] indices = this.indices.toArray(new String[this.indices.size()]);
final float[] deltas = new float[indices.length];
for (int i = 0; i < deltas.length; i++) {
sorter.reset(operation, indices[i]);
deltas[i] = sorter.delta();
}
new IntroSorter() {
float pivotWeight;
@Override
protected void swap(int i, int j) {
final String tmpIdx = indices[i];
indices[i] = indices[j];
indices[j] = tmpIdx;
final float tmpDelta = deltas[i];
deltas[i] = deltas[j];
deltas[j] = tmpDelta;
}
@Override
protected int compare(int i, int j) {
return Float.compare(deltas[j], deltas[i]);
}
@Override
protected void setPivot(int i) {
pivotWeight = deltas[i];
}
@Override
protected int comparePivot(int j) {
return Float.compare(deltas[j], pivotWeight);
}
}.sort(0, deltas.length);
return indices;
}
/**
* This function executes a move operation moving the given shard from
* the given node to the minimal eligible node with respect to the
* weight function. Iff the shard is moved, it will be set to
* {@link ShardRoutingState#RELOCATING} and a shadow instance of this
* shard is created with an incremented version in the state
* {@link ShardRoutingState#INITIALIZING}.
*
* @return <code>true</code> iff the shard has successfully been moved.
*/
public boolean move(MutableShardRouting shard, RoutingNode node ) {
if (nodes.isEmpty() || !shard.started()) {
/* with no nodes or a shard that is not started this is pointless */
return false;
}
if (logger.isTraceEnabled()) {
logger.trace("Try moving shard [{}] from [{}]", shard, node);
}
final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned().transactionBegin();
boolean changed = initialize(routingNodes, unassigned);
if (!changed) {
final ModelNode sourceNode = nodes.get(node.nodeId());
assert sourceNode != null;
final NodeSorter sorter = newNodeSorter();
sorter.reset(Operation.MOVE, shard.getIndex());
final ModelNode[] nodes = sorter.modelNodes;
assert sourceNode.containsShard(shard);
/*
* the sorter holds the minimum weight node first for the shards index.
* We now walk through the nodes until we find a node to allocate the shard.
* This is not guaranteed to be balanced after this operation; we still make a best effort to
* allocate on the minimal eligible node.
*/
for (ModelNode currentNode : nodes) {
if (currentNode.getNodeId().equals(node.nodeId())) {
continue;
}
RoutingNode target = routingNodes.node(currentNode.getNodeId());
Decision decision = allocation.deciders().canAllocate(shard, target, allocation);
if (decision.type() == Type.YES) { // TODO maybe we can respect throttling here too?
sourceNode.removeShard(shard);
final MutableShardRouting initializingShard = new MutableShardRouting(shard.index(), shard.id(), currentNode.getNodeId(),
shard.currentNodeId(), shard.restoreSource(), shard.primary(), INITIALIZING, shard.version() + 1);
currentNode.addShard(initializingShard, decision);
routingNodes.assign(initializingShard, target.nodeId());
routingNodes.relocate(shard, target.nodeId()); // set the node to relocate after we added the initializing shard
if (logger.isTraceEnabled()) {
logger.trace("Moved shard [{}] to node [{}]", shard, currentNode.getNodeId());
}
changed = true;
break;
}
}
}
routingNodes.unassigned().transactionEnd(unassigned);
return changed;
}
/**
* Builds the internal model from all shards in the given
* {@link Iterable}. All shards in the {@link Iterable} must be assigned
* to a node. This method will skip shards in the state
* {@link ShardRoutingState#RELOCATING} since each relocating shard has
* a shadow shard in the state {@link ShardRoutingState#INITIALIZING}
* on the target node which we respect during the allocation / balancing
* process. In short, this method recreates the status-quo in the cluster.
*/
private void buildModelFromAssigned(Iterable<MutableShardRouting> shards) {
for (MutableShardRouting shard : shards) {
assert shard.assignedToNode();
/* we skip relocating shards here since we expect an initializing shard with the same id coming in */
if (shard.state() == RELOCATING) {
continue;
}
ModelNode node = nodes.get(shard.currentNodeId());
assert node != null;
node.addShard(shard, Decision.single(Type.YES, "Already allocated on node", node.getNodeId()));
if (logger.isTraceEnabled()) {
logger.trace("Assigned shard [{}] to node [{}]", shard, node.getNodeId());
}
}
}
/**
* Allocates all given shards on the minimal eligible node for the shard's index
* with respect to the weight function. All given shards must be unassigned.
*/
private boolean allocateUnassigned(RoutingNodes.UnassignedShards unassigned, List<MutableShardRouting> ignoredUnassigned) {
assert !nodes.isEmpty();
if (logger.isTraceEnabled()) {
logger.trace("Start allocating unassigned shards");
}
if (unassigned.isEmpty()) {
return false;
}
boolean changed = false;
/*
* TODO: We could be smarter here and group the shards by index and then
* use the sorter to save some iterations.
*/
final AllocationDeciders deciders = allocation.deciders();
final Comparator<MutableShardRouting> comparator = new Comparator<MutableShardRouting>() {
@Override
public int compare(MutableShardRouting o1,
MutableShardRouting o2) {
if (o1.primary() ^ o2.primary()) {
return o1.primary() ? -1 : o2.primary() ? 1 : 0;
}
final int indexCmp;
if ((indexCmp = o1.index().compareTo(o2.index())) == 0) {
return o1.getId() - o2.getId();
}
return indexCmp;
}
};
/*
* we use 2 arrays and move replicas to the second array once we have allocated an identical
* replica in the current iteration, to make sure all indices get allocated in the same manner.
* The arrays are sorted by primaries first and then by index and shard ID, so 2 indices with 2 replicas and 1 shard each would look like:
* [(0,P,IDX1), (0,P,IDX2), (0,R,IDX1), (0,R,IDX1), (0,R,IDX2), (0,R,IDX2)]
* if we allocate for instance (0, R, IDX1) we move the second replica to the secondary array and proceed with
* the next replica. If we could not find a node to allocate (0,R,IDX1) we move all its replicas to ignoredUnassigned.
*/
MutableShardRouting[] primary = unassigned.drain();
MutableShardRouting[] secondary = new MutableShardRouting[primary.length];
int secondaryLength = 0;
int primaryLength = primary.length;
ArrayUtil.timSort(primary, comparator);
final Set<ModelNode> throttledNodes = new IdentityHashSet<ModelNode>();
do {
for (int i = 0; i < primaryLength; i++) {
MutableShardRouting shard = primary[i];
if (!shard.primary()) {
boolean drop = deciders.canAllocate(shard, allocation).type() == Type.NO;
if (drop) {
ignoredUnassigned.add(shard);
while(i < primaryLength-1 && comparator.compare(primary[i], primary[i+1]) == 0) {
ignoredUnassigned.add(primary[++i]);
}
continue;
} else {
while(i < primaryLength-1 && comparator.compare(primary[i], primary[i+1]) == 0) {
secondary[secondaryLength++] = primary[++i];
}
}
}
assert !shard.assignedToNode() : shard;
/* find a node with minimal weight we can allocate on */
float minWeight = Float.POSITIVE_INFINITY;
ModelNode minNode = null;
Decision decision = null;
if (throttledNodes.size() < nodes.size()) {
/* Don't iterate over an identity hashset here; the
* iteration order differs between runs and makes testing hard */
for (ModelNode node : nodes.values()) {
if (throttledNodes.contains(node)) {
continue;
}
/*
* The shard we add is removed again below; the addition only
* simulates it for the weight calculation. We use Decision.ALWAYS
* so we don't violate the non-null condition.
*/
if (!node.containsShard(shard)) {
node.addShard(shard, Decision.ALWAYS);
float currentWeight = weight.weight(Operation.ALLOCATE, this, node, shard.index());
/*
* Remove the shard from the node again this is only a
* simulation
*/
Decision removed = node.removeShard(shard);
assert removed != null;
/*
* We only consult the deciders when this node's weight is at
* least as good as the current minimum, i.e. the move could gain us something
*/
if (currentWeight <= minWeight) {
Decision currentDecision = deciders.canAllocate(shard, routingNodes.node(node.getNodeId()), allocation);
NOUPDATE:
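// labeled block: "break NOUPDATE" below skips the minNode/minWeight/decision update at the end of this block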
if (currentDecision.type() == Type.YES || currentDecision.type() == Type.THROTTLE) {
if (currentWeight == minWeight) {
/* we have an equal weight tie breaking:
* 1. if one decision is YES prefer it
* 2. prefer the node that holds the primary for this index with the next id in the ring ie.
* for the 3 shards 2 replica case we try to build up:
* 1 2 0
* 2 0 1
* 0 1 2
* such that if we need to tie-break we try to prefer the node holding a shard with the minimal id greater
* than the id of the shard we need to assign. This works fine when new indices are created since
* primaries are added first and we only add one shard set at a time in this algorithm.
*/
if (currentDecision.type() == decision.type()) {
final int repId = shard.id();
final int nodeHigh = node.highestPrimary(shard.index());
final int minNodeHigh = minNode.highestPrimary(shard.index());
if ((((nodeHigh > repId && minNodeHigh > repId) || (nodeHigh < repId && minNodeHigh < repId)) && (nodeHigh < minNodeHigh))
|| (nodeHigh > minNodeHigh && nodeHigh > repId && minNodeHigh < repId)) {
minNode = node;
minWeight = currentWeight;
decision = currentDecision;
} else {
break NOUPDATE;
}
} else if (currentDecision.type() != Type.YES) {
break NOUPDATE;
}
}
minNode = node;
minWeight = currentWeight;
decision = currentDecision;
}
}
}
}
}
assert decision != null && minNode != null || decision == null && minNode == null;
if (minNode != null) {
minNode.addShard(shard, decision);
if (decision.type() == Type.YES) {
if (logger.isTraceEnabled()) {
logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId());
}
routingNodes.assign(shard, routingNodes.node(minNode.getNodeId()).nodeId());
changed = true;
continue; // don't add to ignoreUnassigned
} else {
final RoutingNode node = routingNodes.node(minNode.getNodeId());
if (deciders.canAllocate(node, allocation).type() != Type.YES) {
if (logger.isTraceEnabled()) {
logger.trace("Can not allocate on node [{}] remove from round decisin [{}]", node, decision.type());
}
throttledNodes.add(minNode);
}
}
if (logger.isTraceEnabled()) {
logger.trace("No eligable node found to assign shard [{}] decision [{}]", shard, decision.type());
}
} else if (logger.isTraceEnabled()) {
logger.trace("No Node found to assign shard [{}]", shard);
}
ignoredUnassigned.add(shard);
if (!shard.primary()) { // we could not allocate it and we are a replica - check if we can ignore the other replicas
while(secondaryLength > 0 && comparator.compare(shard, secondary[secondaryLength-1]) == 0) {
ignoredUnassigned.add(secondary[--secondaryLength]);
}
}
}
primaryLength = secondaryLength;
MutableShardRouting[] tmp = primary;
primary = secondary;
secondary = tmp;
secondaryLength = 0;
} while (primaryLength > 0);
// no cleanup needed: every shard was either assigned or moved to ignoredUnassigned
return changed;
}
/**
* Tries to find a relocation from the max node to the minimal node for an arbitrary shard of the given index on the
* balance model. Iff this method returns <code>true</code>, the relocation has already been executed on the
* simulation model as well as on the cluster.
*/
private boolean tryRelocateShard(Operation operation, ModelNode minNode, ModelNode maxNode, String idx, float minCost) {
final ModelIndex index = maxNode.getIndex(idx);
Decision decision = null;
if (index != null) {
if (logger.isTraceEnabled()) {
logger.trace("Try relocating shard for index index [{}] from node [{}] to node [{}]", idx, maxNode.getNodeId(),
minNode.getNodeId());
}
final RoutingNode node = routingNodes.node(minNode.getNodeId());
MutableShardRouting candidate = null;
final AllocationDeciders deciders = allocation.deciders();
/* make a copy since we modify this list in the loop */
final ArrayList<MutableShardRouting> shards = new ArrayList<MutableShardRouting>(index.getAllShards());
for (MutableShardRouting shard : shards) {
if (shard.started()) {
// skip initializing, unassigned and relocating shards; we can't relocate them anyway
Decision allocationDecision = deciders.canAllocate(shard, node, allocation);
Decision rebalanceDecision = deciders.canRebalance(shard, allocation);
if (((allocationDecision.type() == Type.YES) || (allocationDecision.type() == Type.THROTTLE))
&& ((rebalanceDecision.type() == Type.YES) || (rebalanceDecision.type() == Type.THROTTLE))) {
Decision srcDecision;
if ((srcDecision = maxNode.removeShard(shard)) != null) {
minNode.addShard(shard, srcDecision);
final float delta = weight.weight(operation, this, minNode, idx) - weight.weight(operation, this, maxNode, idx);
if (delta < minCost ||
(candidate != null && delta == minCost && candidate.id() > shard.id())) {
/* this last line is a tie-breaker to make the shard allocation algorithm deterministic;
* otherwise we would rely on the iteration order of index.getAllShards(), which is a set. */
minCost = delta;
candidate = shard;
decision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision);
}
minNode.removeShard(shard);
maxNode.addShard(shard, srcDecision);
}
}
}
}
if (candidate != null) {
/* allocate on the model even if not throttled */
maxNode.removeShard(candidate);
minNode.addShard(candidate, decision);
if (decision.type() == Type.YES) { /* only allocate on the cluster if we are not throttled */
if (logger.isTraceEnabled()) {
logger.trace("Relocate shard [{}] from node [{}] to node [{}]", candidate, maxNode.getNodeId(),
minNode.getNodeId());
}
/* now allocate on the cluster - if we are started we need to relocate the shard */
if (candidate.started()) {
RoutingNode lowRoutingNode = routingNodes.node(minNode.getNodeId());
routingNodes.assign(new MutableShardRouting(candidate.index(), candidate.id(), lowRoutingNode.nodeId(), candidate
.currentNodeId(), candidate.restoreSource(), candidate.primary(), INITIALIZING, candidate.version() + 1), lowRoutingNode.nodeId());
routingNodes.relocate(candidate, lowRoutingNode.nodeId());
} else {
assert candidate.unassigned();
routingNodes.assign(candidate, routingNodes.node(minNode.getNodeId()).nodeId());
}
return true;
}
}
}
if (logger.isTraceEnabled()) {
logger.trace("Couldn't find shard to relocate from node [{}] to node [{}] allocation decision [{}]", maxNode.getNodeId(),
minNode.getNodeId(), decision == null ? "NO" : decision.type().name());
}
return false;
}
}
static class ModelNode implements Iterable<ModelIndex> {
private final String id;
private final Map<String, ModelIndex> indices = new HashMap<String, ModelIndex>();
/* cached stats - invalidated on add/remove and lazily calculated */
private int numShards = -1;
private int numPrimaries = -1;
public ModelNode(String id) {
this.id = id;
}
public ModelIndex getIndex(String indexId) {
return indices.get(indexId);
}
public String getNodeId() {
return id;
}
public int numShards() {
if (numShards == -1) {
int sum = 0;
for (ModelIndex index : indices.values()) {
sum += index.numShards();
}
numShards = sum;
}
return numShards;
}
public int numShards(String idx) {
ModelIndex index = indices.get(idx);
return index == null ? 0 : index.numShards();
}
public int numPrimaries(String idx) {
ModelIndex index = indices.get(idx);
return index == null ? 0 : index.numPrimaries();
}
public int numPrimaries() {
if (numPrimaries == -1) {
int sum = 0;
for (ModelIndex index : indices.values()) {
sum += index.numPrimaries();
}
numPrimaries = sum;
}
return numPrimaries;
}
public Collection<MutableShardRouting> shards() {
Collection<MutableShardRouting> result = new ArrayList<MutableShardRouting>();
for (ModelIndex index : indices.values()) {
result.addAll(index.getAllShards());
}
return result;
}
public int highestPrimary(String index) {
ModelIndex idx = indices.get(index);
if (idx != null) {
return idx.highestPrimary();
}
return -1;
}
public void addShard(MutableShardRouting shard, Decision decision) {
numPrimaries = numShards = -1;
ModelIndex index = indices.get(shard.index());
if (index == null) {
index = new ModelIndex(shard.index());
indices.put(index.getIndexId(), index);
}
index.addShard(shard, decision);
}
public Decision removeShard(MutableShardRouting shard) {
numPrimaries = numShards = -1;
ModelIndex index = indices.get(shard.index());
Decision removed = null;
if (index != null) {
removed = index.removeShard(shard);
if (removed != null && index.numShards() == 0) {
indices.remove(shard.index());
}
}
return removed;
}
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("Node(").append(id).append(")");
return sb.toString();
}
@Override
public Iterator<ModelIndex> iterator() {
return indices.values().iterator();
}
public boolean containsShard(MutableShardRouting shard) {
ModelIndex index = getIndex(shard.getIndex());
return index != null && index.containsShard(shard);
}
}
static final class ModelIndex {
private final String id;
private final Map<MutableShardRouting, Decision> shards = new HashMap<MutableShardRouting, Decision>();
private int numPrimaries = -1;
private int highestPrimary = -1;
public ModelIndex(String id) {
this.id = id;
}
public int highestPrimary() {
if (highestPrimary == -1) {
int maxId = -1;
for (MutableShardRouting shard : shards.keySet()) {
if (shard.primary()) {
maxId = Math.max(maxId, shard.id());
}
}
return highestPrimary = maxId;
}
return highestPrimary;
}
public String getIndexId() {
return id;
}
public Decision getDecicion(MutableShardRouting shard) {
return shards.get(shard);
}
public int numShards() {
return shards.size();
}
public Collection<MutableShardRouting> getAllShards() {
return shards.keySet();
}
public int numPrimaries() {
if (numPrimaries == -1) {
int num = 0;
for (MutableShardRouting shard : shards.keySet()) {
if (shard.primary()) {
num++;
}
}
return numPrimaries = num;
}
return numPrimaries;
}
public Decision removeShard(MutableShardRouting shard) {
highestPrimary = numPrimaries = -1;
return shards.remove(shard);
}
public void addShard(MutableShardRouting shard, Decision decision) {
highestPrimary = numPrimaries = -1;
assert decision != null;
assert !shards.containsKey(shard) : "Shard already allocated on current node: " + shards.get(shard) + " " + shard;
shards.put(shard, decision);
}
public boolean containsShard(MutableShardRouting shard) {
return shards.containsKey(shard);
}
}
static final class NodeSorter extends IntroSorter {
final ModelNode[] modelNodes;
/* the nodes weights with respect to the current weight function / index */
final float[] weights;
private final WeightFunction function;
private String index;
private final Balancer balancer;
private float pivotWeight;
public NodeSorter(ModelNode[] modelNodes, WeightFunction function, Balancer balancer) {
this.function = function;
this.balancer = balancer;
this.modelNodes = modelNodes;
weights = new float[modelNodes.length];
}
/**
* Resets the sorter, recalculates the weights per node and sorts the
* nodes by weight, with minimal weight first.
*/
public void reset(Operation operation, String index) {
this.index = index;
for (int i = 0; i < weights.length; i++) {
weights[i] = weight(operation, modelNodes[i]);
}
sort(0, modelNodes.length);
}
public float weight(Operation operation, ModelNode node) {
return function.weight(operation, balancer, node, index);
}
@Override
protected void swap(int i, int j) {
final ModelNode tmpNode = modelNodes[i];
modelNodes[i] = modelNodes[j];
modelNodes[j] = tmpNode;
final float tmpWeight = weights[i];
weights[i] = weights[j];
weights[j] = tmpWeight;
}
@Override
protected int compare(int i, int j) {
return Float.compare(weights[i], weights[j]);
}
@Override
protected void setPivot(int i) {
pivotWeight = weights[i];
}
@Override
protected int comparePivot(int j) {
return Float.compare(pivotWeight, weights[j]);
}
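// delta: the spread between the heaviest and lightest node's weight for the current index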
public float delta() {
return weights[weights.length - 1] - weights[0];
}
}
} | 0true
| src_main_java_org_elasticsearch_cluster_routing_allocation_allocator_BalancedShardsAllocator.java |
1,491 | public interface AddressPicker {
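// picks this member's network addresses: the bind address for the local server socket
// and the public address advertised to other members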
void pickAddress() throws Exception;
Address getBindAddress();
Address getPublicAddress();
ServerSocketChannel getServerSocketChannel();
} | 0true
| hazelcast_src_main_java_com_hazelcast_instance_AddressPicker.java |
1,976 | MapLoader mapLoader = new MapLoader() {
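// test stub: load() returns nothing, loadAll() records its invocation, and loadAllKeys() reports key 1 as the only loadable key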
@Override
public Object load(Object key) {
return null;
}
@Override
public Map loadAll(Collection keys) {
loadAllCalled.set(true);
return new HashMap();
}
@Override
public Set loadAllKeys() {
return new HashSet(Arrays.asList(1));
}
}; | 0true
| hazelcast_src_test_java_com_hazelcast_map_mapstore_MapLoaderTest.java |
663 | constructors[COLLECTION_EVENT_FILTER] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
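// factory registered for the COLLECTION_EVENT_FILTER type id, used when deserializing CollectionEventFilter instances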
public IdentifiedDataSerializable createNew(Integer arg) {
return new CollectionEventFilter();
}
}; | 0true
| hazelcast_src_main_java_com_hazelcast_collection_CollectionDataSerializerHook.java |
319 | public interface MergeBeanStatusProvider {
/**
* Typically used by the {@link AbstractMergeBeanPostProcessor} class to determine whether or not certain
* lists should be processed or if they can be safely ignored.
*
* @param bean
* @param beanName
* @param appCtx
* @return whether or not processing should be triggered
*/
public boolean isProcessingEnabled(Object bean, String beanName, ApplicationContext appCtx);
} | 0true
| common_src_main_java_org_broadleafcommerce_common_extensibility_context_merge_MergeBeanStatusProvider.java |
1,412 | clusterService.submitStateUpdateTask("open-indices " + indicesAsString, Priority.URGENT, new AckedClusterStateUpdateTask() {
@Override
public boolean mustAck(DiscoveryNode discoveryNode) {
return true;
}
@Override
public void onAllNodesAcked(@Nullable Throwable t) {
listener.onResponse(new ClusterStateUpdateResponse(true));
}
@Override
public void onAckTimeout() {
listener.onResponse(new ClusterStateUpdateResponse(false));
}
@Override
public TimeValue ackTimeout() {
return request.ackTimeout();
}
@Override
public TimeValue timeout() {
return request.masterNodeTimeout();
}
@Override
public void onFailure(String source, Throwable t) {
listener.onFailure(t);
}
@Override
public ClusterState execute(ClusterState currentState) {
List<String> indicesToOpen = new ArrayList<String>();
for (String index : request.indices()) {
IndexMetaData indexMetaData = currentState.metaData().index(index);
if (indexMetaData == null) {
throw new IndexMissingException(new Index(index));
}
if (indexMetaData.state() != IndexMetaData.State.OPEN) {
indicesToOpen.add(index);
}
}
if (indicesToOpen.isEmpty()) {
return currentState;
}
logger.info("opening indices [{}]", indicesAsString);
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder()
.blocks(currentState.blocks());
for (String index : indicesToOpen) {
mdBuilder.put(IndexMetaData.builder(currentState.metaData().index(index)).state(IndexMetaData.State.OPEN));
blocksBuilder.removeIndexBlock(index, INDEX_CLOSED_BLOCK);
}
ClusterState updatedState = ClusterState.builder(currentState).metaData(mdBuilder).blocks(blocksBuilder).build();
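// rebuild the routing table so the re-opened indices have their shards scheduled for recovery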
RoutingTable.Builder rtBuilder = RoutingTable.builder(updatedState.routingTable());
for (String index : indicesToOpen) {
rtBuilder.addAsRecovery(updatedState.metaData().index(index));
}
RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(rtBuilder).build());
//no explicit wait for other nodes needed as we use AckedClusterStateUpdateTask
return ClusterState.builder(updatedState).routingResult(routingResult).build();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
}
}); | 0true
| src_main_java_org_elasticsearch_cluster_metadata_MetaDataIndexStateService.java |
273 | @SuppressWarnings("serial")
public abstract class OCommandRequestAbstract implements OCommandRequestInternal {
protected OCommandResultListener resultListener;
protected OProgressListener progressListener;
protected int limit = -1;
protected long timeoutMs = OGlobalConfiguration.COMMAND_TIMEOUT.getValueAsLong();
protected TIMEOUT_STRATEGY timeoutStrategy = TIMEOUT_STRATEGY.EXCEPTION;
protected Map<Object, Object> parameters;
protected String fetchPlan = null;
protected boolean useCache = false;
protected OCommandContext context;
protected OCommandRequestAbstract() {
}
public OCommandResultListener getResultListener() {
return resultListener;
}
public void setResultListener(OCommandResultListener iListener) {
resultListener = iListener;
}
public Map<Object, Object> getParameters() {
return parameters;
}
protected void setParameters(final Object... iArgs) {
if (iArgs != null && iArgs.length > 0)
parameters = convertToParameters(iArgs);
}
@SuppressWarnings("unchecked")
protected Map<Object, Object> convertToParameters(final Object... iArgs) {
final Map<Object, Object> params;
if (iArgs.length == 1 && iArgs[0] instanceof Map) {
params = (Map<Object, Object>) iArgs[0];
} else {
params = new HashMap<Object, Object>(iArgs.length);
for (int i = 0; i < iArgs.length; ++i) {
Object par = iArgs[i];
if (par instanceof OIdentifiable && ((OIdentifiable) par).getIdentity().isValid())
// USE THE RID ONLY
par = ((OIdentifiable) par).getIdentity();
params.put(i, par);
}
}
return params;
}
public OProgressListener getProgressListener() {
return progressListener;
}
public OCommandRequestAbstract setProgressListener(OProgressListener progressListener) {
this.progressListener = progressListener;
return this;
}
public void reset() {
}
public int getLimit() {
return limit;
}
public OCommandRequestAbstract setLimit(final int limit) {
this.limit = limit;
return this;
}
public String getFetchPlan() {
return fetchPlan;
}
@SuppressWarnings("unchecked")
public <RET extends OCommandRequest> RET setFetchPlan(String fetchPlan) {
this.fetchPlan = fetchPlan;
return (RET) this;
}
public boolean isUseCache() {
return useCache;
}
public void setUseCache(boolean useCache) {
this.useCache = useCache;
}
@Override
public OCommandContext getContext() {
if (context == null)
context = new OBasicCommandContext();
return context;
}
public OCommandRequestAbstract setContext(final OCommandContext iContext) {
context = iContext;
return this;
}
public long getTimeoutTime() {
return timeoutMs;
}
public void setTimeout(final long timeout, TIMEOUT_STRATEGY strategy) {
this.timeoutMs = timeout;
this.timeoutStrategy = strategy;
}
public TIMEOUT_STRATEGY getTimeoutStrategy() {
return timeoutStrategy;
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_command_OCommandRequestAbstract.java |
80 | NOT_EQUAL {
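// inequality predicate: a null condition matches any non-null value (see evaluate below)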
@Override
public boolean isValidValueType(Class<?> clazz) {
return true;
}
@Override
public boolean isValidCondition(Object condition) {
return true;
}
@Override
public boolean evaluate(Object value, Object condition) {
if (condition==null) {
return value!=null;
} else {
return !condition.equals(value);
}
}
@Override
public String toString() {
return "<>";
}
@Override
public TitanPredicate negate() {
return EQUAL;
}
}, | 0true
| titan-core_src_main_java_com_thinkaurelius_titan_core_attribute_Cmp.java |
967 | @Entity
@DiscriminatorColumn(name = "TYPE")
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_BUND_ITEM_FEE_PRICE")
@Cache(usage = CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region = "blOrderElements")
@AdminPresentationMergeOverrides(
{
@AdminPresentationMergeOverride(name = "", mergeEntries =
@AdminPresentationMergeEntry(propertyType = PropertyType.AdminPresentation.READONLY,
booleanOverrideValue = true))
}
)
public class BundleOrderItemFeePriceImpl implements BundleOrderItemFeePrice {
public static final Log LOG = LogFactory.getLog(BundleOrderItemFeePriceImpl.class);
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "BundleOrderItemFeePriceId")
@GenericGenerator(
name="BundleOrderItemFeePriceId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="BundleOrderItemFeePriceImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.core.order.domain.BundleOrderItemFeePriceImpl")
}
)
@Column(name = "BUND_ITEM_FEE_PRICE_ID")
protected Long id;
@ManyToOne(targetEntity = BundleOrderItemImpl.class, optional = false)
@JoinColumn(name = "BUND_ORDER_ITEM_ID")
protected BundleOrderItem bundleOrderItem;
@Column(name = "AMOUNT", precision=19, scale=5)
@AdminPresentation(friendlyName = "BundleOrderItemFeePriceImpl_Amount", order=2, prominent=true)
protected BigDecimal amount;
@Column(name = "NAME")
@AdminPresentation(friendlyName = "BundleOrderItemFeePriceImpl_Name", order=1, prominent=true)
private String name;
@Column(name = "REPORTING_CODE")
@AdminPresentation(friendlyName = "BundleOrderItemFeePriceImpl_Reporting_Code", order=3, prominent=true)
private String reportingCode;
@Column(name = "IS_TAXABLE")
@AdminPresentation(friendlyName = "BundleOrderItemFeePriceImpl_Taxable", order=4)
private Boolean isTaxable = Boolean.FALSE;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public BundleOrderItem getBundleOrderItem() {
return bundleOrderItem;
}
@Override
public void setBundleOrderItem(BundleOrderItem bundleOrderItem) {
this.bundleOrderItem = bundleOrderItem;
}
@Override
public Money getAmount() {
return convertToMoney(amount);
}
@Override
public void setAmount(Money amount) {
this.amount = Money.toAmount(amount);
}
@Override
public String getName() {
return name;
}
@Override
public void setName(String name) {
this.name = name;
}
@Override
public Boolean isTaxable() {
return isTaxable;
}
@Override
public void setTaxable(Boolean isTaxable) {
this.isTaxable = isTaxable;
}
@Override
public String getReportingCode() {
return reportingCode;
}
@Override
public void setReportingCode(String reportingCode) {
this.reportingCode = reportingCode;
}
public void checkCloneable(BundleOrderItemFeePrice bundleFeePrice) throws CloneNotSupportedException, SecurityException, NoSuchMethodException {
Method cloneMethod = bundleFeePrice.getClass().getMethod("clone", new Class[]{});
if (cloneMethod.getDeclaringClass().getName().startsWith("org.broadleafcommerce") && !bundleFeePrice.getClass().getName().startsWith("org.broadleafcommerce")) {
//subclass is not implementing the clone method
throw new CloneNotSupportedException("Custom extensions and implementations should implement clone in order to guarantee split and merge operations are performed accurately");
}
}
protected Money convertToMoney(BigDecimal amount) {
return amount == null ? null : BroadleafCurrencyUtils.getMoney(amount, bundleOrderItem.getOrder().getCurrency());
}
@Override
public BundleOrderItemFeePrice clone() {
//instantiate from the fully qualified name via reflection
BundleOrderItemFeePrice clone;
try {
clone = (BundleOrderItemFeePrice) Class.forName(this.getClass().getName()).newInstance();
try {
checkCloneable(clone);
} catch (CloneNotSupportedException e) {
LOG.warn("Clone implementation missing in inheritance hierarchy outside of Broadleaf: " + clone.getClass().getName(), e);
}
clone.setAmount(convertToMoney(amount));
clone.setName(name);
clone.setReportingCode(reportingCode);
clone.setBundleOrderItem(bundleOrderItem);
} catch (Exception e) {
throw new RuntimeException(e);
}
return clone;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((amount == null) ? 0 : amount.hashCode());
result = prime * result + ((bundleOrderItem == null) ? 0 : bundleOrderItem.hashCode());
result = prime * result + ((id == null) ? 0 : id.hashCode());
result = prime * result + (isTaxable ? 1231 : 1237);
result = prime * result + ((name == null) ? 0 : name.hashCode());
result = prime * result + ((reportingCode == null) ? 0 : reportingCode.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
BundleOrderItemFeePriceImpl other = (BundleOrderItemFeePriceImpl) obj;
if (amount == null) {
if (other.amount != null) {
return false;
}
} else if (!amount.equals(other.amount)) {
return false;
}
if (bundleOrderItem == null) {
if (other.bundleOrderItem != null) {
return false;
}
} else if (!bundleOrderItem.equals(other.bundleOrderItem)) {
return false;
}
if (id == null) {
if (other.id != null) {
return false;
}
} else if (!id.equals(other.id)) {
return false;
}
if (isTaxable != other.isTaxable) {
return false;
}
if (name == null) {
if (other.name != null) {
return false;
}
} else if (!name.equals(other.name)) {
return false;
}
if (reportingCode == null) {
if (other.reportingCode != null) {
return false;
}
} else if (!reportingCode.equals(other.reportingCode)) {
return false;
}
return true;
}
} | 1no label
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_domain_BundleOrderItemFeePriceImpl.java |
2,131 | public class LoggerMessageFormat {
static final char DELIM_START = '{';
static final char DELIM_STOP = '}';
static final String DELIM_STR = "{}";
private static final char ESCAPE_CHAR = '\\';
public static String format(final String messagePattern, final Object... argArray) {
return format(null, messagePattern, argArray);
}
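// usage sketch: format("a {} b {}", 1, 2) yields "a 1 b 2";
// a single backslash escapes the delimiter, so the pattern a \{} b {} with arg 1 yields a {} b 1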
public static String format(final String prefix, final String messagePattern, final Object... argArray) {
if (messagePattern == null) {
return null;
}
if (argArray == null) {
if (prefix == null) {
return messagePattern;
} else {
return prefix + messagePattern;
}
}
int i = 0;
int j;
final StringBuilder sbuf = new StringBuilder(messagePattern.length() + 50);
if (prefix != null) {
sbuf.append(prefix);
}
for (int L = 0; L < argArray.length; L++) {
j = messagePattern.indexOf(DELIM_STR, i);
if (j == -1) {
// no more variables
if (i == 0) { // this is a simple string
return messagePattern;
} else { // add the tail string which contains no variables and return
// the result.
sbuf.append(messagePattern.substring(i, messagePattern.length()));
return sbuf.toString();
}
} else {
if (isEscapedDelimeter(messagePattern, j)) {
if (!isDoubleEscaped(messagePattern, j)) {
L--; // DELIM_START was escaped, thus should not be incremented
sbuf.append(messagePattern.substring(i, j - 1));
sbuf.append(DELIM_START);
i = j + 1;
} else {
// The escape character preceding the delimiter start is
// itself escaped: "abc x:\\{}"
// we have to consume one backward slash
sbuf.append(messagePattern.substring(i, j - 1));
deeplyAppendParameter(sbuf, argArray[L], new HashMap());
i = j + 2;
}
} else {
// normal case
sbuf.append(messagePattern.substring(i, j));
deeplyAppendParameter(sbuf, argArray[L], new HashMap());
i = j + 2;
}
}
}
// append the characters following the last {} pair.
sbuf.append(messagePattern.substring(i, messagePattern.length()));
return sbuf.toString();
}
static boolean isEscapedDelimeter(String messagePattern,
int delimeterStartIndex) {
if (delimeterStartIndex == 0) {
return false;
}
char potentialEscape = messagePattern.charAt(delimeterStartIndex - 1);
return potentialEscape == ESCAPE_CHAR;
}
static boolean isDoubleEscaped(String messagePattern, int delimeterStartIndex) {
return delimeterStartIndex >= 2 && messagePattern.charAt(delimeterStartIndex - 2) == ESCAPE_CHAR;
}
private static void deeplyAppendParameter(StringBuilder sbuf, Object o, Map seenMap) {
if (o == null) {
sbuf.append("null");
return;
}
if (!o.getClass().isArray()) {
safeObjectAppend(sbuf, o);
} else {
// check for primitive array types because they
// unfortunately cannot be cast to Object[]
if (o instanceof boolean[]) {
booleanArrayAppend(sbuf, (boolean[]) o);
} else if (o instanceof byte[]) {
byteArrayAppend(sbuf, (byte[]) o);
} else if (o instanceof char[]) {
charArrayAppend(sbuf, (char[]) o);
} else if (o instanceof short[]) {
shortArrayAppend(sbuf, (short[]) o);
} else if (o instanceof int[]) {
intArrayAppend(sbuf, (int[]) o);
} else if (o instanceof long[]) {
longArrayAppend(sbuf, (long[]) o);
} else if (o instanceof float[]) {
floatArrayAppend(sbuf, (float[]) o);
} else if (o instanceof double[]) {
doubleArrayAppend(sbuf, (double[]) o);
} else {
objectArrayAppend(sbuf, (Object[]) o, seenMap);
}
}
}
private static void safeObjectAppend(StringBuilder sbuf, Object o) {
try {
String oAsString = o.toString();
sbuf.append(oAsString);
} catch (Throwable t) {
sbuf.append("[FAILED toString()]");
}
}
private static void objectArrayAppend(StringBuilder sbuf, Object[] a, Map seenMap) {
sbuf.append('[');
if (!seenMap.containsKey(a)) {
seenMap.put(a, null);
final int len = a.length;
for (int i = 0; i < len; i++) {
deeplyAppendParameter(sbuf, a[i], seenMap);
if (i != len - 1)
sbuf.append(", ");
}
// allow repeats in siblings
seenMap.remove(a);
} else {
sbuf.append("...");
}
sbuf.append(']');
}
private static void booleanArrayAppend(StringBuilder sbuf, boolean[] a) {
sbuf.append('[');
final int len = a.length;
for (int i = 0; i < len; i++) {
sbuf.append(a[i]);
if (i != len - 1)
sbuf.append(", ");
}
sbuf.append(']');
}
private static void byteArrayAppend(StringBuilder sbuf, byte[] a) {
sbuf.append('[');
final int len = a.length;
for (int i = 0; i < len; i++) {
sbuf.append(a[i]);
if (i != len - 1)
sbuf.append(", ");
}
sbuf.append(']');
}
private static void charArrayAppend(StringBuilder sbuf, char[] a) {
sbuf.append('[');
final int len = a.length;
for (int i = 0; i < len; i++) {
sbuf.append(a[i]);
if (i != len - 1)
sbuf.append(", ");
}
sbuf.append(']');
}
private static void shortArrayAppend(StringBuilder sbuf, short[] a) {
sbuf.append('[');
final int len = a.length;
for (int i = 0; i < len; i++) {
sbuf.append(a[i]);
if (i != len - 1)
sbuf.append(", ");
}
sbuf.append(']');
}
private static void intArrayAppend(StringBuilder sbuf, int[] a) {
sbuf.append('[');
final int len = a.length;
for (int i = 0; i < len; i++) {
sbuf.append(a[i]);
if (i != len - 1)
sbuf.append(", ");
}
sbuf.append(']');
}
private static void longArrayAppend(StringBuilder sbuf, long[] a) {
sbuf.append('[');
final int len = a.length;
for (int i = 0; i < len; i++) {
sbuf.append(a[i]);
if (i != len - 1)
sbuf.append(", ");
}
sbuf.append(']');
}
private static void floatArrayAppend(StringBuilder sbuf, float[] a) {
sbuf.append('[');
final int len = a.length;
for (int i = 0; i < len; i++) {
sbuf.append(a[i]);
if (i != len - 1)
sbuf.append(", ");
}
sbuf.append(']');
}
private static void doubleArrayAppend(StringBuilder sbuf, double[] a) {
sbuf.append('[');
final int len = a.length;
for (int i = 0; i < len; i++) {
sbuf.append(a[i]);
if (i != len - 1)
sbuf.append(", ");
}
sbuf.append(']');
}
} | 0true
| src_main_java_org_elasticsearch_common_logging_support_LoggerMessageFormat.java |
1,066 | public class MapIndexConfig {
private String attribute;
private boolean ordered = false;
private MapIndexConfigReadOnly readOnly;
/**
* Creates a MapIndexConfig without an attribute and with ordered is false.
*/
public MapIndexConfig() {
}
/**
* Creates a MapIndexConfig with the given attribute and ordered setting.
*
* @param attribute the attribute that is going to be indexed.
* @param ordered if the index is ordered.
* @see #setOrdered(boolean)
* @see #setAttribute(String)
*/
public MapIndexConfig(String attribute, boolean ordered) {
setAttribute(attribute);
setOrdered(ordered);
}
public MapIndexConfig(MapIndexConfig config) {
attribute = config.getAttribute();
ordered = config.isOrdered();
}
public MapIndexConfigReadOnly getAsReadOnly() {
if (readOnly == null ){
readOnly = new MapIndexConfigReadOnly(this);
}
return readOnly;
}
/**
* Gets the attribute that is going to be indexed. If no attribute is set, null is returned.
*
* @return the attribute to be indexed.
* @see #setAttribute(String)
*/
public String getAttribute() {
return attribute;
}
/**
* Sets the attribute that is going to be indexed.
*
* @param attribute the attribute that is going to be indexed.
* @return the updated MapIndexConfig.
* @throws IllegalArgumentException if attribute is null or an empty string.
*/
public MapIndexConfig setAttribute(String attribute) {
this.attribute = hasText(attribute,"Map index attribute");
return this;
}
/**
* Checks if the index should be ordered.
*
* @return true if ordered, false otherwise.
* @see #setOrdered(boolean)
*/
public boolean isOrdered() {
return ordered;
}
/**
* Configures the index to be ordered or not ordered. Some indices can be ordered, e.g. age. Sometimes you
* want to look for all people with an age equal to or greater than X. In other cases an ordered index doesn't make
* sense, e.g. a phone number of a person.
*
* @param ordered if the index should be an ordered index.
* @return the updated MapIndexConfig.
*/
public MapIndexConfig setOrdered(boolean ordered) {
this.ordered = ordered;
return this;
}
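// e.g. new MapIndexConfig("age", true) supports range queries such as age >= 40,
// while new MapIndexConfig("phone", false) serves equality lookups only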
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("MapIndexConfig{");
sb.append("attribute='").append(attribute).append('\'');
sb.append(", ordered=").append(ordered);
sb.append('}');
return sb.toString();
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_config_MapIndexConfig.java |
1,047 | public class MultiTermVectorsShardRequest extends SingleShardOperationRequest<MultiTermVectorsShardRequest> {
private int shardId;
private String preference;
IntArrayList locations;
List<TermVectorRequest> requests;
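// parallel lists: locations.get(i) is the position of requests.get(i) in the original
// multi-request, so shard responses can be re-ordered to match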
MultiTermVectorsShardRequest() {
}
MultiTermVectorsShardRequest(String index, int shardId) {
super(index);
this.shardId = shardId;
locations = new IntArrayList();
requests = new ArrayList<TermVectorRequest>();
}
public int shardId() {
return this.shardId;
}
/**
* Sets the preference to execute the search. Defaults to randomizing across shards. Can be set to
* <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
* a custom value, which guarantees that the same order will be used across different requests.
*/
public MultiTermVectorsShardRequest preference(String preference) {
this.preference = preference;
return this;
}
public String preference() {
return this.preference;
}
public void add(int location, TermVectorRequest request) {
this.locations.add(location);
this.requests.add(request);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
locations = new IntArrayList(size);
requests = new ArrayList<TermVectorRequest>(size);
for (int i = 0; i < size; i++) {
locations.add(in.readVInt());
requests.add(TermVectorRequest.readTermVectorRequest(in));
}
preference = in.readOptionalString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(locations.size());
for (int i = 0; i < locations.size(); i++) {
out.writeVInt(locations.get(i));
requests.get(i).writeTo(out);
}
out.writeOptionalString(preference);
}
} | 0true
| src_main_java_org_elasticsearch_action_termvector_MultiTermVectorsShardRequest.java |