Unnamed: 0 (int64, 0–6.45k) | func (stringlengths 29–253k) | target (class label, 2 classes) | project (stringlengths 36–167) |
---|---|---|---|
47 | public enum Order {
/**
* Increasing
*/
ASC,
/**
* Decreasing
*/
DESC;
/**
* Modulates the result of a {@link Comparable#compareTo(Object)} execution for this specific
* order, i.e. it negates the result if the order is {@link #DESC}.
*
* @param compare the result of the {@code compareTo} invocation
* @return {@code compare} unchanged for {@link #ASC}, negated for {@link #DESC}
*/
public int modulateNaturalOrder(int compare) {
switch (this) {
case ASC:
return compare;
case DESC:
return -compare;
default:
throw new AssertionError("Unrecognized order: " + this);
}
}
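// Usage sketch (hypothetical caller): a single comparator can serve both
// directions by routing its compareTo result through the order, e.g.
//   int c = order.modulateNaturalOrder(a.compareTo(b)); // negated when DESC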
/**
* The default order when none is specified
*/
public static final Order DEFAULT = ASC;
} | 0true
| titan-core_src_main_java_com_thinkaurelius_titan_core_Order.java |
62 | @Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_FLD_ENUM")
@Cache(usage= CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blCMSElements")
public class FieldEnumerationImpl implements FieldEnumeration {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "FieldEnumerationId")
@GenericGenerator(
name="FieldEnumerationId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="FieldEnumerationImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.cms.field.domain.FieldEnumerationImpl")
}
)
@Column(name = "FLD_ENUM_ID")
protected Long id;
@Column (name = "NAME")
protected String name;
@OneToMany(mappedBy = "fieldEnumeration", targetEntity = FieldEnumerationItemImpl.class, cascade = {CascadeType.ALL})
@Cascade(value={org.hibernate.annotations.CascadeType.ALL, org.hibernate.annotations.CascadeType.DELETE_ORPHAN})
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blCMSElements")
@OrderBy("fieldOrder")
@BatchSize(size = 20)
protected List<FieldEnumerationItem> enumerationItems;
@Override
public List<FieldEnumerationItem> getEnumerationItems() {
return enumerationItems;
}
@Override
public void setEnumerationItems(List<FieldEnumerationItem> enumerationItems) {
this.enumerationItems = enumerationItems;
}
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public String getName() {
return name;
}
@Override
public void setName(String name) {
this.name = name;
}
} | 0true
| admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_field_domain_FieldEnumerationImpl.java |
106 | class CreateEnumProposal extends CorrectionProposal {
CreateEnumProposal(String def, String desc, Image image,
int offset, TextFileChange change) {
super(desc, change, new Region(offset, 0), image);
}
static void addCreateEnumProposal(Tree.CompilationUnit cu, Node node,
ProblemLocation problem, Collection<ICompletionProposal> proposals,
IProject project, TypeChecker tc, IFile file) {
Node idn = Nodes.getIdentifyingNode(node);
if (idn==null) return;
String brokenName = idn.getText();
if (brokenName.isEmpty()) return;
Tree.Declaration dec = findDeclaration(cu, node);
if (dec instanceof Tree.ClassDefinition) {
Tree.ClassDefinition cd = (Tree.ClassDefinition) dec;
if (cd.getCaseTypes()!=null) {
if (cd.getCaseTypes().getTypes().contains(node)) {
addCreateEnumProposal(proposals, project,
"class " + brokenName + parameters(cd.getTypeParameterList()) +
parameters(cd.getParameterList()) +
" extends " + cd.getDeclarationModel().getName() +
parameters(cd.getTypeParameterList()) +
arguments(cd.getParameterList()) + " {}",
"class '"+ brokenName + parameters(cd.getTypeParameterList()) +
parameters(cd.getParameterList()) + "'",
CeylonLabelProvider.CLASS, cu, cd);
}
if (cd.getCaseTypes().getBaseMemberExpressions().contains(node)) {
addCreateEnumProposal(proposals, project,
"object " + brokenName +
" extends " + cd.getDeclarationModel().getName() +
parameters(cd.getTypeParameterList()) +
arguments(cd.getParameterList()) + " {}",
"object '"+ brokenName + "'",
ATTRIBUTE, cu, cd);
}
}
}
if (dec instanceof Tree.InterfaceDefinition) {
Tree.InterfaceDefinition cd = (Tree.InterfaceDefinition) dec;
if (cd.getCaseTypes()!=null) {
if (cd.getCaseTypes().getTypes().contains(node)) {
addCreateEnumProposal(proposals, project,
"interface " + brokenName + parameters(cd.getTypeParameterList()) +
" satisfies " + cd.getDeclarationModel().getName() +
parameters(cd.getTypeParameterList()) + " {}",
"interface '"+ brokenName + parameters(cd.getTypeParameterList()) + "'",
INTERFACE, cu, cd);
}
if (cd.getCaseTypes().getBaseMemberExpressions().contains(node)) {
addCreateEnumProposal(proposals, project,
"object " + brokenName +
" satisfies " + cd.getDeclarationModel().getName() +
parameters(cd.getTypeParameterList()) + " {}",
"object '"+ brokenName + "'",
ATTRIBUTE, cu, cd);
}
}
}
}
private static void addCreateEnumProposal(Collection<ICompletionProposal> proposals,
String def, String desc, Image image, PhasedUnit unit,
Tree.Statement statement) {
IFile file = getFile(unit);
TextFileChange change = new TextFileChange("Create Enumerated", file);
IDocument doc = EditorUtil.getDocument(change);
String indent = getIndent(statement, doc);
String s = indent + def + Indents.getDefaultLineDelimiter(doc);
int offset = statement.getStopIndex()+2;
if (offset>doc.getLength()) {
offset = doc.getLength();
s = Indents.getDefaultLineDelimiter(doc) + s;
}
change.setEdit(new InsertEdit(offset, s));
proposals.add(new CreateEnumProposal(def,
"Create enumerated " + desc,
image, offset + def.indexOf("{}")+1, change));
}
private static void addCreateEnumProposal(Collection<ICompletionProposal> proposals,
IProject project, String def, String desc, Image image,
Tree.CompilationUnit cu, Tree.TypeDeclaration cd) {
for (PhasedUnit unit: getUnits(project)) {
if (unit.getUnit().equals(cu.getUnit())) {
addCreateEnumProposal(proposals, def, desc, image, unit, cd);
break;
}
}
}
private static String parameters(Tree.ParameterList pl) {
StringBuilder result = new StringBuilder();
if (pl==null ||
pl.getParameters().isEmpty()) {
result.append("()");
}
else {
result.append("(");
int len = pl.getParameters().size(), i=0;
for (Tree.Parameter p: pl.getParameters()) {
if (p!=null) {
if (p instanceof Tree.ParameterDeclaration) {
Tree.TypedDeclaration td =
((Tree.ParameterDeclaration) p).getTypedDeclaration();
result.append(td.getType().getTypeModel().getProducedTypeName())
.append(" ")
.append(td.getIdentifier().getText());
}
else if (p instanceof Tree.InitializerParameter) {
result.append(p.getParameterModel().getType().getProducedTypeName())
.append(" ")
.append(((Tree.InitializerParameter) p).getIdentifier().getText());
}
//TODO: easy to add back in:
/*if (p instanceof Tree.FunctionalParameterDeclaration) {
Tree.FunctionalParameterDeclaration fp = (Tree.FunctionalParameterDeclaration) p;
for (Tree.ParameterList ipl: fp.getParameterLists()) {
parameters(ipl, label);
}
}*/
}
if (++i<len) result.append(", ");
}
result.append(")");
}
return result.toString();
}
private static String parameters(Tree.TypeParameterList tpl) {
StringBuilder result = new StringBuilder();
if (tpl!=null &&
!tpl.getTypeParameterDeclarations().isEmpty()) {
result.append("<");
int len = tpl.getTypeParameterDeclarations().size(), i=0;
for (Tree.TypeParameterDeclaration p: tpl.getTypeParameterDeclarations()) {
result.append(p.getIdentifier().getText());
if (++i<len) result.append(", ");
}
result.append(">");
}
return result.toString();
}
private static String arguments(Tree.ParameterList pl) {
StringBuilder result = new StringBuilder();
if (pl==null ||
pl.getParameters().isEmpty()) {
result.append("()");
}
else {
result.append("(");
int len = pl.getParameters().size(), i=0;
for (Tree.Parameter p: pl.getParameters()) {
if (p!=null) {
Tree.Identifier id;
if (p instanceof Tree.InitializerParameter) {
id = ((Tree.InitializerParameter) p).getIdentifier();
}
else if (p instanceof Tree.ParameterDeclaration) {
id = ((Tree.ParameterDeclaration) p).getTypedDeclaration().getIdentifier();
}
else {
continue;
}
result.append(id.getText());
//TODO: easy to add back in:
/*if (p instanceof Tree.FunctionalParameterDeclaration) {
Tree.FunctionalParameterDeclaration fp = (Tree.FunctionalParameterDeclaration) p;
for (Tree.ParameterList ipl: fp.getParameterLists()) {
parameters(ipl, label);
}
}*/
}
if (++i<len) result.append(", ");
}
result.append(")");
}
return result.toString();
}
} | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_CreateEnumProposal.java |
1,956 | public static class Instance<T> implements InternalFactory<T> {
private final T object;
public Instance(T object) {
this.object = object;
}
@Override
public T get(Errors errors, InternalContext context, Dependency<?> dependency) throws ErrorsException {
return object;
}
@Override
public String toString() {
return object.toString();
}
} | 0true
| src_main_java_org_elasticsearch_common_inject_internal_InternalFactory.java |
2,933 | public class ShingleTokenFilterFactory extends AbstractTokenFilterFactory {
private final Factory factory;
@Inject
public ShingleTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
super(index, indexSettings, name, settings);
Integer maxShingleSize = settings.getAsInt("max_shingle_size", ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE);
Integer minShingleSize = settings.getAsInt("min_shingle_size", ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE);
Boolean outputUnigrams = settings.getAsBoolean("output_unigrams", true);
Boolean outputUnigramsIfNoShingles = settings.getAsBoolean("output_unigrams_if_no_shingles", false);
String tokenSeparator = settings.get("token_separator", ShingleFilter.TOKEN_SEPARATOR);
factory = new Factory("shingle", minShingleSize, maxShingleSize, outputUnigrams, outputUnigramsIfNoShingles, tokenSeparator);
}
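// Illustrative filter settings this constructor reads (the "my_shingle" name
// and the values are hypothetical; the keys match the settings.get* calls above):
//   "my_shingle" : { "type" : "shingle", "min_shingle_size" : 2,
//                    "max_shingle_size" : 3, "output_unigrams" : false }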
@Override
public TokenStream create(TokenStream tokenStream) {
return factory.create(tokenStream);
}
public Factory getInnerFactory() {
return this.factory;
}
public static final class Factory implements TokenFilterFactory {
private final int maxShingleSize;
private final boolean outputUnigrams;
private final boolean outputUnigramsIfNoShingles;
private final String tokenSeparator;
private int minShingleSize;
private final String name;
public Factory(String name) {
this(name, ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, true, false, ShingleFilter.TOKEN_SEPARATOR);
}
Factory(String name, int minShingleSize, int maxShingleSize, boolean outputUnigrams, boolean outputUnigramsIfNoShingles, String tokenSeparator) {
this.maxShingleSize = maxShingleSize;
this.outputUnigrams = outputUnigrams;
this.outputUnigramsIfNoShingles = outputUnigramsIfNoShingles;
this.tokenSeparator = tokenSeparator;
this.minShingleSize = minShingleSize;
this.name = name;
}
public TokenStream create(TokenStream tokenStream) {
ShingleFilter filter = new ShingleFilter(tokenStream, minShingleSize, maxShingleSize);
filter.setOutputUnigrams(outputUnigrams);
filter.setOutputUnigramsIfNoShingles(outputUnigramsIfNoShingles);
filter.setTokenSeparator(tokenSeparator);
return filter;
}
public int getMaxShingleSize() {
return maxShingleSize;
}
public int getMinShingleSize() {
return minShingleSize;
}
public boolean getOutputUnigrams() {
return outputUnigrams;
}
public boolean getOutputUnigramsIfNoShingles() {
return outputUnigramsIfNoShingles;
}
@Override
public String name() {
return name;
}
}
} | 1no label
| src_main_java_org_elasticsearch_index_analysis_ShingleTokenFilterFactory.java |
1,380 | @RunWith(HazelcastSerialClassRunner.class)
@Category(SlowTest.class)
public class CustomPropertiesTest extends HibernateTestSupport {
@Test
public void test() {
Properties props = getDefaultProperties();
props.put(CacheEnvironment.SHUTDOWN_ON_STOP, "false");
SessionFactory sf = createSessionFactory(props);
HazelcastInstance hz = HazelcastAccessor.getHazelcastInstance(sf);
assertEquals(1, hz.getCluster().getMembers().size());
MapConfig cfg = hz.getConfig().getMapConfig("com.hazelcast.hibernate.entity.*");
assertNotNull(cfg);
assertEquals(30, cfg.getTimeToLiveSeconds());
assertEquals(50, cfg.getMaxSizeConfig().getSize());
sf.close();
assertTrue(hz.getLifecycleService().isRunning());
hz.shutdown();
}
@Test
public void testNativeClient() throws Exception {
HazelcastInstance main = Hazelcast.newHazelcastInstance(new ClasspathXmlConfig("hazelcast-custom.xml"));
Properties props = getDefaultProperties();
props.remove(CacheEnvironment.CONFIG_FILE_PATH_LEGACY);
props.setProperty(Environment.CACHE_REGION_FACTORY, HazelcastCacheRegionFactory.class.getName());
props.setProperty(CacheEnvironment.USE_NATIVE_CLIENT, "true");
props.setProperty(CacheEnvironment.NATIVE_CLIENT_GROUP, "dev-custom");
props.setProperty(CacheEnvironment.NATIVE_CLIENT_PASSWORD, "dev-pass");
props.setProperty(CacheEnvironment.NATIVE_CLIENT_ADDRESS, "localhost");
SessionFactory sf = createSessionFactory(props);
HazelcastInstance hz = HazelcastAccessor.getHazelcastInstance(sf);
assertTrue(hz instanceof HazelcastClientProxy);
assertEquals(1, main.getCluster().getMembers().size());
HazelcastClientProxy client = (HazelcastClientProxy) hz;
ClientConfig clientConfig = client.getClientConfig();
assertEquals("dev-custom", clientConfig.getGroupConfig().getName());
assertEquals("dev-pass", clientConfig.getGroupConfig().getPassword());
assertTrue(clientConfig.getNetworkConfig().isSmartRouting());
assertTrue(clientConfig.getNetworkConfig().isRedoOperation());
Hazelcast.newHazelcastInstance(new ClasspathXmlConfig("hazelcast-custom.xml"));
assertEquals(2, hz.getCluster().getMembers().size());
main.shutdown();
Thread.sleep(1000 * 3); // let the client reconnect
assertEquals(1, hz.getCluster().getMembers().size());
Session session = sf.openSession();
Transaction tx = session.beginTransaction();
session.save(new DummyEntity(1L, "dummy", 0, new Date()));
tx.commit();
session.close();
sf.close();
Hazelcast.shutdownAll();
}
@Test
public void testNamedInstance() {
Config config = new Config();
config.setInstanceName("hibernate");
HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
Properties props = getDefaultProperties();
props.setProperty(Environment.CACHE_REGION_FACTORY, HazelcastCacheRegionFactory.class.getName());
props.put(CacheEnvironment.HAZELCAST_INSTANCE_NAME, "hibernate");
props.put(CacheEnvironment.SHUTDOWN_ON_STOP, "false");
final SessionFactory sf = createSessionFactory(props);
assertTrue(hz == HazelcastAccessor.getHazelcastInstance(sf));
sf.close();
assertTrue(hz.getLifecycleService().isRunning());
hz.shutdown();
}
private Properties getDefaultProperties() {
Properties props = new Properties();
props.setProperty(Environment.CACHE_REGION_FACTORY, HazelcastCacheRegionFactory.class.getName());
props.setProperty(CacheEnvironment.CONFIG_FILE_PATH_LEGACY, "hazelcast-custom.xml");
return props;
}
} | 0true
| hazelcast-hibernate_hazelcast-hibernate4_src_test_java_com_hazelcast_hibernate_CustomPropertiesTest.java |
1,379 | public interface Custom {
String type();
interface Factory<T extends Custom> {
String type();
T readFrom(StreamInput in) throws IOException;
void writeTo(T customIndexMetaData, StreamOutput out) throws IOException;
T fromMap(Map<String, Object> map) throws IOException;
T fromXContent(XContentParser parser) throws IOException;
void toXContent(T customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException;
/**
* Merges first into second, with first taking precedence, i.e., if an entry
* exists in both first and second, the value from first prevails.
*/
T merge(T first, T second);
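// For instance (hypothetical): merge(newer, older) keeps newer's value
// wherever both snapshots of the custom metadata define the same entry.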
}
} | 0true
| src_main_java_org_elasticsearch_cluster_metadata_IndexMetaData.java |
3,545 | public static class Builder {
private final ImmutableList.Builder<String> copyToBuilders = ImmutableList.builder();
public Builder add(String field) {
copyToBuilders.add(field);
return this;
}
public CopyTo build() {
return new CopyTo(copyToBuilders.build());
}
} | 0true
| src_main_java_org_elasticsearch_index_mapper_core_AbstractFieldMapper.java |
3,644 | public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
Builder builder = geoShapeField(name);
for (Map.Entry<String, Object> entry : node.entrySet()) {
String fieldName = Strings.toUnderscoreCase(entry.getKey());
Object fieldNode = entry.getValue();
if (Names.TREE.equals(fieldName)) {
builder.tree(fieldNode.toString());
} else if (Names.TREE_LEVELS.equals(fieldName)) {
builder.treeLevels(Integer.parseInt(fieldNode.toString()));
} else if (Names.TREE_PRESISION.equals(fieldName)) {
builder.treeLevelsByDistance(DistanceUnit.parse(fieldNode.toString(), DistanceUnit.DEFAULT, DistanceUnit.DEFAULT));
} else if (Names.DISTANCE_ERROR_PCT.equals(fieldName)) {
builder.distanceErrorPct(Double.parseDouble(fieldNode.toString()));
} else if (Names.STRATEGY.equals(fieldName)) {
builder.strategy(fieldNode.toString());
}
}
return builder;
}
} | 0true
| src_main_java_org_elasticsearch_index_mapper_geo_GeoShapeFieldMapper.java |
3,861 | public class HasParentFilterParser implements FilterParser {
public static final String NAME = "has_parent";
@Inject
public HasParentFilterParser() {
}
@Override
public String[] names() {
return new String[]{NAME, Strings.toCamelCase(NAME)};
}
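// Sketch of the JSON this parser consumes (field names are taken from the
// parsing code below; the values are illustrative):
//   { "has_parent" : { "parent_type" : "blog", "query" : { ... }, "_cache" : true } }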
@Override
public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
Query query = null;
boolean queryFound = false;
String parentType = null;
boolean cache = false;
CacheKeyFilter.Key cacheKey = null;
String filterName = null;
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("query".equals(currentFieldName)) {
// TODO handle `query` element before `type` element...
String[] origTypes = QueryParseContext.setTypesWithPrevious(parentType == null ? null : new String[]{parentType});
try {
query = parseContext.parseInnerQuery();
queryFound = true;
} finally {
QueryParseContext.setTypes(origTypes);
}
} else if ("filter".equals(currentFieldName)) {
// TODO handle `filter` element before `type` element...
String[] origTypes = QueryParseContext.setTypesWithPrevious(parentType == null ? null : new String[]{parentType});
try {
Filter innerFilter = parseContext.parseInnerFilter();
query = new XConstantScoreQuery(innerFilter);
queryFound = true;
} finally {
QueryParseContext.setTypes(origTypes);
}
} else {
throw new QueryParsingException(parseContext.index(), "[has_parent] filter does not support [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if ("type".equals(currentFieldName) || "parent_type".equals(currentFieldName) || "parentType".equals(currentFieldName)) {
parentType = parser.text();
} else if ("_scope".equals(currentFieldName)) {
throw new QueryParsingException(parseContext.index(), "the [_scope] support in [has_parent] filter has been removed, use a filter as a facet_filter in the relevant global facet");
} else if ("_name".equals(currentFieldName)) {
filterName = parser.text();
} else if ("_cache".equals(currentFieldName)) {
cache = parser.booleanValue();
} else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
cacheKey = new CacheKeyFilter.Key(parser.text());
} else {
throw new QueryParsingException(parseContext.index(), "[has_parent] filter does not support [" + currentFieldName + "]");
}
}
}
if (!queryFound) {
throw new QueryParsingException(parseContext.index(), "[has_parent] filter requires 'query' field");
}
if (query == null) {
return null;
}
if (parentType == null) {
throw new QueryParsingException(parseContext.index(), "[has_parent] filter requires 'parent_type' field");
}
DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType);
if (parentDocMapper == null) {
throw new QueryParsingException(parseContext.index(), "[has_parent] filter configured 'parent_type' [" + parentType + "] is not a valid type");
}
// wrap the query with type query
query = new XFilteredQuery(query, parseContext.cacheFilter(parentDocMapper.typeFilter(), null));
Set<String> parentTypes = new HashSet<String>(5);
parentTypes.add(parentType);
for (DocumentMapper documentMapper : parseContext.mapperService()) {
ParentFieldMapper parentFieldMapper = documentMapper.parentFieldMapper();
if (parentFieldMapper.active()) {
DocumentMapper parentTypeDocumentMapper = parseContext.mapperService().documentMapper(parentFieldMapper.type());
if (parentTypeDocumentMapper == null) {
// Only add this if this parentFieldMapper's type (itself a parent) isn't a child of another parent.
parentTypes.add(parentFieldMapper.type());
}
}
}
Filter parentFilter;
if (parentTypes.size() == 1) {
DocumentMapper documentMapper = parseContext.mapperService().documentMapper(parentTypes.iterator().next());
parentFilter = parseContext.cacheFilter(documentMapper.typeFilter(), null);
} else {
XBooleanFilter parentsFilter = new XBooleanFilter();
for (String parentTypeStr : parentTypes) {
DocumentMapper documentMapper = parseContext.mapperService().documentMapper(parentTypeStr);
Filter filter = parseContext.cacheFilter(documentMapper.typeFilter(), null);
parentsFilter.add(filter, BooleanClause.Occur.SHOULD);
}
parentFilter = parentsFilter;
}
Filter childrenFilter = parseContext.cacheFilter(new NotFilter(parentFilter), null);
Query parentConstantScoreQuery = new ParentConstantScoreQuery(query, parentType, childrenFilter);
if (filterName != null) {
parseContext.addNamedFilter(filterName, new CustomQueryWrappingFilter(parentConstantScoreQuery));
}
boolean deleteByQuery = "delete_by_query".equals(SearchContext.current().source());
if (deleteByQuery) {
return new DeleteByQueryWrappingFilter(parentConstantScoreQuery);
} else {
return new CustomQueryWrappingFilter(parentConstantScoreQuery);
}
}
} | 1no label
| src_main_java_org_elasticsearch_index_query_HasParentFilterParser.java |
843 | return new IAnswer<Order>() {
@Override
public Order answer() throws Throwable {
Long orderId = (Long) EasyMock.getCurrentArguments()[0];
Order order = orders.get(orderId);
Iterator<OrderItem> orderItemItr = order.getOrderItems().listIterator();
while (orderItemItr.hasNext()) {
OrderItem item = orderItemItr.next();
if (item.getId().equals(EasyMock.getCurrentArguments()[1])) {
orderItemItr.remove();
}
}
for (FulfillmentGroup fg : order.getFulfillmentGroups()) {
Iterator<FulfillmentGroupItem> itr = fg.getFulfillmentGroupItems().iterator();
while (itr.hasNext()) {
if (itr.next().getOrderItem().getId().equals(EasyMock.getCurrentArguments()[1])) {
itr.remove();
}
}
}
return order;
}
}; | 0true
| core_broadleaf-framework_src_test_java_org_broadleafcommerce_core_offer_service_OfferDataItemProvider.java |
1,508 | public class FailedNodeRoutingTests extends ElasticsearchAllocationTestCase {
private final ESLogger logger = Loggers.getLogger(FailedNodeRoutingTests.class);
@Test
public void simpleFailedNodeTest() {
AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test1"))
.addAsNew(metaData.index("test2"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("start 4 nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logger.info("start all the primary shards, replicas will start initializing");
RoutingNodes routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
logger.info("start the replica shards");
routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(routingNodes.node("node4").numberOfShardsWithState(STARTED), equalTo(1));
logger.info("remove 2 nodes where primaries are allocated, reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.remove(routingTable.index("test1").shard(0).primaryShard().currentNodeId())
.remove(routingTable.index("test2").shard(0).primaryShard().currentNodeId())
)
.build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (RoutingNode routingNode : routingNodes) {
assertThat(routingNode.numberOfShardsWithState(STARTED), equalTo(1));
assertThat(routingNode.numberOfShardsWithState(INITIALIZING), equalTo(1));
}
}
@Test
public void simpleFailedNodeTestNoReassign() {
AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test1"))
.addAsNew(metaData.index("test2"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("start 4 nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logger.info("start all the primary shards, replicas will start initializing");
RoutingNodes routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
logger.info("start the replica shards");
routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(routingNodes.node("node4").numberOfShardsWithState(STARTED), equalTo(1));
logger.info("remove 2 nodes where primaries are allocated, reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.remove(routingTable.index("test1").shard(0).primaryShard().currentNodeId())
.remove(routingTable.index("test2").shard(0).primaryShard().currentNodeId())
)
.build();
prevRoutingTable = routingTable;
routingTable = strategy.rerouteWithNoReassign(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (RoutingNode routingNode : routingNodes) {
assertThat(routingNode.numberOfShardsWithState(STARTED), equalTo(1));
}
assertThat(routingNodes.unassigned().size(), equalTo(2));
}
} | 0true
| src_test_java_org_elasticsearch_cluster_routing_allocation_FailedNodeRoutingTests.java |
1,458 | public abstract class AbstractGeneralRegion<Cache extends RegionCache> extends AbstractHazelcastRegion<Cache>
implements GeneralDataRegion {
private final Cache cache;
protected AbstractGeneralRegion(final HazelcastInstance instance, final String name, final Properties props, final Cache cache) {
super(instance, name, props);
this.cache = cache;
}
public void evict(final Object key) throws CacheException {
try {
getCache().remove(key);
} catch (OperationTimeoutException ignored) {
}
}
public void evictAll() throws CacheException {
try {
getCache().clear();
} catch (OperationTimeoutException ignored) {
}
}
public Object get(final Object key) throws CacheException {
try {
return getCache().get(key);
} catch (OperationTimeoutException e) {
return null;
}
}
public void put(final Object key, final Object value) throws CacheException {
try {
getCache().put(key, value, null);
} catch (OperationTimeoutException ignored) {
}
}
public Cache getCache() {
return cache;
}
} | 0true
| hazelcast-hibernate_hazelcast-hibernate4_src_main_java_com_hazelcast_hibernate_region_AbstractGeneralRegion.java |
1,097 | public class DiscreteOrderItemRequest extends AbstractOrderItemRequest {
protected BundleOrderItem bundleOrderItem;
protected List<DiscreteOrderItemFeePrice> discreteOrderItemFeePrices = new ArrayList<DiscreteOrderItemFeePrice>();
public DiscreteOrderItemRequest() {
super();
}
public DiscreteOrderItemRequest(AbstractOrderItemRequest request) {
setCategory(request.getCategory());
setItemAttributes(request.getItemAttributes());
setPersonalMessage(request.getPersonalMessage());
setProduct(request.getProduct());
setQuantity(request.getQuantity());
setSku(request.getSku());
setOrder(request.getOrder());
setSalePriceOverride(request.getSalePriceOverride());
setRetailPriceOverride(request.getRetailPriceOverride());
}
@Override
public DiscreteOrderItemRequest clone() {
DiscreteOrderItemRequest returnRequest = new DiscreteOrderItemRequest();
copyProperties(returnRequest);
returnRequest.setDiscreteOrderItemFeePrices(discreteOrderItemFeePrices);
return returnRequest;
}
public BundleOrderItem getBundleOrderItem() {
return bundleOrderItem;
}
public void setBundleOrderItem(BundleOrderItem bundleOrderItem) {
this.bundleOrderItem = bundleOrderItem;
}
public List<DiscreteOrderItemFeePrice> getDiscreteOrderItemFeePrices() {
return discreteOrderItemFeePrices;
}
public void setDiscreteOrderItemFeePrices(
List<DiscreteOrderItemFeePrice> discreteOrderItemFeePrices) {
this.discreteOrderItemFeePrices = discreteOrderItemFeePrices;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_service_call_DiscreteOrderItemRequest.java |
2,135 | public abstract class IndexCommitDelegate extends IndexCommit {
protected final IndexCommit delegate;
/**
* Constructs a new {@link IndexCommit} that will delegate all calls
* to the provided delegate.
*
* @param delegate The delegate
*/
public IndexCommitDelegate(IndexCommit delegate) {
this.delegate = delegate;
}
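// A subclass would typically override only the calls it needs to intercept,
// e.g. (hypothetical) a snapshot commit overriding delete() to be a no-op,
// while every other method still falls through to the delegate.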
@Override
public String getSegmentsFileName() {
return delegate.getSegmentsFileName();
}
@Override
public Collection<String> getFileNames() throws IOException {
return delegate.getFileNames();
}
@Override
public Directory getDirectory() {
return delegate.getDirectory();
}
@Override
public void delete() {
delegate.delete();
}
@Override
public boolean isDeleted() {
return delegate.isDeleted();
}
@Override
public int getSegmentCount() {
return delegate.getSegmentCount();
}
@Override
public boolean equals(Object other) {
return delegate.equals(other);
}
@Override
public int hashCode() {
return delegate.hashCode();
}
@Override
public long getGeneration() {
return delegate.getGeneration();
}
@Override
public Map<String, String> getUserData() throws IOException {
return delegate.getUserData();
}
} | 0true
| src_main_java_org_elasticsearch_common_lucene_IndexCommitDelegate.java |
833 | public class AtomicReferenceProxy<E> extends AbstractDistributedObject<AtomicReferenceService>
implements AsyncAtomicReference<E> {
private final String name;
private final int partitionId;
public AtomicReferenceProxy(String name, NodeEngine nodeEngine, AtomicReferenceService service) {
super(nodeEngine, service);
this.name = name;
this.partitionId = nodeEngine.getPartitionService().getPartitionId(getNameAsPartitionAwareData());
}
private <E> InternalCompletableFuture<E> asyncInvoke(Operation operation, NodeEngine nodeEngine) {
try {
OperationService operationService = nodeEngine.getOperationService();
return operationService.invokeOnPartition(AtomicReferenceService.SERVICE_NAME, operation, partitionId);
} catch (Throwable throwable) {
throw rethrow(throwable);
}
}
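// Every operation below funnels through asyncInvoke, so each call executes on
// the single partition that owns this reference's name (see the constructor).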
@Override
public void alter(IFunction<E, E> function) {
asyncAlter(function).getSafely();
}
@Override
public InternalCompletableFuture<Void> asyncAlter(IFunction<E, E> function) {
isNotNull(function, "function");
NodeEngine nodeEngine = getNodeEngine();
Operation operation = new AlterOperation(name, nodeEngine.toData(function));
return asyncInvoke(operation, nodeEngine);
}
@Override
public E alterAndGet(IFunction<E, E> function) {
return asyncAlterAndGet(function).getSafely();
}
@Override
public InternalCompletableFuture<E> asyncAlterAndGet(IFunction<E, E> function) {
isNotNull(function, "function");
NodeEngine nodeEngine = getNodeEngine();
Operation operation = new AlterAndGetOperation(name, nodeEngine.toData(function));
return asyncInvoke(operation, nodeEngine);
}
@Override
public E getAndAlter(IFunction<E, E> function) {
return asyncGetAndAlter(function).getSafely();
}
@Override
public InternalCompletableFuture<E> asyncGetAndAlter(IFunction<E, E> function) {
isNotNull(function, "function");
NodeEngine nodeEngine = getNodeEngine();
Operation operation = new GetAndAlterOperation(name, nodeEngine.toData(function));
return asyncInvoke(operation, nodeEngine);
}
@Override
public <R> R apply(IFunction<E, R> function) {
return asyncApply(function).getSafely();
}
@Override
public <R> InternalCompletableFuture<R> asyncApply(IFunction<E, R> function) {
isNotNull(function, "function");
NodeEngine nodeEngine = getNodeEngine();
Operation operation = new ApplyOperation(name, nodeEngine.toData(function));
return asyncInvoke(operation, nodeEngine);
}
@Override
public void clear() {
asyncClear().getSafely();
}
@Override
public InternalCompletableFuture<Void> asyncClear() {
return asyncSet(null);
}
@Override
public boolean compareAndSet(E expect, E update) {
return asyncCompareAndSet(expect, update).getSafely();
}
@Override
public InternalCompletableFuture<Boolean> asyncCompareAndSet(E expect, E update) {
NodeEngine nodeEngine = getNodeEngine();
Operation operation = new CompareAndSetOperation(name, nodeEngine.toData(expect), nodeEngine.toData(update));
return asyncInvoke(operation, nodeEngine);
}
@Override
public E get() {
return asyncGet().getSafely();
}
@Override
public InternalCompletableFuture<E> asyncGet() {
Operation operation = new GetOperation(name);
return asyncInvoke(operation, getNodeEngine());
}
@Override
public boolean contains(E expected) {
return asyncContains(expected).getSafely();
}
@Override
public InternalCompletableFuture<Boolean> asyncContains(E value) {
NodeEngine nodeEngine = getNodeEngine();
Operation operation = new ContainsOperation(name, nodeEngine.toData(value));
return asyncInvoke(operation, nodeEngine);
}
@Override
public void set(E newValue) {
asyncSet(newValue).getSafely();
}
@Override
public InternalCompletableFuture<Void> asyncSet(E newValue) {
NodeEngine nodeEngine = getNodeEngine();
Operation operation = new SetOperation(name, nodeEngine.toData(newValue));
return asyncInvoke(operation, nodeEngine);
}
@Override
public E getAndSet(E newValue) {
return asyncGetAndSet(newValue).getSafely();
}
@Override
public InternalCompletableFuture<E> asyncGetAndSet(E newValue) {
NodeEngine nodeEngine = getNodeEngine();
Operation operation = new GetAndSetOperation(name, nodeEngine.toData(newValue));
return asyncInvoke(operation, nodeEngine);
}
@Override
public E setAndGet(E update) {
return asyncSetAndGet(update).getSafely();
}
@Override
public InternalCompletableFuture<E> asyncSetAndGet(E update) {
NodeEngine nodeEngine = getNodeEngine();
Operation operation = new SetAndGetOperation(name, nodeEngine.toData(update));
return asyncInvoke(operation, nodeEngine);
}
@Override
public boolean isNull() {
return asyncIsNull().getSafely();
}
@Override
public InternalCompletableFuture<Boolean> asyncIsNull() {
Operation operation = new IsNullOperation(name);
return asyncInvoke(operation, getNodeEngine());
}
@Override
public String getName() {
return name;
}
public int getPartitionId() {
return partitionId;
}
@Override
public String getServiceName() {
return AtomicReferenceService.SERVICE_NAME;
}
@Override
public String toString() {
return "IAtomicReference{" + "name='" + name + '\'' + '}';
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_concurrent_atomicreference_AtomicReferenceProxy.java |
157 | class RemoveAnnotionProposal extends CorrectionProposal {
private final Declaration dec;
private final String annotation;
RemoveAnnotionProposal(Declaration dec, String annotation,
int offset, TextFileChange change) {
super("Make '" + dec.getName() + "' non-" + annotation + " " +
(dec.getContainer() instanceof TypeDeclaration ?
"in '" + ((TypeDeclaration) dec.getContainer()).getName() + "'" : ""),
change, new Region(offset, 0));
this.dec = dec;
this.annotation = annotation;
}
@Override
public boolean equals(Object obj) {
if (obj instanceof RemoveAnnotionProposal) {
RemoveAnnotionProposal that = (RemoveAnnotionProposal) obj;
return that.dec.equals(dec) &&
that.annotation.equals(annotation);
}
else {
return super.equals(obj);
}
}
@Override
public int hashCode() {
return dec.hashCode();
}
static void addRemoveAnnotationProposal(Node node, String annotation,
Collection<ICompletionProposal> proposals, IProject project) {
Referenceable dec = Nodes.getReferencedDeclaration(node);
if (dec instanceof Declaration) {
addRemoveAnnotationProposal(node, annotation,
"Make Non" + annotation,
(Declaration) dec, proposals, project);
}
}
static void addRemoveAnnotationProposal(Node node, String annotation, String desc,
Declaration dec, Collection<ICompletionProposal> proposals, IProject project) {
if (dec!=null && dec.getName()!=null) {
for (PhasedUnit unit: getUnits(project)) {
if (dec.getUnit().equals(unit.getUnit())) {
//TODO: "object" declarations?
FindDeclarationNodeVisitor fdv =
new FindDeclarationNodeVisitor(dec);
getRootNode(unit).visit(fdv);
Tree.Declaration decNode =
(Tree.Declaration) fdv.getDeclarationNode();
if (decNode!=null) {
addRemoveAnnotationProposal(annotation, desc, dec,
proposals, unit, decNode);
}
break;
}
}
}
}
private static void addRemoveAnnotationProposal(String annotation,
String desc, Declaration dec,
Collection<ICompletionProposal> proposals,
PhasedUnit unit, Tree.Declaration decNode) {
IFile file = CeylonBuilder.getFile(unit);
TextFileChange change = new TextFileChange(desc, file);
change.setEdit(new MultiTextEdit());
Integer offset = decNode.getStartIndex();
for (Tree.Annotation a: decNode.getAnnotationList().getAnnotations()) {
Identifier id = ((Tree.BaseMemberExpression)a.getPrimary()).getIdentifier();
if (id!=null) {
if (id.getText().equals(annotation)) {
boolean args = a.getPositionalArgumentList()!=null &&
a.getPositionalArgumentList().getToken()!=null ||
a.getNamedArgumentList()!=null;
change.addEdit(new DeleteEdit(a.getStartIndex(),
a.getStopIndex()-a.getStartIndex()+1 +
(args?0:1))); //get rid of the trailing space
}
}
}
RemoveAnnotionProposal p =
new RemoveAnnotionProposal(dec, annotation, offset, change);
if (!proposals.contains(p)) {
proposals.add(p);
}
}
static void addRemoveAnnotationDecProposal(Collection<ICompletionProposal> proposals,
String annotation, IProject project, Node node) {
if (node instanceof Tree.Declaration) {
addRemoveAnnotationProposal(node, annotation, "Make Non" + annotation,
((Tree.Declaration) node).getDeclarationModel(),
proposals, project);
}
}
} | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_RemoveAnnotionProposal.java |
1,486 | public class RoutingTableValidation implements Serializable, Streamable {
private boolean valid = true;
private List<String> failures;
private Map<String, List<String>> indicesFailures;
public RoutingTableValidation() {
}
public boolean valid() {
return valid;
}
public List<String> allFailures() {
if (failures().isEmpty() && indicesFailures().isEmpty()) {
return ImmutableList.of();
}
List<String> allFailures = newArrayList(failures());
for (Map.Entry<String, List<String>> entry : indicesFailures().entrySet()) {
for (String failure : entry.getValue()) {
allFailures.add("Index [" + entry.getKey() + "]: " + failure);
}
}
return allFailures;
}
public List<String> failures() {
if (failures == null) {
return ImmutableList.of();
}
return failures;
}
public Map<String, List<String>> indicesFailures() {
if (indicesFailures == null) {
return ImmutableMap.of();
}
return indicesFailures;
}
public List<String> indexFailures(String index) {
if (indicesFailures == null) {
return ImmutableList.of();
}
List<String> indexFailures = indicesFailures.get(index);
if (indexFailures == null) {
return ImmutableList.of();
}
return indexFailures;
}
public void addFailure(String failure) {
valid = false;
if (failures == null) {
failures = newArrayList();
}
failures.add(failure);
}
public void addIndexFailure(String index, String failure) {
valid = false;
if (indicesFailures == null) {
indicesFailures = newHashMap();
}
List<String> indexFailures = indicesFailures.get(index);
if (indexFailures == null) {
indexFailures = Lists.newArrayList();
indicesFailures.put(index, indexFailures);
}
indexFailures.add(failure);
}
@Override
public String toString() {
return allFailures().toString();
}
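// Wire format (mirrors writeTo below): a boolean validity flag, a vInt-counted
// list of failure strings, then a vInt-counted map of index name to a
// vInt-counted list of failure strings.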
@Override
public void readFrom(StreamInput in) throws IOException {
valid = in.readBoolean();
int size = in.readVInt();
if (size == 0) {
failures = ImmutableList.of();
} else {
failures = Lists.newArrayListWithCapacity(size);
for (int i = 0; i < size; i++) {
failures.add(in.readString());
}
}
size = in.readVInt();
if (size == 0) {
indicesFailures = ImmutableMap.of();
} else {
indicesFailures = newHashMap();
for (int i = 0; i < size; i++) {
String index = in.readString();
int size2 = in.readVInt();
List<String> indexFailures = newArrayListWithCapacity(size2);
for (int j = 0; j < size2; j++) {
indexFailures.add(in.readString());
}
indicesFailures.put(index, indexFailures);
}
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(valid);
if (failures == null) {
out.writeVInt(0);
} else {
out.writeVInt(failures.size());
for (String failure : failures) {
out.writeString(failure);
}
}
if (indicesFailures == null) {
out.writeVInt(0);
} else {
out.writeVInt(indicesFailures.size());
for (Map.Entry<String, List<String>> entry : indicesFailures.entrySet()) {
out.writeString(entry.getKey());
out.writeVInt(entry.getValue().size());
for (String failure : entry.getValue()) {
out.writeString(failure);
}
}
}
}
} | 0true
| src_main_java_org_elasticsearch_cluster_routing_RoutingTableValidation.java |
514 | public class SystemTime {
private static final TimeSource defaultTimeSource = new DefaultTimeSource();
private static TimeSource globalTimeSource = null;
private static final InheritableThreadLocal<TimeSource> localTimeSource = new InheritableThreadLocal<TimeSource>();
public static TimeSource getTimeSource() {
TimeSource applicableTimeSource;
TimeSource localTS = localTimeSource.get();
if (localTS != null) {
applicableTimeSource = localTS;
} else if (globalTimeSource != null) {
applicableTimeSource = globalTimeSource;
} else {
applicableTimeSource = defaultTimeSource;
}
return applicableTimeSource;
}
public static void setGlobalTimeSource(final TimeSource globalTS) {
SystemTime.globalTimeSource = globalTS;
}
public static void resetGlobalTimeSource() {
setGlobalTimeSource(null);
}
public static void setLocalTimeSource(final TimeSource localTS) {
SystemTime.localTimeSource.set(localTS);
}
public static void resetLocalTimeSource() {
SystemTime.localTimeSource.remove();
}
public static void reset() {
resetGlobalTimeSource();
resetLocalTimeSource();
}
public static long asMillis() {
return asMillis(true);
}
public static long asMillis(boolean includeTime) {
if (includeTime) {
return getTimeSource().timeInMillis();
}
return asCalendar(includeTime).getTimeInMillis();
}
public static Date asDate() {
return asDate(true);
}
public static Date asDate(boolean includeTime) {
if (includeTime) {
return new Date(asMillis());
}
return asCalendar(includeTime).getTime();
}
public static Calendar asCalendar() {
return asCalendar(true);
}
public static Calendar asCalendar(boolean includeTime) {
return asCalendar(Locale.getDefault(), TimeZone.getDefault(), includeTime);
}
public static Calendar asCalendar(Locale locale) {
return asCalendar(locale, TimeZone.getDefault(), true);
}
public static Calendar asCalendar(TimeZone timeZone) {
return asCalendar(Locale.getDefault(), timeZone, true);
}
/**
* Returns false if the current time source is a {@link FixedTimeSource}, indicating that the
* time is being overridden, for example, to preview items at a later time.
*
* @return false if the time is overridden by a {@link FixedTimeSource}, true otherwise
*/
public static boolean shouldCacheDate() {
if (SystemTime.getTimeSource() instanceof FixedTimeSource) {
return false;
} else {
return true;
}
}
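// Usage sketch (hypothetical caller): only reuse a cached "now" when the
// clock is real, e.g.
//   Date now = SystemTime.shouldCacheDate() ? cachedNow : SystemTime.asDate();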
/**
* Many DAO objects in Broadleaf use a cached time concept. Since most entities have an active
* start and end date, the DAO may ask for a representation of "NOW" that is within some
* threshold.
*
* By default, most entities cache active-date queries for up to 10 seconds. These DAO
* classes can be overridden to lengthen or shorten this default.
*
* @return the cached date if it is still within {@code dateResolutionMillis} of now and the
* time source is not fixed; otherwise the current date
*/
public static Date getCurrentDateWithinTimeResolution(Date cachedDate, Long dateResolutionMillis) {
Date returnDate = SystemTime.asDate();
if (cachedDate == null || (SystemTime.getTimeSource() instanceof FixedTimeSource)) {
return returnDate;
}
if (returnDate.getTime() > (cachedDate.getTime() + dateResolutionMillis)) {
return returnDate;
} else {
return cachedDate;
}
}
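// Usage sketch (hypothetical DAO, using the 10-second resolution described above):
//   cachedDate = SystemTime.getCurrentDateWithinTimeResolution(cachedDate, 10000L);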
public static Calendar asCalendar(Locale locale, TimeZone timeZone, boolean includeTime) {
Calendar calendar = Calendar.getInstance(timeZone, locale);
calendar.setTimeInMillis(asMillis());
if (!includeTime) {
calendar.set(Calendar.HOUR_OF_DAY, 0);
calendar.set(Calendar.MINUTE, 0);
calendar.set(Calendar.SECOND, 0);
calendar.set(Calendar.MILLISECOND, 0);
}
return calendar;
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_time_SystemTime.java |
1,176 | public static class Tab {
public static class Name {
public static final String Address = "PaymentInfoImpl_Address_Tab";
public static final String Log = "PaymentInfoImpl_Log_Tab";
public static final String Advanced = "PaymentInfoImpl_Advanced_Tab";
}
public static class Order {
public static final int Address = 2000;
public static final int Log = 4000;
public static final int Advanced = 5000;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_payment_domain_PaymentInfoImpl.java |
3,503 | public final class MapperBuilders {
private MapperBuilders() {
}
public static DocumentMapper.Builder doc(String index, RootObjectMapper.Builder objectBuilder) {
return new DocumentMapper.Builder(index, null, objectBuilder);
}
public static DocumentMapper.Builder doc(String index, @Nullable Settings settings, RootObjectMapper.Builder objectBuilder) {
return new DocumentMapper.Builder(index, settings, objectBuilder);
}
public static SourceFieldMapper.Builder source() {
return new SourceFieldMapper.Builder();
}
public static IdFieldMapper.Builder id() {
return new IdFieldMapper.Builder();
}
public static RoutingFieldMapper.Builder routing() {
return new RoutingFieldMapper.Builder();
}
public static UidFieldMapper.Builder uid() {
return new UidFieldMapper.Builder();
}
public static SizeFieldMapper.Builder size() {
return new SizeFieldMapper.Builder();
}
public static VersionFieldMapper.Builder version() {
return new VersionFieldMapper.Builder();
}
public static TypeFieldMapper.Builder type() {
return new TypeFieldMapper.Builder();
}
public static IndexFieldMapper.Builder index() {
return new IndexFieldMapper.Builder();
}
public static TimestampFieldMapper.Builder timestamp() {
return new TimestampFieldMapper.Builder();
}
public static TTLFieldMapper.Builder ttl() {
return new TTLFieldMapper.Builder();
}
public static ParentFieldMapper.Builder parent() {
return new ParentFieldMapper.Builder();
}
public static BoostFieldMapper.Builder boost(String name) {
return new BoostFieldMapper.Builder(name);
}
public static AllFieldMapper.Builder all() {
return new AllFieldMapper.Builder();
}
public static AnalyzerMapper.Builder analyzer() {
return new AnalyzerMapper.Builder();
}
public static RootObjectMapper.Builder rootObject(String name) {
return new RootObjectMapper.Builder(name);
}
public static ObjectMapper.Builder object(String name) {
return new ObjectMapper.Builder(name);
}
public static BooleanFieldMapper.Builder booleanField(String name) {
return new BooleanFieldMapper.Builder(name);
}
public static StringFieldMapper.Builder stringField(String name) {
return new StringFieldMapper.Builder(name);
}
public static BinaryFieldMapper.Builder binaryField(String name) {
return new BinaryFieldMapper.Builder(name);
}
public static DateFieldMapper.Builder dateField(String name) {
return new DateFieldMapper.Builder(name);
}
public static IpFieldMapper.Builder ipField(String name) {
return new IpFieldMapper.Builder(name);
}
public static ShortFieldMapper.Builder shortField(String name) {
return new ShortFieldMapper.Builder(name);
}
public static ByteFieldMapper.Builder byteField(String name) {
return new ByteFieldMapper.Builder(name);
}
public static IntegerFieldMapper.Builder integerField(String name) {
return new IntegerFieldMapper.Builder(name);
}
public static TokenCountFieldMapper.Builder tokenCountField(String name) {
return new TokenCountFieldMapper.Builder(name);
}
public static LongFieldMapper.Builder longField(String name) {
return new LongFieldMapper.Builder(name);
}
public static FloatFieldMapper.Builder floatField(String name) {
return new FloatFieldMapper.Builder(name);
}
public static DoubleFieldMapper.Builder doubleField(String name) {
return new DoubleFieldMapper.Builder(name);
}
public static GeoPointFieldMapper.Builder geoPointField(String name) {
return new GeoPointFieldMapper.Builder(name);
}
public static GeoShapeFieldMapper.Builder geoShapeField(String name) {
return new GeoShapeFieldMapper.Builder(name);
}
public static CompletionFieldMapper.Builder completionField(String name) {
return new CompletionFieldMapper.Builder(name);
}
} | 0true
| src_main_java_org_elasticsearch_index_mapper_MapperBuilders.java |
1,439 | public class LocalRegionCache implements RegionCache {
protected final ITopic<Object> topic;
protected final MessageListener<Object> messageListener;
protected final ConcurrentMap<Object, Value> cache;
protected final Comparator versionComparator;
protected MapConfig config;
public LocalRegionCache(final String name, final HazelcastInstance hazelcastInstance,
final CacheDataDescription metadata) {
try {
config = hazelcastInstance != null ? hazelcastInstance.getConfig().findMapConfig(name) : null;
} catch (UnsupportedOperationException ignored) {
}
versionComparator = metadata != null && metadata.isVersioned() ? metadata.getVersionComparator() : null;
cache = new ConcurrentHashMap<Object, Value>();
messageListener = createMessageListener();
if (hazelcastInstance != null) {
topic = hazelcastInstance.getTopic(name);
topic.addMessageListener(messageListener);
} else {
topic = null;
}
}
public Object get(final Object key) {
final Value value = cache.get(key);
return value != null ? value.getValue() : null;
}
public boolean put(final Object key, final Object value, final Object currentVersion) {
final Value newValue = new Value(currentVersion, value, null, Clock.currentTimeMillis());
cache.put(key, newValue);
return true;
}
public boolean update(final Object key, final Object value, final Object currentVersion,
final Object previousVersion, final SoftLock lock) {
if (lock == LOCK_FAILURE) {
return false;
}
final Value currentValue = cache.get(key);
if (lock == LOCK_SUCCESS) {
if (currentValue != null && currentVersion != null
&& versionComparator.compare(currentVersion, currentValue.getVersion()) < 0) {
return false;
}
}
if (topic != null) {
topic.publish(createMessage(key, value, currentVersion));
}
cache.put(key, new Value(currentVersion, value, lock, Clock.currentTimeMillis()));
return true;
}
protected Object createMessage(final Object key, Object value, final Object currentVersion) {
return new Invalidation(key, currentVersion);
}
protected MessageListener<Object> createMessageListener() {
return new MessageListener<Object>() {
public void onMessage(final Message<Object> message) {
final Invalidation invalidation = (Invalidation) message.getMessageObject();
if (versionComparator != null) {
final Value value = cache.get(invalidation.getKey());
if (value != null) {
Object currentVersion = value.getVersion();
Object newVersion = invalidation.getVersion();
if (versionComparator.compare(newVersion, currentVersion) > 0) {
cache.remove(invalidation.getKey(), value);
}
}
} else {
cache.remove(invalidation.getKey());
}
}
};
}
public boolean remove(final Object key) {
final Value value = cache.remove(key);
if (value != null) {
if (topic != null) {
topic.publish(createMessage(key, null, value.getVersion()));
}
return true;
}
return false;
}
public SoftLock tryLock(final Object key, final Object version) {
final Value value = cache.get(key);
if (value == null) {
if (cache.putIfAbsent(key, new Value(version, null, LOCK_SUCCESS, Clock.currentTimeMillis())) == null) {
return LOCK_SUCCESS;
} else {
return LOCK_FAILURE;
}
} else {
if (version == null || versionComparator.compare(version, value.getVersion()) >= 0) {
if (cache.replace(key, value, value.createLockedValue(LOCK_SUCCESS))) {
return LOCK_SUCCESS;
} else {
return LOCK_FAILURE;
}
} else {
return LOCK_FAILURE;
}
}
}
public void unlock(final Object key, SoftLock lock) {
final Value value = cache.get(key);
if (value != null) {
final SoftLock currentLock = value.getLock();
if (currentLock == lock) {
cache.replace(key, value, value.createUnlockedValue());
}
}
}
public boolean contains(final Object key) {
return cache.containsKey(key);
}
public void clear() {
cache.clear();
}
public long size() {
return cache.size();
}
public long getSizeInMemory() {
return 0;
}
public Map asMap() {
return cache;
}
void cleanup() {
final int maxSize;
final long timeToLive;
if (config != null) {
maxSize = config.getMaxSizeConfig().getSize();
timeToLive = config.getTimeToLiveSeconds() * 1000L;
} else {
maxSize = 100000;
timeToLive = CacheEnvironment.getDefaultCacheTimeoutInMillis();
}
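// Evict only when a bound is configured: entries older than timeToLive are
// dropped, and if the map exceeds maxSize, the oldest ~20% beyond the limit
// are removed (oldest-first via the EvictionEntry ordering below).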
if ((maxSize > 0 && maxSize != Integer.MAX_VALUE) || timeToLive > 0) {
final Iterator<Entry<Object, Value>> iter = cache.entrySet().iterator();
SortedSet<EvictionEntry> entries = null;
final long now = Clock.currentTimeMillis();
while (iter.hasNext()) {
final Entry<Object, Value> e = iter.next();
final Object k = e.getKey();
final Value v = e.getValue();
if (v.getLock() == LOCK_SUCCESS) {
continue;
}
if (v.getCreationTime() + timeToLive < now) {
iter.remove();
} else if (maxSize > 0 && maxSize != Integer.MAX_VALUE) {
if (entries == null) {
entries = new TreeSet<EvictionEntry>();
}
entries.add(new EvictionEntry(k, v));
}
}
final int diff = cache.size() - maxSize;
final int k = diff >= 0 ? (diff + maxSize * 20 / 100) : 0;
if (k > 0 && entries != null) {
int i = 0;
for (EvictionEntry entry : entries) {
if (cache.remove(entry.key, entry.value)) {
if (++i == k) {
break;
}
}
}
}
}
}
private static class EvictionEntry implements Comparable<EvictionEntry> {
final Object key;
final Value value;
private EvictionEntry(final Object key, final Value value) {
this.key = key;
this.value = value;
}
public int compareTo(final EvictionEntry o) {
final long thisVal = this.value.getCreationTime();
final long anotherVal = o.value.getCreationTime();
return (thisVal < anotherVal ? -1 : (thisVal == anotherVal ? 0 : 1));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
EvictionEntry that = (EvictionEntry) o;
if (key != null ? !key.equals(that.key) : that.key != null) return false;
if (value != null ? !value.equals(that.value) : that.value != null) return false;
return true;
}
@Override
public int hashCode() {
return key != null ? key.hashCode() : 0;
}
}
private static final SoftLock LOCK_SUCCESS = new SoftLock() {
@Override
public String toString() {
return "Lock::Success";
}
};
private static final SoftLock LOCK_FAILURE = new SoftLock() {
@Override
public String toString() {
return "Lock::Failure";
}
};
} | 1no label
| hazelcast-hibernate_hazelcast-hibernate3_src_main_java_com_hazelcast_hibernate_local_LocalRegionCache.java |
1502 | @SuppressWarnings("SynchronizationOnStaticField")
@PrivateApi
public final class HazelcastInstanceFactory {
private static final ConcurrentMap<String, InstanceFuture> INSTANCE_MAP
= new ConcurrentHashMap<String, InstanceFuture>(5);
private static final AtomicInteger FACTORY_ID_GEN = new AtomicInteger();
private HazelcastInstanceFactory() {
}
public static Set<HazelcastInstance> getAllHazelcastInstances() {
Set<HazelcastInstance> result = new HashSet<HazelcastInstance>();
for (InstanceFuture f : INSTANCE_MAP.values()) {
result.add(f.get());
}
return result;
}
public static HazelcastInstance getHazelcastInstance(String instanceName) {
InstanceFuture instanceFuture = INSTANCE_MAP.get(instanceName);
if (instanceFuture == null) {
return null;
}
try {
return instanceFuture.get();
} catch (IllegalStateException t) {
return null;
}
}
public static HazelcastInstance getOrCreateHazelcastInstance(Config config) {
if (config == null) {
throw new NullPointerException("config can't be null");
}
String name = config.getInstanceName();
hasText(name, "instanceName");
InstanceFuture future = INSTANCE_MAP.get(name);
if (future != null) {
return future.get();
}
future = new InstanceFuture();
InstanceFuture found = INSTANCE_MAP.putIfAbsent(name, future);
if (found != null) {
return found.get();
}
try {
HazelcastInstanceProxy hz = constructHazelcastInstance(config, name, new DefaultNodeContext());
future.set(hz);
return hz;
} catch (Throwable t) {
INSTANCE_MAP.remove(name, future);
future.setFailure(t);
throw ExceptionUtil.rethrow(t);
}
}
public static HazelcastInstance newHazelcastInstance(Config config) {
if (config == null) {
config = new XmlConfigBuilder().build();
}
return newHazelcastInstance(config, config.getInstanceName(), new DefaultNodeContext());
}
private static String createInstanceName(Config config) {
return "_hzInstance_" + FACTORY_ID_GEN.incrementAndGet() + "_" + config.getGroupConfig().getName();
}
public static HazelcastInstance newHazelcastInstance(Config config, String instanceName,
NodeContext nodeContext) {
if (config == null) {
config = new XmlConfigBuilder().build();
}
String name = instanceName;
if (name == null || name.trim().length() == 0) {
name = createInstanceName(config);
}
InstanceFuture future = new InstanceFuture();
if (INSTANCE_MAP.putIfAbsent(name, future) != null) {
throw new DuplicateInstanceNameException("HazelcastInstance with name '" + name + "' already exists!");
}
try {
HazelcastInstanceProxy hz = constructHazelcastInstance(config, name, nodeContext);
future.set(hz);
return hz;
} catch (Throwable t) {
INSTANCE_MAP.remove(name, future);
future.setFailure(t);
throw ExceptionUtil.rethrow(t);
}
}
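// The context classloader is swapped for the duration of construction: if the calling thread has
// none, Hazelcast's own classloader is installed so that lookups performed during startup resolve
// against it, and the original (possibly null) loader is restored in the finally block.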
private static HazelcastInstanceProxy constructHazelcastInstance(Config config, String instanceName,
NodeContext nodeContext) {
final ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
HazelcastInstanceProxy proxy;
try {
if (classLoader == null) {
Thread.currentThread().setContextClassLoader(HazelcastInstanceFactory.class.getClassLoader());
}
HazelcastInstanceImpl hazelcastInstance = new HazelcastInstanceImpl(instanceName, config, nodeContext);
OutOfMemoryErrorDispatcher.register(hazelcastInstance);
proxy = new HazelcastInstanceProxy(hazelcastInstance);
final Node node = hazelcastInstance.node;
final boolean firstMember = isFirstMember(node);
final int initialWaitSeconds = node.groupProperties.INITIAL_WAIT_SECONDS.getInteger();
if (initialWaitSeconds > 0) {
hazelcastInstance.logger.info("Waiting "
+ initialWaitSeconds + " seconds before completing HazelcastInstance startup...");
try {
Thread.sleep(TimeUnit.SECONDS.toMillis(initialWaitSeconds));
if (firstMember) {
node.partitionService.firstArrangement();
} else {
Thread.sleep(TimeUnit.SECONDS.toMillis(4));
}
} catch (InterruptedException ignored) {
}
}
awaitMinimalClusterSize(hazelcastInstance, node, firstMember);
hazelcastInstance.lifecycleService.fireLifecycleEvent(STARTED);
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
} finally {
Thread.currentThread().setContextClassLoader(classLoader);
}
return proxy;
}
private static boolean isFirstMember(Node node) {
final Iterator<Member> iter = node.getClusterService().getMembers().iterator();
return (iter.hasNext() && iter.next().localMember());
}
private static void awaitMinimalClusterSize(HazelcastInstanceImpl hazelcastInstance, Node node, boolean firstMember)
throws InterruptedException {
final int initialMinClusterSize = node.groupProperties.INITIAL_MIN_CLUSTER_SIZE.getInteger();
while (node.getClusterService().getSize() < initialMinClusterSize) {
try {
hazelcastInstance.logger.info("HazelcastInstance waiting for cluster size of " + initialMinClusterSize);
//noinspection BusyWait
Thread.sleep(TimeUnit.SECONDS.toMillis(1));
} catch (InterruptedException ignored) {
}
}
if (initialMinClusterSize > 1) {
if (firstMember) {
node.partitionService.firstArrangement();
} else {
Thread.sleep(TimeUnit.SECONDS.toMillis(3));
}
hazelcastInstance.logger.info("HazelcastInstance starting after waiting for cluster size of "
+ initialMinClusterSize);
}
}
public static void shutdownAll() {
final List<HazelcastInstanceProxy> instances = new LinkedList<HazelcastInstanceProxy>();
for (InstanceFuture f : INSTANCE_MAP.values()) {
try {
HazelcastInstanceProxy instanceProxy = f.get();
instances.add(instanceProxy);
} catch (RuntimeException ignore) {
}
}
INSTANCE_MAP.clear();
OutOfMemoryErrorDispatcher.clear();
ManagementService.shutdownAll();
Collections.sort(instances, new Comparator<HazelcastInstanceProxy>() {
public int compare(HazelcastInstanceProxy o1, HazelcastInstanceProxy o2) {
return o1.getName().compareTo(o2.getName());
}
});
for (HazelcastInstanceProxy proxy : instances) {
proxy.getLifecycleService().shutdown();
proxy.original = null;
}
}
static Map<MemberImpl, HazelcastInstanceImpl> getInstanceImplMap() {
final Map<MemberImpl, HazelcastInstanceImpl> map = new HashMap<MemberImpl, HazelcastInstanceImpl>();
for (InstanceFuture f : INSTANCE_MAP.values()) {
try {
HazelcastInstanceProxy instanceProxy = f.get();
final HazelcastInstanceImpl impl = instanceProxy.original;
if (impl != null) {
map.put(impl.node.getLocalMember(), impl);
}
} catch (RuntimeException ignore) {
}
}
return map;
}
static void remove(HazelcastInstanceImpl instance) {
OutOfMemoryErrorDispatcher.deregister(instance);
InstanceFuture future = INSTANCE_MAP.remove(instance.getName());
if (future != null) {
future.get().original = null;
}
if (INSTANCE_MAP.size() == 0) {
ManagementService.shutdownAll();
}
}
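// A minimal hand-rolled future built on wait/notify rather than java.util.concurrent: get()
// blocks until set(...) or setFailure(...) signals, re-asserts the thread's interrupt status if
// an InterruptedException was swallowed while waiting, and rethrows a recorded construction
// failure wrapped in IllegalStateException.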
private static class InstanceFuture {
private volatile HazelcastInstanceProxy hz;
private volatile Throwable throwable;
HazelcastInstanceProxy get() {
if (hz != null) {
return hz;
}
boolean restoreInterrupt = false;
synchronized (this) {
while (hz == null && throwable == null) {
try {
wait();
} catch (InterruptedException ignore) {
restoreInterrupt = true;
}
}
}
if (restoreInterrupt) {
Thread.currentThread().interrupt();
}
if (hz != null) {
return hz;
}
throw new IllegalStateException(throwable);
}
void set(HazelcastInstanceProxy proxy) {
synchronized (this) {
this.hz = proxy;
notifyAll();
}
}
public void setFailure(Throwable throwable) {
synchronized (this) {
this.throwable = throwable;
notifyAll();
}
}
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_instance_HazelcastInstanceFactory.java |
1383 | public class OBuffer implements Externalizable {
public byte[] buffer;
/**
* Constructor used by serialization.
*/
public OBuffer() {
}
public OBuffer(final byte[] buffer) {
this.buffer = buffer;
}
public void readExternal(final ObjectInput iInput) throws IOException, ClassNotFoundException {
final int bufferLength = iInput.readInt();
if (bufferLength > 0) {
buffer = new byte[bufferLength];
// read may return fewer bytes than requested, so loop until the buffer is filled
for (int pos = 0, bytesRead = 0; pos < bufferLength; pos += bytesRead) {
bytesRead = iInput.read(buffer, pos, buffer.length - pos);
if (bytesRead < 0)
throw new IOException("unexpected end of stream while reading buffer content");
}
} else
buffer = null;
}
public void writeExternal(final ObjectOutput iOutput) throws IOException {
final int bufferLength = buffer != null ? buffer.length : 0;
iOutput.writeInt(bufferLength);
if (bufferLength > 0)
iOutput.write(buffer);
}
@Override
public String toString() {
return "size:" + (buffer != null ? buffer.length : "empty");
}
public byte[] getBuffer() {
return buffer;
}
public void setBuffer(final byte[] buffer) {
this.buffer = buffer;
}
@Override
public boolean equals(final Object o) {
if (!(o instanceof OBuffer))
return false;
return OIOUtils.equals(buffer, ((OBuffer) o).buffer);
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_type_OBuffer.java |
1179 | public static class EchoServerHandler extends SimpleChannelUpstreamHandler {
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
e.getChannel().write(e.getMessage());
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
// Close the connection when an exception is raised.
e.getCause().printStackTrace();
e.getChannel().close();
}
} | 0true
| src_test_java_org_elasticsearch_benchmark_transport_netty_NettyEchoBenchmark.java |
706 | "Total memory used by Disk Cache", METRIC_TYPE.SIZE, new OProfilerHookValue() {
@Override
public Object getValue() {
return (am.size() + a1in.size()) * pageSize;
}
}, profiler.getDatabaseMetric(null, "diskCache.totalMemory")); | 0true
| core_src_main_java_com_orientechnologies_orient_core_index_hashindex_local_cache_OReadWriteDiskCache.java |
2818 | public class ArabicStemTokenFilterFactory extends AbstractTokenFilterFactory {
@Inject
public ArabicStemTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
super(index, indexSettings, name, settings);
}
@Override
public TokenStream create(TokenStream tokenStream) {
return new ArabicStemFilter(tokenStream);
}
} | 0true
| src_main_java_org_elasticsearch_index_analysis_ArabicStemTokenFilterFactory.java |
1220 | public class PageCacheRecycler extends AbstractComponent {
public static final String TYPE = "page.type";
public static final String LIMIT_HEAP = "page.limit.heap";
public static final String LIMIT_PER_THREAD = "page.limit.per_thread";
public static final String WEIGHT = "page.weight";
private final Recycler<byte[]> bytePage;
private final Recycler<int[]> intPage;
private final Recycler<long[]> longPage;
private final Recycler<double[]> doublePage;
private final Recycler<Object[]> objectPage;
public void close() {
bytePage.close();
intPage.close();
longPage.close();
doublePage.close();
objectPage.close();
}
private static int maximumSearchThreadPoolSize(ThreadPool threadPool, Settings settings) {
ThreadPool.Info searchThreadPool = threadPool.info(ThreadPool.Names.SEARCH);
assert searchThreadPool != null;
final int maxSize = searchThreadPool.getMax();
if (maxSize <= 0) {
// happens with cached thread pools, let's assume there are at most 3x ${number of processors} threads
return 3 * EsExecutors.boundedNumberOfProcessors(settings);
} else {
return maxSize;
}
}
// return the maximum number of pages that may be cached depending on
// - limit: the total amount of memory available
// - pageSize: the size of a single page
// - weight: the weight for this data type
// - totalWeight: the sum of all weights
private static int maxCount(long limit, long pageSize, double weight, double totalWeight) {
return (int) (weight / totalWeight * limit / pageSize);
}
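// Worked example: with the default weights (1 + 1 + 1 + 1 + 0.1 = 4.1), a 64 MB limit and a
// 16 KB byte page (the page size assumed here), each primitive page type may cache roughly
// (1 / 4.1) * 67108864 / 16384, i.e. about 999 pages; object pages get about a tenth of that.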
@Inject
public PageCacheRecycler(Settings settings, ThreadPool threadPool) {
super(settings);
final Type type = Type.parse(componentSettings.get(TYPE));
final long limit = componentSettings.getAsMemory(LIMIT_HEAP, "10%").bytes();
final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
final int searchThreadPoolSize = maximumSearchThreadPoolSize(threadPool, settings);
// We have a global amount of memory that we need to divide across data types.
// Since some types are more useful than other ones we give them different weights.
// Trying to store all of them in a single stack would be problematic because eg.
// a work load could fill the recycler with only byte[] pages and then another
// workload that would work with double[] pages couldn't recycle them because there
// is no space left in the stack/queue. LRU/LFU policies are not an option either
// because they would make obtain/release too costly: we really need constant-time
// operations.
// Ultimately a better solution would be to only store one kind of data and have the
// ability to interpret it either as a source of bytes, doubles, longs, etc., e.g. thanks
// to direct ByteBuffers or sun.misc.Unsafe on a byte[] but this would have other issues
// that would need to be addressed such as garbage collection of native memory or safety
// of Unsafe writes.
final double bytesWeight = componentSettings.getAsDouble(WEIGHT + ".bytes", 1d);
final double intsWeight = componentSettings.getAsDouble(WEIGHT + ".ints", 1d);
final double longsWeight = componentSettings.getAsDouble(WEIGHT + ".longs", 1d);
final double doublesWeight = componentSettings.getAsDouble(WEIGHT + ".doubles", 1d);
// object pages are less useful to us so we give them a lower weight by default
final double objectsWeight = componentSettings.getAsDouble(WEIGHT + ".objects", 0.1d);
final double totalWeight = bytesWeight + intsWeight + longsWeight + doublesWeight + objectsWeight;
bytePage = build(type, maxCount(limit, BigArrays.BYTE_PAGE_SIZE, bytesWeight, totalWeight), searchThreadPoolSize, availableProcessors, new Recycler.C<byte[]>() {
@Override
public byte[] newInstance(int sizing) {
return new byte[BigArrays.BYTE_PAGE_SIZE];
}
@Override
public void clear(byte[] value) {}
});
intPage = build(type, maxCount(limit, BigArrays.INT_PAGE_SIZE, intsWeight, totalWeight), searchThreadPoolSize, availableProcessors, new Recycler.C<int[]>() {
@Override
public int[] newInstance(int sizing) {
return new int[BigArrays.INT_PAGE_SIZE];
}
@Override
public void clear(int[] value) {}
});
longPage = build(type, maxCount(limit, BigArrays.LONG_PAGE_SIZE, longsWeight, totalWeight), searchThreadPoolSize, availableProcessors, new Recycler.C<long[]>() {
@Override
public long[] newInstance(int sizing) {
return new long[BigArrays.LONG_PAGE_SIZE];
}
@Override
public void clear(long[] value) {}
});
doublePage = build(type, maxCount(limit, BigArrays.DOUBLE_PAGE_SIZE, doublesWeight, totalWeight), searchThreadPoolSize, availableProcessors, new Recycler.C<double[]>() {
@Override
public double[] newInstance(int sizing) {
return new double[BigArrays.DOUBLE_PAGE_SIZE];
}
@Override
public void clear(double[] value) {}
});
objectPage = build(type, maxCount(limit, BigArrays.OBJECT_PAGE_SIZE, objectsWeight, totalWeight), searchThreadPoolSize, availableProcessors, new Recycler.C<Object[]>() {
@Override
public Object[] newInstance(int sizing) {
return new Object[BigArrays.OBJECT_PAGE_SIZE];
}
@Override
public void clear(Object[] value) {
Arrays.fill(value, null); // we need to remove the strong refs on the objects stored in the array
}
});
}
public Recycler.V<byte[]> bytePage(boolean clear) {
final Recycler.V<byte[]> v = bytePage.obtain();
if (v.isRecycled() && clear) {
Arrays.fill(v.v(), (byte) 0);
}
return v;
}
public Recycler.V<int[]> intPage(boolean clear) {
final Recycler.V<int[]> v = intPage.obtain();
if (v.isRecycled() && clear) {
Arrays.fill(v.v(), 0);
}
return v;
}
public Recycler.V<long[]> longPage(boolean clear) {
final Recycler.V<long[]> v = longPage.obtain();
if (v.isRecycled() && clear) {
Arrays.fill(v.v(), 0L);
}
return v;
}
public Recycler.V<double[]> doublePage(boolean clear) {
final Recycler.V<double[]> v = doublePage.obtain();
if (v.isRecycled() && clear) {
Arrays.fill(v.v(), 0d);
}
return v;
}
public Recycler.V<Object[]> objectPage() {
// object pages are cleared on release anyway
return objectPage.obtain();
}
private static <T> Recycler<T> build(Type type, int limit, int estimatedThreadPoolSize, int availableProcessors, Recycler.C<T> c) {
final Recycler<T> recycler;
if (limit == 0) {
recycler = none(c);
} else {
recycler = type.build(c, limit, estimatedThreadPoolSize, availableProcessors);
}
return recycler;
}
public static enum Type {
SOFT_THREAD_LOCAL {
@Override
<T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
return threadLocal(softFactory(dequeFactory(c, limit / estimatedThreadPoolSize)));
}
},
THREAD_LOCAL {
@Override
<T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
return threadLocal(dequeFactory(c, limit / estimatedThreadPoolSize));
}
},
QUEUE {
@Override
<T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
return concurrentDeque(c, limit);
}
},
SOFT_CONCURRENT {
@Override
<T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
return concurrent(softFactory(dequeFactory(c, limit / availableProcessors)), availableProcessors);
}
},
CONCURRENT {
@Override
<T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
return concurrent(dequeFactory(c, limit / availableProcessors), availableProcessors);
}
},
NONE {
@Override
<T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
return none(c);
}
};
public static Type parse(String type) {
if (Strings.isNullOrEmpty(type)) {
return SOFT_CONCURRENT;
}
try {
return Type.valueOf(type.toUpperCase(Locale.ROOT));
} catch (IllegalArgumentException e) {
throw new ElasticsearchIllegalArgumentException("no type support [" + type + "]");
}
}
abstract <T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors);
}
} | 0true
| src_main_java_org_elasticsearch_cache_recycler_PageCacheRecycler.java |
292 | public class PrimaryMissingActionException extends ElasticsearchException {
public PrimaryMissingActionException(String message) {
super(message);
}
} | 0true
| src_main_java_org_elasticsearch_action_PrimaryMissingActionException.java |
177 | public static final List<EntryMetaData> IDENTIFYING_METADATA = new ArrayList<EntryMetaData>(3) {{
for (EntryMetaData meta : values()) if (meta.isIdentifying()) add(meta);
}}; | 0true
| titan-core_src_main_java_com_thinkaurelius_titan_diskstorage_EntryMetaData.java |
1704 | final Iterator<T> intersection = new Iterator<T>() {
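// Lazy set intersection: hasNext() advances the first container's iterator until it finds a key
// also present in container2, caching it in 'current' for the following next() call. Callers
// must therefore invoke hasNext() before each next(), since next() alone never advances.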
T current;
@Override
public boolean hasNext() {
if (iterator.hasNext()) {
do {
T next = iterator.next().value;
if (container2.contains(next)) {
current = next;
return true;
}
} while (iterator.hasNext());
}
return false;
}
@Override
public T next() {
return current;
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
}; | 0true
| src_main_java_org_elasticsearch_common_collect_HppcMaps.java |
132 | @SuppressWarnings("restriction")
public class OUnsafeBinaryConverter implements OBinaryConverter {
public static final OUnsafeBinaryConverter INSTANCE = new OUnsafeBinaryConverter();
private static final Unsafe theUnsafe;
private static final long BYTE_ARRAY_OFFSET;
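// The Unsafe instance is fetched reflectively inside a privileged action, because the public
// Unsafe.getUnsafe() factory rejects callers that are not loaded by the bootstrap classloader.
// BYTE_ARRAY_OFFSET is the base offset of byte[] storage and is added to every index below so
// the raw memory accesses land on the array's elements.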
static {
theUnsafe = (Unsafe) AccessController.doPrivileged(new PrivilegedAction<Object>() {
public Object run() {
try {
Field f = Unsafe.class.getDeclaredField("theUnsafe");
boolean wasAccessible = f.isAccessible();
f.setAccessible(true);
try {
return f.get(null);
} finally {
f.setAccessible(wasAccessible);
}
} catch (NoSuchFieldException e) {
throw new Error(e);
} catch (IllegalAccessException e) {
throw new Error(e);
}
}
});
BYTE_ARRAY_OFFSET = theUnsafe.arrayBaseOffset(byte[].class);
}
public void putShort(byte[] buffer, int index, short value, ByteOrder byteOrder) {
if (!byteOrder.equals(ByteOrder.nativeOrder()))
value = Short.reverseBytes(value);
theUnsafe.putShort(buffer, index + BYTE_ARRAY_OFFSET, value);
}
public short getShort(byte[] buffer, int index, ByteOrder byteOrder) {
short result = theUnsafe.getShort(buffer, index + BYTE_ARRAY_OFFSET);
if (!byteOrder.equals(ByteOrder.nativeOrder()))
result = Short.reverseBytes(result);
return result;
}
public void putInt(byte[] buffer, int pointer, int value, ByteOrder byteOrder) {
final long position = pointer + BYTE_ARRAY_OFFSET;
if (!byteOrder.equals(ByteOrder.nativeOrder()))
value = Integer.reverseBytes(value);
theUnsafe.putInt(buffer, position, value);
}
public int getInt(byte[] buffer, int pointer, ByteOrder byteOrder) {
final long position = pointer + BYTE_ARRAY_OFFSET;
int result = theUnsafe.getInt(buffer, position);
if (!byteOrder.equals(ByteOrder.nativeOrder()))
result = Integer.reverseBytes(result);
return result;
}
public void putLong(byte[] buffer, int index, long value, ByteOrder byteOrder) {
if (!byteOrder.equals(ByteOrder.nativeOrder()))
value = Long.reverseBytes(value);
theUnsafe.putLong(buffer, index + BYTE_ARRAY_OFFSET, value);
}
public long getLong(byte[] buffer, int index, ByteOrder byteOrder) {
long result = theUnsafe.getLong(buffer, index + BYTE_ARRAY_OFFSET);
if (!byteOrder.equals(ByteOrder.nativeOrder()))
result = Long.reverseBytes(result);
return result;
}
public void putChar(byte[] buffer, int index, char character, ByteOrder byteOrder) {
if (!byteOrder.equals(ByteOrder.nativeOrder()))
character = Character.reverseBytes(character);
theUnsafe.putChar(buffer, index + BYTE_ARRAY_OFFSET, character);
}
public char getChar(byte[] buffer, int index, ByteOrder byteOrder) {
char result = theUnsafe.getChar(buffer, index + BYTE_ARRAY_OFFSET);
if (!byteOrder.equals(ByteOrder.nativeOrder()))
result = Character.reverseBytes(result);
return result;
}
public boolean nativeAccelerationUsed() {
return true;
}
} | 0true
| commons_src_main_java_com_orientechnologies_common_serialization_OUnsafeBinaryConverter.java |
2686 | class GatewayRecoveryListener implements Gateway.GatewayStateRecoveredListener {
private final CountDownLatch latch;
GatewayRecoveryListener(CountDownLatch latch) {
this.latch = latch;
}
@Override
public void onSuccess(final ClusterState recoveredState) {
logger.trace("successful state recovery, importing cluster state...");
clusterService.submitStateUpdateTask("local-gateway-elected-state", new ProcessedClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
assert currentState.metaData().indices().isEmpty();
// remove the block, since we recovered from gateway
ClusterBlocks.Builder blocks = ClusterBlocks.builder()
.blocks(currentState.blocks())
.blocks(recoveredState.blocks())
.removeGlobalBlock(STATE_NOT_RECOVERED_BLOCK);
MetaData.Builder metaDataBuilder = MetaData.builder(recoveredState.metaData());
// automatically generate a UUID for the metadata if we need to
metaDataBuilder.generateUuidIfNeeded();
if (recoveredState.metaData().settings().getAsBoolean(MetaData.SETTING_READ_ONLY, false) || currentState.metaData().settings().getAsBoolean(MetaData.SETTING_READ_ONLY, false)) {
blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
}
for (IndexMetaData indexMetaData : recoveredState.metaData()) {
metaDataBuilder.put(indexMetaData, false);
blocks.addBlocks(indexMetaData);
}
// update the state to reflect the new metadata and routing
ClusterState updatedState = ClusterState.builder(currentState)
.blocks(blocks)
.metaData(metaDataBuilder)
.build();
// initialize all index routing tables as empty
RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable());
for (ObjectCursor<IndexMetaData> cursor : updatedState.metaData().indices().values()) {
routingTableBuilder.addAsRecovery(cursor.value);
}
// start with 0 based versions for routing table
routingTableBuilder.version(0);
// now, reroute
RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(routingTableBuilder).build());
return ClusterState.builder(updatedState).routingResult(routingResult).build();
}
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
logger.info("recovered [{}] indices into cluster_state", newState.metaData().indices().size());
latch.countDown();
}
});
}
@Override
public void onFailure(String message) {
recovered.set(false);
scheduledRecovery.set(false);
// don't remove the block here, we don't want to allow anything in such a case
logger.info("metadata state not restored, reason: {}", message);
}
} | 0true
| src_main_java_org_elasticsearch_gateway_GatewayService.java |
1498 | public class SkuLookupTag extends BodyTagSupport {
private static final long serialVersionUID = 1L;
private String var;
private long skuId;
@Override
public int doStartTag() throws JspException {
WebApplicationContext applicationContext = WebApplicationContextUtils.getWebApplicationContext(pageContext.getServletContext());
CatalogService catalogService = (CatalogService) applicationContext.getBean("blCatalogService");
pageContext.setAttribute(var, catalogService.findSkuById(skuId));
return EVAL_PAGE;
}
public String getVar() {
return var;
}
public void setVar(String var) {
this.var = var;
}
public long getSkuId() {
return skuId;
}
public void setSkuId(long skuId) {
this.skuId = skuId;
}
} | 0true
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_layout_tags_SkuLookupTag.java |
1421 | @XmlRootElement(name = "sku")
@XmlAccessorType(value = XmlAccessType.FIELD)
public class SkuWrapper extends BaseWrapper implements APIWrapper<Sku> {
@XmlElement
protected Long id;
@XmlElement
@XmlJavaTypeAdapter(ISO8601DateAdapter.class)
protected Date activeStartDate;
@XmlElement
@XmlJavaTypeAdapter(ISO8601DateAdapter.class)
protected Date activeEndDate;
@XmlElement
protected String name;
@XmlElement
protected Boolean active;
@XmlElement
protected String description;
@XmlElement
protected Money retailPrice;
@XmlElement
protected Money salePrice;
@XmlElement
protected WeightWrapper weight;
@XmlElement
protected DimensionWrapper dimension;
@Override
public void wrapDetails(Sku model, HttpServletRequest request) {
this.id = model.getId();
this.activeStartDate = model.getActiveStartDate();
this.activeEndDate = model.getActiveEndDate();
this.name = model.getName();
this.description = model.getDescription();
this.retailPrice = model.getRetailPrice();
this.salePrice = model.getSalePrice();
this.active = model.isActive();
if (model.getWeight() != null){
weight = (WeightWrapper)context.getBean(WeightWrapper.class.getName());
weight.wrapDetails(model.getWeight(), request);
}
if (model.getDimension() != null){
dimension = (DimensionWrapper)context.getBean(DimensionWrapper.class.getName());
dimension.wrapDetails(model.getDimension(), request);
}
}
@Override
public void wrapSummary(Sku model, HttpServletRequest request) {
wrapDetails(model, request);
}
} | 0true
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_api_wrapper_SkuWrapper.java |
1148 | public class ValidateUpdateRequestActivity extends BaseActivity<CartOperationContext> {
@Resource(name = "blOrderItemService")
protected OrderItemService orderItemService;
@Override
public CartOperationContext execute(CartOperationContext context) throws Exception {
CartOperationRequest request = context.getSeedData();
OrderItemRequestDTO orderItemRequestDTO = request.getItemRequest();
// Throw an exception if the user did not specify an orderItemId
if (orderItemRequestDTO.getOrderItemId() == null) {
throw new IllegalArgumentException("OrderItemId must be specified when removing from order");
}
// Throw an exception if the user tried to update an item to a negative quantity
if (orderItemRequestDTO.getQuantity() < 0) {
throw new IllegalArgumentException("Quantity cannot be negative");
}
// Throw an exception if the user did not specify an order to add the item to
if (request.getOrder() == null) {
throw new IllegalArgumentException("Order is required when updating item quantities");
}
// Throw an exception if the user is trying to update an order item that is part of a bundle
OrderItem orderItem = orderItemService.readOrderItemById(orderItemRequestDTO.getOrderItemId());
if (orderItem != null && orderItem instanceof DiscreteOrderItem) {
DiscreteOrderItem doi = (DiscreteOrderItem) orderItem;
if (doi.getBundleOrderItem() != null) {
throw new IllegalArgumentException("Cannot update an item that is part of a bundle");
}
}
return context;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_service_workflow_update_ValidateUpdateRequestActivity.java |
3122 | public class InternalEngine extends AbstractIndexShardComponent implements Engine {
private volatile ByteSizeValue indexingBufferSize;
private volatile int indexConcurrency;
private volatile boolean compoundOnFlush = true;
private long gcDeletesInMillis;
private volatile boolean enableGcDeletes = true;
private volatile String codecName;
private final ThreadPool threadPool;
private final ShardIndexingService indexingService;
private final IndexSettingsService indexSettingsService;
@Nullable
private final InternalIndicesWarmer warmer;
private final Store store;
private final SnapshotDeletionPolicy deletionPolicy;
private final Translog translog;
private final MergePolicyProvider mergePolicyProvider;
private final MergeSchedulerProvider mergeScheduler;
private final AnalysisService analysisService;
private final SimilarityService similarityService;
private final CodecService codecService;
private final ReadWriteLock rwl = new ReentrantReadWriteLock();
private volatile IndexWriter indexWriter;
private final SearcherFactory searcherFactory = new SearchFactory();
private volatile SearcherManager searcherManager;
private volatile boolean closed = false;
// flag indicating if a dirty operation has occurred since the last refresh
private volatile boolean dirty = false;
private volatile boolean possibleMergeNeeded = false;
private final AtomicBoolean optimizeMutex = new AtomicBoolean();
// we use flushNeeded here: if there are no changes, the commit will not really happen,
// and then the commitUserData and the new translog will not be reflected
private volatile boolean flushNeeded = false;
private final AtomicInteger flushing = new AtomicInteger();
private final Lock flushLock = new ReentrantLock();
private final RecoveryCounter onGoingRecoveries = new RecoveryCounter();
// A uid (in the form of BytesRef) to the version map
// we use the hashed variant since we iterate over it and check removal and additions on existing keys
private final ConcurrentMap<HashedBytesRef, VersionValue> versionMap;
private final Object[] dirtyLocks;
private final Object refreshMutex = new Object();
private final ApplySettings applySettings = new ApplySettings();
private volatile boolean failOnMergeFailure;
private Throwable failedEngine = null;
private final Object failedEngineMutex = new Object();
private final CopyOnWriteArrayList<FailedEngineListener> failedEngineListeners = new CopyOnWriteArrayList<FailedEngineListener>();
private final AtomicLong translogIdGenerator = new AtomicLong();
private SegmentInfos lastCommittedSegmentInfos;
@Inject
public InternalEngine(ShardId shardId, @IndexSettings Settings indexSettings, ThreadPool threadPool,
IndexSettingsService indexSettingsService, ShardIndexingService indexingService, @Nullable IndicesWarmer warmer,
Store store, SnapshotDeletionPolicy deletionPolicy, Translog translog,
MergePolicyProvider mergePolicyProvider, MergeSchedulerProvider mergeScheduler,
AnalysisService analysisService, SimilarityService similarityService, CodecService codecService) throws EngineException {
super(shardId, indexSettings);
Preconditions.checkNotNull(store, "Store must be provided to the engine");
Preconditions.checkNotNull(deletionPolicy, "Snapshot deletion policy must be provided to the engine");
Preconditions.checkNotNull(translog, "Translog must be provided to the engine");
this.gcDeletesInMillis = indexSettings.getAsTime(INDEX_GC_DELETES, TimeValue.timeValueSeconds(60)).millis();
this.indexingBufferSize = componentSettings.getAsBytesSize("index_buffer_size", new ByteSizeValue(64, ByteSizeUnit.MB)); // not really important, as it is set by the IndexingMemory manager
this.codecName = indexSettings.get(INDEX_CODEC, "default");
this.threadPool = threadPool;
this.indexSettingsService = indexSettingsService;
this.indexingService = indexingService;
this.warmer = (InternalIndicesWarmer) warmer;
this.store = store;
this.deletionPolicy = deletionPolicy;
this.translog = translog;
this.mergePolicyProvider = mergePolicyProvider;
this.mergeScheduler = mergeScheduler;
this.analysisService = analysisService;
this.similarityService = similarityService;
this.codecService = codecService;
this.compoundOnFlush = indexSettings.getAsBoolean(INDEX_COMPOUND_ON_FLUSH, this.compoundOnFlush);
this.indexConcurrency = indexSettings.getAsInt(INDEX_INDEX_CONCURRENCY, Math.max(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES, (int) (EsExecutors.boundedNumberOfProcessors(indexSettings) * 0.65)));
this.versionMap = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
this.dirtyLocks = new Object[indexConcurrency * 50]; // we multiply it to have enough...
for (int i = 0; i < dirtyLocks.length; i++) {
dirtyLocks[i] = new Object();
}
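// dirtyLocks is a lock-striping table sized at 50 monitors per indexing thread; dirtyLock(uid),
// not shown here, presumably hashes the document uid onto one of these objects, so versioned
// operations on the same document serialize while different documents rarely contend.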
this.indexSettingsService.addListener(applySettings);
this.failOnMergeFailure = indexSettings.getAsBoolean(INDEX_FAIL_ON_MERGE_FAILURE, true);
if (failOnMergeFailure) {
this.mergeScheduler.addFailureListener(new FailEngineOnMergeFailure());
}
}
@Override
public void updateIndexingBufferSize(ByteSizeValue indexingBufferSize) {
ByteSizeValue preValue = this.indexingBufferSize;
rwl.readLock().lock();
try {
this.indexingBufferSize = indexingBufferSize;
IndexWriter indexWriter = this.indexWriter;
if (indexWriter != null) {
indexWriter.getConfig().setRAMBufferSizeMB(this.indexingBufferSize.mbFrac());
}
} finally {
rwl.readLock().unlock();
}
if (preValue.bytes() != indexingBufferSize.bytes()) {
// its inactive, make sure we do a full flush in this case, since the memory
// changes only after a "data" change has happened to the writer
if (indexingBufferSize == Engine.INACTIVE_SHARD_INDEXING_BUFFER && preValue != Engine.INACTIVE_SHARD_INDEXING_BUFFER) {
logger.debug("updating index_buffer_size from [{}] to (inactive) [{}]", preValue, indexingBufferSize);
try {
flush(new Flush().type(Flush.Type.NEW_WRITER));
} catch (EngineClosedException e) {
// ignore
} catch (FlushNotAllowedEngineException e) {
// ignore
} catch (Throwable e) {
logger.warn("failed to flush after setting shard to inactive", e);
}
} else {
logger.debug("updating index_buffer_size from [{}] to [{}]", preValue, indexingBufferSize);
}
}
}
@Override
public void addFailedEngineListener(FailedEngineListener listener) {
failedEngineListeners.add(listener);
}
@Override
public void start() throws EngineException {
rwl.writeLock().lock();
try {
if (indexWriter != null) {
throw new EngineAlreadyStartedException(shardId);
}
if (closed) {
throw new EngineClosedException(shardId);
}
if (logger.isDebugEnabled()) {
logger.debug("starting engine");
}
try {
this.indexWriter = createWriter();
} catch (IOException e) {
throw new EngineCreationFailureException(shardId, "failed to create engine", e);
}
try {
// commit on a just opened writer will commit even if there are no changes done to it
// we rely on that for the commit data translog id key
if (Lucene.indexExists(store.directory())) {
Map<String, String> commitUserData = Lucene.readSegmentInfos(store.directory()).getUserData();
if (commitUserData.containsKey(Translog.TRANSLOG_ID_KEY)) {
translogIdGenerator.set(Long.parseLong(commitUserData.get(Translog.TRANSLOG_ID_KEY)));
} else {
translogIdGenerator.set(System.currentTimeMillis());
indexWriter.setCommitData(MapBuilder.<String, String>newMapBuilder().put(Translog.TRANSLOG_ID_KEY, Long.toString(translogIdGenerator.get())).map());
indexWriter.commit();
}
} else {
translogIdGenerator.set(System.currentTimeMillis());
indexWriter.setCommitData(MapBuilder.<String, String>newMapBuilder().put(Translog.TRANSLOG_ID_KEY, Long.toString(translogIdGenerator.get())).map());
indexWriter.commit();
}
translog.newTranslog(translogIdGenerator.get());
this.searcherManager = buildSearchManager(indexWriter);
readLastCommittedSegmentsInfo();
} catch (IOException e) {
try {
indexWriter.rollback();
} catch (IOException e1) {
// ignore
} finally {
IOUtils.closeWhileHandlingException(indexWriter);
}
throw new EngineCreationFailureException(shardId, "failed to open reader on writer", e);
}
} finally {
rwl.writeLock().unlock();
}
}
private void readLastCommittedSegmentsInfo() throws IOException {
SegmentInfos infos = new SegmentInfos();
infos.read(store.directory());
lastCommittedSegmentInfos = infos;
}
@Override
public TimeValue defaultRefreshInterval() {
return new TimeValue(1, TimeUnit.SECONDS);
}
@Override
public void enableGcDeletes(boolean enableGcDeletes) {
this.enableGcDeletes = enableGcDeletes;
}
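// Realtime get: the in-memory version map is consulted first, so a document that was indexed but
// not yet refreshed can be answered straight from the translog (translog.read on the recorded
// location) without opening a Lucene reader; only on a miss does the engine fall back to a
// searcher acquired below.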
public GetResult get(Get get) throws EngineException {
rwl.readLock().lock();
try {
if (get.realtime()) {
VersionValue versionValue = versionMap.get(versionKey(get.uid()));
if (versionValue != null) {
if (versionValue.delete()) {
return GetResult.NOT_EXISTS;
}
if (get.version() != Versions.MATCH_ANY) {
if (get.versionType().isVersionConflict(versionValue.version(), get.version())) {
Uid uid = Uid.createUid(get.uid().text());
throw new VersionConflictEngineException(shardId, uid.type(), uid.id(), versionValue.version(), get.version());
}
}
if (!get.loadSource()) {
return new GetResult(true, versionValue.version(), null);
}
byte[] data = translog.read(versionValue.translogLocation());
if (data != null) {
try {
Translog.Source source = TranslogStreams.readSource(data);
return new GetResult(true, versionValue.version(), source);
} catch (IOException e) {
// switched on us, read it from the reader
}
}
}
}
// no version, get the version from the index, we know that we refresh on flush
Searcher searcher = acquireSearcher("get");
final Versions.DocIdAndVersion docIdAndVersion;
try {
docIdAndVersion = Versions.loadDocIdAndVersion(searcher.reader(), get.uid());
} catch (Throwable e) {
searcher.release();
//TODO: A better exception goes here
throw new EngineException(shardId(), "Couldn't resolve version", e);
}
if (get.version() != Versions.MATCH_ANY && docIdAndVersion != null) {
if (get.versionType().isVersionConflict(docIdAndVersion.version, get.version())) {
searcher.release();
Uid uid = Uid.createUid(get.uid().text());
throw new VersionConflictEngineException(shardId, uid.type(), uid.id(), docIdAndVersion.version, get.version());
}
}
if (docIdAndVersion != null) {
// don't release the searcher on this path, it is the responsibility of the caller to call GetResult.release
return new GetResult(searcher, docIdAndVersion);
} else {
searcher.release();
return GetResult.NOT_EXISTS;
}
} finally {
rwl.readLock().unlock();
}
}
@Override
public void create(Create create) throws EngineException {
rwl.readLock().lock();
try {
IndexWriter writer = this.indexWriter;
if (writer == null) {
throw new EngineClosedException(shardId, failedEngine);
}
innerCreate(create, writer);
dirty = true;
possibleMergeNeeded = true;
flushNeeded = true;
} catch (IOException e) {
throw new CreateFailedEngineException(shardId, create, e);
} catch (OutOfMemoryError e) {
failEngine(e);
throw new CreateFailedEngineException(shardId, create, e);
} catch (IllegalStateException e) {
if (e.getMessage().contains("OutOfMemoryError")) {
failEngine(e);
}
throw new CreateFailedEngineException(shardId, create, e);
} finally {
rwl.readLock().unlock();
}
}
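// innerCreate resolves the current version under the per-uid dirty lock: a delete tombstone in
// the version map older than gcDeletesInMillis counts as NOT_FOUND, primaries enforce the
// requested version type, and replica/recovery operations use EXTERNAL semantics because the
// primary has already assigned the authoritative version.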
private void innerCreate(Create create, IndexWriter writer) throws IOException {
synchronized (dirtyLock(create.uid())) {
HashedBytesRef versionKey = versionKey(create.uid());
final long currentVersion;
VersionValue versionValue = versionMap.get(versionKey);
if (versionValue == null) {
currentVersion = loadCurrentVersionFromIndex(create.uid());
} else {
if (enableGcDeletes && versionValue.delete() && (threadPool.estimatedTimeInMillis() - versionValue.time()) > gcDeletesInMillis) {
currentVersion = Versions.NOT_FOUND; // deleted, and GC
} else {
currentVersion = versionValue.version();
}
}
// same logic as index
long updatedVersion;
long expectedVersion = create.version();
if (create.origin() == Operation.Origin.PRIMARY) {
if (create.versionType().isVersionConflict(currentVersion, expectedVersion)) {
throw new VersionConflictEngineException(shardId, create.type(), create.id(), currentVersion, expectedVersion);
}
updatedVersion = create.versionType().updateVersion(currentVersion, expectedVersion);
} else { // if (index.origin() == Operation.Origin.REPLICA || index.origin() == Operation.Origin.RECOVERY) {
// replicas treat the version as "external" as it comes from the primary ->
// only exploding if the version they got is lower or equal to what they know.
if (VersionType.EXTERNAL.isVersionConflict(currentVersion, expectedVersion)) {
if (create.origin() == Operation.Origin.RECOVERY) {
return;
} else {
throw new VersionConflictEngineException(shardId, create.type(), create.id(), currentVersion, expectedVersion);
}
}
updatedVersion = VersionType.EXTERNAL.updateVersion(currentVersion, expectedVersion);
}
// proceed only if the doc does not exist, or it exists but is deleted
if (versionValue != null) {
if (!versionValue.delete()) {
if (create.origin() == Operation.Origin.RECOVERY) {
return;
} else {
throw new DocumentAlreadyExistsException(shardId, create.type(), create.id());
}
}
} else if (currentVersion != Versions.NOT_FOUND) {
// it's not deleted, it's already there
if (create.origin() == Operation.Origin.RECOVERY) {
return;
} else {
throw new DocumentAlreadyExistsException(shardId, create.type(), create.id());
}
}
create.version(updatedVersion);
if (create.docs().size() > 1) {
writer.addDocuments(create.docs(), create.analyzer());
} else {
writer.addDocument(create.docs().get(0), create.analyzer());
}
Translog.Location translogLocation = translog.add(new Translog.Create(create));
versionMap.put(versionKey, new VersionValue(updatedVersion, false, threadPool.estimatedTimeInMillis(), translogLocation));
indexingService.postCreateUnderLock(create);
}
}
@Override
public void index(Index index) throws EngineException {
rwl.readLock().lock();
try {
IndexWriter writer = this.indexWriter;
if (writer == null) {
throw new EngineClosedException(shardId, failedEngine);
}
innerIndex(index, writer);
dirty = true;
possibleMergeNeeded = true;
flushNeeded = true;
} catch (IOException e) {
throw new IndexFailedEngineException(shardId, index, e);
} catch (OutOfMemoryError e) {
failEngine(e);
throw new IndexFailedEngineException(shardId, index, e);
} catch (IllegalStateException e) {
if (e.getMessage().contains("OutOfMemoryError")) {
failEngine(e);
}
throw new IndexFailedEngineException(shardId, index, e);
} finally {
rwl.readLock().unlock();
}
}
private void innerIndex(Index index, IndexWriter writer) throws IOException {
synchronized (dirtyLock(index.uid())) {
HashedBytesRef versionKey = versionKey(index.uid());
final long currentVersion;
VersionValue versionValue = versionMap.get(versionKey);
if (versionValue == null) {
currentVersion = loadCurrentVersionFromIndex(index.uid());
} else {
if (enableGcDeletes && versionValue.delete() && (threadPool.estimatedTimeInMillis() - versionValue.time()) > gcDeletesInMillis) {
currentVersion = Versions.NOT_FOUND; // deleted, and GC
} else {
currentVersion = versionValue.version();
}
}
long updatedVersion;
long expectedVersion = index.version();
if (index.origin() == Operation.Origin.PRIMARY) {
if (index.versionType().isVersionConflict(currentVersion, expectedVersion)) {
throw new VersionConflictEngineException(shardId, index.type(), index.id(), currentVersion, expectedVersion);
}
updatedVersion = index.versionType().updateVersion(currentVersion, expectedVersion);
} else { // if (index.origin() == Operation.Origin.REPLICA || index.origin() == Operation.Origin.RECOVERY) {
// replicas treat the version as "external" as it comes from the primary ->
// only exploding if the version they got is lower or equal to what they know.
if (VersionType.EXTERNAL.isVersionConflict(currentVersion, expectedVersion)) {
if (index.origin() == Operation.Origin.RECOVERY) {
return;
} else {
throw new VersionConflictEngineException(shardId, index.type(), index.id(), currentVersion, expectedVersion);
}
}
updatedVersion = VersionType.EXTERNAL.updateVersion(currentVersion, expectedVersion);
}
index.version(updatedVersion);
if (currentVersion == Versions.NOT_FOUND) {
// document does not exist, we can optimize for create
index.created(true);
if (index.docs().size() > 1) {
writer.addDocuments(index.docs(), index.analyzer());
} else {
writer.addDocument(index.docs().get(0), index.analyzer());
}
} else {
if (versionValue != null) {
index.created(versionValue.delete()); // we have a delete which is not GC'ed...
}
if (index.docs().size() > 1) {
writer.updateDocuments(index.uid(), index.docs(), index.analyzer());
} else {
writer.updateDocument(index.uid(), index.docs().get(0), index.analyzer());
}
}
Translog.Location translogLocation = translog.add(new Translog.Index(index));
versionMap.put(versionKey, new VersionValue(updatedVersion, false, threadPool.estimatedTimeInMillis(), translogLocation));
indexingService.postIndexUnderLock(index);
}
}
@Override
public void delete(Delete delete) throws EngineException {
rwl.readLock().lock();
try {
IndexWriter writer = this.indexWriter;
if (writer == null) {
throw new EngineClosedException(shardId, failedEngine);
}
innerDelete(delete, writer);
dirty = true;
possibleMergeNeeded = true;
flushNeeded = true;
} catch (IOException e) {
throw new DeleteFailedEngineException(shardId, delete, e);
} catch (OutOfMemoryError e) {
failEngine(e);
throw new DeleteFailedEngineException(shardId, delete, e);
} catch (IllegalStateException e) {
if (e.getMessage().contains("OutOfMemoryError")) {
failEngine(e);
}
throw new DeleteFailedEngineException(shardId, delete, e);
} finally {
rwl.readLock().unlock();
}
}
private void innerDelete(Delete delete, IndexWriter writer) throws IOException {
synchronized (dirtyLock(delete.uid())) {
final long currentVersion;
HashedBytesRef versionKey = versionKey(delete.uid());
VersionValue versionValue = versionMap.get(versionKey);
if (versionValue == null) {
currentVersion = loadCurrentVersionFromIndex(delete.uid());
} else {
if (enableGcDeletes && versionValue.delete() && (threadPool.estimatedTimeInMillis() - versionValue.time()) > gcDeletesInMillis) {
currentVersion = Versions.NOT_FOUND; // deleted, and GC
} else {
currentVersion = versionValue.version();
}
}
long updatedVersion;
long expectedVersion = delete.version();
if (delete.origin() == Operation.Origin.PRIMARY) {
if (delete.versionType().isVersionConflict(currentVersion, expectedVersion)) {
throw new VersionConflictEngineException(shardId, delete.type(), delete.id(), currentVersion, expectedVersion);
}
updatedVersion = delete.versionType().updateVersion(currentVersion, expectedVersion);
} else { // if (index.origin() == Operation.Origin.REPLICA || index.origin() == Operation.Origin.RECOVERY) {
// replicas treat the version as "external" as it comes from the primary ->
// only exploding if the version they got is lower or equal to what they know.
if (VersionType.EXTERNAL.isVersionConflict(currentVersion, expectedVersion)) {
if (delete.origin() == Operation.Origin.RECOVERY) {
return;
} else {
throw new VersionConflictEngineException(shardId, delete.type(), delete.id(), currentVersion - 1, expectedVersion);
}
}
updatedVersion = VersionType.EXTERNAL.updateVersion(currentVersion, expectedVersion);
}
if (currentVersion == Versions.NOT_FOUND) {
// doc does not exist and there are no prior deletes
delete.version(updatedVersion).found(false);
Translog.Location translogLocation = translog.add(new Translog.Delete(delete));
versionMap.put(versionKey, new VersionValue(updatedVersion, true, threadPool.estimatedTimeInMillis(), translogLocation));
} else if (versionValue != null && versionValue.delete()) {
// a "delete on delete", in this case, we still increment the version, log it, and return that version
delete.version(updatedVersion).found(false);
Translog.Location translogLocation = translog.add(new Translog.Delete(delete));
versionMap.put(versionKey, new VersionValue(updatedVersion, true, threadPool.estimatedTimeInMillis(), translogLocation));
} else {
delete.version(updatedVersion).found(true);
writer.deleteDocuments(delete.uid());
Translog.Location translogLocation = translog.add(new Translog.Delete(delete));
versionMap.put(versionKey, new VersionValue(updatedVersion, true, threadPool.estimatedTimeInMillis(), translogLocation));
}
indexingService.postDeleteUnderLock(delete);
}
}
@Override
public void delete(DeleteByQuery delete) throws EngineException {
rwl.readLock().lock();
try {
IndexWriter writer = this.indexWriter;
if (writer == null) {
throw new EngineClosedException(shardId);
}
Query query;
if (delete.nested() && delete.aliasFilter() != null) {
query = new IncludeNestedDocsQuery(new XFilteredQuery(delete.query(), delete.aliasFilter()), delete.parentFilter());
} else if (delete.nested()) {
query = new IncludeNestedDocsQuery(delete.query(), delete.parentFilter());
} else if (delete.aliasFilter() != null) {
query = new XFilteredQuery(delete.query(), delete.aliasFilter());
} else {
query = delete.query();
}
writer.deleteDocuments(query);
translog.add(new Translog.DeleteByQuery(delete));
dirty = true;
possibleMergeNeeded = true;
flushNeeded = true;
} catch (IOException e) {
throw new DeleteByQueryFailedEngineException(shardId, delete, e);
} finally {
rwl.readLock().unlock();
}
//TODO: This is heavy, since we refresh, but we really have to...
refreshVersioningTable(System.currentTimeMillis());
}
@Override
public final Searcher acquireSearcher(String source) throws EngineException {
SearcherManager manager = this.searcherManager;
if (manager == null) {
throw new EngineClosedException(shardId);
}
try {
IndexSearcher searcher = manager.acquire();
return newSearcher(source, searcher, manager);
} catch (Throwable ex) {
logger.error("failed to acquire searcher, source {}", ex, source);
throw new EngineException(shardId, ex.getMessage());
}
}
protected Searcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager) {
return new EngineSearcher(source, searcher, manager);
}
@Override
public boolean refreshNeeded() {
return dirty;
}
@Override
public boolean possibleMergeNeeded() {
return this.possibleMergeNeeded;
}
@Override
public void refresh(Refresh refresh) throws EngineException {
if (indexWriter == null) {
throw new EngineClosedException(shardId);
}
// we obtain a read lock here, since we don't want a flush to happen while we are refreshing
// since it flushes the index as well (though, in terms of concurrency, we are allowed to do it)
rwl.readLock().lock();
try {
// this engine always acts as if waitForOperations=true
IndexWriter currentWriter = indexWriter;
if (currentWriter == null) {
throw new EngineClosedException(shardId, failedEngine);
}
try {
// maybeRefresh will only allow one refresh to execute, and the rest will "pass through",
// but we want to make sure not to lose any refresh calls, if one is taking time
synchronized (refreshMutex) {
if (dirty || refresh.force()) {
dirty = false;
searcherManager.maybeRefresh();
}
}
} catch (AlreadyClosedException e) {
// an index writer got replaced on us, ignore
} catch (OutOfMemoryError e) {
failEngine(e);
throw new RefreshFailedEngineException(shardId, e);
} catch (IllegalStateException e) {
if (e.getMessage().contains("OutOfMemoryError")) {
failEngine(e);
}
throw new RefreshFailedEngineException(shardId, e);
} catch (Throwable e) {
if (indexWriter == null) {
throw new EngineClosedException(shardId, failedEngine);
} else if (currentWriter != indexWriter) {
// an index writer got replaced on us, ignore
} else {
throw new RefreshFailedEngineException(shardId, e);
}
}
} finally {
rwl.readLock().unlock();
}
}
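// Three flush flavors are handled below: NEW_WRITER tears down and recreates the IndexWriter
// (used e.g. when a shard becomes inactive), COMMIT_TRANSLOG commits Lucene and swaps in a fresh
// translog generation, and COMMIT only commits Lucene against the current translog id, which is
// safe because a translog can be replayed on top of an older commit point.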
@Override
public void flush(Flush flush) throws EngineException {
ensureOpen();
if (flush.type() == Flush.Type.NEW_WRITER || flush.type() == Flush.Type.COMMIT_TRANSLOG) {
// check outside the lock as well so we can check without blocking on the write lock
if (onGoingRecoveries.get() > 0) {
throw new FlushNotAllowedEngineException(shardId, "recovery is in progress, flush [" + flush.type() + "] is not allowed");
}
}
int currentFlushing = flushing.incrementAndGet();
if (currentFlushing > 1 && !flush.waitIfOngoing()) {
flushing.decrementAndGet();
throw new FlushNotAllowedEngineException(shardId, "already flushing...");
}
flushLock.lock();
try {
if (flush.type() == Flush.Type.NEW_WRITER) {
rwl.writeLock().lock();
try {
ensureOpen();
if (onGoingRecoveries.get() > 0) {
throw new FlushNotAllowedEngineException(shardId, "Recovery is in progress, flush is not allowed");
}
// disable refreshing, not dirty
dirty = false;
try {
// that's ok if the index writer failed and is in inconsistent state
// we will get an exception on a dirty operation, and will cause the shard
// to be allocated to a different node
indexWriter.close(false);
indexWriter = createWriter();
// commit on a just opened writer will commit even if there are no changes done to it
// we rely on that for the commit data translog id key
if (flushNeeded || flush.force()) {
flushNeeded = false;
long translogId = translogIdGenerator.incrementAndGet();
indexWriter.setCommitData(MapBuilder.<String, String>newMapBuilder().put(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)).map());
indexWriter.commit();
translog.newTranslog(translogId);
}
SearcherManager current = this.searcherManager;
this.searcherManager = buildSearchManager(indexWriter);
try {
IOUtils.close(current);
} catch (Throwable t) {
logger.warn("Failed to close current SearcherManager", t);
}
refreshVersioningTable(threadPool.estimatedTimeInMillis());
} catch (OutOfMemoryError e) {
failEngine(e);
throw new FlushFailedEngineException(shardId, e);
} catch (IllegalStateException e) {
if (e.getMessage().contains("OutOfMemoryError")) {
failEngine(e);
}
throw new FlushFailedEngineException(shardId, e);
} catch (Throwable e) {
throw new FlushFailedEngineException(shardId, e);
}
} finally {
rwl.writeLock().unlock();
}
} else if (flush.type() == Flush.Type.COMMIT_TRANSLOG) {
rwl.readLock().lock();
try {
ensureOpen();
if (onGoingRecoveries.get() > 0) {
throw new FlushNotAllowedEngineException(shardId, "Recovery is in progress, flush is not allowed");
}
if (flushNeeded || flush.force()) {
flushNeeded = false;
try {
long translogId = translogIdGenerator.incrementAndGet();
translog.newTransientTranslog(translogId);
indexWriter.setCommitData(MapBuilder.<String, String>newMapBuilder().put(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)).map());
indexWriter.commit();
refreshVersioningTable(threadPool.estimatedTimeInMillis());
// we need to move transient to current only after we refresh
// so items added to current will still be around for realtime get
// when the transient translog overrides it
translog.makeTransientCurrent();
} catch (OutOfMemoryError e) {
translog.revertTransient();
failEngine(e);
throw new FlushFailedEngineException(shardId, e);
} catch (IllegalStateException e) {
if (e.getMessage().contains("OutOfMemoryError")) {
failEngine(e);
}
throw new FlushFailedEngineException(shardId, e);
} catch (Throwable e) {
translog.revertTransient();
throw new FlushFailedEngineException(shardId, e);
}
}
} finally {
rwl.readLock().unlock();
}
} else if (flush.type() == Flush.Type.COMMIT) {
// note, its ok to just commit without cleaning the translog, its perfectly fine to replay a
// translog on an index that was opened on a committed point in time that is "in the future"
// of that translog
rwl.readLock().lock();
try {
ensureOpen();
// we allow *just* committing even if there is an ongoing recovery happening...
// its ok to use this, only a flush will cause a new translogId, and we are locked here from
// other flushes, as we use flushLock
try {
long translogId = translog.currentId();
indexWriter.setCommitData(MapBuilder.<String, String>newMapBuilder().put(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)).map());
indexWriter.commit();
} catch (OutOfMemoryError e) {
translog.revertTransient();
failEngine(e);
throw new FlushFailedEngineException(shardId, e);
} catch (IllegalStateException e) {
if (e.getMessage().contains("OutOfMemoryError")) {
failEngine(e);
}
throw new FlushFailedEngineException(shardId, e);
} catch (Throwable e) {
throw new FlushFailedEngineException(shardId, e);
}
} finally {
rwl.readLock().unlock();
}
} else {
throw new ElasticsearchIllegalStateException("flush type [" + flush.type() + "] not supported");
}
// reread the last committed segment infos
rwl.readLock().lock();
try {
ensureOpen();
readLastCommittedSegmentsInfo();
} catch (Throwable e) {
if (!closed) {
logger.warn("failed to read latest segment infos on flush", e);
}
} finally {
rwl.readLock().unlock();
}
} finally {
flushLock.unlock();
flushing.decrementAndGet();
}
}
private void ensureOpen() {
if (indexWriter == null) {
throw new EngineClosedException(shardId, failedEngine);
}
}
private void refreshVersioningTable(long time) {
// we need to refresh in order to clear older version values
refresh(new Refresh("version_table").force(true));
for (Map.Entry<HashedBytesRef, VersionValue> entry : versionMap.entrySet()) {
HashedBytesRef uid = entry.getKey();
synchronized (dirtyLock(uid.bytes)) { // can we do it without this lock on each value? maybe batch to a set and get the lock once per set?
VersionValue versionValue = versionMap.get(uid);
if (versionValue == null) {
continue;
}
if (time - versionValue.time() <= 0) {
continue; // its a newer value, added during or after the refresh, don't clear it
}
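// deletes are kept as tombstones until gcDeletesInMillis elapses (when delete GC is enabled); non-delete entries can be cleared right away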
if (versionValue.delete()) {
if (enableGcDeletes && (time - versionValue.time()) > gcDeletesInMillis) {
versionMap.remove(uid);
}
} else {
versionMap.remove(uid);
}
}
}
}
@Override
public void maybeMerge() throws EngineException {
if (!possibleMergeNeeded) {
return;
}
possibleMergeNeeded = false;
rwl.readLock().lock();
try {
ensureOpen();
indexWriter.maybeMerge();
} catch (OutOfMemoryError e) {
failEngine(e);
throw new OptimizeFailedEngineException(shardId, e);
} catch (IllegalStateException e) {
if (e.getMessage().contains("OutOfMemoryError")) {
failEngine(e);
}
throw new OptimizeFailedEngineException(shardId, e);
} catch (Throwable e) {
throw new OptimizeFailedEngineException(shardId, e);
} finally {
rwl.readLock().unlock();
}
}
@Override
public void optimize(Optimize optimize) throws EngineException {
if (optimize.flush()) {
flush(new Flush().force(true).waitIfOngoing(true));
}
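// only one optimize may run at a time; callers that lose the CAS skip the merge request and fall through to waiting for merges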
if (optimizeMutex.compareAndSet(false, true)) {
rwl.readLock().lock();
try {
ensureOpen();
if (optimize.onlyExpungeDeletes()) {
indexWriter.forceMergeDeletes(false);
} else if (optimize.maxNumSegments() <= 0) {
indexWriter.maybeMerge();
possibleMergeNeeded = false;
} else {
indexWriter.forceMerge(optimize.maxNumSegments(), false);
}
} catch (OutOfMemoryError e) {
failEngine(e);
throw new OptimizeFailedEngineException(shardId, e);
} catch (IllegalStateException e) {
if (e.getMessage().contains("OutOfMemoryError")) {
failEngine(e);
}
throw new OptimizeFailedEngineException(shardId, e);
} catch (Throwable e) {
throw new OptimizeFailedEngineException(shardId, e);
} finally {
rwl.readLock().unlock();
optimizeMutex.set(false);
}
}
// wait for the merges outside of the read lock
if (optimize.waitForMerge()) {
indexWriter.waitForMerges();
}
if (optimize.flush()) {
flush(new Flush().force(true).waitIfOngoing(true));
}
}
@Override
public <T> T snapshot(SnapshotHandler<T> snapshotHandler) throws EngineException {
SnapshotIndexCommit snapshotIndexCommit = null;
Translog.Snapshot translogSnapshot = null;
rwl.readLock().lock();
try {
snapshotIndexCommit = deletionPolicy.snapshot();
translogSnapshot = translog.snapshot();
} catch (Throwable e) {
if (snapshotIndexCommit != null) {
snapshotIndexCommit.release();
}
throw new SnapshotFailedEngineException(shardId, e);
} finally {
rwl.readLock().unlock();
}
try {
return snapshotHandler.snapshot(snapshotIndexCommit, translogSnapshot);
} finally {
snapshotIndexCommit.release();
translogSnapshot.release();
}
}
@Override
public SnapshotIndexCommit snapshotIndex() throws EngineException {
rwl.readLock().lock();
try {
flush(new Flush().type(Flush.Type.COMMIT).waitIfOngoing(true));
ensureOpen();
return deletionPolicy.snapshot();
} catch (IOException e) {
throw new SnapshotFailedEngineException(shardId, e);
} finally {
rwl.readLock().unlock();
}
}
@Override
public void recover(RecoveryHandler recoveryHandler) throws EngineException {
// take a write lock here so recovery won't start while a flush is in progress
// this means that next commits will not be allowed once the lock is released
rwl.writeLock().lock();
try {
if (closed) {
throw new EngineClosedException(shardId);
}
onGoingRecoveries.increment();
} finally {
rwl.writeLock().unlock();
}
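// recovery proceeds in three phases: phase1 ships a snapshot of the index files, phase2 replays a translog snapshot, and phase3 replays the remaining operations under the write lock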
SnapshotIndexCommit phase1Snapshot;
try {
phase1Snapshot = deletionPolicy.snapshot();
} catch (Throwable e) {
onGoingRecoveries.decrement();
throw new RecoveryEngineException(shardId, 1, "Snapshot failed", e);
}
try {
recoveryHandler.phase1(phase1Snapshot);
} catch (Throwable e) {
onGoingRecoveries.decrement();
phase1Snapshot.release();
if (closed) {
e = new EngineClosedException(shardId, e);
}
throw new RecoveryEngineException(shardId, 1, "Execution failed", e);
}
Translog.Snapshot phase2Snapshot;
try {
phase2Snapshot = translog.snapshot();
} catch (Throwable e) {
onGoingRecoveries.decrement();
phase1Snapshot.release();
if (closed) {
e = new EngineClosedException(shardId, e);
}
throw new RecoveryEngineException(shardId, 2, "Snapshot failed", e);
}
try {
recoveryHandler.phase2(phase2Snapshot);
} catch (Throwable e) {
onGoingRecoveries.decrement();
phase1Snapshot.release();
phase2Snapshot.release();
if (closed) {
e = new EngineClosedException(shardId, e);
}
throw new RecoveryEngineException(shardId, 2, "Execution failed", e);
}
rwl.writeLock().lock();
Translog.Snapshot phase3Snapshot = null;
try {
phase3Snapshot = translog.snapshot(phase2Snapshot);
recoveryHandler.phase3(phase3Snapshot);
} catch (Throwable e) {
throw new RecoveryEngineException(shardId, 3, "Execution failed", e);
} finally {
onGoingRecoveries.decrement();
rwl.writeLock().unlock();
phase1Snapshot.release();
phase2Snapshot.release();
if (phase3Snapshot != null) {
phase3Snapshot.release();
}
}
}
private long getReaderRamBytesUsed(AtomicReaderContext reader) {
return SegmentReaderUtils.segmentReader(reader.reader()).ramBytesUsed();
}
@Override
public SegmentsStats segmentsStats() {
rwl.readLock().lock();
try {
ensureOpen();
Searcher searcher = acquireSearcher("segments_stats");
try {
SegmentsStats stats = new SegmentsStats();
for (AtomicReaderContext reader : searcher.reader().leaves()) {
stats.add(1, getReaderRamBytesUsed(reader));
}
return stats;
} finally {
searcher.release();
}
} finally {
rwl.readLock().unlock();
}
}
@Override
public List<Segment> segments() {
rwl.readLock().lock();
try {
ensureOpen();
Map<String, Segment> segments = new HashMap<String, Segment>();
// first, go over and compute the search ones...
Searcher searcher = acquireSearcher("segments");
try {
for (AtomicReaderContext reader : searcher.reader().leaves()) {
assert reader.reader() instanceof SegmentReader;
SegmentCommitInfo info = SegmentReaderUtils.segmentReader(reader.reader()).getSegmentInfo();
assert !segments.containsKey(info.info.name);
Segment segment = new Segment(info.info.name);
segment.search = true;
segment.docCount = reader.reader().numDocs();
segment.delDocCount = reader.reader().numDeletedDocs();
segment.version = info.info.getVersion();
segment.compound = info.info.getUseCompoundFile();
try {
segment.sizeInBytes = info.sizeInBytes();
} catch (IOException e) {
logger.trace("failed to get size for [{}]", e, info.info.name);
}
segment.memoryInBytes = getReaderRamBytesUsed(reader);
segments.put(info.info.name, segment);
}
} finally {
searcher.release();
}
// now, correlate or add the committed ones...
if (lastCommittedSegmentInfos != null) {
SegmentInfos infos = lastCommittedSegmentInfos;
for (SegmentCommitInfo info : infos) {
Segment segment = segments.get(info.info.name);
if (segment == null) {
segment = new Segment(info.info.name);
segment.search = false;
segment.committed = true;
segment.docCount = info.info.getDocCount();
segment.delDocCount = info.getDelCount();
segment.version = info.info.getVersion();
segment.compound = info.info.getUseCompoundFile();
try {
segment.sizeInBytes = info.sizeInBytes();
} catch (IOException e) {
logger.trace("failed to get size for [{}]", e, info.info.name);
}
segments.put(info.info.name, segment);
} else {
segment.committed = true;
}
}
}
Segment[] segmentsArr = segments.values().toArray(new Segment[segments.values().size()]);
Arrays.sort(segmentsArr, new Comparator<Segment>() {
@Override
public int compare(Segment o1, Segment o2) {
return (int) (o1.getGeneration() - o2.getGeneration());
}
});
// fill in the merges flag
Set<OnGoingMerge> onGoingMerges = mergeScheduler.onGoingMerges();
for (OnGoingMerge onGoingMerge : onGoingMerges) {
for (SegmentCommitInfo segmentInfoPerCommit : onGoingMerge.getMergedSegments()) {
for (Segment segment : segmentsArr) {
if (segment.getName().equals(segmentInfoPerCommit.info.name)) {
segment.mergeId = onGoingMerge.getId();
break;
}
}
}
}
return Arrays.asList(segmentsArr);
} finally {
rwl.readLock().unlock();
}
}
@Override
public void close() throws ElasticsearchException {
rwl.writeLock().lock();
try {
innerClose();
} finally {
rwl.writeLock().unlock();
}
try {
// wait for recoveries to join and close all resources / IO streams
int ongoingRecoveries = onGoingRecoveries.awaitNoRecoveries(5000);
if (ongoingRecoveries > 0) {
logger.debug("Waiting for ongoing recoveries timed out on close currently ongoing disoveries: [{}]", ongoingRecoveries);
}
} catch (InterruptedException e) {
// ignore & restore interrupt
Thread.currentThread().interrupt();
}
}
class FailEngineOnMergeFailure implements MergeSchedulerProvider.FailureListener {
@Override
public void onFailedMerge(MergePolicy.MergeException e) {
failEngine(e);
}
}
private void failEngine(Throwable failure) {
synchronized (failedEngineMutex) {
if (failedEngine != null) {
return;
}
logger.warn("failed engine", failure);
failedEngine = failure;
for (FailedEngineListener listener : failedEngineListeners) {
listener.onFailedEngine(shardId, failure);
}
innerClose();
}
}
private void innerClose() {
if (closed) {
return;
}
indexSettingsService.removeListener(applySettings);
closed = true;
this.versionMap.clear();
this.failedEngineListeners.clear();
try {
try {
IOUtils.close(searcherManager);
} catch (Throwable t) {
logger.warn("Failed to close SearcherManager", t);
}
// no need to commit in this case! we snapshot before we close the shard, so the translog and everything else is sync'ed
if (indexWriter != null) {
try {
indexWriter.rollback();
} catch (AlreadyClosedException e) {
// ignore
}
}
} catch (Throwable e) {
logger.debug("failed to rollback writer on close", e);
} finally {
indexWriter = null;
}
}
private HashedBytesRef versionKey(Term uid) {
return new HashedBytesRef(uid.bytes());
}
private Object dirtyLock(BytesRef uid) {
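// stripe the locks by uid hash so concurrent operations on different documents rarely contend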
int hash = DjbHashFunction.DJB_HASH(uid.bytes, uid.offset, uid.length);
// Math.abs(Integer.MIN_VALUE) returns Integer.MIN_VALUE, so we need to protect against it...
if (hash == Integer.MIN_VALUE) {
hash = 0;
}
return dirtyLocks[Math.abs(hash) % dirtyLocks.length];
}
private Object dirtyLock(Term uid) {
return dirtyLock(uid.bytes());
}
private long loadCurrentVersionFromIndex(Term uid) throws IOException {
Searcher searcher = acquireSearcher("load_version");
try {
return Versions.loadVersion(searcher.reader(), uid);
} finally {
searcher.release();
}
}
/**
* Returns whether a leaf reader comes from a merge (versus flush or addIndexes).
*/
private static boolean isMergedSegment(AtomicReader reader) {
// We expect leaves to be segment readers
final Map<String, String> diagnostics = SegmentReaderUtils.segmentReader(reader).getSegmentInfo().info.getDiagnostics();
final String source = diagnostics.get(IndexWriter.SOURCE);
assert Arrays.asList(IndexWriter.SOURCE_ADDINDEXES_READERS, IndexWriter.SOURCE_FLUSH, IndexWriter.SOURCE_MERGE).contains(source) : "Unknown source " + source;
return IndexWriter.SOURCE_MERGE.equals(source);
}
private IndexWriter createWriter() throws IOException {
try {
// release locks when started
if (IndexWriter.isLocked(store.directory())) {
logger.warn("shard is locked, releasing lock");
IndexWriter.unlock(store.directory());
}
boolean create = !Lucene.indexExists(store.directory());
IndexWriterConfig config = new IndexWriterConfig(Lucene.VERSION, analysisService.defaultIndexAnalyzer());
config.setOpenMode(create ? IndexWriterConfig.OpenMode.CREATE : IndexWriterConfig.OpenMode.APPEND);
config.setIndexDeletionPolicy(deletionPolicy);
config.setMergeScheduler(mergeScheduler.newMergeScheduler());
MergePolicy mergePolicy = mergePolicyProvider.newMergePolicy();
// Give us the opportunity to upgrade old segments while performing
// background merges
mergePolicy = new IndexUpgraderMergePolicy(mergePolicy);
config.setMergePolicy(mergePolicy);
config.setSimilarity(similarityService.similarity());
config.setRAMBufferSizeMB(indexingBufferSize.mbFrac());
config.setMaxThreadStates(indexConcurrency);
config.setCodec(codecService.codec(codecName));
/* We set this timeout to a highish value to work around
* the default poll interval in the Lucene lock that is
* 1000ms by default. We might need to poll multiple times
* here but with 1s poll this is only executed twice at most
* in combination with the default write lock timeout */
config.setWriteLockTimeout(5000);
config.setUseCompoundFile(this.compoundOnFlush);
// Warm-up hook for newly-merged segments. Warming up segments here is better since it will be performed at the end
// of the merge operation and won't slow down _refresh
config.setMergedSegmentWarmer(new IndexReaderWarmer() {
@Override
public void warm(AtomicReader reader) throws IOException {
try {
assert isMergedSegment(reader);
final Engine.Searcher searcher = new SimpleSearcher("warmer", new IndexSearcher(reader));
final IndicesWarmer.WarmerContext context = new IndicesWarmer.WarmerContext(shardId, searcher);
if (warmer != null) warmer.warm(context);
} catch (Throwable t) {
// Don't fail a merge if the warm-up failed
if (!closed) {
logger.warn("Warm-up failed", t);
}
if (t instanceof Error) {
// assertion/out-of-memory error, don't ignore those
throw (Error) t;
}
}
}
});
return new IndexWriter(store.directory(), config);
} catch (LockObtainFailedException ex) {
boolean isLocked = IndexWriter.isLocked(store.directory());
logger.warn("Could not lock IndexWriter isLocked [{}]", ex, isLocked);
throw ex;
}
}
public static final String INDEX_INDEX_CONCURRENCY = "index.index_concurrency";
public static final String INDEX_COMPOUND_ON_FLUSH = "index.compound_on_flush";
public static final String INDEX_GC_DELETES = "index.gc_deletes";
public static final String INDEX_FAIL_ON_MERGE_FAILURE = "index.fail_on_merge_failure";
class ApplySettings implements IndexSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
long gcDeletesInMillis = settings.getAsTime(INDEX_GC_DELETES, TimeValue.timeValueMillis(InternalEngine.this.gcDeletesInMillis)).millis();
if (gcDeletesInMillis != InternalEngine.this.gcDeletesInMillis) {
logger.info("updating index.gc_deletes from [{}] to [{}]", TimeValue.timeValueMillis(InternalEngine.this.gcDeletesInMillis), TimeValue.timeValueMillis(gcDeletesInMillis));
InternalEngine.this.gcDeletesInMillis = gcDeletesInMillis;
}
final boolean compoundOnFlush = settings.getAsBoolean(INDEX_COMPOUND_ON_FLUSH, InternalEngine.this.compoundOnFlush);
if (compoundOnFlush != InternalEngine.this.compoundOnFlush) {
logger.info("updating {} from [{}] to [{}]", InternalEngine.INDEX_COMPOUND_ON_FLUSH, InternalEngine.this.compoundOnFlush, compoundOnFlush);
InternalEngine.this.compoundOnFlush = compoundOnFlush;
indexWriter.getConfig().setUseCompoundFile(compoundOnFlush);
}
int indexConcurrency = settings.getAsInt(INDEX_INDEX_CONCURRENCY, InternalEngine.this.indexConcurrency);
boolean failOnMergeFailure = settings.getAsBoolean(INDEX_FAIL_ON_MERGE_FAILURE, InternalEngine.this.failOnMergeFailure);
String codecName = settings.get(INDEX_CODEC, InternalEngine.this.codecName);
final boolean codecBloomLoad = settings.getAsBoolean(CodecService.INDEX_CODEC_BLOOM_LOAD, codecService.isLoadBloomFilter());
boolean requiresFlushing = false;
if (indexConcurrency != InternalEngine.this.indexConcurrency ||
!codecName.equals(InternalEngine.this.codecName) ||
failOnMergeFailure != InternalEngine.this.failOnMergeFailure ||
codecBloomLoad != codecService.isLoadBloomFilter()) {
rwl.readLock().lock();
try {
if (indexConcurrency != InternalEngine.this.indexConcurrency) {
logger.info("updating index.index_concurrency from [{}] to [{}]", InternalEngine.this.indexConcurrency, indexConcurrency);
InternalEngine.this.indexConcurrency = indexConcurrency;
// we have to flush in this case, since it only applies on a new index writer
requiresFlushing = true;
}
if (!codecName.equals(InternalEngine.this.codecName)) {
logger.info("updating index.codec from [{}] to [{}]", InternalEngine.this.codecName, codecName);
InternalEngine.this.codecName = codecName;
// we want to flush in this case, so the new codec will be reflected right away...
requiresFlushing = true;
}
if (failOnMergeFailure != InternalEngine.this.failOnMergeFailure) {
logger.info("updating {} from [{}] to [{}]", InternalEngine.INDEX_FAIL_ON_MERGE_FAILURE, InternalEngine.this.failOnMergeFailure, failOnMergeFailure);
InternalEngine.this.failOnMergeFailure = failOnMergeFailure;
}
if (codecBloomLoad != codecService.isLoadBloomFilter()) {
logger.info("updating {} from [{}] to [{}]", CodecService.INDEX_CODEC_BLOOM_LOAD, codecService.isLoadBloomFilter(), codecBloomLoad);
codecService.setLoadBloomFilter(codecBloomLoad);
// we need to flush in this case, to load/unload the bloom filters
requiresFlushing = true;
}
} finally {
rwl.readLock().unlock();
}
if (requiresFlushing) {
flush(new Flush().type(Flush.Type.NEW_WRITER));
}
}
}
}
private SearcherManager buildSearchManager(IndexWriter indexWriter) throws IOException {
return new SearcherManager(indexWriter, true, searcherFactory);
}
static class EngineSearcher implements Searcher {
private final String source;
private final IndexSearcher searcher;
private final SearcherManager manager;
private EngineSearcher(String source, IndexSearcher searcher, SearcherManager manager) {
this.source = source;
this.searcher = searcher;
this.manager = manager;
}
@Override
public String source() {
return this.source;
}
@Override
public IndexReader reader() {
return searcher.getIndexReader();
}
@Override
public IndexSearcher searcher() {
return searcher;
}
@Override
public boolean release() throws ElasticsearchException {
try {
manager.release(searcher);
return true;
} catch (IOException e) {
return false;
} catch (AlreadyClosedException e) {
/* this one can happen if we already closed the
* underlying store / directory and we call into the
* IndexWriter to free up pending files. */
return false;
}
}
}
static class VersionValue {
private final long version;
private final boolean delete;
private final long time;
private final Translog.Location translogLocation;
VersionValue(long version, boolean delete, long time, Translog.Location translogLocation) {
this.version = version;
this.delete = delete;
this.time = time;
this.translogLocation = translogLocation;
}
public long time() {
return this.time;
}
public long version() {
return version;
}
public boolean delete() {
return delete;
}
public Translog.Location translogLocation() {
return this.translogLocation;
}
}
class SearchFactory extends SearcherFactory {
@Override
public IndexSearcher newSearcher(IndexReader reader) throws IOException {
IndexSearcher searcher = new IndexSearcher(reader);
searcher.setSimilarity(similarityService.similarity());
if (warmer != null) {
// we need to pass a custom searcher that does not release anything on Engine.Searcher release,
// we will release explicitly
Searcher currentSearcher = null;
IndexSearcher newSearcher = null;
boolean closeNewSearcher = false;
try {
if (searcherManager == null) {
// fresh index writer, just warm all of it
newSearcher = searcher;
} else {
currentSearcher = acquireSearcher("search_factory");
// figure out the newSearcher, with only the new readers that are relevant for us
List<IndexReader> readers = Lists.newArrayList();
for (AtomicReaderContext newReaderContext : searcher.getIndexReader().leaves()) {
if (isMergedSegment(newReaderContext.reader())) {
// merged segments are already handled by IndexWriterConfig.setMergedSegmentWarmer
continue;
}
boolean found = false;
for (AtomicReaderContext currentReaderContext : currentSearcher.reader().leaves()) {
if (currentReaderContext.reader().getCoreCacheKey().equals(newReaderContext.reader().getCoreCacheKey())) {
found = true;
break;
}
}
if (!found) {
readers.add(newReaderContext.reader());
}
}
if (!readers.isEmpty()) {
// we don't want to close the inner readers, just increase ref on them
newSearcher = new IndexSearcher(new MultiReader(readers.toArray(new IndexReader[readers.size()]), false));
closeNewSearcher = true;
}
}
if (newSearcher != null) {
IndicesWarmer.WarmerContext context = new IndicesWarmer.WarmerContext(shardId,
new SimpleSearcher("warmer", newSearcher));
warmer.warm(context);
}
} catch (Throwable e) {
if (!closed) {
logger.warn("failed to prepare/warm", e);
}
} finally {
// no need to release the incoming full searcher, it is returned to the caller as-is...
if (currentSearcher != null) {
currentSearcher.release();
}
if (newSearcher != null && closeNewSearcher) {
IOUtils.closeWhileHandlingException(newSearcher.getIndexReader()); // ignore
}
}
}
return searcher;
}
}
private static final class RecoveryCounter {
private volatile int ongoingRecoveries = 0;
synchronized void increment() {
ongoingRecoveries++;
}
synchronized void decrement() {
ongoingRecoveries--;
if (ongoingRecoveries == 0) {
notifyAll(); // notify waiting threads - we only wait on ongoingRecoveries == 0
}
assert ongoingRecoveries >= 0 : "ongoingRecoveries must be >= 0 but was: " + ongoingRecoveries;
}
int get() {
// volatile read - no sync needed
return ongoingRecoveries;
}
synchronized int awaitNoRecoveries(long timeout) throws InterruptedException {
if (ongoingRecoveries > 0) { // no loop here - we either time out or we are done!
wait(timeout);
}
return ongoingRecoveries;
}
}
} | 1no label
| src_main_java_org_elasticsearch_index_engine_internal_InternalEngine.java |
561 | public class RoundRobinLB extends AbstractLoadBalancer {
private final AtomicInteger indexRef;
public RoundRobinLB() {
this((int) System.nanoTime());
}
public RoundRobinLB(int seed) {
indexRef = new AtomicInteger(seed);
}
@Override
public Member next() {
Member[] members = getMembers();
if (members == null || members.length == 0) {
return null;
}
int length = members.length;
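// the double modulo keeps the index non-negative even after the counter wraps around into negative values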
int index = (indexRef.getAndAdd(1) % length + length) % length;
return members[index];
}
} | 0true
| hazelcast-client_src_main_java_com_hazelcast_client_util_RoundRobinLB.java |
5,400 | @SuppressWarnings({"unchecked", "ForLoopReplaceableByForEach"})
public class AggregationContext implements ReaderContextAware, ScorerAware {
private final SearchContext searchContext;
private ObjectObjectOpenHashMap<String, FieldDataSource>[] perDepthFieldDataSources = new ObjectObjectOpenHashMap[4];
private List<ReaderContextAware> readerAwares = new ArrayList<ReaderContextAware>();
private List<ScorerAware> scorerAwares = new ArrayList<ScorerAware>();
private AtomicReaderContext reader;
private Scorer scorer;
public AggregationContext(SearchContext searchContext) {
this.searchContext = searchContext;
}
public SearchContext searchContext() {
return searchContext;
}
public CacheRecycler cacheRecycler() {
return searchContext.cacheRecycler();
}
public PageCacheRecycler pageCacheRecycler() {
return searchContext.pageCacheRecycler();
}
public AtomicReaderContext currentReader() {
return reader;
}
public Scorer currentScorer() {
return scorer;
}
public void setNextReader(AtomicReaderContext reader) {
this.reader = reader;
for (ReaderContextAware aware : readerAwares) {
aware.setNextReader(reader);
}
}
public void setScorer(Scorer scorer) {
this.scorer = scorer;
for (ScorerAware scorerAware : scorerAwares) {
scorerAware.setScorer(scorer);
}
}
/** Get a value source given its configuration and the depth of the aggregator in the aggregation tree. */
public <VS extends ValuesSource> VS valuesSource(ValuesSourceConfig<VS> config, int depth) {
assert config.valid() : "value source config is invalid - must have either a field context or a script or marked as unmapped";
assert !config.unmapped : "value source should not be created for unmapped fields";
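// field data sources are cached per aggregation-tree depth (and per field) so aggregators at the same depth can share them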
if (perDepthFieldDataSources.length <= depth) {
perDepthFieldDataSources = Arrays.copyOf(perDepthFieldDataSources, ArrayUtil.oversize(1 + depth, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
}
if (perDepthFieldDataSources[depth] == null) {
perDepthFieldDataSources[depth] = new ObjectObjectOpenHashMap<String, FieldDataSource>();
}
final ObjectObjectOpenHashMap<String, FieldDataSource> fieldDataSources = perDepthFieldDataSources[depth];
if (config.fieldContext == null) {
if (NumericValuesSource.class.isAssignableFrom(config.valueSourceType)) {
return (VS) numericScript(config);
}
if (BytesValuesSource.class.isAssignableFrom(config.valueSourceType)) {
return (VS) bytesScript(config);
}
throw new AggregationExecutionException("value source of type [" + config.valueSourceType.getSimpleName() + "] is not supported by scripts");
}
if (NumericValuesSource.class.isAssignableFrom(config.valueSourceType)) {
return (VS) numericField(fieldDataSources, config);
}
if (GeoPointValuesSource.class.isAssignableFrom(config.valueSourceType)) {
return (VS) geoPointField(fieldDataSources, config);
}
// falling back to bytes values
return (VS) bytesField(fieldDataSources, config);
}
private NumericValuesSource numericScript(ValuesSourceConfig<?> config) {
setScorerIfNeeded(config.script);
setReaderIfNeeded(config.script);
scorerAwares.add(config.script);
readerAwares.add(config.script);
FieldDataSource.Numeric source = new FieldDataSource.Numeric.Script(config.script, config.scriptValueType);
if (config.ensureUnique || config.ensureSorted) {
source = new FieldDataSource.Numeric.SortedAndUnique(source);
readerAwares.add((ReaderContextAware) source);
}
return new NumericValuesSource(source, config.formatter(), config.parser());
}
private NumericValuesSource numericField(ObjectObjectOpenHashMap<String, FieldDataSource> fieldDataSources, ValuesSourceConfig<?> config) {
FieldDataSource.Numeric dataSource = (FieldDataSource.Numeric) fieldDataSources.get(config.fieldContext.field());
if (dataSource == null) {
FieldDataSource.MetaData metaData = FieldDataSource.MetaData.load(config.fieldContext.indexFieldData(), searchContext);
dataSource = new FieldDataSource.Numeric.FieldData((IndexNumericFieldData<?>) config.fieldContext.indexFieldData(), metaData);
setReaderIfNeeded((ReaderContextAware) dataSource);
readerAwares.add((ReaderContextAware) dataSource);
fieldDataSources.put(config.fieldContext.field(), dataSource);
}
if (config.script != null) {
setScorerIfNeeded(config.script);
setReaderIfNeeded(config.script);
scorerAwares.add(config.script);
readerAwares.add(config.script);
dataSource = new FieldDataSource.Numeric.WithScript(dataSource, config.script);
if (config.ensureUnique || config.ensureSorted) {
dataSource = new FieldDataSource.Numeric.SortedAndUnique(dataSource);
readerAwares.add((ReaderContextAware) dataSource);
}
}
if (config.needsHashes) {
dataSource.setNeedsHashes(true);
}
return new NumericValuesSource(dataSource, config.formatter(), config.parser());
}
private ValuesSource bytesField(ObjectObjectOpenHashMap<String, FieldDataSource> fieldDataSources, ValuesSourceConfig<?> config) {
FieldDataSource dataSource = fieldDataSources.get(config.fieldContext.field());
if (dataSource == null) {
final IndexFieldData<?> indexFieldData = config.fieldContext.indexFieldData();
FieldDataSource.MetaData metaData = FieldDataSource.MetaData.load(config.fieldContext.indexFieldData(), searchContext);
if (indexFieldData instanceof IndexFieldData.WithOrdinals) {
dataSource = new FieldDataSource.Bytes.WithOrdinals.FieldData((IndexFieldData.WithOrdinals) indexFieldData, metaData);
} else {
dataSource = new FieldDataSource.Bytes.FieldData(indexFieldData, metaData);
}
setReaderIfNeeded((ReaderContextAware) dataSource);
readerAwares.add((ReaderContextAware) dataSource);
fieldDataSources.put(config.fieldContext.field(), dataSource);
}
if (config.script != null) {
setScorerIfNeeded(config.script);
setReaderIfNeeded(config.script);
scorerAwares.add(config.script);
readerAwares.add(config.script);
dataSource = new FieldDataSource.WithScript(dataSource, config.script);
}
// Even in case we wrap field data, we might still need to wrap for sorting, because the wrapped field data might be
// eg. a numeric field data that doesn't sort according to the byte order. However field data values are unique so no
// need to wrap for uniqueness
if ((config.ensureUnique && !dataSource.metaData().uniqueness().unique()) || config.ensureSorted) {
dataSource = new FieldDataSource.Bytes.SortedAndUnique(dataSource);
readerAwares.add((ReaderContextAware) dataSource);
}
if (config.needsHashes) { // the data source needs hash if at least one consumer needs hashes
dataSource.setNeedsHashes(true);
}
if (dataSource instanceof FieldDataSource.Bytes.WithOrdinals) {
return new BytesValuesSource.WithOrdinals((FieldDataSource.Bytes.WithOrdinals) dataSource);
} else {
return new BytesValuesSource(dataSource);
}
}
private BytesValuesSource bytesScript(ValuesSourceConfig<?> config) {
setScorerIfNeeded(config.script);
setReaderIfNeeded(config.script);
scorerAwares.add(config.script);
readerAwares.add(config.script);
FieldDataSource.Bytes source = new FieldDataSource.Bytes.Script(config.script);
if (config.ensureUnique || config.ensureSorted) {
source = new FieldDataSource.Bytes.SortedAndUnique(source);
readerAwares.add((ReaderContextAware) source);
}
return new BytesValuesSource(source);
}
private GeoPointValuesSource geoPointField(ObjectObjectOpenHashMap<String, FieldDataSource> fieldDataSources, ValuesSourceConfig<?> config) {
FieldDataSource.GeoPoint dataSource = (FieldDataSource.GeoPoint) fieldDataSources.get(config.fieldContext.field());
if (dataSource == null) {
FieldDataSource.MetaData metaData = FieldDataSource.MetaData.load(config.fieldContext.indexFieldData(), searchContext);
dataSource = new FieldDataSource.GeoPoint((IndexGeoPointFieldData<?>) config.fieldContext.indexFieldData(), metaData);
setReaderIfNeeded(dataSource);
readerAwares.add(dataSource);
fieldDataSources.put(config.fieldContext.field(), dataSource);
}
if (config.needsHashes) {
dataSource.setNeedsHashes(true);
}
return new GeoPointValuesSource(dataSource);
}
public void registerReaderContextAware(ReaderContextAware readerContextAware) {
setReaderIfNeeded(readerContextAware);
readerAwares.add(readerContextAware);
}
public void registerScorerAware(ScorerAware scorerAware) {
setScorerIfNeeded(scorerAware);
scorerAwares.add(scorerAware);
}
private void setReaderIfNeeded(ReaderContextAware readerAware) {
if (reader != null) {
readerAware.setNextReader(reader);
}
}
private void setScorerIfNeeded(ScorerAware scorerAware) {
if (scorer != null) {
scorerAware.setScorer(scorer);
}
}
} | 1no label
| src_main_java_org_elasticsearch_search_aggregations_support_AggregationContext.java |
863 | public class CandidateItemOfferAnswer implements IAnswer<CandidateItemOffer> {
@Override
public CandidateItemOffer answer() throws Throwable {
return new CandidateItemOfferImpl();
}
} | 0true
| core_broadleaf-framework_src_test_java_org_broadleafcommerce_core_offer_service_OfferServiceTest.java |
1,343 | public class NodeIndexDeletedAction extends AbstractComponent {
private final ThreadPool threadPool;
private final TransportService transportService;
private final List<Listener> listeners = new CopyOnWriteArrayList<Listener>();
@Inject
public NodeIndexDeletedAction(Settings settings, ThreadPool threadPool, TransportService transportService) {
super(settings);
this.threadPool = threadPool;
this.transportService = transportService;
transportService.registerHandler(NodeIndexDeletedTransportHandler.ACTION, new NodeIndexDeletedTransportHandler());
transportService.registerHandler(NodeIndexStoreDeletedTransportHandler.ACTION, new NodeIndexStoreDeletedTransportHandler());
}
public void add(Listener listener) {
listeners.add(listener);
}
public void remove(Listener listener) {
listeners.remove(listener);
}
public void nodeIndexDeleted(final ClusterState clusterState, final String index, final String nodeId) throws ElasticsearchException {
DiscoveryNodes nodes = clusterState.nodes();
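// on the master we notify listeners directly; otherwise the message is forwarded to the current master node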
if (nodes.localNodeMaster()) {
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
innerNodeIndexDeleted(index, nodeId);
}
});
} else {
transportService.sendRequest(clusterState.nodes().masterNode(),
NodeIndexDeletedTransportHandler.ACTION, new NodeIndexDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME);
}
}
public void nodeIndexStoreDeleted(final ClusterState clusterState, final String index, final String nodeId) throws ElasticsearchException {
DiscoveryNodes nodes = clusterState.nodes();
if (nodes.localNodeMaster()) {
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
innerNodeIndexStoreDeleted(index, nodeId);
}
});
} else {
transportService.sendRequest(clusterState.nodes().masterNode(),
NodeIndexStoreDeletedTransportHandler.ACTION, new NodeIndexStoreDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME);
}
}
private void innerNodeIndexDeleted(String index, String nodeId) {
for (Listener listener : listeners) {
listener.onNodeIndexDeleted(index, nodeId);
}
}
private void innerNodeIndexStoreDeleted(String index, String nodeId) {
for (Listener listener : listeners) {
listener.onNodeIndexStoreDeleted(index, nodeId);
}
}
public static interface Listener {
void onNodeIndexDeleted(String index, String nodeId);
void onNodeIndexStoreDeleted(String index, String nodeId);
}
private class NodeIndexDeletedTransportHandler extends BaseTransportRequestHandler<NodeIndexDeletedMessage> {
static final String ACTION = "cluster/nodeIndexDeleted";
@Override
public NodeIndexDeletedMessage newInstance() {
return new NodeIndexDeletedMessage();
}
@Override
public void messageReceived(NodeIndexDeletedMessage message, TransportChannel channel) throws Exception {
innerNodeIndexDeleted(message.index, message.nodeId);
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
}
private class NodeIndexStoreDeletedTransportHandler extends BaseTransportRequestHandler<NodeIndexStoreDeletedMessage> {
static final String ACTION = "cluster/nodeIndexStoreDeleted";
@Override
public NodeIndexStoreDeletedMessage newInstance() {
return new NodeIndexStoreDeletedMessage();
}
@Override
public void messageReceived(NodeIndexStoreDeletedMessage message, TransportChannel channel) throws Exception {
innerNodeIndexStoreDeleted(message.index, message.nodeId);
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
}
static class NodeIndexDeletedMessage extends TransportRequest {
String index;
String nodeId;
NodeIndexDeletedMessage() {
}
NodeIndexDeletedMessage(String index, String nodeId) {
this.index = index;
this.nodeId = nodeId;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(index);
out.writeString(nodeId);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
index = in.readString();
nodeId = in.readString();
}
}
static class NodeIndexStoreDeletedMessage extends TransportRequest {
String index;
String nodeId;
NodeIndexStoreDeletedMessage() {
}
NodeIndexStoreDeletedMessage(String index, String nodeId) {
this.index = index;
this.nodeId = nodeId;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(index);
out.writeString(nodeId);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
index = in.readString();
nodeId = in.readString();
}
}
} | 0true
| src_main_java_org_elasticsearch_cluster_action_index_NodeIndexDeletedAction.java |
516 | public class ODatabaseException extends OException {
private static final long serialVersionUID = -2655748565531836968L;
public ODatabaseException(String string) {
super(string);
}
public ODatabaseException(String message, Throwable cause) {
super(message, cause);
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_exception_ODatabaseException.java |
643 | public class BroadleafResourceHttpRequestHandler extends ResourceHttpRequestHandler {
private static final Log LOG = LogFactory.getLog(BroadleafResourceHttpRequestHandler.class);
// XML Configured generated resource handlers
protected List<AbstractGeneratedResourceHandler> handlers;
@javax.annotation.Resource(name = "blResourceBundlingService")
protected ResourceBundlingService bundlingService;
/**
* Checks to see if the requested path corresponds to a registered bundle. If so, returns the generated bundle.
* Otherwise, checks to see if any of the configured GeneratedResourceHandlers can handle the given request.
* If neither of those cases match, delegates to the normal ResourceHttpRequestHandler
*/
@Override
protected Resource getResource(HttpServletRequest request) {
String path = (String) request.getAttribute(HandlerMapping.PATH_WITHIN_HANDLER_MAPPING_ATTRIBUTE);
if (bundlingService.hasBundle(path)) {
return bundlingService.getBundle(path);
}
if (handlers != null) {
for (AbstractGeneratedResourceHandler handler : handlers) {
if (handler.canHandle(path)) {
return handler.getResource(path, getLocations());
}
}
}
return super.getResource(request);
}
public boolean isBundleRequest(HttpServletRequest request) {
String path = (String) request.getAttribute(HandlerMapping.PATH_WITHIN_HANDLER_MAPPING_ATTRIBUTE);
return bundlingService.hasBundle(path);
}
/**
* @return a clone of the locations list that is in {@link ResourceHttpRequestHandler}. Note that we must use
* reflection to access this field as it is marked private.
*/
@SuppressWarnings("unchecked")
public List<Resource> getLocations() {
try {
List<Resource> locations = (List<Resource>) FieldUtils.readField(this, "locations", true);
return new ArrayList<Resource>(locations);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
/* *********** */
/* BOILERPLATE */
/* *********** */
public List<AbstractGeneratedResourceHandler> getHandlers() {
return handlers;
}
public void setHandlers(List<AbstractGeneratedResourceHandler> handlers) {
this.handlers = handlers;
}
} | 1no label
| common_src_main_java_org_broadleafcommerce_common_web_resource_BroadleafResourceHttpRequestHandler.java |
301 | public enum WriteConsistencyLevel {
DEFAULT((byte) 0),
ONE((byte) 1),
QUORUM((byte) 2),
ALL((byte) 3);
private final byte id;
WriteConsistencyLevel(byte id) {
this.id = id;
}
public byte id() {
return id;
}
public static WriteConsistencyLevel fromId(byte value) {
if (value == 0) {
return DEFAULT;
} else if (value == 1) {
return ONE;
} else if (value == 2) {
return QUORUM;
} else if (value == 3) {
return ALL;
}
throw new ElasticsearchIllegalArgumentException("No write consistency match [" + value + "]");
}
public static WriteConsistencyLevel fromString(String value) {
if (value.equals("default")) {
return DEFAULT;
} else if (value.equals("one")) {
return ONE;
} else if (value.equals("quorum")) {
return QUORUM;
} else if (value.equals("all")) {
return ALL;
}
throw new ElasticsearchIllegalArgumentException("No write consistency match [" + value + "]");
}
} | 0true
| src_main_java_org_elasticsearch_action_WriteConsistencyLevel.java |
1,653 | public class OLocalQueryExecutor extends OQueryExecutor {
public OLocalQueryExecutor(OCommandRequestText iCommand, OStorageEmbedded wrapped) {
super(iCommand, wrapped);
}
@Override
public Object execute() {
return wrapped.command(iCommand);
}
} | 0true
| distributed_src_main_java_com_orientechnologies_orient_server_hazelcast_oldsharding_OLocalQueryExecutor.java |
826 | public class SetBackupOperation extends AtomicLongBaseOperation implements BackupOperation {
private long newValue;
public SetBackupOperation() {
}
public SetBackupOperation(String name, long newValue) {
super(name);
this.newValue = newValue;
}
@Override
public void run() throws Exception {
LongWrapper number = getNumber();
number.set(newValue);
}
@Override
public int getId() {
return AtomicLongDataSerializerHook.SET_BACKUP;
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeLong(newValue);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
newValue = in.readLong();
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_concurrent_atomiclong_operations_SetBackupOperation.java |
741 | public class ProductOptionType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, ProductOptionType> TYPES = new LinkedHashMap<String, ProductOptionType>();
public static final ProductOptionType COLOR = new ProductOptionType("COLOR","Color");
public static final ProductOptionType SIZE = new ProductOptionType("SIZE","Size");
public static final ProductOptionType DATE = new ProductOptionType("DATE","Date");
public static final ProductOptionType TEXT = new ProductOptionType("TEXT","Text");
public static final ProductOptionType BOOLEAN = new ProductOptionType("BOOLEAN","Boolean");
public static final ProductOptionType DECIMAL = new ProductOptionType("DECIMAL","Decimal");
public static final ProductOptionType INTEGER = new ProductOptionType("INTEGER","Integer");
public static final ProductOptionType INPUT = new ProductOptionType("INPUT","Input");
public static final ProductOptionType PRODUCT = new ProductOptionType("PRODUCT","Product");
public static ProductOptionType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public ProductOptionType() {
//do nothing
}
public ProductOptionType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
private void setType(final String type) {
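// register each instance in the static TYPES map so getInstance can resolve it later (extensible enum pattern)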
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
ProductOptionType other = (ProductOptionType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
} | 1no label
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_catalog_service_type_ProductOptionType.java |
3,348 | static class GeoPointValuesSingleFixedSet extends GeoPointValues {
private final GeoPointFieldMapper.Encoding encoding;
private final PagedMutable lat, lon;
private final FixedBitSet set;
private final GeoPoint scratch = new GeoPoint();
GeoPointValuesSingleFixedSet(GeoPointFieldMapper.Encoding encoding, PagedMutable lon, PagedMutable lat, FixedBitSet set) {
super(false);
this.encoding = encoding;
this.lon = lon;
this.lat = lat;
this.set = set;
}
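// the FixedBitSet marks which documents have a point, so setDocument reports either 0 or 1 values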
@Override
public int setDocument(int docId) {
this.docId = docId;
return set.get(docId) ? 1 : 0;
}
@Override
public GeoPoint nextValue() {
return encoding.decode(lat.get(docId), lon.get(docId), scratch);
}
} | 0true
| src_main_java_org_elasticsearch_index_fielddata_plain_GeoPointCompressedAtomicFieldData.java |
2,281 | public class DequeRecycler<T> extends AbstractRecycler<T> {
final Deque<T> deque;
final int maxSize;
public DequeRecycler(C<T> c, Deque<T> queue, int maxSize) {
super(c);
this.deque = queue;
this.maxSize = maxSize;
}
@Override
public void close() {
deque.clear();
}
@Override
public V<T> obtain(int sizing) {
final T v = deque.pollFirst();
if (v == null) {
return new DV(c.newInstance(sizing), false);
}
return new DV(v, true);
}
/** Called before releasing an object, returns true if the object should be recycled and false otherwise. */
protected boolean beforeRelease() {
return deque.size() < maxSize;
}
/** Called after a release. */
protected void afterRelease(boolean recycled) {}
private class DV implements Recycler.V<T> {
T value;
final boolean recycled;
DV(T value, boolean recycled) {
this.value = value;
this.recycled = recycled;
}
@Override
public T v() {
return value;
}
@Override
public boolean isRecycled() {
return recycled;
}
@Override
public boolean release() {
if (value == null) {
throw new ElasticsearchIllegalStateException("recycler entry already released...");
}
final boolean recycle = beforeRelease();
if (recycle) {
c.clear(value);
deque.addFirst(value);
}
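// null out the reference so a second release on this entry trips the check above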
value = null;
afterRelease(recycle);
return true;
}
}
} | 0true
| src_main_java_org_elasticsearch_common_recycler_DequeRecycler.java |
1,473 | public class OSQLFunctionInV extends OSQLFunctionMove {
public static final String NAME = "inV";
public OSQLFunctionInV() {
super(NAME, 0, 1);
}
@Override
protected Object move(final OrientBaseGraph graph, final OIdentifiable iRecord, final String[] iLabels) {
return e2v(graph, iRecord, Direction.IN, iLabels);
}
} | 1no label
| graphdb_src_main_java_com_orientechnologies_orient_graph_sql_functions_OSQLFunctionInV.java |
2,625 | public interface PingListener {
void onPing(PingResponse[] pings);
} | 0true
| src_main_java_org_elasticsearch_discovery_zen_ping_ZenPing.java |
743 | @Test
public class SBTreeTest {
private static final int KEYS_COUNT = 500000;
private ODatabaseDocumentTx databaseDocumentTx;
protected OSBTree<Integer, OIdentifiable> sbTree;
private String buildDirectory;
@BeforeClass
public void beforeClass() {
buildDirectory = System.getProperty("buildDirectory");
if (buildDirectory == null)
buildDirectory = ".";
databaseDocumentTx = new ODatabaseDocumentTx("plocal:" + buildDirectory + "/localSBTreeTest");
if (databaseDocumentTx.exists()) {
databaseDocumentTx.open("admin", "admin");
databaseDocumentTx.drop();
}
databaseDocumentTx.create();
sbTree = new OSBTree<Integer, OIdentifiable>(".sbt", 1, false);
sbTree.create("sbTree", OIntegerSerializer.INSTANCE, OLinkSerializer.INSTANCE, null, (OStorageLocalAbstract) databaseDocumentTx
.getStorage().getUnderlying());
}
@AfterMethod
public void afterMethod() throws Exception {
sbTree.clear();
}
@AfterClass
public void afterClass() throws Exception {
sbTree.clear();
sbTree.delete();
databaseDocumentTx.drop();
}
public void testKeyPut() throws Exception {
for (int i = 0; i < KEYS_COUNT; i++) {
sbTree.put(i, new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)));
}
for (int i = 0; i < KEYS_COUNT; i++)
Assert.assertEquals(sbTree.get(i), new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)), i
+ " key is absent");
Assert.assertEquals(0, (int) sbTree.firstKey());
Assert.assertEquals(KEYS_COUNT - 1, (int) sbTree.lastKey());
for (int i = KEYS_COUNT; i < 2 * KEYS_COUNT; i++)
Assert.assertNull(sbTree.get(i));
}
public void testKeyPutRandomUniform() throws Exception {
final NavigableSet<Integer> keys = new TreeSet<Integer>();
final MersenneTwisterFast random = new MersenneTwisterFast();
while (keys.size() < KEYS_COUNT) {
int key = random.nextInt(Integer.MAX_VALUE);
sbTree.put(key, new ORecordId(key % 32000, OClusterPositionFactory.INSTANCE.valueOf(key)));
keys.add(key);
Assert.assertEquals(sbTree.get(key), new ORecordId(key % 32000, OClusterPositionFactory.INSTANCE.valueOf(key)));
}
Assert.assertEquals(sbTree.firstKey(), keys.first());
Assert.assertEquals(sbTree.lastKey(), keys.last());
for (int key : keys)
Assert.assertEquals(sbTree.get(key), new ORecordId(key % 32000, OClusterPositionFactory.INSTANCE.valueOf(key)));
}
public void testKeyPutRandomGaussian() throws Exception {
NavigableSet<Integer> keys = new TreeSet<Integer>();
long seed = System.currentTimeMillis();
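// print the seed so a failing randomized run can be reproduced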
System.out.println("testKeyPutRandomGaussian seed : " + seed);
MersenneTwisterFast random = new MersenneTwisterFast(seed);
while (keys.size() < KEYS_COUNT) {
int key = (int) (random.nextGaussian() * Integer.MAX_VALUE / 2 + Integer.MAX_VALUE);
if (key < 0)
continue;
sbTree.put(key, new ORecordId(key % 32000, OClusterPositionFactory.INSTANCE.valueOf(key)));
keys.add(key);
Assert.assertEquals(sbTree.get(key), new ORecordId(key % 32000, OClusterPositionFactory.INSTANCE.valueOf(key)));
}
Assert.assertEquals(sbTree.firstKey(), keys.first());
Assert.assertEquals(sbTree.lastKey(), keys.last());
for (int key : keys)
Assert.assertEquals(sbTree.get(key), new ORecordId(key % 32000, OClusterPositionFactory.INSTANCE.valueOf(key)));
}
public void testKeyDeleteRandomUniform() throws Exception {
NavigableSet<Integer> keys = new TreeSet<Integer>();
for (int i = 0; i < KEYS_COUNT; i++) {
sbTree.put(i, new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)));
keys.add(i);
}
Iterator<Integer> keysIterator = keys.iterator();
while (keysIterator.hasNext()) {
int key = keysIterator.next();
if (key % 3 == 0) {
sbTree.remove(key);
keysIterator.remove();
}
}
Assert.assertEquals(sbTree.firstKey(), keys.first());
Assert.assertEquals(sbTree.lastKey(), keys.last());
for (int key : keys) {
if (key % 3 == 0) {
Assert.assertNull(sbTree.get(key));
} else {
Assert.assertEquals(sbTree.get(key), new ORecordId(key % 32000, OClusterPositionFactory.INSTANCE.valueOf(key)));
}
}
}
public void testKeyDeleteRandomGaussian() throws Exception {
NavigableSet<Integer> keys = new TreeSet<Integer>();
long seed = System.currentTimeMillis();
System.out.println("testKeyDeleteRandomGaussian seed : " + seed);
MersenneTwisterFast random = new MersenneTwisterFast(seed);
while (keys.size() < KEYS_COUNT) {
int key = (int) (random.nextGaussian() * Integer.MAX_VALUE / 2 + Integer.MAX_VALUE);
if (key < 0)
continue;
sbTree.put(key, new ORecordId(key % 32000, OClusterPositionFactory.INSTANCE.valueOf(key)));
keys.add(key);
Assert.assertEquals(sbTree.get(key), new ORecordId(key % 32000, OClusterPositionFactory.INSTANCE.valueOf(key)));
}
Iterator<Integer> keysIterator = keys.iterator();
while (keysIterator.hasNext()) {
int key = keysIterator.next();
if (key % 3 == 0) {
sbTree.remove(key);
keysIterator.remove();
}
}
Assert.assertEquals(sbTree.firstKey(), keys.first());
Assert.assertEquals(sbTree.lastKey(), keys.last());
for (int key : keys) {
if (key % 3 == 0) {
Assert.assertNull(sbTree.get(key));
} else {
Assert.assertEquals(sbTree.get(key), new ORecordId(key % 32000, OClusterPositionFactory.INSTANCE.valueOf(key)));
}
}
}
public void testKeyDelete() throws Exception {
for (int i = 0; i < KEYS_COUNT; i++) {
sbTree.put(i, new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)));
}
for (int i = 0; i < KEYS_COUNT; i++) {
if (i % 3 == 0)
Assert.assertEquals(sbTree.remove(i), new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)));
}
Assert.assertEquals((int) sbTree.firstKey(), 1);
Assert.assertEquals((int) sbTree.lastKey(), (KEYS_COUNT - 1) % 3 == 0 ? KEYS_COUNT - 2 : KEYS_COUNT - 1);
for (int i = 0; i < KEYS_COUNT; i++) {
if (i % 3 == 0)
Assert.assertNull(sbTree.get(i));
else
Assert.assertEquals(sbTree.get(i), new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)));
}
}
public void testKeyAddDelete() throws Exception {
for (int i = 0; i < KEYS_COUNT; i++) {
sbTree.put(i, new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)));
Assert.assertEquals(sbTree.get(i), new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)));
}
for (int i = 0; i < KEYS_COUNT; i++) {
if (i % 3 == 0)
Assert.assertEquals(sbTree.remove(i), new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)));
if (i % 2 == 0)
sbTree.put(KEYS_COUNT + i,
new ORecordId((KEYS_COUNT + i) % 32000, OClusterPositionFactory.INSTANCE.valueOf(KEYS_COUNT + i)));
}
Assert.assertEquals((int) sbTree.firstKey(), 1);
Assert.assertEquals((int) sbTree.lastKey(), 2 * KEYS_COUNT - 2);
for (int i = 0; i < KEYS_COUNT; i++) {
if (i % 3 == 0)
Assert.assertNull(sbTree.get(i));
else
Assert.assertEquals(sbTree.get(i), new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)));
if (i % 2 == 0)
Assert.assertEquals(sbTree.get(KEYS_COUNT + i),
new ORecordId((KEYS_COUNT + i) % 32000, OClusterPositionFactory.INSTANCE.valueOf(KEYS_COUNT + i)));
}
}
public void testValuesMajor() {
NavigableMap<Integer, ORID> keyValues = new TreeMap<Integer, ORID>();
MersenneTwisterFast random = new MersenneTwisterFast();
while (keyValues.size() < KEYS_COUNT) {
int key = random.nextInt(Integer.MAX_VALUE);
sbTree.put(key, new ORecordId(key % 32000, OClusterPositionFactory.INSTANCE.valueOf(key)));
keyValues.put(key, new ORecordId(key % 32000, OClusterPositionFactory.INSTANCE.valueOf(key)));
}
assertMajorValues(keyValues, random, true);
assertMajorValues(keyValues, random, false);
Assert.assertEquals(sbTree.firstKey(), keyValues.firstKey());
Assert.assertEquals(sbTree.lastKey(), keyValues.lastKey());
}
public void testValuesMinor() {
NavigableMap<Integer, ORID> keyValues = new TreeMap<Integer, ORID>();
MersenneTwisterFast random = new MersenneTwisterFast();
while (keyValues.size() < KEYS_COUNT) {
int key = random.nextInt(Integer.MAX_VALUE);
sbTree.put(key, new ORecordId(key % 32000, OClusterPositionFactory.INSTANCE.valueOf(key)));
keyValues.put(key, new ORecordId(key % 32000, OClusterPositionFactory.INSTANCE.valueOf(key)));
}
assertMinorValues(keyValues, random, true);
assertMinorValues(keyValues, random, false);
Assert.assertEquals(sbTree.firstKey(), keyValues.firstKey());
Assert.assertEquals(sbTree.lastKey(), keyValues.lastKey());
}
public void testValuesBetween() {
NavigableMap<Integer, ORID> keyValues = new TreeMap<Integer, ORID>();
MersenneTwisterFast random = new MersenneTwisterFast();
while (keyValues.size() < KEYS_COUNT) {
int key = random.nextInt(Integer.MAX_VALUE);
sbTree.put(key, new ORecordId(key % 32000, OClusterPositionFactory.INSTANCE.valueOf(key)));
keyValues.put(key, new ORecordId(key % 32000, OClusterPositionFactory.INSTANCE.valueOf(key)));
}
assertBetweenValues(keyValues, random, true, true);
assertBetweenValues(keyValues, random, true, false);
assertBetweenValues(keyValues, random, false, true);
assertBetweenValues(keyValues, random, false, false);
Assert.assertEquals(sbTree.firstKey(), keyValues.firstKey());
Assert.assertEquals(sbTree.lastKey(), keyValues.lastKey());
}
public void testAddKeyValuesInTwoBucketsAndMakeFirstEmpty() throws Exception {
for (int i = 0; i < 5167; i++)
sbTree.put(i, new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)));
for (int i = 0; i < 3500; i++)
sbTree.remove(i);
Assert.assertEquals((int) sbTree.firstKey(), 3500);
for (int i = 0; i < 3500; i++)
Assert.assertNull(sbTree.get(i));
for (int i = 3500; i < 5167; i++)
Assert.assertEquals(sbTree.get(i), new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)));
}
public void testAddKeyValuesInTwoBucketsAndMakeLastEmpty() throws Exception {
for (int i = 0; i < 5167; i++)
sbTree.put(i, new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)));
for (int i = 5166; i > 1700; i--)
sbTree.remove(i);
Assert.assertEquals((int) sbTree.lastKey(), 1700);
for (int i = 5166; i > 1700; i--)
Assert.assertNull(sbTree.get(i));
for (int i = 1700; i >= 0; i--)
Assert.assertEquals(sbTree.get(i), new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)));
}
public void testAddKeyValuesAndRemoveFirstMiddleAndLastPages() throws Exception {
for (int i = 0; i < 12055; i++)
sbTree.put(i, new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)));
for (int i = 0; i < 1730; i++)
sbTree.remove(i);
for (int i = 3440; i < 6900; i++)
sbTree.remove(i);
for (int i = 8600; i < 12055; i++)
sbTree.remove(i);
Assert.assertEquals((int) sbTree.firstKey(), 1730);
Assert.assertEquals((int) sbTree.lastKey(), 8599);
Collection<OIdentifiable> result = sbTree.getValuesMinor(7200, true, -1);
Set<OIdentifiable> identifiables = new HashSet<OIdentifiable>(result);
for (int i = 7200; i >= 6900; i--) {
boolean removed = identifiables.remove(new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)));
Assert.assertTrue(removed);
}
for (int i = 3439; i >= 1730; i--) {
boolean removed = identifiables.remove(new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)));
Assert.assertTrue(removed);
}
Assert.assertTrue(identifiables.isEmpty());
result = sbTree.getValuesMajor(1740, true, -1);
identifiables = new HashSet<OIdentifiable>(result);
for (int i = 1740; i < 3440; i++) {
boolean removed = identifiables.remove(new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)));
Assert.assertTrue(removed);
}
for (int i = 6900; i < 8600; i++) {
boolean removed = identifiables.remove(new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)));
Assert.assertTrue(removed);
}
Assert.assertTrue(identifiables.isEmpty());
result = sbTree.getValuesBetween(1740, true, 7200, true, -1);
identifiables = new HashSet<OIdentifiable>(result);
for (int i = 1740; i < 3440; i++) {
boolean removed = identifiables.remove(new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)));
Assert.assertTrue(removed);
}
for (int i = 6900; i <= 7200; i++) {
boolean removed = identifiables.remove(new ORecordId(i % 32000, OClusterPositionFactory.INSTANCE.valueOf(i)));
Assert.assertTrue(removed);
}
Assert.assertTrue(identifiables.isEmpty());
}
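// The assert* helpers below cross-check the tree's range queries against the TreeMap reference populated by each test, including the maxValuesToFetch cap.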
private void assertMajorValues(NavigableMap<Integer, ORID> keyValues, MersenneTwisterFast random, boolean keyInclusive) {
for (int i = 0; i < 100; i++) {
int upperBorder = keyValues.lastKey() + 5000;
int fromKey;
if (upperBorder > 0)
fromKey = random.nextInt(upperBorder);
else
fromKey = random.nextInt(Integer.MAX_VALUE);
if (random.nextBoolean()) {
Integer includedKey = keyValues.ceilingKey(fromKey);
if (includedKey != null)
fromKey = includedKey;
else
fromKey = keyValues.floorKey(fromKey);
}
int maxValuesToFetch = 10000;
Collection<OIdentifiable> orids = sbTree.getValuesMajor(fromKey, keyInclusive, maxValuesToFetch);
Set<OIdentifiable> result = new HashSet<OIdentifiable>(orids);
Iterator<ORID> valuesIterator = keyValues.tailMap(fromKey, keyInclusive).values().iterator();
int fetchedValues = 0;
while (valuesIterator.hasNext() && fetchedValues < maxValuesToFetch) {
ORID value = valuesIterator.next();
Assert.assertTrue(result.remove(value));
fetchedValues++;
}
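// If the reference iterator still has entries, the tree must have stopped exactly at the fetch limit.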
if (valuesIterator.hasNext())
Assert.assertEquals(fetchedValues, maxValuesToFetch);
Assert.assertEquals(result.size(), 0);
}
}
private void assertMinorValues(NavigableMap<Integer, ORID> keyValues, MersenneTwisterFast random, boolean keyInclusive) {
for (int i = 0; i < 100; i++) {
int upperBorder = keyValues.lastKey() + 5000;
int toKey;
if (upperBorder > 0)
toKey = random.nextInt(upperBorder) - 5000;
else
toKey = random.nextInt(Integer.MAX_VALUE) - 5000;
if (random.nextBoolean()) {
Integer includedKey = keyValues.ceilingKey(toKey);
if (includedKey != null)
toKey = includedKey;
else
toKey = keyValues.floorKey(toKey);
}
int maxValuesToFetch = 10000;
Collection<OIdentifiable> orids = sbTree.getValuesMinor(toKey, keyInclusive, maxValuesToFetch);
Set<OIdentifiable> result = new HashSet<OIdentifiable>(orids);
Iterator<ORID> valuesIterator = keyValues.headMap(toKey, keyInclusive).descendingMap().values().iterator();
int fetchedValues = 0;
while (valuesIterator.hasNext() && fetchedValues < maxValuesToFetch) {
ORID value = valuesIterator.next();
Assert.assertTrue(result.remove(value));
fetchedValues++;
}
if (valuesIterator.hasNext())
Assert.assertEquals(fetchedValues, maxValuesToFetch);
Assert.assertEquals(result.size(), 0);
}
}
private void assertBetweenValues(NavigableMap<Integer, ORID> keyValues, MersenneTwisterFast random, boolean fromInclusive,
boolean toInclusive) {
for (int i = 0; i < 100; i++) {
int upperBorder = keyValues.lastKey() + 5000;
int fromKey;
if (upperBorder > 0)
fromKey = random.nextInt(upperBorder);
else
fromKey = random.nextInt(Integer.MAX_VALUE - 1);
if (random.nextBoolean()) {
Integer includedKey = keyValues.ceilingKey(fromKey);
if (includedKey != null)
fromKey = includedKey;
else
fromKey = keyValues.floorKey(fromKey);
}
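// Pick a toKey above fromKey; the addition can overflow to a negative value, so clamp it to Integer.MAX_VALUE.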
int toKey = random.nextInt() + fromKey + 1;
if (toKey < 0)
toKey = Integer.MAX_VALUE;
if (random.nextBoolean()) {
Integer includedKey = keyValues.ceilingKey(toKey);
if (includedKey != null)
toKey = includedKey;
else
toKey = keyValues.floorKey(toKey);
}
if (fromKey > toKey)
toKey = fromKey;
int maxValuesToFetch = 10000;
Collection<OIdentifiable> orids = sbTree.getValuesBetween(fromKey, fromInclusive, toKey, toInclusive, maxValuesToFetch);
Set<OIdentifiable> result = new HashSet<OIdentifiable>(orids);
Iterator<ORID> valuesIterator = keyValues.subMap(fromKey, fromInclusive, toKey, toInclusive).values().iterator();
int fetchedValues = 0;
while (valuesIterator.hasNext() && fetchedValues < maxValuesToFetch) {
ORID value = valuesIterator.next();
Assert.assertTrue(result.remove(value));
fetchedValues++;
}
if (valuesIterator.hasNext())
Assert.assertEquals(fetchedValues, maxValuesToFetch);
Assert.assertEquals(result.size(), 0);
}
}
} | 0true
| core_src_test_java_com_orientechnologies_orient_core_index_sbtree_local_SBTreeTest.java |
577 | public static final class Defaults {
public static final boolean WAIT_FOR_MERGE = true;
public static final int MAX_NUM_SEGMENTS = -1;
public static final boolean ONLY_EXPUNGE_DELETES = false;
public static final boolean FLUSH = true;
} | 0true
| src_main_java_org_elasticsearch_action_admin_indices_optimize_OptimizeRequest.java |
887 | public class OQueryContext {
protected ODocument initialRecord;
protected OQuery<ODocument> sourceQuery;
public void setRecord(final ODocument iRecord) {
this.initialRecord = iRecord;
}
public void setSourceQuery(final OQuery<ODocument> sourceQuery) {
this.sourceQuery = sourceQuery;
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_query_OQueryContext.java |
2,549 | public final class ClassLoaderUtil {
public static final String HAZELCAST_BASE_PACKAGE = "com.hazelcast.";
public static final String HAZELCAST_ARRAY = "[L" + HAZELCAST_BASE_PACKAGE;
private static final Map<String, Class> PRIMITIVE_CLASSES;
private static final int MAX_PRIM_CLASSNAME_LENGTH = 7;
private static final ConstructorCache CONSTRUCTOR_CACHE = new ConstructorCache();
static {
final Map<String, Class> primitives = new HashMap<String, Class>(10, 1.0f);
primitives.put("boolean", boolean.class);
primitives.put("byte", byte.class);
primitives.put("int", int.class);
primitives.put("long", long.class);
primitives.put("short", short.class);
primitives.put("float", float.class);
primitives.put("double", double.class);
primitives.put("char", char.class);
primitives.put("void", void.class);
PRIMITIVE_CLASSES = Collections.unmodifiableMap(primitives);
}
private ClassLoaderUtil() {
}
public static <T> T newInstance(ClassLoader classLoader, final String className)
throws Exception {
classLoader = classLoader == null ? ClassLoaderUtil.class.getClassLoader() : classLoader;
Constructor<T> constructor = CONSTRUCTOR_CACHE.get(classLoader, className);
if (constructor != null) {
return constructor.newInstance();
}
Class<T> klass = (Class<T>) loadClass(classLoader, className);
return (T) newInstance(klass, classLoader, className);
}
public static <T> T newInstance(Class<T> klass, ClassLoader classLoader, String className)
throws Exception {
final Constructor<T> constructor = klass.getDeclaredConstructor();
if (!constructor.isAccessible()) {
constructor.setAccessible(true);
}
CONSTRUCTOR_CACHE.put(classLoader, className, constructor);
return constructor.newInstance();
}
public static Class<?> loadClass(final ClassLoader classLoader, final String className)
throws ClassNotFoundException {
ValidationUtil.isNotNull(className, "className");
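// Fast path: primitive type names are at most 7 characters ("boolean") and start lowercase, so two cheap checks gate the map lookup.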
if (className.length() <= MAX_PRIM_CLASSNAME_LENGTH && Character.isLowerCase(className.charAt(0))) {
final Class primitiveClass = PRIMITIVE_CLASSES.get(className);
if (primitiveClass != null) {
return primitiveClass;
}
}
ClassLoader theClassLoader = classLoader;
if (theClassLoader == null) {
theClassLoader = Thread.currentThread().getContextClassLoader();
}
// First try to load it through the given classloader
if (theClassLoader != null) {
try {
return tryLoadClass(className, theClassLoader);
} catch (ClassNotFoundException ignore) {
// Reset selected classloader and try with others
theClassLoader = null;
}
}
// If failed and this is a Hazelcast class try again with our classloader
if (className.startsWith(HAZELCAST_BASE_PACKAGE) || className.startsWith(HAZELCAST_ARRAY)) {
theClassLoader = ClassLoaderUtil.class.getClassLoader();
}
if (theClassLoader == null) {
theClassLoader = Thread.currentThread().getContextClassLoader();
}
if (theClassLoader != null) {
return tryLoadClass(className, theClassLoader);
}
return Class.forName(className);
}
private static Class<?> tryLoadClass(String className, ClassLoader classLoader)
throws ClassNotFoundException {
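// JVM array descriptors (names starting with '[') cannot be resolved by ClassLoader.loadClass, so route them through Class.forName.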
if (className.startsWith("[")) {
return Class.forName(className, false, classLoader);
} else {
return classLoader.loadClass(className);
}
}
public static boolean isInternalType(Class type) {
String name = type.getName();
ClassLoader classLoader = ClassLoaderUtil.class.getClassLoader();
return type.getClassLoader() == classLoader && name.startsWith(HAZELCAST_BASE_PACKAGE);
}
private static final class ConstructorCache {
private final ConcurrentMap<ClassLoader, ConcurrentMap<String, WeakReference<Constructor>>> cache;
private ConstructorCache() {
// Guess 16 classloaders to not waste too much memory (16 is default concurrency level)
cache = new ConcurrentReferenceHashMap<ClassLoader, ConcurrentMap<String, WeakReference<Constructor>>>(16);
}
private <T> Constructor put(ClassLoader classLoader, String className, Constructor<T> constructor) {
ClassLoader cl = classLoader == null ? ClassLoaderUtil.class.getClassLoader() : classLoader;
ConcurrentMap<String, WeakReference<Constructor>> innerCache = cache.get(cl);
if (innerCache == null) {
// Let's guess a start of 100 classes per classloader
innerCache = new ConcurrentHashMap<String, WeakReference<Constructor>>(100);
ConcurrentMap<String, WeakReference<Constructor>> old = cache.putIfAbsent(cl, innerCache);
if (old != null) {
innerCache = old;
}
}
innerCache.put(className, new WeakReference<Constructor>(constructor));
return constructor;
}
public <T> Constructor<T> get(ClassLoader classLoader, String className) {
ConcurrentMap<String, WeakReference<Constructor>> innerCache = cache.get(classLoader);
if (innerCache == null) {
return null;
}
WeakReference<Constructor> reference = innerCache.get(className);
Constructor constructor = reference == null ? null : reference.get();
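// A non-null reference with a cleared referent means the constructor's class was garbage collected; evict the stale entry.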
if (reference != null && constructor == null) {
innerCache.remove(className);
}
return (Constructor<T>) constructor;
}
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_nio_ClassLoaderUtil.java |
3,489 | public static enum Loading {
LAZY {
@Override
public String toString() {
return LAZY_VALUE;
}
},
EAGER {
@Override
public String toString() {
return EAGER_VALUE;
}
};
public static final String KEY = "loading";
public static final String EAGER_VALUE = "eager";
public static final String LAZY_VALUE = "lazy";
public static Loading parse(String loading, Loading defaultValue) {
if (Strings.isNullOrEmpty(loading)) {
return defaultValue;
} else if (EAGER_VALUE.equalsIgnoreCase(loading)) {
return EAGER;
} else if (LAZY_VALUE.equalsIgnoreCase(loading)) {
return LAZY;
} else {
throw new MapperParsingException("Unknown [" + KEY + "] value: [" + loading + "]");
}
}
} | 0true
| src_main_java_org_elasticsearch_index_mapper_FieldMapper.java |
3,697 | public class TypeFieldMapper extends AbstractFieldMapper<String> implements InternalMapper, RootMapper {
public static final String NAME = "_type";
public static final String CONTENT_TYPE = "_type";
public static class Defaults extends AbstractFieldMapper.Defaults {
public static final String NAME = TypeFieldMapper.NAME;
public static final String INDEX_NAME = TypeFieldMapper.NAME;
public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
static {
FIELD_TYPE.setIndexed(true);
FIELD_TYPE.setTokenized(false);
FIELD_TYPE.setStored(false);
FIELD_TYPE.setOmitNorms(true);
FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_ONLY);
FIELD_TYPE.freeze();
}
}
public static class Builder extends AbstractFieldMapper.Builder<Builder, TypeFieldMapper> {
public Builder() {
super(Defaults.NAME, new FieldType(Defaults.FIELD_TYPE));
indexName = Defaults.INDEX_NAME;
}
@Override
public TypeFieldMapper build(BuilderContext context) {
return new TypeFieldMapper(name, indexName, boost, fieldType, postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());
}
}
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
TypeFieldMapper.Builder builder = type();
parseField(builder, builder.name, node, parserContext);
return builder;
}
}
public TypeFieldMapper() {
this(Defaults.NAME, Defaults.INDEX_NAME);
}
protected TypeFieldMapper(String name, String indexName) {
this(name, indexName, Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE), null, null, null, ImmutableSettings.EMPTY);
}
public TypeFieldMapper(String name, String indexName, float boost, FieldType fieldType, PostingsFormatProvider postingsProvider,
DocValuesFormatProvider docValuesProvider, @Nullable Settings fieldDataSettings, Settings indexSettings) {
super(new Names(name, indexName, indexName, name), boost, fieldType, null, Lucene.KEYWORD_ANALYZER,
Lucene.KEYWORD_ANALYZER, postingsProvider, docValuesProvider, null, null, fieldDataSettings, indexSettings);
}
@Override
public FieldType defaultFieldType() {
return Defaults.FIELD_TYPE;
}
@Override
public FieldDataType defaultFieldDataType() {
return new FieldDataType("string");
}
@Override
public boolean hasDocValues() {
return false;
}
@Override
public String value(Object value) {
if (value == null) {
return null;
}
return value.toString();
}
@Override
public Query termQuery(Object value, @Nullable QueryParseContext context) {
return new XConstantScoreQuery(context.cacheFilter(termFilter(value, context), null));
}
@Override
public Filter termFilter(Object value, @Nullable QueryParseContext context) {
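// When _type is not indexed, fall back to a prefix match on _uid, whose terms are encoded as type#id.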
if (!fieldType.indexed()) {
return new PrefixFilter(new Term(UidFieldMapper.NAME, Uid.typePrefixAsBytes(BytesRefs.toBytesRef(value))));
}
return new TermFilter(names().createIndexNameTerm(BytesRefs.toBytesRef(value)));
}
@Override
public boolean useTermQueryWithQueryString() {
return true;
}
@Override
public void preParse(ParseContext context) throws IOException {
super.parse(context);
}
@Override
public void postParse(ParseContext context) throws IOException {
}
@Override
public void parse(ParseContext context) throws IOException {
// we parse in pre parse
}
@Override
public void validate(ParseContext context) throws MapperParsingException {
}
@Override
public boolean includeInObject() {
return false;
}
@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
if (!fieldType.indexed() && !fieldType.stored()) {
return;
}
fields.add(new Field(names.indexName(), context.type(), fieldType));
if (hasDocValues()) {
fields.add(new SortedSetDocValuesField(names.indexName(), new BytesRef(context.type())));
}
}
@Override
protected String contentType() {
return CONTENT_TYPE;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
// if all are defaults, no sense to write it at all
if (!includeDefaults && fieldType.stored() == Defaults.FIELD_TYPE.stored() && fieldType.indexed() == Defaults.FIELD_TYPE.indexed()) {
return builder;
}
builder.startObject(CONTENT_TYPE);
if (includeDefaults || fieldType.stored() != Defaults.FIELD_TYPE.stored()) {
builder.field("store", fieldType.stored());
}
if (includeDefaults || fieldType.indexed() != Defaults.FIELD_TYPE.indexed()) {
builder.field("index", indexTokenizeOptionToString(fieldType.indexed(), fieldType.tokenized()));
}
builder.endObject();
return builder;
}
@Override
public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
// do nothing here, no merging, but also no exception
}
} | 1no label
| src_main_java_org_elasticsearch_index_mapper_internal_TypeFieldMapper.java |
3,863 | public class HasParentQueryParser implements QueryParser {
public static final String NAME = "has_parent";
@Inject
public HasParentQueryParser() {
}
@Override
public String[] names() {
return new String[]{NAME, Strings.toCamelCase(NAME)};
}
@Override
public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
Query innerQuery = null;
boolean queryFound = false;
float boost = 1.0f;
String parentType = null;
boolean score = false;
String queryName = null;
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("query".equals(currentFieldName)) {
// TODO handle `query` element before `type` element...
String[] origTypes = QueryParseContext.setTypesWithPrevious(parentType == null ? null : new String[]{parentType});
try {
innerQuery = parseContext.parseInnerQuery();
queryFound = true;
} finally {
QueryParseContext.setTypes(origTypes);
}
} else {
throw new QueryParsingException(parseContext.index(), "[has_parent] query does not support [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if ("type".equals(currentFieldName) || "parent_type".equals(currentFieldName) || "parentType".equals(currentFieldName)) {
parentType = parser.text();
} else if ("_scope".equals(currentFieldName)) {
throw new QueryParsingException(parseContext.index(), "the [_scope] support in [has_parent] query has been removed, use a filter as a facet_filter in the relevant global facet");
} else if ("score_type".equals(currentFieldName) || "scoreType".equals(currentFieldName)) {
String scoreTypeValue = parser.text();
if ("score".equals(scoreTypeValue)) {
score = true;
} else if ("none".equals(scoreTypeValue)) {
score = false;
}
} else if ("score_mode".equals(currentFieldName) || "scoreMode".equals(currentFieldName)) {
String scoreModeValue = parser.text();
if ("score".equals(scoreModeValue)) {
score = true;
} else if ("none".equals(scoreModeValue)) {
score = false;
}
} else if ("boost".equals(currentFieldName)) {
boost = parser.floatValue();
} else if ("_name".equals(currentFieldName)) {
queryName = parser.text();
} else {
throw new QueryParsingException(parseContext.index(), "[has_parent] query does not support [" + currentFieldName + "]");
}
}
}
if (!queryFound) {
throw new QueryParsingException(parseContext.index(), "[has_parent] query requires 'query' field");
}
if (innerQuery == null) {
return null;
}
if (parentType == null) {
throw new QueryParsingException(parseContext.index(), "[has_parent] query requires 'parent_type' field");
}
DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType);
if (parentDocMapper == null) {
throw new QueryParsingException(parseContext.index(), "[has_parent] query configured 'parent_type' [" + parentType + "] is not a valid type");
}
innerQuery.setBoost(boost);
// wrap the query with type query
innerQuery = new XFilteredQuery(innerQuery, parseContext.cacheFilter(parentDocMapper.typeFilter(), null));
Set<String> parentTypes = new HashSet<String>(5);
parentTypes.add(parentType);
for (DocumentMapper documentMapper : parseContext.mapperService()) {
ParentFieldMapper parentFieldMapper = documentMapper.parentFieldMapper();
if (parentFieldMapper.active()) {
DocumentMapper parentTypeDocumentMapper = parseContext.mapperService().documentMapper(parentFieldMapper.type());
if (parentTypeDocumentMapper == null) {
// Only add this, if this parentFieldMapper (also a parent) isn't a child of another parent.
parentTypes.add(parentFieldMapper.type());
}
}
}
Filter parentFilter;
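// A single parent type reuses its cached type filter directly; several parent types are OR'ed together in a boolean filter.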
if (parentTypes.size() == 1) {
DocumentMapper documentMapper = parseContext.mapperService().documentMapper(parentTypes.iterator().next());
parentFilter = parseContext.cacheFilter(documentMapper.typeFilter(), null);
} else {
XBooleanFilter parentsFilter = new XBooleanFilter();
for (String parentTypeStr : parentTypes) {
DocumentMapper documentMapper = parseContext.mapperService().documentMapper(parentTypeStr);
Filter filter = parseContext.cacheFilter(documentMapper.typeFilter(), null);
parentsFilter.add(filter, BooleanClause.Occur.SHOULD);
}
parentFilter = parentsFilter;
}
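// Anything that is not a parent type is treated as a potential child document for the has_parent match.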
Filter childrenFilter = parseContext.cacheFilter(new NotFilter(parentFilter), null);
boolean deleteByQuery = "delete_by_query".equals(SearchContext.current().source());
Query query;
if (!deleteByQuery && score) {
query = new ParentQuery(innerQuery, parentType, childrenFilter);
} else {
query = new ParentConstantScoreQuery(innerQuery, parentType, childrenFilter);
if (deleteByQuery) {
query = new XConstantScoreQuery(new DeleteByQueryWrappingFilter(query));
}
}
query.setBoost(boost);
if (queryName != null) {
parseContext.addNamedFilter(queryName, new CustomQueryWrappingFilter(query));
}
return query;
}
} | 1no label
| src_main_java_org_elasticsearch_index_query_HasParentQueryParser.java |
3,246 | abstract class DoubleValuesComparatorBase<T extends Number> extends NumberComparatorBase<T> {
protected final IndexNumericFieldData<?> indexFieldData;
protected final double missingValue;
protected double bottom;
protected DoubleValues readerValues;
protected final SortMode sortMode;
public DoubleValuesComparatorBase(IndexNumericFieldData<?> indexFieldData, double missingValue, SortMode sortMode) {
this.indexFieldData = indexFieldData;
this.missingValue = missingValue;
this.sortMode = sortMode;
}
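// sortMode reduces a document's possibly multi-valued field to one double, substituting missingValue for docs without a value.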
@Override
public final int compareBottom(int doc) throws IOException {
final double v2 = sortMode.getRelevantValue(readerValues, doc, missingValue);
return compare(bottom, v2);
}
@Override
public final int compareDocToValue(int doc, T valueObj) throws IOException {
final double value = valueObj.doubleValue();
final double docValue = sortMode.getRelevantValue(readerValues, doc, missingValue);
return compare(docValue, value);
}
@Override
public final FieldComparator<T> setNextReader(AtomicReaderContext context) throws IOException {
readerValues = indexFieldData.load(context).getDoubleValues();
return this;
}
@Override
public int compareBottomMissing() {
return compare(bottom, missingValue);
}
static final int compare(double left, double right) {
return Double.compare(left, right);
}
} | 1no label
| src_main_java_org_elasticsearch_index_fielddata_fieldcomparator_DoubleValuesComparatorBase.java |
1,577 | MessageListener messageListener = new MessageListener() {
public void onMessage(Message message) {
totalMessageCount.incrementAndGet();
}
}; | 0true
| hazelcast_src_main_java_com_hazelcast_jmx_TopicMBean.java |
1,523 | public static class Result {
private final boolean changed;
private final RoutingTable routingTable;
private final AllocationExplanation explanation;
/**
* Creates a new {@link RoutingAllocation.Result}
*
* @param changed a flag to determine whether the actual {@link RoutingTable} has been changed
* @param routingTable the {@link RoutingTable} this Result references
* @param explanation Explanation of the Result
*/
public Result(boolean changed, RoutingTable routingTable, AllocationExplanation explanation) {
this.changed = changed;
this.routingTable = routingTable;
this.explanation = explanation;
}
/**
* Determines whether the actual {@link RoutingTable} has been changed.
*
* @return <code>true</code> if the {@link RoutingTable} has been changed by allocation, otherwise <code>false</code>
*/
public boolean changed() {
return this.changed;
}
/**
* Get the {@link RoutingTable} referenced by this result
* @return referenced {@link RoutingTable}
*/
public RoutingTable routingTable() {
return routingTable;
}
/**
* Get the explanation of this result
* @return explanation
*/
public AllocationExplanation explanation() {
return explanation;
}
} | 0true
| src_main_java_org_elasticsearch_cluster_routing_allocation_RoutingAllocation.java |
1,837 | class InjectorBuilder {
private final Stopwatch stopwatch = new Stopwatch();
private final Errors errors = new Errors();
private Stage stage;
private final Initializer initializer = new Initializer();
private final BindingProcessor bindingProcesor;
private final InjectionRequestProcessor injectionRequestProcessor;
private final InjectorShell.Builder shellBuilder = new InjectorShell.Builder();
private List<InjectorShell> shells;
InjectorBuilder() {
injectionRequestProcessor = new InjectionRequestProcessor(errors, initializer);
bindingProcesor = new BindingProcessor(errors, initializer);
}
/**
* Sets the stage for the created injector. If the stage is {@link Stage#PRODUCTION}, this class
* will eagerly load singletons.
*/
InjectorBuilder stage(Stage stage) {
shellBuilder.stage(stage);
this.stage = stage;
return this;
}
/**
* Sets the parent of the injector to-be-constructed. As a side effect, this sets this injector's
* stage to the stage of {@code parent}.
*/
InjectorBuilder parentInjector(InjectorImpl parent) {
shellBuilder.parent(parent);
return stage(parent.getInstance(Stage.class));
}
InjectorBuilder addModules(Iterable<? extends Module> modules) {
shellBuilder.addModules(modules);
return this;
}
Injector build() {
if (shellBuilder == null) {
throw new AssertionError("Already built, builders are not reusable.");
}
// Synchronize while we're building up the bindings and other injector state. This ensures that
// the JIT bindings in the parent injector don't change while we're being built
synchronized (shellBuilder.lock()) {
shells = shellBuilder.build(initializer, bindingProcesor, stopwatch, errors);
stopwatch.resetAndLog("Injector construction");
initializeStatically();
}
// If we're in the tool stage, stop here. Don't eagerly inject or load anything.
if (stage == Stage.TOOL) {
return new ToolStageInjector(primaryInjector());
}
injectDynamically();
return primaryInjector();
}
/**
* Initialize and validate everything.
*/
private void initializeStatically() {
bindingProcesor.initializeBindings();
stopwatch.resetAndLog("Binding initialization");
for (InjectorShell shell : shells) {
shell.getInjector().index();
}
stopwatch.resetAndLog("Binding indexing");
injectionRequestProcessor.process(shells);
stopwatch.resetAndLog("Collecting injection requests");
bindingProcesor.runCreationListeners();
stopwatch.resetAndLog("Binding validation");
injectionRequestProcessor.validate();
stopwatch.resetAndLog("Static validation");
initializer.validateOustandingInjections(errors);
stopwatch.resetAndLog("Instance member validation");
new LookupProcessor(errors).process(shells);
for (InjectorShell shell : shells) {
((DeferredLookups) shell.getInjector().lookups).initialize(errors);
}
stopwatch.resetAndLog("Provider verification");
for (InjectorShell shell : shells) {
if (!shell.getElements().isEmpty()) {
throw new AssertionError("Failed to execute " + shell.getElements());
}
}
errors.throwCreationExceptionIfErrorsExist();
}
/**
* Returns the injector being constructed. This is not necessarily the root injector.
*/
private Injector primaryInjector() {
return shells.get(0).getInjector();
}
/**
* Inject everything that can be injected. This method is intentionally not synchronized. If we
* locked while injecting members (ie. running user code), things would deadlock should the user
* code build a just-in-time binding from another thread.
*/
private void injectDynamically() {
injectionRequestProcessor.injectMembers();
stopwatch.resetAndLog("Static member injection");
initializer.injectAll(errors);
stopwatch.resetAndLog("Instance injection");
errors.throwCreationExceptionIfErrorsExist();
for (InjectorShell shell : shells) {
loadEagerSingletons(shell.getInjector(), stage, errors);
}
stopwatch.resetAndLog("Preloading singletons");
errors.throwCreationExceptionIfErrorsExist();
}
/**
* Loads eager singletons, or all singletons if we're in Stage.PRODUCTION. Bindings discovered
* while we're binding these singletons will not be eager.
*/
public void loadEagerSingletons(InjectorImpl injector, Stage stage, final Errors errors) {
@SuppressWarnings("unchecked") // casting Collection<Binding> to Collection<BindingImpl> is safe
Set<BindingImpl<?>> candidateBindings = ImmutableSet.copyOf(Iterables.concat(
(Collection) injector.state.getExplicitBindingsThisLevel().values(),
injector.jitBindings.values()));
for (final BindingImpl<?> binding : candidateBindings) {
if (binding.getScoping().isEagerSingleton(stage)) {
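// Instantiate the eager singleton now by resolving its binding inside an injector context, collecting failures per binding.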
try {
injector.callInContext(new ContextualCallable<Void>() {
Dependency<?> dependency = Dependency.get(binding.getKey());
public Void call(InternalContext context) {
context.setDependency(dependency);
Errors errorsForBinding = errors.withSource(dependency);
try {
binding.getInternalFactory().get(errorsForBinding, context, dependency);
} catch (ErrorsException e) {
errorsForBinding.merge(e.getErrors());
} finally {
context.setDependency(null);
}
return null;
}
});
} catch (ErrorsException e) {
throw new AssertionError();
}
}
}
}
/**
* {@link Injector} exposed to users in {@link Stage#TOOL}.
*/
static class ToolStageInjector implements Injector {
private final Injector delegateInjector;
ToolStageInjector(Injector delegateInjector) {
this.delegateInjector = delegateInjector;
}
public void injectMembers(Object o) {
throw new UnsupportedOperationException(
"Injector.injectMembers(Object) is not supported in Stage.TOOL");
}
public Map<Key<?>, Binding<?>> getBindings() {
return this.delegateInjector.getBindings();
}
public <T> Binding<T> getBinding(Key<T> key) {
return this.delegateInjector.getBinding(key);
}
public <T> Binding<T> getBinding(Class<T> type) {
return this.delegateInjector.getBinding(type);
}
public <T> List<Binding<T>> findBindingsByType(TypeLiteral<T> type) {
return this.delegateInjector.findBindingsByType(type);
}
public Injector getParent() {
return delegateInjector.getParent();
}
public Injector createChildInjector(Iterable<? extends Module> modules) {
return delegateInjector.createChildInjector(modules);
}
public Injector createChildInjector(Module... modules) {
return delegateInjector.createChildInjector(modules);
}
public <T> Provider<T> getProvider(Key<T> key) {
throw new UnsupportedOperationException(
"Injector.getProvider(Key<T>) is not supported in Stage.TOOL");
}
public <T> Provider<T> getProvider(Class<T> type) {
throw new UnsupportedOperationException(
"Injector.getProvider(Class<T>) is not supported in Stage.TOOL");
}
public <T> MembersInjector<T> getMembersInjector(TypeLiteral<T> typeLiteral) {
throw new UnsupportedOperationException(
"Injector.getMembersInjector(TypeLiteral<T>) is not supported in Stage.TOOL");
}
public <T> MembersInjector<T> getMembersInjector(Class<T> type) {
throw new UnsupportedOperationException(
"Injector.getMembersInjector(Class<T>) is not supported in Stage.TOOL");
}
public <T> T getInstance(Key<T> key) {
throw new UnsupportedOperationException(
"Injector.getInstance(Key<T>) is not supported in Stage.TOOL");
}
public <T> T getInstance(Class<T> type) {
throw new UnsupportedOperationException(
"Injector.getInstance(Class<T>) is not supported in Stage.TOOL");
}
}
} | 0true
| src_main_java_org_elasticsearch_common_inject_InjectorBuilder.java |
125 | public interface PageService {
/**
* Returns the page with the passed in id.
*
* @param pageId - The id of the page.
* @return The associated page.
*/
public Page findPageById(Long pageId);
/**
* Returns the page template with the passed in id.
*
* @param id - the id of the page template
* @return The associated page template.
*/
public PageTemplate findPageTemplateById(Long id);
/**
* Saves the given {@link PageTemplate}
*
* @param template the {@link PageTemplate} to save
* @return the database-saved {@link PageTemplate}
*/
public PageTemplate savePageTemplate(PageTemplate template);
/**
* Returns the page-fields associated with the passed in page-id.
* This is preferred over the direct access from Page so that the
* two items can be cached distinctly
*
* @param pageId - The id of the page.
* @return The associated page.
*/
public Map<String,PageField> findPageFieldsByPageId(Long pageId);
/**
* This method is intended to be called from within the CMS
* admin only.
*
* Adds the passed in page to the DB.
*
* Creates a sandbox/site if one doesn't already exist.
*/
public Page addPage(Page page, SandBox destinationSandbox);
/**
* This method is intended to be called from within the CMS
* admin only.
*
* Updates the page according to the following rules:
*
* 1. If sandbox has changed from null to a value
* This means that the user is editing an item in production and
* the edit is taking place in a sandbox.
*
* Clone the page and add it to the new sandbox and set the cloned
* page's originalPageId to the id of the page being updated.
*
* 2. If the sandbox has changed from one value to another
* This means that the user is moving the item from one sandbox
* to another.
*
* Update the siteId for the page to the one associated with the
* new sandbox
*
* 3. If the sandbox has changed from a value to null
* This means that the item is moving from the sandbox to production.
*
* If the page has an originalPageId, then update that page by
* setting its archived flag to true.
*
* Then, update the siteId of the page being updated to be the
* siteId of the original page.
*
* 4. If the sandbox is the same then just update the page.
*/
public Page updatePage(Page page, SandBox sandbox);
/**
* Looks up the page from the backend datastore. Processes the page's fields to
* fix the URL if the site has overridden the URL for images. If secure is true
* and images are being overridden, the system will use https.
*
* @param currentSandbox - current sandbox
* @param locale - current locale
* @param uri - the URI to return a page for
* @param ruleDTOs - ruleDTOs that are used as the data to process page rules
* @param secure - set to true if current request is over HTTPS
* @return
*/
public PageDTO findPageByURI(SandBox currentSandbox, Locale locale, String uri, Map<String,Object> ruleDTOs, boolean secure);
/**
* If deleting an item where page.originalPageId != null
* then the item is deleted from the database.
*
* If the originalPageId is null, then this method marks
* the items as deleted within the passed in sandbox.
*
* @param page
* @param destinationSandbox
* @return
*/
public void deletePage(Page page, SandBox destinationSandbox);
public List<Page> findPages(SandBox sandBox, Criteria criteria);
/**
* Returns all pages, regardless of any sandbox they are a part of
* @return all {@link Page}s configured in the system
*/
public List<Page> readAllPages();
/**
* Returns all page templates, regardless of any sandbox they are a part of
* @return all {@link PageTemplate}s configured in the system
*/
public List<PageTemplate> readAllPageTemplates();
public Long countPages(SandBox sandBox, Criteria criteria);
/**
* Call to evict both secure and non-secure pages matching
* the passed in key.
*
* @param baseKey
*/
public void removePageFromCache(String baseKey);
public List<ArchivedPagePublisher> getArchivedPageListeners();
public void setArchivedPageListeners(List<ArchivedPagePublisher> archivedPageListeners);
public boolean isAutomaticallyApproveAndPromotePages();
public void setAutomaticallyApproveAndPromotePages(boolean automaticallyApproveAndPromotePages);
} | 0true
| admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_service_PageService.java |
826 | public class ReduceSearchPhaseException extends SearchPhaseExecutionException {
public ReduceSearchPhaseException(String phaseName, String msg, ShardSearchFailure[] shardFailures) {
super(phaseName, "[reduce] " + msg, shardFailures);
}
public ReduceSearchPhaseException(String phaseName, String msg, Throwable cause, ShardSearchFailure[] shardFailures) {
super(phaseName, "[reduce] " + msg, cause, shardFailures);
}
} | 0true
| src_main_java_org_elasticsearch_action_search_ReduceSearchPhaseException.java |
3,248 | public class AtomicLongPermission extends InstancePermission {
private static final int READ = 0x4;
private static final int MODIFY = 0x8;
private static final int ALL = READ | MODIFY | CREATE | DESTROY;
public AtomicLongPermission(String name, String... actions) {
super(name, actions);
}
@Override
protected int initMask(String[] actions) {
int mask = NONE;
for (String action : actions) {
if (ActionConstants.ACTION_ALL.equals(action)) {
return ALL;
}
if (ActionConstants.ACTION_CREATE.equals(action)) {
mask |= CREATE;
} else if (ActionConstants.ACTION_READ.equals(action)) {
mask |= READ;
} else if (ActionConstants.ACTION_MODIFY.equals(action)) {
mask |= MODIFY;
} else if (ActionConstants.ACTION_DESTROY.equals(action)) {
mask |= DESTROY;
}
}
return mask;
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_security_permission_AtomicLongPermission.java |
134 | class InvertBooleanProposal implements ICompletionProposal {
private final InvertBooleanRefactoringAction action;
public InvertBooleanProposal(CeylonEditor editor) {
action = new InvertBooleanRefactoringAction(editor);
}
@Override
public Image getImage() {
return CeylonLabelProvider.COMPOSITE_CHANGE;
}
@Override
public String getDisplayString() {
return "Invert boolean value of '" + action.getValueName() + "'";
}
@Override
public Point getSelection(IDocument doc) {
return null;
}
@Override
public IContextInformation getContextInformation() {
return null;
}
@Override
public String getAdditionalProposalInfo() {
return null;
}
@Override
public void apply(IDocument doc) {
action.run();
}
public static void add(Collection<ICompletionProposal> proposals, CeylonEditor editor) {
InvertBooleanProposal proposal = new InvertBooleanProposal(editor);
if (proposal.action.isEnabled()) {
proposals.add(proposal);
}
}
} | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_InvertBooleanProposal.java |
1,697 | runnable = new Runnable() { public void run() { map.replace(null, "value"); } }; | 0true
| hazelcast_src_test_java_com_hazelcast_map_BasicMapTest.java |
1,539 | public class ResourceAdapterImpl implements ResourceAdapter, Serializable {
/**
* Identity generator
*/
private static final AtomicInteger ID_GEN = new AtomicInteger();
private static final long serialVersionUID = -1727994229521767306L;
/**
* The hazelcast instance itself
*/
private HazelcastInstance hazelcast;
/**
* The configured hazelcast configuration location
*/
private String configurationLocation;
/**
* Identity
*/
private final transient int id;
public ResourceAdapterImpl() {
id = ID_GEN.incrementAndGet();
}
/* (non-Javadoc)
* @see javax.resource.spi.ResourceAdapter
* #endpointActivation(javax.resource.spi.endpoint.MessageEndpointFactory, javax.resource.spi.ActivationSpec)
*/
public void endpointActivation(MessageEndpointFactory endpointFactory, ActivationSpec spec)
throws ResourceException {
}
/* (non-Javadoc)
* @see javax.resource.spi.ResourceAdapter
* #endpointDeactivation(javax.resource.spi.endpoint.MessageEndpointFactory, javax.resource.spi.ActivationSpec)
*/
public void endpointDeactivation(MessageEndpointFactory endpointFactory, ActivationSpec spec) {
}
/* (non-Javadoc)
* @see javax.resource.spi.ResourceAdapter
* #getXAResources(javax.resource.spi.ActivationSpec[])
*/
public XAResource[] getXAResources(ActivationSpec[] specs) throws ResourceException {
//JBoss is fine with null, weblogic requires an empty array
return new XAResource[0];
}
/* (non-Javadoc)
* @see javax.resource.spi.ResourceAdapter#start(javax.resource.spi.BootstrapContext)
*/
public void start(BootstrapContext ctx) throws ResourceAdapterInternalException {
// Gets/creates the hazelcast instance
ConfigBuilder config = buildConfiguration();
setHazelcast(Hazelcast.newHazelcastInstance(config.build()));
}
/**
* Creates a hazelcast configuration based on the {@link #getConfigLocation()}
*
* @return the created hazelcast configuration
* @throws ResourceAdapterInternalException If there was a problem with the configuration creation
*/
private ConfigBuilder buildConfiguration()
throws ResourceAdapterInternalException {
XmlConfigBuilder config;
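// With no explicit location, the no-arg XmlConfigBuilder uses Hazelcast's default hazelcast.xml lookup.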
if (configurationLocation == null || configurationLocation.length() == 0) {
config = new XmlConfigBuilder();
} else {
try {
config = new XmlConfigBuilder(configurationLocation);
} catch (FileNotFoundException e) {
throw new ResourceAdapterInternalException(e.getMessage(), e);
}
}
return config;
}
/* (non-Javadoc)
* @see javax.resource.spi.ResourceAdapter#stop()
*/
public void stop() {
if (getHazelcast() != null) {
getHazelcast().getLifecycleService().shutdown();
}
}
/**
* Sets the underlying hazelcast instance
*/
private void setHazelcast(HazelcastInstance hazelcast) {
this.hazelcast = hazelcast;
}
/**
* Provides access to the underlying hazelcast instance
*/
HazelcastInstance getHazelcast() {
return hazelcast;
}
/**
* Called by the container
*
* @param configLocation Hazelcast's configuration location
*/
public void setConfigLocation(String configLocation) {
this.configurationLocation = configLocation;
}
/**
* @return The configured hazelcast configuration location via RAR deployment descriptor
*/
public String getConfigLocation() {
return configurationLocation;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + id;
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
ResourceAdapterImpl other = (ResourceAdapterImpl) obj;
if (id != other.id) {
return false;
}
return true;
}
} | 1no label
| hazelcast-ra_hazelcast-jca_src_main_java_com_hazelcast_jca_ResourceAdapterImpl.java |
386 | @RunWith(HazelcastParallelClassRunner.class)
@Category(QuickTest.class)
public class ClientMultiMapLockTest {
static HazelcastInstance server;
static HazelcastInstance client;
@BeforeClass
public static void init() {
server = Hazelcast.newHazelcastInstance();
client = HazelcastClient.newHazelcastClient();
}
@AfterClass
public static void destroy() {
HazelcastClient.shutdownAll();
Hazelcast.shutdownAll();
}
@Test
public void testIsLocked_whenNotLocked() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
final Object key = "KeyNotLocked";
final boolean result = mm.isLocked(key);
assertFalse(result);
}
@Test
public void testLock() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
final Object key = "Key";
mm.lock(key);
assertTrue(mm.isLocked(key));
}
@Test(expected = NullPointerException.class)
public void testLock_whenKeyNull() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
mm.lock(null);
}
@Test(expected = NullPointerException.class)
public void testUnLock_whenNullKey() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
mm.unlock(null);
}
@Test(expected = IllegalMonitorStateException.class)
public void testUnlock_whenNotLocked() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
mm.unlock("NOT_LOCKED");
}
@Test
public void testUnLock() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
final Object key = "key";
mm.lock(key);
mm.unlock(key);
assertFalse(mm.isLocked(key));
}
@Test
public void testUnlock_whenRentrantlyLockedBySelf() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
final Object key = "key";
mm.lock(key);
mm.lock(key);
mm.unlock(key);
assertTrue(mm.isLocked(key));
}
@Test(expected = IllegalMonitorStateException.class)
public void testUnlock_whenLockedByOther() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
final Object key = "key";
mm.lock(key);
UnLockThread t = new UnLockThread(mm, key);
t.start();
assertJoinable(t);
throw t.exception;
}
static class UnLockThread extends Thread{
public Exception exception=null;
public MultiMap mm=null;
public Object key=null;
public UnLockThread(MultiMap mm, Object key){
this.mm = mm;
this.key = key;
}
public void run() {
try{
mm.unlock(key);
}catch (Exception e){
exception = e;
}
}
}
@Test
public void testLock_whenAlreadyLockedBySelf() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
final Object key = "Key";
mm.lock(key);
mm.lock(key);
assertTrue(mm.isLocked(key));
}
@Test
public void testTryLock() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
Object key = "key";
assertTrue(mm.tryLock(key));
}
@Test(expected = NullPointerException.class)
public void testTryLock_whenNullKey() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
mm.tryLock(null);
}
@Test
public void testTryLock_whenLockedBySelf() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
final Object key = "Key";
mm.lock(key);
assertTrue(mm.tryLock(key));
}
@Test
public void testTryLock_whenLockedByOther() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
final Object key = "Key1";
mm.lock(key);
final CountDownLatch tryLockFailed = new CountDownLatch(1);
new Thread() {
public void run() {
if (!mm.tryLock(key)) {
tryLockFailed.countDown();
}
}
}.start();
assertOpenEventually(tryLockFailed);
}
@Test
public void testTryLockWaitingOnLockedKey_thenKeyUnlockedByOtherThread() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
final Object key = "keyZ";
mm.lock(key);
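// The main thread holds the lock; the worker blocks in tryLock until the unlock below releases it within the 10-second window.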
final CountDownLatch tryLockReturnsTrue = new CountDownLatch(1);
new Thread(){
public void run() {
try {
if(mm.tryLock(key, 10, TimeUnit.SECONDS)){
tryLockReturnsTrue.countDown();
}
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}.start();
mm.unlock(key);
assertOpenEventually(tryLockReturnsTrue);
assertTrue(mm.isLocked(key));
}
@Test(expected = NullPointerException.class)
public void testForceUnlock_whenKeyNull() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
mm.forceUnlock(null);
}
@Test
public void testForceUnlock_whenKeyLocked() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
final Object key = "Key";
mm.lock(key);
mm.forceUnlock(key);
assertFalse(mm.isLocked(key));
}
@Test
public void testForceUnlock_whenKeyLockedTwice() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
final Object key = "Key";
mm.lock(key);
mm.lock(key);
mm.forceUnlock(key);
assertFalse(mm.isLocked(key));
}
@Test
public void testForceUnlock_whenKeyLockedByOther() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
final Object key = "key";
mm.lock(key);
final CountDownLatch forceUnlock = new CountDownLatch(1);
new Thread(){
public void run() {
mm.forceUnlock(key);
forceUnlock.countDown();
}
}.start();
assertOpenEventually(forceUnlock);
assertFalse(mm.isLocked(key));
}
@Test
public void testForceUnlock_whenKeyLockedTwiceByOther() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
final Object key = "key";
mm.lock(key);
mm.lock(key);
final CountDownLatch forceUnlock = new CountDownLatch(1);
new Thread(){
public void run() {
mm.forceUnlock(key);
forceUnlock.countDown();
}
}.start();
assertOpenEventually(forceUnlock);
assertFalse(mm.isLocked(key));
}
@Test
public void testLockTTL() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
final Object key = "Key";
mm.lock(key, 30, TimeUnit.SECONDS);
assertTrue(mm.isLocked(key));
}
@Test(expected = IllegalArgumentException.class)
public void testLockTTL_whenZeroTimeout() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
final Object key = "Key";
mm.lock(key, 0, TimeUnit.SECONDS);
}
@Test(expected = IllegalArgumentException.class)
public void testLockTTL_whenNegativeTimeout() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
final Object key = "Key";
mm.lock(key, -1, TimeUnit.SECONDS);
}
@Test
public void testLockTTL_whenLockedBySelf() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
final Object key = "Key";
mm.lock(key);
mm.lock(key, 30, TimeUnit.SECONDS);
assertTrue(mm.isLocked(key));
}
@Test
public void testLockTTLExpired() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
final Object key = "Key";
mm.lock(key, 1, TimeUnit.SECONDS);
sleepSeconds(2);
assertFalse(mm.isLocked(key));
}
@Test
public void testLockTTLExpired_whenLockedBySelf() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
final Object key = "Key";
mm.lock(key);
mm.lock(key, 1, TimeUnit.SECONDS);
sleepSeconds(2);
assertFalse(mm.isLocked(key));
}
@Test
public void testLockTTLExpires_thenOtherThreadCanObtain() throws Exception {
final MultiMap mm = client.getMultiMap(randomString());
final Object key = "Key";
mm.lock(key, 2, TimeUnit.SECONDS);
final CountDownLatch tryLockSuccess = new CountDownLatch(1);
new Thread() {
public void run() {
try {
if (mm.tryLock(key, 4, TimeUnit.SECONDS)) {
tryLockSuccess.countDown();
}
} catch (InterruptedException e) {
fail(e.getMessage());
}
}
}.start();
assertOpenEventually(tryLockSuccess);
}
} | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_multimap_ClientMultiMapLockTest.java |
1,328 | executorService.execute(new Runnable() {
@Override
public void run() {
startLatch.countDown();
assertOpenEventually(sleepLatch);
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_executor_ExecutorServiceTest.java |
1,017 | public interface Order extends Serializable {
public Long getId();
public void setId(Long id);
/**
* Gets the name of the order, mainly in order to support wishlists.
*
* @return the name of the order
*/
public String getName();
/**
* Sets the name of the order in the context of a wishlist. In this fashion, a {@link Customer} can have multiple
* wishlists like "Christmas" or "Gaming Computer" etc.
*
* @param name
*/
public void setName(String name);
/**
* Gets the auditable associated with this Order instance which tracks changes made to this Order (creation/update)
*
* @return
*/
public Auditable getAuditable();
public void setAuditable(Auditable auditable);
/**
* Returns the subtotal price for the order. The subtotal price is the price of all order items
* with item offers applied. The subtotal does not take into account the order promotions, shipping costs or any
* taxes that apply to this order.
*
* @return the total item price with offers applied
*/
public Money getSubTotal();
/**
* Sets the subtotal price for the order. The subtotal price is the price of all order items
* with item offers applied. The subtotal does not take into account the order offers or any
* taxes that apply to this order.
*
* @param subTotal
*/
public void setSubTotal(Money subTotal);
/**
* Assigns a final price to all the order items
*/
public void assignOrderItemsFinalPrice();
/**
* Returns the sum of the item totals.
*
* @return
*/
public Money calculateSubTotal();
/**
* The grand total of this {@link Order} which includes all shipping costs and taxes, as well as any adjustments from
* promotions.
*
* @return the grand total price of this {@link Order}
*/
public Money getTotal();
/**
* Used in {@link TotalActivity} to set the grand total of this {@link Order}. This includes the prices of all of the
* {@link OrderItem}s as well as any taxes, fees, shipping, and adjustments for all three.
*
* @param orderTotal the total cost of this {@link Order}
*/
public void setTotal(Money orderTotal);
/**
* Convenience method for determining how much is left on the Order based on the payments that have already been
* applied. This takes {@link #getTotal()} and subtracts the sum of all the {@link PaymentInfo}s associated with this
* Order. Note that if an order has been fully paid for, this method will return zero.
*
* @return {@link #getTotal()} minus the {@link PaymentInfo#getAmount()} for each {@link PaymentInfo} on this Order
*/
public Money getRemainingTotal();
/**
* Convenience method for determining how much of the order total has been captured. This takes the {@link PaymentInfo}s
* and checks the {@link org.broadleafcommerce.core.payment.domain.PaymentInfoDetailType} for captured records.
*
* @return
*/
public Money getCapturedTotal();
/**
* Gets the {@link Customer} for this {@link Order}.
*
* @return
*/
public Customer getCustomer();
/**
* Sets the associated {@link Customer} for this Order.
*
* @param customer
*/
public void setCustomer(Customer customer);
/**
* Gets the status of the Order.
*
* @return
*/
public OrderStatus getStatus();
/**
* Sets the status of the Order
*
* @param status
*/
public void setStatus(OrderStatus status);
/**
* Gets all the {@link OrderItem}s included in this {@link Order}
*
* @return
*/
public List<OrderItem> getOrderItems();
public void setOrderItems(List<OrderItem> orderItems);
/**
* Adds an {@link OrderItem} to the list of {@link OrderItem}s already associated with this {@link Order}
*
* @param orderItem the {@link OrderItem} to add to this {@link Order}
*/
public void addOrderItem(OrderItem orderItem);
/**
* Gets the {@link FulfillmentGroup}s associated with this {@link Order}. An {@link Order} can have many
* {@link FulfillmentGroup}s associated with it in order to support multi-address (and multi-type) shipping.
*
* @return the {@link FulfillmentGroup}s associated with this {@link Order}
*/
public List<FulfillmentGroup> getFulfillmentGroups();
public void setFulfillmentGroups(List<FulfillmentGroup> fulfillmentGroups);
/**
* Sets the {@link Offer}s that could potentially apply to this {@link Order}
*
* @param candidateOrderOffers
*/
public void setCandidateOrderOffers(List<CandidateOrderOffer> candidateOrderOffers);
/**
* Gets the {@link Offer}s that could potentially apply to this {@link Order}. Used in the promotion engine.
*
* @return
*/
public List<CandidateOrderOffer> getCandidateOrderOffers();
/**
* Gets the date that this {@link Order} was submitted. Note that if this date is non-null, then the following should
* also be true:
* <ul>
* <li>{@link #getStatus()} should return {@link OrderStatus#SUBMITTED}</li>
* <li>{@link #getOrderNumber()} should return a non-null value</li>
* </ul>
*
* @return
*/
public Date getSubmitDate();
/**
* Set the date that this {@link Order} was submitted. Used in the blCheckoutWorkflow as the last step after everything
* else has been completed (payments charged, integration systems notified, etc).
*
* @param submitDate the date that this {@link Order} was submitted.
*/
public void setSubmitDate(Date submitDate);
/**
* Gets the total tax for this order, which is the sum of the taxes on all fulfillment
* groups. This total is calculated in the TotalActivity stage of the pricing workflow.
*
* @return the total tax for the order
*/
public Money getTotalTax();
/**
* Sets the total tax of this order, which is the sum of the taxes on all fulfillment
* groups. This total should only be set during the TotalActivity stage of the pricing workflow.
*
* @param totalTax the total tax for this order
*/
public void setTotalTax(Money totalTax);
/**
* @deprecated - use {@link #getTotalFulfillmentCharges()} instead.
*/
public Money getTotalShipping();
/**
* @deprecated - Use {@link #setTotalFulfillmentCharges(Money)} instead.
*
* @param totalShipping
*/
public void setTotalShipping(Money totalShipping);
/**
* Gets the total fulfillment costs that should be charged for this {@link Order}. This value should be equivalent to
* the summation of {@link FulfillmentGroup#getTotal()} for each {@link FulfillmentGroup} associated with this
* {@link Order}
*
* @return the total fulfillment cost of this {@link Order}
*/
public Money getTotalFulfillmentCharges();
/**
* Set the total fulfillment cost of this {@link Order}. Used in the {@link FulfillmentGroupPricingActivity} after the cost
* of each {@link FulfillmentGroup} has been calculated.
*
* @param totalFulfillmentCharges
*/
public void setTotalFulfillmentCharges(Money totalFulfillmentCharges);
/**
* Gets all the {@link PaymentInfo}s associated with this {@link Order}. An {@link Order} can have many
* {@link PaymentInfo}s associated with it to support things like paying with multiple cards or perhaps paying some of
* this {@link Order} with a gift card and some with a credit card.
*
* @return the {@link PaymentInfo}s associated with this {@link Order}.
*/
public List<PaymentInfo> getPaymentInfos();
/**
* Sets the various payment types associated with this {@link Order}
*
* @param paymentInfos
*/
public void setPaymentInfos(List<PaymentInfo> paymentInfos);
/**
* Determines if this {@link Order} has an item in the given category.
*
* @param categoryName the {@link Category#getName} to check
* @return <b>true</b> if at least one {@link OrderItem} is in the given category, <b>false</b> otherwise.
* @see {@link OrderItem#isInCategory(String)}
*/
public boolean hasCategoryItem(String categoryName);
/**
* Returns an unmodifiable List of OrderAdjustment. To modify the List of OrderAdjustment, please
* use the addOrderAdjustments or removeAllOrderAdjustments methods.
*
* @return an unmodifiable List of OrderAdjustment
*/
public List<OrderAdjustment> getOrderAdjustments();
/**
* Returns all of the {@link OrderItem}s in this {@link Order} that are an instanceof {@link DiscreteOrderItem}. This
* will also go into each {@link BundleOrderItem} (if there are any) and return all of the
* {@link BundleOrderItem#getDiscreteOrderItems()} from each of those as well.
*
* @return
*/
public List<DiscreteOrderItem> getDiscreteOrderItems();
/**
* Checks the DiscreteOrderItems in the cart and returns whether or not the given SKU was found.
* The equality of the SKUs is based on the .equals() method in SkuImpl. This includes checking the
* {@link DiscreteOrderItem}s from {@link BundleOrderItem#getDiscreteOrderItems()}
*
* @param sku The sku to check for
* @return whether or not the given SKU exists in the cart
*/
public boolean containsSku(Sku sku);
public List<OfferCode> getAddedOfferCodes();
public String getFulfillmentStatus();
/**
* The unique number associated with this {@link Order}. Generally preferred to use instead of just using {@link #getId()}
* since that exposes unwanted information about your database.
*
* @return the unique order number for this {@link Order}
*/
public String getOrderNumber();
/**
* Set the unique order number for this {@link Order}
*
* @param orderNumber
*/
public void setOrderNumber(String orderNumber);
public String getEmailAddress();
public void setEmailAddress(String emailAddress);
public Map<Offer, OfferInfo> getAdditionalOfferInformation();
public void setAdditionalOfferInformation(Map<Offer, OfferInfo> additionalOfferInformation);
/**
* Returns the discount value of all the applied item offers for this order. This value is already
* deducted from the order subTotal.
*
* @return the discount value of all the applied item offers for this order
*/
public Money getItemAdjustmentsValue();
/**
* Returns the discount value of all the applied order offers. The value returned from this
* method should be subtracted from the getSubTotal() to get the order price with all item and
* order offers applied.
*
* @return the discount value of all applied order offers.
*/
public Money getOrderAdjustmentsValue();
/**
* Returns the total discount value for all applied item and order offers in the order. The return
* value should not be used with getSubTotal() to calculate the final price, since getSubTotal()
* already takes into account the applied item offers.
*
* @return the total discount of all applied item and order offers
*/
public Money getTotalAdjustmentsValue();
/**
* Updates all of the prices of the {@link OrderItem}s in this {@link Order}
* @return <b>true</b> if at least 1 {@link OrderItem} returned true from {@link OrderItem#updatePrices}, <b>false</b>
* otherwise.
* @see {@link OrderItem#updatePrices()}
*/
public boolean updatePrices();
/**
* Updates the averagePriceField for all order items.
* @return
*/
public boolean finalizeItemPrices();
public Money getFulfillmentGroupAdjustmentsValue();
public void addOfferCode(OfferCode addedOfferCode);
@Deprecated
public void addAddedOfferCode(OfferCode offerCode);
/**
* A map of arbitrary attributes added to this order.
*/
public Map<String,OrderAttribute> getOrderAttributes();
/**
* Sets the map of order attributes.
*
* @param orderAttributes
*/
public void setOrderAttributes(Map<String,OrderAttribute> orderAttributes);
/**
* This method returns the total number of items in this order. It iterates through all of the
* discrete order items and sums up the quantity. This method is useful for displaying to the customer
* the current number of "physical" items in the cart.
*
* @return the number of items in the order
*/
public int getItemCount();
/**
* The currency that the {@link Order} is priced in. Note that this is only on {@link Order} since all of the other
* entities that are related (like {@link FulfillmentGroup} and {@link OrderItem}) have a link back to here. This also
* has the side effect that an {@link Order} can only be priced in a single currency.
*
* @return
*/
public BroadleafCurrency getCurrency();
/**
* Set the currency that the {@link Order} is priced in.
*
* @param currency
*/
public void setCurrency(BroadleafCurrency currency);
public Locale getLocale();
public void setLocale(Locale locale);
/**
* Returns true if this order has order adjustments.
* @return
*/
boolean getHasOrderAdjustments();
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_domain_Order.java |
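A minimal usage sketch of the Order contract above, assuming a fully priced `order` from the checkout workflow; `getSubTotal()` is referenced in the Javadoc above and the arithmetic mirrors it.
// Hedged sketch: how a display layer might combine the documented totals.
public static void printTotals(Order order) {
    // Per the Javadoc, getSubTotal() already reflects item-level
    // adjustments, so only order-level adjustments are subtracted here.
    Money adjusted = order.getSubTotal().subtract(order.getOrderAdjustmentsValue());
    System.out.println("items=" + order.getItemCount()
        + " subtotal(adj)=" + adjusted
        + " fulfillment=" + order.getTotalFulfillmentCharges()
        + " tax=" + order.getTotalTax());
}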
1,287 | public interface ClusterService extends LifecycleComponent<ClusterService> {
/**
* The local node.
*/
DiscoveryNode localNode();
/**
* The current state.
*/
ClusterState state();
/**
* Adds an initial block to be set on the first cluster state created.
*/
void addInitialStateBlock(ClusterBlock block) throws ElasticsearchIllegalStateException;
/**
* Remove an initial block to be set on the first cluster state created.
*/
void removeInitialStateBlock(ClusterBlock block) throws ElasticsearchIllegalStateException;
/**
* The operation routing.
*/
OperationRouting operationRouting();
/**
* Adds a priority listener for updated cluster states.
*/
void addFirst(ClusterStateListener listener);
/**
* Adds a listener for updated cluster states that runs after the other listeners.
*/
void addLast(ClusterStateListener listener);
/**
* Adds a listener for updated cluster states.
*/
void add(ClusterStateListener listener);
/**
* Removes a listener for updated cluster states.
*/
void remove(ClusterStateListener listener);
/**
* Add a listener for on/off local node master events
*/
void add(LocalNodeMasterListener listener);
/**
* Remove the given listener for on/off local master events
*/
void remove(LocalNodeMasterListener listener);
/**
* Adds a cluster state listener that will timeout after the provided timeout.
*/
void add(TimeValue timeout, TimeoutClusterStateListener listener);
/**
* Submits a task that will update the cluster state.
*/
void submitStateUpdateTask(final String source, Priority priority, final ClusterStateUpdateTask updateTask);
/**
* Submits a task that will update the cluster state (the task has a default priority of {@link Priority#NORMAL}).
*/
void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask);
/**
* Returns the tasks that are pending.
*/
List<PendingClusterTask> pendingTasks();
} | 0true
| src_main_java_org_elasticsearch_cluster_ClusterService.java |
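A minimal sketch of submitting a task through the interface above; the anonymous task is a no-op and the `ClusterStateUpdateTask` callback shape is assumed from the Elasticsearch version this snippet targets.
// Hedged sketch: a no-op state update submitted at default (NORMAL) priority.
clusterService.submitStateUpdateTask("noop-demo", new ClusterStateUpdateTask() {
    @Override
    public ClusterState execute(ClusterState currentState) throws Exception {
        // A real task would derive and return a new ClusterState here.
        return currentState;
    }

    @Override
    public void onFailure(String source, Throwable t) {
        // A real task would log or retry here.
    }
});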
114 | public interface TransactionRecovery {
/**
* Shuts down the transaction recovery process
*
* @throws TitanException
*/
public void shutdown() throws TitanException;
} | 0true
| titan-core_src_main_java_com_thinkaurelius_titan_core_log_TransactionRecovery.java |
3,076 | public class DocumentAlreadyExistsException extends EngineException {
public DocumentAlreadyExistsException(ShardId shardId, String type, String id) {
super(shardId, "[" + type + "][" + id + "]: document already exists");
}
@Override
public RestStatus status() {
return RestStatus.CONFLICT;
}
} | 0true
| src_main_java_org_elasticsearch_index_engine_DocumentAlreadyExistsException.java |
1,688 | blobStore.executor().execute(new Runnable() {
@Override
public void run() {
byte[] buffer = new byte[blobStore.bufferSizeInBytes()];
InputStream is = null;
try {
is = new URL(path, blobName).openStream();
int bytesRead;
while ((bytesRead = is.read(buffer)) != -1) {
listener.onPartial(buffer, 0, bytesRead);
}
} catch (Throwable t) {
IOUtils.closeWhileHandlingException(is);
listener.onFailure(t);
return;
}
try {
IOUtils.closeWhileHandlingException(is);
listener.onCompleted();
} catch (Throwable t) {
listener.onFailure(t);
}
}
}); | 0true
| src_main_java_org_elasticsearch_common_blobstore_url_AbstractURLBlobContainer.java |
1,345 | public class OWALRecordsFactory {
private Map<Byte, Class> idToTypeMap = new HashMap<Byte, Class>();
private Map<Class, Byte> typeToIdMap = new HashMap<Class, Byte>();
public static final OWALRecordsFactory INSTANCE = new OWALRecordsFactory();
public byte[] toStream(OWALRecord walRecord) {
int contentSize = walRecord.serializedSize() + 1;
byte[] content = new byte[contentSize];
if (walRecord instanceof OUpdatePageRecord)
content[0] = 0;
else if (walRecord instanceof OFuzzyCheckpointStartRecord)
content[0] = 1;
else if (walRecord instanceof OFuzzyCheckpointEndRecord)
content[0] = 2;
else if (walRecord instanceof ODirtyPagesRecord)
content[0] = 3;
else if (walRecord instanceof OFullCheckpointStartRecord)
content[0] = 4;
else if (walRecord instanceof OCheckpointEndRecord)
content[0] = 5;
else if (walRecord instanceof OAtomicUnitStartRecord)
content[0] = 8;
else if (walRecord instanceof OAtomicUnitEndRecord)
content[0] = 9;
else if (typeToIdMap.containsKey(walRecord.getClass())) {
content[0] = typeToIdMap.get(walRecord.getClass());
} else
throw new IllegalArgumentException(walRecord.getClass().getName() + " class can not be serialized.");
walRecord.toStream(content, 1);
return content;
}
public OWALRecord fromStream(byte[] content) {
OWALRecord walRecord;
switch (content[0]) {
case 0:
walRecord = new OUpdatePageRecord();
break;
case 1:
walRecord = new OFuzzyCheckpointStartRecord();
break;
case 2:
walRecord = new OFuzzyCheckpointEndRecord();
break;
case 3:
walRecord = new ODirtyPagesRecord();
break;
case 4:
walRecord = new OFullCheckpointStartRecord();
break;
case 5:
walRecord = new OCheckpointEndRecord();
break;
case 8:
walRecord = new OAtomicUnitStartRecord();
break;
case 9:
walRecord = new OAtomicUnitEndRecord();
break;
default:
if (idToTypeMap.containsKey(content[0]))
try {
walRecord = (OWALRecord) idToTypeMap.get(content[0]).newInstance();
} catch (InstantiationException e) {
throw new IllegalStateException("Can not deserialize passed in record", e);
} catch (IllegalAccessException e) {
throw new IllegalStateException("Can not deserialize passed in record", e);
}
else
throw new IllegalStateException("Can not deserialize passed in wal record.");
}
walRecord.fromStream(content, 1);
return walRecord;
}
public void registerNewRecord(byte id, Class<? extends OWALRecord> type) {
typeToIdMap.put(type, id);
idToTypeMap.put(id, type);
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_paginated_wal_OWALRecordsFactory.java |
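A sketch of extending the factory above; `OMyWALRecord` is a hypothetical OWALRecord subclass, and the chosen id must avoid the built-in ids (0-5, 8, 9) hard-coded in toStream/fromStream.
// Hedged sketch: register a custom record type, then round-trip it.
OWALRecordsFactory.INSTANCE.registerNewRecord((byte) 10, OMyWALRecord.class);
byte[] bytes = OWALRecordsFactory.INSTANCE.toStream(new OMyWALRecord());
OWALRecord restored = OWALRecordsFactory.INSTANCE.fromStream(bytes);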
1,208 | public interface PaymentModule {
public PaymentResponseItem authorize(PaymentContext paymentContext) throws PaymentException;
public PaymentResponseItem reverseAuthorize(PaymentContext paymentContext) throws PaymentException;
public PaymentResponseItem debit(PaymentContext paymentContext) throws PaymentException;
public PaymentResponseItem authorizeAndDebit(PaymentContext paymentContext) throws PaymentException;
public PaymentResponseItem credit(PaymentContext paymentContext) throws PaymentException;
public PaymentResponseItem voidPayment(PaymentContext paymentContext) throws PaymentException;
public PaymentResponseItem balance(PaymentContext paymentContext) throws PaymentException;
public PaymentResponseItem partialPayment(PaymentContext paymentContext) throws PaymentException;
public Boolean isValidCandidate(PaymentInfoType paymentType);
public PaymentResponseItem processReverseAuthorize(PaymentContext paymentContext, Money amountToReverseAuthorize, PaymentResponseItem responseItem) throws PaymentException;
public PaymentResponseItem processAuthorize(PaymentContext paymentContext, Money amountToAuthorize, PaymentResponseItem responseItem) throws PaymentException;
public PaymentResponseItem processDebit(PaymentContext paymentContext, Money amountToDebit, PaymentResponseItem responseItem) throws PaymentException;
public PaymentResponseItem processAuthorizeAndDebit(PaymentContext paymentContext, Money amountToDebit, PaymentResponseItem responseItem) throws PaymentException;
public PaymentResponseItem processCredit(PaymentContext paymentContext, Money amountToCredit, PaymentResponseItem responseItem) throws PaymentException;
public PaymentResponseItem processVoidPayment(PaymentContext paymentContext, Money amountToVoid, PaymentResponseItem responseItem) throws PaymentException;
public PaymentResponseItem processBalance(PaymentContext paymentContext, PaymentResponseItem responseItem) throws PaymentException;
public PaymentResponseItem processPartialPayment(PaymentContext paymentContext, Money amountToDebit, PaymentResponseItem responseItem) throws PaymentException;
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_payment_service_module_PaymentModule.java |
2,350 | public class BoundTransportAddress implements Streamable {
private TransportAddress boundAddress;
private TransportAddress publishAddress;
BoundTransportAddress() {
}
public BoundTransportAddress(TransportAddress boundAddress, TransportAddress publishAddress) {
this.boundAddress = boundAddress;
this.publishAddress = publishAddress;
}
public TransportAddress boundAddress() {
return boundAddress;
}
public TransportAddress publishAddress() {
return publishAddress;
}
public static BoundTransportAddress readBoundTransportAddress(StreamInput in) throws IOException {
BoundTransportAddress addr = new BoundTransportAddress();
addr.readFrom(in);
return addr;
}
@Override
public void readFrom(StreamInput in) throws IOException {
boundAddress = TransportAddressSerializers.addressFromStream(in);
publishAddress = TransportAddressSerializers.addressFromStream(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
TransportAddressSerializers.addressToStream(out, boundAddress);
TransportAddressSerializers.addressToStream(out, publishAddress);
}
@Override
public String toString() {
return "bound_address {" + boundAddress + "}, publish_address {" + publishAddress + "}";
}
} | 0true
| src_main_java_org_elasticsearch_common_transport_BoundTransportAddress.java |
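A sketch of the Streamable round-trip this class supports; the in-memory stream types are assumed to be Elasticsearch's BytesStreamOutput/BytesStreamInput, and `original` is an existing instance.
// Hedged sketch: serialize and deserialize a BoundTransportAddress.
BytesStreamOutput out = new BytesStreamOutput();
original.writeTo(out);
StreamInput in = new BytesStreamInput(out.bytes());
BoundTransportAddress copy = BoundTransportAddress.readBoundTransportAddress(in);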
214 | protected abstract class NextSubWordAction extends TextNavigationAction {
protected CeylonWordIterator fIterator= new CeylonWordIterator();
/**
* Creates a new next sub-word action.
*
* @param code Action code for the default operation. Must be an action code from {@link org.eclipse.swt.custom.ST}.
*/
protected NextSubWordAction(int code) {
super(getSourceViewer().getTextWidget(), code);
}
@Override
public void run() {
// Check whether we are in a java code partition and the preference is enabled
final IPreferenceStore store= getPreferenceStore();
if (!store.getBoolean(SUB_WORD_NAVIGATION)) {
super.run();
return;
}
final ISourceViewer viewer= getSourceViewer();
final IDocument document= viewer.getDocument();
try {
fIterator.setText((CharacterIterator) new DocumentCharacterIterator(document));
int position= widgetOffset2ModelOffset(viewer, viewer.getTextWidget().getCaretOffset());
if (position == -1)
return;
int next= findNextPosition(position);
if (isBlockSelectionModeEnabled() &&
document.getLineOfOffset(next) != document.getLineOfOffset(position)) {
super.run(); // may navigate into virtual white space
} else if (next != BreakIterator.DONE) {
setCaretPosition(next);
getTextWidget().showSelection();
fireSelectionChanged();
}
} catch (BadLocationException x) {
// ignore
}
}
/**
* Finds the next position after the given position.
*
* @param position the current position
* @return the next position
*/
protected int findNextPosition(int position) {
ISourceViewer viewer= getSourceViewer();
int widget= -1;
int next= position;
while (next != BreakIterator.DONE && widget == -1) { // XXX: optimize
next= fIterator.following(next);
if (next != BreakIterator.DONE)
widget= modelOffset2WidgetOffset(viewer, next);
}
IDocument document= viewer.getDocument();
LinkedModeModel model= LinkedModeModel.getModel(document, position);
if (model != null && next != BreakIterator.DONE) {
LinkedPosition linkedPosition=
model.findPosition(new LinkedPosition(document, position, 0));
if (linkedPosition != null) {
int linkedPositionEnd=
linkedPosition.getOffset() + linkedPosition.getLength();
if (position != linkedPositionEnd && linkedPositionEnd < next)
next= linkedPositionEnd;
} else {
LinkedPosition nextLinkedPosition=
model.findPosition(new LinkedPosition(document, next, 0));
if (nextLinkedPosition != null) {
int nextLinkedPositionOffset= nextLinkedPosition.getOffset();
if (position != nextLinkedPositionOffset && nextLinkedPositionOffset < next)
next= nextLinkedPositionOffset;
}
}
}
return next;
}
/**
* Sets the caret position to the sub-word boundary given with <code>position</code>.
*
* @param position Position where the action should move the caret
*/
protected abstract void setCaretPosition(int position);
} | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_editor_CeylonEditor.java |
1,277 | public class OTxSegment extends OSingleFileSegment {
public static final byte STATUS_FREE = 0;
public static final byte STATUS_COMMITTING = 1;
public static final byte OPERATION_CREATE = 0;
public static final byte OPERATION_DELETE = 1;
public static final byte OPERATION_UPDATE = 2;
private static final int DEF_START_SIZE = 262144;
private static final int OFFSET_TX_ID = 2;
private static final int CLUSTER_OFFSET_SIZE = OClusterPositionFactory.INSTANCE.getSerializedSize();
private static final int OFFSET_RECORD_SIZE = 13 + CLUSTER_OFFSET_SIZE
+ OVersionFactory.instance().getVersionSize();
private static final int OFFSET_RECORD_CONTENT = 17 + CLUSTER_OFFSET_SIZE
+ OVersionFactory.instance().getVersionSize();
private final boolean synchEnabled;
private OSharedResourceAdaptiveExternal lock = new OSharedResourceAdaptiveExternal(
OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean(),
0, true);
public OTxSegment(final OStorageLocal iStorage, final OStorageTxConfiguration iConfig) throws IOException {
super(iStorage, iConfig, OGlobalConfiguration.TX_LOG_TYPE.getValueAsString());
synchEnabled = OGlobalConfiguration.TX_LOG_SYNCH.getValueAsBoolean();
}
/**
* Opens the file segment and recovers pending transactions if any
*/
@Override
public boolean open() throws IOException {
lock.acquireExclusiveLock();
try {
// IGNORE IF IT'S SOFTLY CLOSED
super.open();
// CHECK FOR PENDING TRANSACTION ENTRIES TO RECOVER
recoverTransactions();
return true;
} finally {
lock.releaseExclusiveLock();
}
}
@Override
public void create(final int iStartSize) throws IOException {
lock.acquireExclusiveLock();
try {
super.create(iStartSize > -1 ? iStartSize : DEF_START_SIZE);
} finally {
lock.releaseExclusiveLock();
}
}
/**
* Appends a log entry
*/
public void addLog(final byte iOperation, final int iTxId, final int iClusterId, final OClusterPosition iClusterOffset,
final byte iRecordType, final ORecordVersion iRecordVersion, final byte[] iRecordContent, int dataSegmentId)
throws IOException {
final int contentSize = iRecordContent != null ? iRecordContent.length : 0;
final int size = OFFSET_RECORD_CONTENT + contentSize;
lock.acquireExclusiveLock();
try {
long offset = file.allocateSpace(size);
file.writeByte(offset, STATUS_COMMITTING);
offset += OBinaryProtocol.SIZE_BYTE;
file.writeByte(offset, iOperation);
offset += OBinaryProtocol.SIZE_BYTE;
file.writeInt(offset, iTxId);
offset += OBinaryProtocol.SIZE_INT;
file.writeShort(offset, (short) iClusterId);
offset += OBinaryProtocol.SIZE_SHORT;
final byte[] clusterContent = iClusterOffset.toStream();
file.write(offset, clusterContent);
offset += CLUSTER_OFFSET_SIZE;
file.writeByte(offset, iRecordType);
offset += OBinaryProtocol.SIZE_BYTE;
offset += iRecordVersion.getSerializer().writeTo(file, offset, iRecordVersion);
file.writeInt(offset, dataSegmentId);
offset += OBinaryProtocol.SIZE_INT;
file.writeInt(offset, contentSize);
offset += OBinaryProtocol.SIZE_INT;
file.write(offset, iRecordContent);
offset += contentSize;
if (synchEnabled)
file.synch();
} finally {
lock.releaseExclusiveLock();
}
}
/**
* Clears the entire file.
*
* @param iTxId
* The id of the transaction
*
* @throws IOException
*/
public void clearLogEntries(final int iTxId) throws IOException {
lock.acquireExclusiveLock();
try {
truncate();
} finally {
lock.releaseExclusiveLock();
}
}
public void rollback(final OTransaction iTx) throws IOException {
lock.acquireExclusiveLock();
try {
recoverTransaction(iTx.getId());
} finally {
lock.releaseExclusiveLock();
}
}
private void recoverTransactions() throws IOException {
if (file.getFilledUpTo() == 0)
return;
OLogManager.instance().debug(this, "Started the recovering of pending transactions after a hard shutdown. Scanning...");
int recoveredTxs = 0;
int recoveredRecords = 0;
int recs;
final Set<Integer> txToRecover = scanForTransactionsToRecover();
for (Integer txId : txToRecover) {
recs = recoverTransaction(txId);
if (recs > 0) {
recoveredTxs++;
recoveredRecords += recs;
}
}
// EMPTY THE FILE
file.shrink(0);
if (recoveredRecords > 0) {
OLogManager.instance().warn(this, "Recovering successfully completed:");
OLogManager.instance().warn(this, "- Recovered Tx.....: " + recoveredTxs);
OLogManager.instance().warn(this, "- Recovered Records: " + recoveredRecords);
} else
OLogManager.instance().debug(this, "Recovering successfully completed: no pending tx records found.");
}
/**
* Scans the segment and returns the set of transactions ids to recover.
*/
private Set<Integer> scanForTransactionsToRecover() throws IOException {
// SCAN ALL THE FILE SEARCHING FOR THE TRANSACTIONS TO RECOVER
final Set<Integer> txToRecover = new HashSet<Integer>();
final Set<Integer> txToNotRecover = new HashSet<Integer>();
// BROWSE ALL THE ENTRIES
for (long offset = 0; eof(offset); offset = nextEntry(offset)) {
// READ STATUS
final byte status = file.readByte(offset);
// READ TX-ID
final int txId = file.readInt(offset + OFFSET_TX_ID);
switch (status) {
case STATUS_FREE:
// DO NOT RECOVER IT: FINDING AT LEAST ONE "FREE" STATUS MEANS THAT ALL THE LOGS WERE COMMITTED BUT THE USER DIDN'T
// RECEIVE THE ACK
txToNotRecover.add(txId);
break;
case STATUS_COMMITTING:
// TO RECOVER UNLESS THE REQ/TX IS IN THE MAP txToNotRecover
txToRecover.add(txId);
break;
}
}
if (txToNotRecover.size() > 0)
// FILTER THE TX MAP TO RECOVER BY REMOVING THE TX WITH AT LEAST ONE "FREE" STATUS
txToRecover.removeAll(txToNotRecover);
return txToRecover;
}
/**
* Recover a transaction.
*
* @param iTxId
* @return Number of records recovered
*
* @throws IOException
*/
private int recoverTransaction(final int iTxId) throws IOException {
int recordsRecovered = 0;
final ORecordId rid = new ORecordId();
final List<Long> txRecordPositions = new ArrayList<Long>();
// BROWSE ALL THE ENTRIES
for (long beginEntry = 0; eof(beginEntry); beginEntry = nextEntry(beginEntry)) {
long offset = beginEntry;
final byte status = file.readByte(offset);
offset += OBinaryProtocol.SIZE_BYTE;
if (status != STATUS_FREE) {
// DIRTY TX LOG ENTRY
offset += OBinaryProtocol.SIZE_BYTE;
final int txId = file.readInt(offset);
if (txId == iTxId) {
txRecordPositions.add(beginEntry);
}
}
}
for (int i = txRecordPositions.size() - 1; i >= 0; i--) {
final long beginEntry = txRecordPositions.get(i);
long offset = beginEntry;
final byte status = file.readByte(offset);
offset += OBinaryProtocol.SIZE_BYTE;
// DIRTY TX LOG ENTRY
final byte operation = file.readByte(offset);
offset += OBinaryProtocol.SIZE_BYTE;
// TX ID FOUND
final int txId = file.readInt(offset);
offset += OBinaryProtocol.SIZE_INT;
rid.clusterId = file.readShort(offset);
offset += OBinaryProtocol.SIZE_SHORT;
final byte[] content = new byte[OClusterPositionFactory.INSTANCE.getSerializedSize()];
file.read(offset, content, content.length);
rid.clusterPosition = OClusterPositionFactory.INSTANCE.fromStream(content);
offset += CLUSTER_OFFSET_SIZE;
final byte recordType = file.readByte(offset);
offset += OBinaryProtocol.SIZE_BYTE;
final ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
offset += recordVersion.getSerializer().readFrom(file, offset, recordVersion);
final int dataSegmentId = file.readInt(offset);
offset += OBinaryProtocol.SIZE_INT;
final int recordSize = file.readInt(offset);
offset += OBinaryProtocol.SIZE_INT;
final byte[] buffer;
if (recordSize > 0) {
buffer = new byte[recordSize];
file.read(offset, buffer, recordSize);
offset += recordSize;
} else
buffer = null;
recoverTransactionEntry(status, operation, txId, rid, recordType, recordVersion, buffer, dataSegmentId);
recordsRecovered++;
// CLEAR THE ENTRY BY WRITING '0'
file.writeByte(beginEntry, STATUS_FREE);
}
return recordsRecovered;
}
private void recoverTransactionEntry(final byte iStatus, final byte iOperation, final int iTxId, final ORecordId iRid,
final byte iRecordType, final ORecordVersion iRecordVersion, final byte[] iRecordContent, int dataSegmentId)
throws IOException {
final OCluster cluster = storage.getClusterById(iRid.clusterId);
if (!(cluster instanceof OClusterLocal))
return;
OLogManager.instance().debug(this, "Recovering tx <%d>. Operation <%d> was in status <%d> on record %s size=%d...", iTxId,
iOperation, iStatus, iRid, iRecordContent != null ? iRecordContent.length : 0);
switch (iOperation) {
case OPERATION_CREATE:
// JUST DELETE THE RECORD
storage.deleteRecord(iRid, OVersionFactory.instance().createUntrackedVersion(), 0, null);
break;
case OPERATION_UPDATE:
// REPLACE WITH THE OLD ONE
iRecordVersion.setRollbackMode();
storage.updateRecord(cluster, iRid, iRecordContent, iRecordVersion, iRecordType);
break;
case OPERATION_DELETE:
final ODataLocal dataSegment = (ODataLocal) storage.getDataSegmentById(dataSegmentId);
storage.createRecord(dataSegment, cluster, iRecordContent, iRecordType, iRid, iRecordVersion);
break;
}
}
private boolean eof(final long iOffset) {
return iOffset + OFFSET_RECORD_CONTENT < file.getFilledUpTo();
}
private long nextEntry(final long iOffset) throws IOException {
final int recordSize = file.readInt(iOffset + OFFSET_RECORD_SIZE);
return iOffset + OFFSET_RECORD_CONTENT + recordSize;
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_OTxSegment.java |
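A sketch of the commit-log discipline implied by the class above; all variables (`txSegment`, `txId`, `rid`, `recordType`, `recordVersion`, `previousContent`, `dataSegmentId`) are assumptions standing in for storage-layer state.
// Hedged sketch: log the pre-image before an update, then clear the log
// once the change is durable, so a crash in between is recoverable.
txSegment.addLog(OTxSegment.OPERATION_UPDATE, txId, rid.clusterId,
    rid.clusterPosition, recordType, recordVersion, previousContent, dataSegmentId);
// ... apply the update to the cluster ...
txSegment.clearLogEntries(txId);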
1,538 | public interface UpdateCartServiceExtensionHandler extends ExtensionHandler {
/**
* Throws an exception if the cart is invalid.
*
* @param cart
* @param resultHolder
* @return
*/
public ExtensionResultStatusType updateAndValidateCart(Order cart, ExtensionResultHolder resultHolder);
} | 0true
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_service_UpdateCartServiceExtensionHandler.java |
1,672 | public interface BlobStore {
ImmutableBlobContainer immutableBlobContainer(BlobPath path);
void delete(BlobPath path);
void close();
} | 0true
| src_main_java_org_elasticsearch_common_blobstore_BlobStore.java |
1,238 | public class Requests {
/**
* The content type used to generate request builders (query / search).
*/
public static XContentType CONTENT_TYPE = XContentType.SMILE;
/**
* The default content type to use to generate source documents when indexing.
*/
public static XContentType INDEX_CONTENT_TYPE = XContentType.JSON;
public static IndexRequest indexRequest() {
return new IndexRequest();
}
/**
* Create an index request against a specific index. Note the {@link IndexRequest#type(String)} must be
* set as well and optionally the {@link IndexRequest#id(String)}.
*
* @param index The index name to index the request against
* @return The index request
* @see org.elasticsearch.client.Client#index(org.elasticsearch.action.index.IndexRequest)
*/
public static IndexRequest indexRequest(String index) {
return new IndexRequest(index);
}
/**
* Creates a delete request against a specific index. Note the {@link DeleteRequest#type(String)} and
* {@link DeleteRequest#id(String)} must be set.
*
* @param index The index name to delete from
* @return The delete request
* @see org.elasticsearch.client.Client#delete(org.elasticsearch.action.delete.DeleteRequest)
*/
public static DeleteRequest deleteRequest(String index) {
return new DeleteRequest(index);
}
/**
* Creates a new bulk request.
*/
public static BulkRequest bulkRequest() {
return new BulkRequest();
}
/**
* Creates a delete by query request. Note, the query itself must be set either by setting the JSON source
* of the query, or by using a {@link org.elasticsearch.index.query.QueryBuilder} (using {@link org.elasticsearch.index.query.QueryBuilders}).
*
* @param indices The indices to execute the delete by query against. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
* @return The delete by query request
* @see org.elasticsearch.client.Client#deleteByQuery(org.elasticsearch.action.deletebyquery.DeleteByQueryRequest)
*/
public static DeleteByQueryRequest deleteByQueryRequest(String... indices) {
return new DeleteByQueryRequest(indices);
}
/**
* Creates a get request to get the JSON source from an index based on a type and id. Note, the
* {@link GetRequest#type(String)} and {@link GetRequest#id(String)} must be set.
*
* @param index The index to get the JSON source from
* @return The get request
* @see org.elasticsearch.client.Client#get(org.elasticsearch.action.get.GetRequest)
*/
public static GetRequest getRequest(String index) {
return new GetRequest(index);
}
/**
* Creates a count request which counts the hits matched against a query. Note, the query itself must be set
* either using the JSON source of the query, or using a {@link org.elasticsearch.index.query.QueryBuilder} (using {@link org.elasticsearch.index.query.QueryBuilders}).
*
* @param indices The indices to count matched documents against a query. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
* @return The count request
* @see org.elasticsearch.client.Client#count(org.elasticsearch.action.count.CountRequest)
*/
public static CountRequest countRequest(String... indices) {
return new CountRequest(indices);
}
/**
* More like this request represents a request to search for documents that are "like" the provided (fetched)
* document.
*
* @param index The index to load the document from
* @return The more like this request
* @see org.elasticsearch.client.Client#moreLikeThis(org.elasticsearch.action.mlt.MoreLikeThisRequest)
*/
public static MoreLikeThisRequest moreLikeThisRequest(String index) {
return new MoreLikeThisRequest(index);
}
/**
* Creates a search request against one or more indices. Note, the search source must be set either using the
* actual JSON search source, or the {@link org.elasticsearch.search.builder.SearchSourceBuilder}.
*
* @param indices The indices to search against. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
* @return The search request
* @see org.elasticsearch.client.Client#search(org.elasticsearch.action.search.SearchRequest)
*/
public static SearchRequest searchRequest(String... indices) {
return new SearchRequest(indices);
}
/**
* Creates a search scroll request allowing to continue searching a previous search request.
*
* @param scrollId The scroll id representing the scrollable search
* @return The search scroll request
* @see org.elasticsearch.client.Client#searchScroll(org.elasticsearch.action.search.SearchScrollRequest)
*/
public static SearchScrollRequest searchScrollRequest(String scrollId) {
return new SearchScrollRequest(scrollId);
}
/**
* Creates an indices status request.
*
* @param indices The indices to query status about. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
* @return The indices status request
* @see org.elasticsearch.client.IndicesAdminClient#status(org.elasticsearch.action.admin.indices.status.IndicesStatusRequest)
*/
public static IndicesStatusRequest indicesStatusRequest(String... indices) {
return new IndicesStatusRequest(indices);
}
public static IndicesSegmentsRequest indicesSegmentsRequest(String... indices) {
return new IndicesSegmentsRequest(indices);
}
/**
* Creates an indices exists request.
*
* @param indices The indices to check if they exists or not.
* @return The indices exists request
* @see org.elasticsearch.client.IndicesAdminClient#exists(org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest)
*/
public static IndicesExistsRequest indicesExistsRequest(String... indices) {
return new IndicesExistsRequest(indices);
}
/**
* Creates a create index request.
*
* @param index The index to create
* @return The index create request
* @see org.elasticsearch.client.IndicesAdminClient#create(org.elasticsearch.action.admin.indices.create.CreateIndexRequest)
*/
public static CreateIndexRequest createIndexRequest(String index) {
return new CreateIndexRequest(index);
}
/**
* Creates a delete index request.
*
* @param index The index to delete
* @return The delete index request
* @see org.elasticsearch.client.IndicesAdminClient#delete(org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest)
*/
public static DeleteIndexRequest deleteIndexRequest(String index) {
return new DeleteIndexRequest(index);
}
/**
* Creates a close index request.
*
* @param index The index to close
* @return The delete index request
* @see org.elasticsearch.client.IndicesAdminClient#close(org.elasticsearch.action.admin.indices.close.CloseIndexRequest)
*/
public static CloseIndexRequest closeIndexRequest(String index) {
return new CloseIndexRequest(index);
}
/**
* Creates an open index request.
*
* @param index The index to open
* @return The delete index request
* @see org.elasticsearch.client.IndicesAdminClient#open(org.elasticsearch.action.admin.indices.open.OpenIndexRequest)
*/
public static OpenIndexRequest openIndexRequest(String index) {
return new OpenIndexRequest(index);
}
/**
* Creates a create mapping request against one or more indices.
*
* @param indices The indices to create mapping. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
* @return The create mapping request
* @see org.elasticsearch.client.IndicesAdminClient#putMapping(org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest)
*/
public static PutMappingRequest putMappingRequest(String... indices) {
return new PutMappingRequest(indices);
}
/**
* Deletes mapping (and all its data) from one or more indices.
*
* @param indices The indices the mapping will be deleted from. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
* @return The create mapping request
* @see org.elasticsearch.client.IndicesAdminClient#deleteMapping(org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingRequest)
*/
public static DeleteMappingRequest deleteMappingRequest(String... indices) {
return new DeleteMappingRequest(indices);
}
/**
* Creates an index aliases request allowing to add and remove aliases.
*
* @return The index aliases request
*/
public static IndicesAliasesRequest indexAliasesRequest() {
return new IndicesAliasesRequest();
}
/**
* Creates a refresh indices request.
*
* @param indices The indices to refresh. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
* @return The refresh request
* @see org.elasticsearch.client.IndicesAdminClient#refresh(org.elasticsearch.action.admin.indices.refresh.RefreshRequest)
*/
public static RefreshRequest refreshRequest(String... indices) {
return new RefreshRequest(indices);
}
/**
* Creates a flush indices request.
*
* @param indices The indices to flush. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
* @return The flush request
* @see org.elasticsearch.client.IndicesAdminClient#flush(org.elasticsearch.action.admin.indices.flush.FlushRequest)
*/
public static FlushRequest flushRequest(String... indices) {
return new FlushRequest(indices);
}
/**
* Creates an optimize request.
*
* @param indices The indices to optimize. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
* @return The optimize request
* @see org.elasticsearch.client.IndicesAdminClient#optimize(org.elasticsearch.action.admin.indices.optimize.OptimizeRequest)
*/
public static OptimizeRequest optimizeRequest(String... indices) {
return new OptimizeRequest(indices);
}
/**
* Creates a gateway snapshot indices request.
*
* @param indices The indices the gateway snapshot will be performed on. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
* @return The gateway snapshot request
* @see org.elasticsearch.client.IndicesAdminClient#gatewaySnapshot(org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequest)
* @deprecated Use snapshot/restore API instead
*/
@Deprecated
public static GatewaySnapshotRequest gatewaySnapshotRequest(String... indices) {
return new GatewaySnapshotRequest(indices);
}
/**
* Creates a clean indices cache request.
*
* @param indices The indices to clean their caches. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
* @return The request
*/
public static ClearIndicesCacheRequest clearIndicesCacheRequest(String... indices) {
return new ClearIndicesCacheRequest(indices);
}
/**
* A request to update indices settings.
*
* @param indices The indices to update the settings for. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices.
* @return The request
*/
public static UpdateSettingsRequest updateSettingsRequest(String... indices) {
return new UpdateSettingsRequest(indices);
}
/**
* Creates a cluster state request.
*
* @return The cluster state request.
* @see org.elasticsearch.client.ClusterAdminClient#state(org.elasticsearch.action.admin.cluster.state.ClusterStateRequest)
*/
public static ClusterStateRequest clusterStateRequest() {
return new ClusterStateRequest();
}
public static ClusterRerouteRequest clusterRerouteRequest() {
return new ClusterRerouteRequest();
}
public static ClusterUpdateSettingsRequest clusterUpdateSettingsRequest() {
return new ClusterUpdateSettingsRequest();
}
/**
* Creates a cluster health request.
*
* @param indices The indices to provide additional cluster health information for. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
* @return The cluster health request
* @see org.elasticsearch.client.ClusterAdminClient#health(org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest)
*/
public static ClusterHealthRequest clusterHealthRequest(String... indices) {
return new ClusterHealthRequest(indices);
}
/**
* List all shards for the given search
*/
public static ClusterSearchShardsRequest clusterSearchShardsRequest() {
return new ClusterSearchShardsRequest();
}
/**
* List all shards for the given search
*/
public static ClusterSearchShardsRequest clusterSearchShardsRequest(String... indices) {
return new ClusterSearchShardsRequest(indices);
}
/**
* Creates a nodes info request against all the nodes.
*
* @return The nodes info request
* @see org.elasticsearch.client.ClusterAdminClient#nodesInfo(org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest)
*/
public static NodesInfoRequest nodesInfoRequest() {
return new NodesInfoRequest();
}
/**
* Creates a nodes info request against one or more nodes. Pass <tt>null</tt> or an empty array for all nodes.
*
* @param nodesIds The nodes ids to get the status for
* @return The nodes info request
* @see org.elasticsearch.client.ClusterAdminClient#nodesStats(org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest)
*/
public static NodesInfoRequest nodesInfoRequest(String... nodesIds) {
return new NodesInfoRequest(nodesIds);
}
/**
* Creates a nodes stats request against one or more nodes. Pass <tt>null</tt> or an empty array for all nodes.
*
* @param nodesIds The nodes ids to get the stats for
* @return The nodes stats request
* @see org.elasticsearch.client.ClusterAdminClient#nodesStats(org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest)
*/
public static NodesStatsRequest nodesStatsRequest(String... nodesIds) {
return new NodesStatsRequest(nodesIds);
}
/**
* Creates a cluster stats request.
*
* @return The cluster stats request
* @see org.elasticsearch.client.ClusterAdminClient#clusterStats(org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest)
*/
public static ClusterStatsRequest clusterStatsRequest() {
return new ClusterStatsRequest();
}
/**
* Shuts down all nodes in the cluster.
*/
public static NodesShutdownRequest nodesShutdownRequest() {
return new NodesShutdownRequest();
}
/**
* Shuts down the specified nodes in the cluster.
*
* @param nodesIds The nodes ids to get the status for
* @return The nodes info request
* @see org.elasticsearch.client.ClusterAdminClient#nodesShutdown(org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequest)
*/
public static NodesShutdownRequest nodesShutdownRequest(String... nodesIds) {
return new NodesShutdownRequest(nodesIds);
}
/**
* Restarts all nodes in the cluster.
*/
public static NodesRestartRequest nodesRestartRequest() {
return new NodesRestartRequest();
}
/**
* Restarts specific nodes in the cluster.
*
* @param nodesIds The nodes ids to restart
* @return The nodes info request
* @see org.elasticsearch.client.ClusterAdminClient#nodesRestart(org.elasticsearch.action.admin.cluster.node.restart.NodesRestartRequest)
*/
public static NodesRestartRequest nodesRestartRequest(String... nodesIds) {
return new NodesRestartRequest(nodesIds);
}
/**
* Registers snapshot repository
*
* @param name repository name
* @return repository registration request
*/
public static PutRepositoryRequest putRepositoryRequest(String name) {
return new PutRepositoryRequest(name);
}
/**
* Gets snapshot repository
*
* @param repositories names of repositories
* @return get repository request
*/
public static GetRepositoriesRequest getRepositoryRequest(String... repositories) {
return new GetRepositoriesRequest(repositories);
}
/**
* Deletes registration for snapshot repository
*
* @param name repository name
* @return delete repository request
*/
public static DeleteRepositoryRequest deleteRepositoryRequest(String name) {
return new DeleteRepositoryRequest(name);
}
/**
* Creates a new snapshot
*
* @param repository repository name
* @param snapshot snapshot name
* @return create snapshot request
*/
public static CreateSnapshotRequest createSnapshotRequest(String repository, String snapshot) {
return new CreateSnapshotRequest(repository, snapshot);
}
/**
* Gets snapshots from repository
*
* @param repository repository name
* @return get snapshot request
*/
public static GetSnapshotsRequest getSnapshotsRequest(String repository) {
return new GetSnapshotsRequest(repository);
}
/**
* Restores a snapshot
*
* @param repository repository name
* @param snapshot snapshot name
* @return restore snapshot request
*/
public static RestoreSnapshotRequest restoreSnapshotRequest(String repository, String snapshot) {
return new RestoreSnapshotRequest(repository, snapshot);
}
/**
* Deletes a snapshot
*
* @param repository repository name
* @param snapshot snapshot name
* @return delete snapshot request
*/
public static DeleteSnapshotRequest deleteSnapshotRequest(String repository, String snapshot) {
return new DeleteSnapshotRequest(repository, snapshot);
}
} | 0true
| src_main_java_org_elasticsearch_client_Requests.java |
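A minimal sketch combining a few of the static helpers above with a Client; `client` and the index/type names are assumptions.
// Hedged sketch: index a document, refresh, then count it back.
client.index(Requests.indexRequest("orders").type("order").id("1")
        .source("{\"total\": 42}")).actionGet();
client.admin().indices().refresh(Requests.refreshRequest("orders")).actionGet();
long count = client.count(Requests.countRequest("orders")).actionGet().getCount();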
202 | public class ONetworkConnectionPool<CH extends OChannel> implements OResourcePoolListener<String, CH> {
private static final int DEF_WAIT_TIMEOUT = 5000;
private final Map<String, OResourcePool<String, CH>> pools = new HashMap<String, OResourcePool<String, CH>>();
private int maxSize;
private int timeout = DEF_WAIT_TIMEOUT;
public ONetworkConnectionPool(final int iMinSize, final int iMaxSize) {
this(iMinSize, iMaxSize, DEF_WAIT_TIMEOUT);
}
public ONetworkConnectionPool(final int iMinSize, final int iMaxSize, final int iTimeout) {
maxSize = iMaxSize;
timeout = iTimeout;
}
public CH createNewResource(String iKey, Object... iAdditionalArgs) {
return null;
}
public CH acquire(final String iRemoteAddress) throws OLockException {
OResourcePool<String, CH> pool = pools.get(iRemoteAddress);
if (pool == null) {
synchronized (pools) {
pool = pools.get(iRemoteAddress);
if (pool == null) {
pool = new OResourcePool<String, CH>(maxSize, this);
pools.put(iRemoteAddress, pool);
}
}
}
return pool.getResource(iRemoteAddress, timeout);
}
public void release(final CH iChannel) {
final String address = iChannel.socket.getInetAddress().toString();
final OResourcePool<String, CH> pool = pools.get(address);
if (pool == null)
throw new OLockException("Cannot release a network channel not acquired before. Remote address: " + address);
pool.returnResource(iChannel);
}
public boolean reuseResource(final String iKey, final Object[] iAdditionalArgs, final CH iValue) {
return true;
}
public Map<String, OResourcePool<String, CH>> getPools() {
return pools;
}
/**
* Closes all the channels.
*/
public void close() {
for (Entry<String, OResourcePool<String, CH>> pool : pools.entrySet()) {
for (CH channel : pool.getValue().getResources()) {
channel.close();
}
}
}
} | 0true
| client_src_main_java_com_orientechnologies_orient_client_remote_ONetworkConnectionPool.java |
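A sketch of the acquire/release discipline the pool expects; `CH` is the pool's concrete channel type and the address key is an assumption.
// Hedged sketch: always release in a finally block so the per-address
// resource pool is not drained by failed calls.
CH channel = pool.acquire("localhost:2424");
try {
    // ... use the channel ...
} finally {
    pool.release(channel);
}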
783 | @Repository("blCustomerOfferDao")
public class CustomerOfferDaoImpl implements CustomerOfferDao {
@PersistenceContext(unitName="blPU")
protected EntityManager em;
@Resource(name="blEntityConfiguration")
protected EntityConfiguration entityConfiguration;
public CustomerOffer create() {
return ((CustomerOffer) entityConfiguration.createEntityInstance(CustomerOffer.class.getName()));
}
public void delete(CustomerOffer customerOffer) {
if (!em.contains(customerOffer)) {
customerOffer = readCustomerOfferById(customerOffer.getId());
}
em.remove(customerOffer);
}
public CustomerOffer save(final CustomerOffer customerOffer) {
return em.merge(customerOffer);
}
public CustomerOffer readCustomerOfferById(final Long customerOfferId) {
return (CustomerOffer) em.find(CustomerOfferImpl.class, customerOfferId);
}
@SuppressWarnings("unchecked")
public List<CustomerOffer> readCustomerOffersByCustomer(final Customer customer) {
final Query query = em.createNamedQuery("BC_READ_CUSTOMER_OFFER_BY_CUSTOMER_ID");
query.setParameter("customerId", customer.getId());
query.setHint(QueryHints.HINT_CACHEABLE, true);
query.setHint(QueryHints.HINT_CACHE_REGION, "query.Catalog");
return query.getResultList();
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_dao_CustomerOfferDaoImpl.java |
1,151 | public class ScrollSearchBenchmark {
// Run with: -Xms1G -Xmx1G
public static void main(String[] args) {
String indexName = "test";
String typeName = "type";
String clusterName = ScrollSearchBenchmark.class.getSimpleName();
long numDocs = SizeValue.parseSizeValue("300k").singles();
int requestSize = 50;
Settings settings = settingsBuilder()
.put(SETTING_NUMBER_OF_SHARDS, 3)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.build();
Node[] nodes = new Node[3];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = nodeBuilder()
.clusterName(clusterName)
.settings(settingsBuilder().put(settings).put("name", "node" + i))
.node();
}
Client client = nodes[0].client();
try {
client.admin().indices().prepareCreate(indexName).get();
for (int counter = 1; counter <= numDocs;) {
BulkRequestBuilder bulkRequestBuilder = client.prepareBulk();
for (int bulkCounter = 0; bulkCounter < 100; bulkCounter++) {
if (counter > numDocs) {
break;
}
bulkRequestBuilder.add(
client.prepareIndex(indexName, typeName, String.valueOf(counter))
.setSource("field1", counter++)
);
}
int indexedDocs = counter - 1;
if (indexedDocs % 100000 == 0) {
System.out.printf("--> Indexed %d so far\n", indexedDocs);
}
bulkRequestBuilder.get();
}
} catch (IndexAlreadyExistsException e) {
System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
}
client.admin().indices().prepareRefresh(indexName).get();
System.out.printf("--> Number of docs in index: %d\n", client.prepareCount().get().getCount());
Long counter = numDocs;
SearchResponse searchResponse = client.prepareSearch(indexName)
.addSort("field1", SortOrder.DESC)
.setSize(requestSize)
.setScroll("10m").get();
if (searchResponse.getHits().getTotalHits() != numDocs) {
System.err.printf("Expected total hits [%d] but got [%d]\n", numDocs, searchResponse.getHits().getTotalHits());
}
if (searchResponse.getHits().hits().length != requestSize) {
System.err.printf("Expected hits length [%d] but got [%d]\n", requestSize, searchResponse.getHits().hits().length);
}
for (SearchHit hit : searchResponse.getHits()) {
if (!hit.sortValues()[0].equals(counter--)) {
System.err.printf("Expected sort value [%d] but got [%s]\n", counter + 1, hit.sortValues()[0]);
}
}
String scrollId = searchResponse.getScrollId();
int scrollRequestCounter = 0;
long sumTimeSpent = 0;
while (true) {
long timeSpent = System.currentTimeMillis();
searchResponse = client.prepareSearchScroll(scrollId).setScroll("10m").get();
sumTimeSpent += (System.currentTimeMillis() - timeSpent);
scrollRequestCounter++;
if (searchResponse.getHits().getTotalHits() != numDocs) {
System.err.printf("Expected total hits [%d] but got [%d]\n", numDocs, searchResponse.getHits().getTotalHits());
}
if (scrollRequestCounter % 20 == 0) {
long avgTimeSpent = sumTimeSpent / 20;
JvmStats.Mem mem = JvmStats.jvmStats().mem();
System.out.printf("Cursor location=%d, avg time spent=%d ms\n", (requestSize * scrollRequestCounter), (avgTimeSpent));
System.out.printf("heap max=%s, used=%s, percentage=%d\n", mem.getHeapMax(), mem.getHeapUsed(), mem.getHeapUsedPrecent());
sumTimeSpent = 0;
}
if (searchResponse.getHits().hits().length == 0) {
break;
}
if (searchResponse.getHits().hits().length != requestSize) {
System.err.printf("Expected hits length [%d] but got [%d]\n", requestSize, searchResponse.getHits().hits().length);
}
for (SearchHit hit : searchResponse.getHits()) {
if (!hit.sortValues()[0].equals(counter--)) {
System.err.printf("Expected sort value [%d] but got [%s]\n", counter + 1, hit.sortValues()[0]);
}
}
scrollId = searchResponse.getScrollId();
}
if (counter != 0) {
System.err.printf("Counter should be 0 because scroll has been consumed\n");
}
for (Node node : nodes) {
node.close();
}
}
} | 0true
| src_test_java_org_elasticsearch_benchmark_search_scroll_ScrollSearchBenchmark.java |
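The scroll idiom the benchmark above exercises, distilled; `client`, the index name, and the keep-alive value are assumptions.
// Hedged sketch: page through all hits until an empty page is returned.
SearchResponse resp = client.prepareSearch("test").setSize(50).setScroll("1m").get();
while (resp.getHits().hits().length > 0) {
    // ... consume resp.getHits() ...
    resp = client.prepareSearchScroll(resp.getScrollId()).setScroll("1m").get();
}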
829 | getDatabase().getStorage().callInLock(new Callable<Object>() {
@Override
public Object call() throws Exception {
reload(null);
return null;
}
}, true); | 0true
| core_src_main_java_com_orientechnologies_orient_core_metadata_schema_OSchemaShared.java |
583 | public class PaymentHostException extends PaymentException {
private static final long serialVersionUID = 1L;
public PaymentHostException() {
super();
}
public PaymentHostException(String message, Throwable cause) {
super(message, cause);
}
public PaymentHostException(String message) {
super(message);
}
public PaymentHostException(Throwable cause) {
super(cause);
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_vendor_service_exception_PaymentHostException.java |
3,155 | public abstract class AbstractStringFieldDataTests extends AbstractFieldDataImplTests {
protected void fillSingleValueAllSet() throws Exception {
Document d = new Document();
d.add(new StringField("_id", "1", Field.Store.NO));
d.add(new StringField("value", "2", Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "1", Field.Store.NO));
d.add(new StringField("value", "1", Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "3", Field.Store.NO));
d.add(new StringField("value", "3", Field.Store.NO));
writer.addDocument(d);
}
protected void add2SingleValuedDocumentsAndDeleteOneOfThem() throws Exception {
Document d = new Document();
d.add(new StringField("_id", "1", Field.Store.NO));
d.add(new StringField("value", "2", Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "2", Field.Store.NO));
d.add(new StringField("value", "4", Field.Store.NO));
writer.addDocument(d);
writer.commit();
writer.deleteDocuments(new Term("_id", "1"));
}
protected void fillSingleValueWithMissing() throws Exception {
Document d = new Document();
d.add(new StringField("_id", "1", Field.Store.NO));
d.add(new StringField("value", "2", Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "2", Field.Store.NO));
//d.add(new StringField("value", one(), Field.Store.NO)); // MISSING....
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "3", Field.Store.NO));
d.add(new StringField("value", "3", Field.Store.NO));
writer.addDocument(d);
}
protected void fillMultiValueAllSet() throws Exception {
Document d = new Document();
d.add(new StringField("_id", "1", Field.Store.NO));
d.add(new StringField("value", "2", Field.Store.NO));
d.add(new StringField("value", "4", Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "2", Field.Store.NO));
d.add(new StringField("value", "1", Field.Store.NO));
writer.addDocument(d);
writer.commit(); // TODO: Have tests with more docs for sorting
d = new Document();
d.add(new StringField("_id", "3", Field.Store.NO));
d.add(new StringField("value", "3", Field.Store.NO));
writer.addDocument(d);
}
protected void fillMultiValueWithMissing() throws Exception {
Document d = new Document();
d.add(new StringField("_id", "1", Field.Store.NO));
d.add(new StringField("value", "2", Field.Store.NO));
d.add(new StringField("value", "4", Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "2", Field.Store.NO));
//d.add(new StringField("value", one(), Field.Store.NO)); // MISSING
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "3", Field.Store.NO));
d.add(new StringField("value", "3", Field.Store.NO));
writer.addDocument(d);
}
protected void fillAllMissing() throws Exception {
Document d = new Document();
d.add(new StringField("_id", "1", Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "2", Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "3", Field.Store.NO));
writer.addDocument(d);
}
protected void fillExtendedMvSet() throws Exception {
Document d = new Document();
d.add(new StringField("_id", "1", Field.Store.NO));
d.add(new StringField("value", "02", Field.Store.NO));
d.add(new StringField("value", "04", Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "2", Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "3", Field.Store.NO));
d.add(new StringField("value", "03", Field.Store.NO));
writer.addDocument(d);
writer.commit();
d = new Document();
d.add(new StringField("_id", "4", Field.Store.NO));
d.add(new StringField("value", "04", Field.Store.NO));
d.add(new StringField("value", "05", Field.Store.NO));
d.add(new StringField("value", "06", Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "5", Field.Store.NO));
d.add(new StringField("value", "06", Field.Store.NO));
d.add(new StringField("value", "07", Field.Store.NO));
d.add(new StringField("value", "08", Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "6", Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "7", Field.Store.NO));
d.add(new StringField("value", "08", Field.Store.NO));
d.add(new StringField("value", "09", Field.Store.NO));
d.add(new StringField("value", "10", Field.Store.NO));
writer.addDocument(d);
writer.commit();
d = new Document();
d.add(new StringField("_id", "8", Field.Store.NO));
d.add(new StringField("value", "!08", Field.Store.NO));
d.add(new StringField("value", "!09", Field.Store.NO));
d.add(new StringField("value", "!10", Field.Store.NO));
writer.addDocument(d);
}
@Repeat(iterations=10)
public void testActualMissingValue() throws IOException {
testActualMissingValue(false);
}
@Repeat(iterations=10)
public void testActualMissingValueReverse() throws IOException {
testActualMissingValue(true);
}
public void testActualMissingValue(boolean reverse) throws IOException {
// missing value is set to an actual value
Document d = new Document();
final StringField s = new StringField("value", "", Field.Store.YES);
d.add(s);
final String[] values = new String[randomIntBetween(2, 30)];
for (int i = 1; i < values.length; ++i) {
values[i] = _TestUtil.randomUnicodeString(getRandom());
}
final int numDocs = atLeast(100);
for (int i = 0; i < numDocs; ++i) {
final String value = RandomPicks.randomFrom(getRandom(), values);
if (value == null) {
writer.addDocument(new Document());
} else {
s.setStringValue(value);
writer.addDocument(d);
}
if (randomInt(10) == 0) {
writer.commit();
}
}
final IndexFieldData indexFieldData = getForField("value");
final String missingValue = values[1];
IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
XFieldComparatorSource comparator = indexFieldData.comparatorSource(missingValue, SortMode.MIN);
TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(new SortField("value", comparator, reverse)));
assertEquals(numDocs, topDocs.totalHits);
BytesRef previousValue = reverse ? UnicodeUtil.BIG_TERM : new BytesRef();
for (int i = 0; i < topDocs.scoreDocs.length; ++i) {
final String docValue = searcher.doc(topDocs.scoreDocs[i].doc).get("value");
final BytesRef value = new BytesRef(docValue == null ? missingValue : docValue);
if (reverse) {
assertTrue(previousValue.compareTo(value) >= 0);
} else {
assertTrue(previousValue.compareTo(value) <= 0);
}
previousValue = value;
}
searcher.getIndexReader().close();
}
@Repeat(iterations=3)
public void testSortMissingFirst() throws IOException {
testSortMissing(true, false);
}
@Repeat(iterations=3)
public void testSortMissingFirstReverse() throws IOException {
testSortMissing(true, true);
}
@Repeat(iterations=3)
public void testSortMissingLast() throws IOException {
testSortMissing(false, false);
}
@Repeat(iterations=3)
public void testSortMissingLastReverse() throws IOException {
testSortMissing(false, true);
}
public void testSortMissing(boolean first, boolean reverse) throws IOException {
Document d = new Document();
final StringField s = new StringField("value", "", Field.Store.YES);
d.add(s);
final String[] values = new String[randomIntBetween(2, 10)];
for (int i = 1; i < values.length; ++i) {
values[i] = _TestUtil.randomUnicodeString(getRandom());
}
final int numDocs = atLeast(100);
for (int i = 0; i < numDocs; ++i) {
final String value = RandomPicks.randomFrom(getRandom(), values);
if (value == null) {
writer.addDocument(new Document());
} else {
s.setStringValue(value);
writer.addDocument(d);
}
if (randomInt(10) == 0) {
writer.commit();
}
}
final IndexFieldData indexFieldData = getForField("value");
IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
XFieldComparatorSource comparator = indexFieldData.comparatorSource(first ? "_first" : "_last", SortMode.MIN);
TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(new SortField("value", comparator, reverse)));
assertEquals(numDocs, topDocs.totalHits);
BytesRef previousValue = first ? null : reverse ? UnicodeUtil.BIG_TERM : new BytesRef();
for (int i = 0; i < topDocs.scoreDocs.length; ++i) {
final String docValue = searcher.doc(topDocs.scoreDocs[i].doc).get("value");
if (first && docValue == null) {
assertNull(previousValue);
} else if (!first && docValue != null) {
assertNotNull(previousValue);
}
final BytesRef value = docValue == null ? null : new BytesRef(docValue);
if (previousValue != null && value != null) {
if (reverse) {
assertTrue(previousValue.compareTo(value) >= 0);
} else {
assertTrue(previousValue.compareTo(value) <= 0);
}
}
previousValue = value;
}
searcher.getIndexReader().close();
}
@Repeat(iterations=3)
public void testNestedSortingMin() throws IOException {
testNestedSorting(SortMode.MIN);
}
@Repeat(iterations=3)
public void testNestedSortingMax() throws IOException {
testNestedSorting(SortMode.MAX);
}
public void testNestedSorting(SortMode sortMode) throws IOException {
final String[] values = new String[randomIntBetween(2, 20)];
for (int i = 0; i < values.length; ++i) {
values[i] = _TestUtil.randomSimpleString(getRandom());
}
final int numParents = atLeast(100);
List<Document> docs = new ArrayList<Document>();
final OpenBitSet parents = new OpenBitSet();
for (int i = 0; i < numParents; ++i) {
docs.clear();
final int numChildren = randomInt(4);
for (int j = 0; j < numChildren; ++j) {
final Document child = new Document();
final int numValues = randomInt(3);
for (int k = 0; k < numValues; ++k) {
final String value = RandomPicks.randomFrom(getRandom(), values);
child.add(new StringField("text", value, Store.YES));
}
docs.add(child);
}
final Document parent = new Document();
parent.add(new StringField("type", "parent", Store.YES));
final String value = RandomPicks.randomFrom(getRandom(), values);
if (value != null) {
parent.add(new StringField("text", value, Store.YES));
}
docs.add(parent);
parents.set(parents.prevSetBit(parents.length() - 1) + docs.size());
writer.addDocuments(docs);
if (randomInt(10) == 0) {
writer.commit();
}
}
IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
IndexFieldData<?> fieldData = getForField("text");
final BytesRef missingValue;
switch (randomInt(4)) {
case 0:
missingValue = new BytesRef();
break;
case 1:
missingValue = BytesRefFieldComparatorSource.MAX_TERM;
break;
case 2:
missingValue = new BytesRef(RandomPicks.randomFrom(getRandom(), values));
break;
default:
missingValue = new BytesRef(_TestUtil.randomSimpleString(getRandom()));
break;
}
BytesRefFieldComparatorSource innerSource = new BytesRefFieldComparatorSource(fieldData, missingValue, sortMode);
Filter parentFilter = new TermFilter(new Term("type", "parent"));
Filter childFilter = new NotFilter(parentFilter);
NestedFieldComparatorSource nestedComparatorSource = new NestedFieldComparatorSource(sortMode, innerSource, parentFilter, childFilter);
ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new XFilteredQuery(new MatchAllDocsQuery(), childFilter), new FixedBitSetCachingWrapperFilter(parentFilter), ScoreMode.None);
Sort sort = new Sort(new SortField("text", nestedComparatorSource));
TopFieldDocs topDocs = searcher.search(query, randomIntBetween(1, numParents), sort);
assertTrue(topDocs.scoreDocs.length > 0);
BytesRef previous = null;
for (int i = 0; i < topDocs.scoreDocs.length; ++i) {
final int docID = topDocs.scoreDocs[i].doc;
assertTrue("expected " + docID + " to be a parent", parents.get(docID));
BytesRef cmpValue = null;
for (int child = parents.prevSetBit(docID - 1) + 1; child < docID; ++child) {
String[] vals = searcher.doc(child).getValues("text");
if (vals.length == 0) {
vals = new String[] {missingValue.utf8ToString()};
}
for (String value : vals) {
final BytesRef bytesValue = new BytesRef(value);
if (cmpValue == null) {
cmpValue = bytesValue;
} else if (sortMode == SortMode.MIN && bytesValue.compareTo(cmpValue) < 0) {
cmpValue = bytesValue;
} else if (sortMode == SortMode.MAX && bytesValue.compareTo(cmpValue) > 0) {
cmpValue = bytesValue;
}
}
}
if (cmpValue == null) {
cmpValue = missingValue;
}
if (previous != null) {
assertNotNull(cmpValue);
assertTrue(previous.utf8ToString() + " / " + cmpValue.utf8ToString(), previous.compareTo(cmpValue) <= 0);
}
previous = cmpValue;
}
searcher.getIndexReader().close();
}
} | 0true
| src_test_java_org_elasticsearch_index_fielddata_AbstractStringFieldDataTests.java |
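The row above exercises missing-value sorting through Elasticsearch's `XFieldComparatorSource`; the same contract can be shown with plain Lucene. A minimal sketch, assuming Lucene 4.x APIs (as used in the row) where `SortField.STRING_FIRST`/`STRING_LAST` are the missing-value sentinels — an illustration, not the module's own code:
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;
public class MissingValueSortSketch {
    public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_46, new KeywordAnalyzer()));
        // one document deliberately has no "value" field, like the MISSING docs above
        for (String v : new String[] { "2", null, "3" }) {
            Document d = new Document();
            if (v != null) {
                d.add(new StringField("value", v, Store.YES));
            }
            writer.addDocument(d);
        }
        IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
        SortField sortField = new SortField("value", SortField.Type.STRING);
        sortField.setMissingValue(SortField.STRING_FIRST); // docs without the field sort before all terms
        TopFieldDocs docs = searcher.search(new MatchAllDocsQuery(), 3, new Sort(sortField));
        for (ScoreDoc sd : docs.scoreDocs) {
            System.out.println(searcher.doc(sd.doc).get("value")); // prints: null, 2, 3
        }
        searcher.getIndexReader().close();
        writer.close();
        dir.close();
    }
}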
567 | public class JPAConfigurationTask extends ConfigurationTask {
private String persistenceUnit;
private String dialect;
private String url;
private String userName;
private String password;
private String driverClassName;
public JPAConfigurationTask() {
setDescription("JPA Configuration");
}
protected Configuration createConfiguration(MergeFileSystemAndClassPathXMLApplicationContext mergeContext) {
try {
PersistenceUnitInfo unitInfo = ((MergePersistenceUnitManager) mergeContext.getBean("blPersistenceUnitManager")).obtainPersistenceUnitInfo(persistenceUnit);
Map<Object, Object> overrides = new HashMap<Object, Object>();
Properties p = getProperties();
if(p!=null) {
overrides.putAll( p );
}
overrides.put("hibernate.dialect",dialect);
if (this.url != null && ! "null".equalsIgnoreCase(this.url)) {
overrides.put("hibernate.connection.url", this.url);
}
if (this.userName != null && ! "null".equalsIgnoreCase(this.userName)) {
overrides.put("hibernate.connection.username", this.userName);
if (this.password == null) {
//This is for situations like HSQLDB that, by default, use no password
overrides.put("hibernate.connection.password", "");
} else if (! "null".equalsIgnoreCase(this.password)) {
//This allows you to specify a password or the word "null" to not set this property at all
overrides.put("hibernate.connection.password", this.password);
}
}
if (this.driverClassName != null && ! "null".equalsIgnoreCase(this.driverClassName)) {
overrides.put("hibernate.connection.driver_class", this.driverClassName);
}
Class<?> clazz = ReflectHelper.classForName("org.hibernate.ejb.Ejb3Configuration", JPAConfigurationTask.class);
Object ejb3cfg = clazz.newInstance();
if(entityResolver!=null) {
Class<?> resolver = ReflectHelper.classForName(entityResolver, this.getClass());
Object object = resolver.newInstance();
Method method = clazz.getMethod("setEntityResolver", new Class[] { EntityResolver.class });
method.invoke(ejb3cfg, new Object[] { object } );
}
Method method = clazz.getMethod("configure", new Class[] { PersistenceUnitInfo.class, Map.class });
if ( method.invoke(ejb3cfg, new Object[] { unitInfo, overrides } ) == null ) {
throw new BuildException("Persistence unit not found: '" + persistenceUnit + "'.");
}
method = clazz.getMethod("getHibernateConfiguration", new Class[0]);
return (Configuration) method.invoke(ejb3cfg, (Object[])null);
}
catch(HibernateException he) {
throw new BuildException(he);
}
catch(BuildException be) {
throw be;
}
catch(Exception t) {
throw new BuildException("Problems in creating a configuration for JPA. Have you remembered to add hibernate EntityManager jars to the classpath ?",t);
}
}
protected void doConfiguration(Configuration configuration) {
}
protected void validateParameters() throws BuildException {
}
public String getPersistenceUnit() {
return persistenceUnit;
}
public void setPersistenceUnit(String persistenceUnit) {
this.persistenceUnit = persistenceUnit;
}
public String getDialect() {
return dialect;
}
public void setDialect(String dialect) {
this.dialect = dialect;
}
public String getUrl() {
return url;
}
public void setUrl(String url) {
this.url = url;
}
public String getUserName() {
return userName;
}
public void setUserName(String userName) {
this.userName = userName;
}
public String getPassword() {
return password;
}
public void setPassword(String password) {
this.password = password;
}
public String getDriverClassName() {
return driverClassName;
}
public void setDriverClassName(String driverClassName) {
this.driverClassName = driverClassName;
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_util_sql_JPAConfigurationTask.java |
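The row above loads `Ejb3Configuration` reflectively so the Ant task compiles without a hard dependency on Hibernate's EntityManager module. A condensed sketch of that pattern, using only the class and method names visible above (the wrapper class itself is hypothetical):
import java.lang.reflect.Method;
import java.util.Map;
import javax.persistence.spi.PersistenceUnitInfo;
import org.hibernate.cfg.Configuration;
final class Ejb3ConfigurationBridge {
    // Reflection keeps hibernate-entitymanager off the compile-time classpath;
    // it only needs to be present when the task actually runs.
    static Configuration configure(PersistenceUnitInfo unitInfo, Map<Object, Object> overrides) throws Exception {
        Class<?> clazz = Class.forName("org.hibernate.ejb.Ejb3Configuration");
        Object ejb3cfg = clazz.newInstance();
        Method configure = clazz.getMethod("configure", PersistenceUnitInfo.class, Map.class);
        if (configure.invoke(ejb3cfg, unitInfo, overrides) == null) {
            throw new IllegalStateException("Persistence unit not found: '" + unitInfo.getPersistenceUnitName() + "'.");
        }
        Method getHibernateConfiguration = clazz.getMethod("getHibernateConfiguration");
        return (Configuration) getHibernateConfiguration.invoke(ejb3cfg);
    }
}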
1,510 | public class FailedShardsRoutingTests extends ElasticsearchAllocationTestCase {
private final ESLogger logger = Loggers.getLogger(FailedShardsRoutingTests.class);
@Test
public void testFailedShardPrimaryRelocatingToAndFrom() {
AllocationService allocation = createAllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put("cluster.routing.allocation.allow_rebalance", "always")
.build());
logger.info("--> building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("--> adding 2 nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
).build();
RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
// starting primaries
rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
// starting replicas
rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
logger.info("--> verifying all is allocated");
assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
logger.info("--> adding additional node");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3"))
).build();
rerouteResult = allocation.reroute(clusterState);
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
String origPrimaryNodeId = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
String origReplicaNodeId = clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId();
logger.info("--> moving primary shard to node3");
rerouteResult = allocation.reroute(clusterState, new AllocationCommands(
new MoveAllocationCommand(clusterState.routingTable().index("test").shard(0).primaryShard().shardId(), clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3"))
);
assertThat(rerouteResult.changed(), equalTo(true));
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(RELOCATING));
assertThat(clusterState.routingNodes().node("node3").get(0).state(), equalTo(INITIALIZING));
logger.info("--> fail primary shard recovering instance on node3 being initialized");
rerouteResult = allocation.applyFailedShard(clusterState, new ImmutableShardRouting(clusterState.routingNodes().node("node3").get(0)));
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(STARTED));
assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
logger.info("--> moving primary shard to node3");
rerouteResult = allocation.reroute(clusterState, new AllocationCommands(
new MoveAllocationCommand(clusterState.routingTable().index("test").shard(0).primaryShard().shardId(), clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3"))
);
assertThat(rerouteResult.changed(), equalTo(true));
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(RELOCATING));
assertThat(clusterState.routingNodes().node("node3").get(0).state(), equalTo(INITIALIZING));
logger.info("--> fail primary shard recovering instance on node1 being relocated");
rerouteResult = allocation.applyFailedShard(clusterState, new ImmutableShardRouting(clusterState.routingNodes().node(origPrimaryNodeId).get(0)));
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
// check promotion of replica to primary
assertThat(clusterState.routingNodes().node(origReplicaNodeId).get(0).state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(origReplicaNodeId));
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), anyOf(equalTo(origPrimaryNodeId), equalTo("node3")));
}
@Test
public void failPrimaryStartedCheckReplicaElected() {
AllocationService strategy = createAllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put("cluster.routing.allocation.allow_rebalance", "always")
.build());
logger.info("Building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logger.info("Start the shards (primaries)");
RoutingNodes routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(prevRoutingTable != routingTable, equalTo(true));
assertThat(routingTable.index("test").shards().size(), equalTo(1));
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
}
logger.info("Start the shards (backups)");
routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(prevRoutingTable != routingTable, equalTo(true));
assertThat(routingTable.index("test").shards().size(), equalTo(1));
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
}
logger.info("fail the primary shard, will have no place to be rerouted to (single node), so stays unassigned");
ShardRouting shardToFail = new ImmutableShardRouting(routingTable.index("test").shard(0).primaryShard());
prevRoutingTable = routingTable;
routingTable = strategy.applyFailedShard(clusterState, shardToFail).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
assertThat(prevRoutingTable != routingTable, equalTo(true));
assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), not(equalTo(shardToFail.currentNodeId())));
assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
logger.info("fail the shard again, check that nothing happens");
assertThat(strategy.applyFailedShard(clusterState, shardToFail).changed(), equalTo(false));
}
@Test
public void firstAllocationFailureSingleNode() {
AllocationService strategy = createAllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put("cluster.routing.allocation.allow_rebalance", "always")
.build());
logger.info("Building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("Adding single node and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(prevRoutingTable != routingTable, equalTo(true));
assertThat(routingTable.index("test").shards().size(), equalTo(1));
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("fail the first shard, will have no place to be rerouted to (single node), so stays unassigned");
prevRoutingTable = routingTable;
routingTable = strategy.applyFailedShard(clusterState, new ImmutableShardRouting("test", 0, "node1", true, INITIALIZING, 0)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
RoutingNodes routingNodes = clusterState.routingNodes();
assertThat(prevRoutingTable != routingTable, equalTo(true));
assertThat(routingTable.index("test").shards().size(), equalTo(1));
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(UNASSIGNED));
assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), nullValue());
assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("fail the shard again, see that nothing happens");
assertThat(strategy.applyFailedShard(clusterState, new ImmutableShardRouting("test", 0, "node1", true, INITIALIZING, 0)).changed(), equalTo(false));
}
@Test
public void firstAllocationFailureTwoNodes() {
AllocationService strategy = createAllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put("cluster.routing.allocation.allow_rebalance", "always")
.build());
logger.info("Building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
final String nodeHoldingPrimary = routingTable.index("test").shard(0).primaryShard().currentNodeId();
assertThat(prevRoutingTable != routingTable, equalTo(true));
assertThat(routingTable.index("test").shards().size(), equalTo(1));
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("fail the first shard, will start INITIALIZING on the second node");
prevRoutingTable = routingTable;
routingTable = strategy.applyFailedShard(clusterState, new ImmutableShardRouting("test", 0, nodeHoldingPrimary, true, INITIALIZING, 0)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
RoutingNodes routingNodes = clusterState.routingNodes();
assertThat(prevRoutingTable != routingTable, equalTo(true));
assertThat(routingTable.index("test").shards().size(), equalTo(1));
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), not(equalTo(nodeHoldingPrimary)));
assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("fail the shard again, see that nothing happens");
assertThat(strategy.applyFailedShard(clusterState, new ImmutableShardRouting("test", 0, nodeHoldingPrimary, true, INITIALIZING, 0)).changed(), equalTo(false));
}
@Test
public void rebalanceFailure() {
AllocationService strategy = createAllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put("cluster.routing.allocation.allow_rebalance", "always")
.build());
logger.info("Building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logger.info("Start the shards (primaries)");
RoutingNodes routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(prevRoutingTable != routingTable, equalTo(true));
assertThat(routingTable.index("test").shards().size(), equalTo(2));
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
}
logger.info("Start the shards (backups)");
routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(prevRoutingTable != routingTable, equalTo(true));
assertThat(routingTable.index("test").shards().size(), equalTo(2));
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
}
logger.info("Adding third node and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
assertThat(prevRoutingTable != routingTable, equalTo(true));
assertThat(routingTable.index("test").shards().size(), equalTo(2));
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), lessThan(3));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), lessThan(3));
assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
logger.info("Fail the shards on node 3");
ShardRouting shardToFail = routingNodes.node("node3").get(0);
routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyFailedShard(clusterState, new ImmutableShardRouting(shardToFail)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
assertThat(prevRoutingTable != routingTable, equalTo(true));
assertThat(routingTable.index("test").shards().size(), equalTo(2));
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), lessThan(3));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), lessThan(3));
assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
// make sure the failedShard is not INITIALIZING again on node3
assertThat(routingNodes.node("node3").get(0).shardId(), not(equalTo(shardToFail.shardId())));
}
} | 0true
| src_test_java_org_elasticsearch_cluster_routing_allocation_FailedShardsRoutingTests.java |
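Each scenario above repeats the same two-step dance: apply an allocation change, then fold the resulting routing table back into a fresh `ClusterState`. A small drop-in helper for the test class (name hypothetical, relying on the class's existing imports) captures that pattern:
// Start every INITIALIZING shard and rebuild the cluster state around the
// new routing table - the step repeated after each reroute above.
private ClusterState startInitializingShards(AllocationService strategy, ClusterState clusterState) {
    RoutingAllocation.Result result = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
    return ClusterState.builder(clusterState).routingTable(result.routingTable()).build();
}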
1,810 | this.writeBehindQueueManager.addStoreListener(new StoreListener<DelayedEntry>() {
@Override
public void beforeStore(StoreEvent<DelayedEntry> storeEvent) {
}
@Override
public void afterStore(StoreEvent<DelayedEntry> storeEvent) {
final DelayedEntry delayedEntry = storeEvent.getSource();
final Object value = delayedEntry.getValue();
// only process store delete operations.
if (value != null) {
return;
}
final Data key = (Data) storeEvent.getSource().getKey();
final int partitionId = mapService.getNodeEngine().getPartitionService().getPartitionId(key);
final PartitionContainer partitionContainer = mapService.getPartitionContainer(partitionId);
final RecordStore recordStore = partitionContainer.getExistingRecordStore(name);
if (recordStore != null) {
recordStore.removeFromWriteBehindWaitingDeletions(key);
}
}
}); | 0true
| hazelcast_src_main_java_com_hazelcast_map_MapContainer.java |
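For orientation, `StoreListener` is a two-phase callback fired around write-behind flushes, and the row above relies on the null-value convention to spot deletes. A minimal registration sketch (the `queueManager` and `logger` variables are hypothetical):
queueManager.addStoreListener(new StoreListener<DelayedEntry>() {
    @Override
    public void beforeStore(StoreEvent<DelayedEntry> event) {
        // fired before the batch is handed to the MapStore; nothing to do here
    }
    @Override
    public void afterStore(StoreEvent<DelayedEntry> event) {
        // as above, a null value marks a store delete rather than a store put
        if (event.getSource().getValue() == null) {
            logger.finest("write-behind delete flushed for key " + event.getSource().getKey());
        }
    }
});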
1,511 | public final class MemberImpl implements Member, HazelcastInstanceAware, IdentifiedDataSerializable {
private final Map<String, Object> attributes = new ConcurrentHashMap<String, Object>();
private boolean localMember;
private Address address;
private String uuid;
private volatile HazelcastInstanceImpl instance;
private volatile long lastRead;
private volatile long lastWrite;
private volatile long lastPing;
private volatile ILogger logger;
public MemberImpl() {
}
public MemberImpl(Address address, boolean localMember) {
this(address, localMember, null, null);
}
public MemberImpl(Address address, boolean localMember, String uuid, HazelcastInstanceImpl instance) {
this(address, localMember, uuid, instance, null);
}
public MemberImpl(Address address, boolean localMember, String uuid, HazelcastInstanceImpl instance,
Map<String, Object> attributes) {
this.localMember = localMember;
this.address = address;
this.lastRead = Clock.currentTimeMillis();
this.uuid = uuid;
this.instance = instance;
if (attributes != null) {
this.attributes.putAll(attributes);
}
}
public MemberImpl(MemberImpl member) {
this.localMember = member.localMember;
this.address = member.address;
this.lastRead = member.lastRead;
this.uuid = member.uuid;
this.attributes.putAll(member.attributes);
}
public Address getAddress() {
return address;
}
public int getPort() {
return address.getPort();
}
public InetAddress getInetAddress() {
try {
return address.getInetAddress();
} catch (UnknownHostException e) {
if (logger != null) {
logger.warning(e);
}
return null;
}
}
@Override
public InetSocketAddress getInetSocketAddress() {
return getSocketAddress();
}
@Override
public InetSocketAddress getSocketAddress() {
try {
return address.getInetSocketAddress();
} catch (UnknownHostException e) {
if (logger != null) {
logger.warning(e);
}
return null;
}
}
@Override
public void setHazelcastInstance(HazelcastInstance hazelcastInstance) {
if (hazelcastInstance instanceof HazelcastInstanceImpl) {
instance = (HazelcastInstanceImpl) hazelcastInstance;
localMember = instance.node.address.equals(address);
logger = instance.node.getLogger(this.getClass().getName());
}
}
@Override
public boolean localMember() {
return localMember;
}
public void didWrite() {
lastWrite = Clock.currentTimeMillis();
}
public void didRead() {
lastRead = Clock.currentTimeMillis();
}
public void didPing() {
lastPing = Clock.currentTimeMillis();
}
public long getLastPing() {
return lastPing;
}
public long getLastRead() {
return lastRead;
}
public long getLastWrite() {
return lastWrite;
}
void setUuid(String uuid) {
this.uuid = uuid;
}
@Override
public String getUuid() {
return uuid;
}
@Override
public Map<String, Object> getAttributes() {
return Collections.unmodifiableMap(attributes);
}
public void updateAttribute(MemberAttributeOperationType operationType, String key, Object value) {
switch (operationType) {
case PUT:
attributes.put(key, value);
break;
case REMOVE:
attributes.remove(key);
break;
default:
throw new IllegalArgumentException("Not a known OperationType " + operationType);
}
}
@Override
public String getStringAttribute(String key) {
return (String) getAttribute(key);
}
@Override
public void setStringAttribute(String key, String value) {
setAttribute(key, value);
}
@Override
public Boolean getBooleanAttribute(String key) {
return (Boolean) getAttribute(key);
}
@Override
public void setBooleanAttribute(String key, boolean value) {
setAttribute(key, value);
}
@Override
public Byte getByteAttribute(String key) {
return (Byte) getAttribute(key);
}
@Override
public void setByteAttribute(String key, byte value) {
setAttribute(key, value);
}
@Override
public Short getShortAttribute(String key) {
return (Short) getAttribute(key);
}
@Override
public void setShortAttribute(String key, short value) {
setAttribute(key, value);
}
@Override
public Integer getIntAttribute(String key) {
return (Integer) getAttribute(key);
}
@Override
public void setIntAttribute(String key, int value) {
setAttribute(key, value);
}
@Override
public Long getLongAttribute(String key) {
return (Long) getAttribute(key);
}
@Override
public void setLongAttribute(String key, long value) {
setAttribute(key, value);
}
@Override
public Float getFloatAttribute(String key) {
return (Float) getAttribute(key);
}
@Override
public void setFloatAttribute(String key, float value) {
setAttribute(key, value);
}
@Override
public Double getDoubleAttribute(String key) {
return (Double) getAttribute(key);
}
@Override
public void setDoubleAttribute(String key, double value) {
setAttribute(key, value);
}
@Override
public void removeAttribute(String key) {
isLocalMember();
isNotNull(key, "key");
Object value = attributes.remove(key);
if (value == null) {
return;
}
if (instance != null) {
MemberAttributeChangedOperation operation = new MemberAttributeChangedOperation(REMOVE, key, null);
invokeOnAllMembers(operation);
}
}
private void isLocalMember() {
if (!localMember) {
throw new UnsupportedOperationException("Attributes on remote members must not be changed");
}
}
private Object getAttribute(String key) {
return attributes.get(key);
}
private void setAttribute(String key, Object value) {
isLocalMember();
isNotNull(key, "key");
isNotNull(value, "value");
Object oldValue = attributes.put(key, value);
if (value.equals(oldValue)) {
return;
}
if (instance != null) {
MemberAttributeChangedOperation operation = new MemberAttributeChangedOperation(PUT, key, value);
invokeOnAllMembers(operation);
}
}
private void invokeOnAllMembers(Operation operation) {
NodeEngineImpl nodeEngine = instance.node.nodeEngine;
OperationService os = nodeEngine.getOperationService();
String uuid = nodeEngine.getLocalMember().getUuid();
operation.setCallerUuid(uuid).setNodeEngine(nodeEngine);
try {
for (MemberImpl member : nodeEngine.getClusterService().getMemberList()) {
if (!member.localMember()) {
os.send(operation, member.getAddress());
} else {
os.executeOperation(operation);
}
}
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
@Override
public void readData(ObjectDataInput in) throws IOException {
address = new Address();
address.readData(in);
uuid = in.readUTF();
int size = in.readInt();
for (int i = 0; i < size; i++) {
String key = in.readUTF();
Object value = IOUtil.readAttributeValue(in);
attributes.put(key, value);
}
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
address.writeData(out);
out.writeUTF(uuid);
Map<String, Object> attributes = new HashMap<String, Object>(this.attributes);
out.writeInt(attributes.size());
for (Map.Entry<String, Object> entry : attributes.entrySet()) {
out.writeUTF(entry.getKey());
IOUtil.writeAttributeValue(entry.getValue(), out);
}
}
public int getFactoryId() {
return ClusterDataSerializerHook.F_ID;
}
@Override
public int getId() {
return ClusterDataSerializerHook.MEMBER;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("Member [");
sb.append(address.getHost());
sb.append("]");
sb.append(":");
sb.append(address.getPort());
if (localMember) {
sb.append(" this");
}
return sb.toString();
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((address == null) ? 0 : address.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final MemberImpl other = (MemberImpl) obj;
if (address == null) {
if (other.address != null) {
return false;
}
} else if (!address.equals(other.address)) {
return false;
}
return true;
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_instance_MemberImpl.java |
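A short usage sketch of the attribute API above; the `hz` instance is assumed. Writes are only legal on the local member (the guard throws `UnsupportedOperationException` otherwise), while reads work cluster-wide because every change is broadcast via `MemberAttributeChangedOperation`:
Member local = hz.getCluster().getLocalMember();
local.setStringAttribute("rack", "rack-7");   // allowed: we own this member
local.setIntAttribute("cpu.cores", 8);
for (Member member : hz.getCluster().getMembers()) {
    // attributes are readable on any member once the change has propagated
    System.out.println(member.getSocketAddress() + " rack=" + member.getStringAttribute("rack"));
}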
769 | public static enum OpType {
/**
* Index the source. If there an existing document with the id, it will
* be replaced.
*/
INDEX((byte) 0),
/**
* Creates the resource. Simply adds it to the index, if there is an existing
* document with the id, then it won't be removed.
*/
CREATE((byte) 1);
private final byte id;
private final String lowercase;
OpType(byte id) {
this.id = id;
this.lowercase = this.toString().toLowerCase(Locale.ENGLISH);
}
/**
* The internal representation of the operation type.
*/
public byte id() {
return id;
}
public String lowercase() {
return this.lowercase;
}
/**
* Constructs the operation type from its internal representation.
*/
public static OpType fromId(byte id) {
if (id == 0) {
return INDEX;
} else if (id == 1) {
return CREATE;
} else {
throw new ElasticsearchIllegalArgumentException("No type match for [" + id + "]");
}
}
} | 0true
| src_main_java_org_elasticsearch_action_index_IndexRequest.java |
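The byte id is the wire representation and `fromId` is its inverse; a quick illustration of the round-trip contract the enum above guarantees:
assert IndexRequest.OpType.fromId((byte) 0) == IndexRequest.OpType.INDEX;
assert IndexRequest.OpType.fromId(IndexRequest.OpType.CREATE.id()) == IndexRequest.OpType.CREATE;
assert "create".equals(IndexRequest.OpType.CREATE.lowercase());
// any other id fails fast with ElasticsearchIllegalArgumentException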
1,618 | public class BroadleafAdminAuthenticationSuccessHandler extends SimpleUrlAuthenticationSuccessHandler {
private RequestCache requestCache = new HttpSessionRequestCache();
private final String successUrlParameter = "successUrl=";
@Override
public void onAuthenticationSuccess(HttpServletRequest request, HttpServletResponse response,
Authentication authentication) throws ServletException, IOException {
SavedRequest savedRequest = requestCache.getRequest(request, response);
if (savedRequest == null) {
super.onAuthenticationSuccess(request, response, authentication);
return;
}
String targetUrlParameter = getTargetUrlParameter();
if (isAlwaysUseDefaultTargetUrl() || (targetUrlParameter != null && StringUtils.hasText(request.getParameter(targetUrlParameter)))) {
requestCache.removeRequest(request, response);
super.onAuthenticationSuccess(request, response, authentication);
return;
}
clearAuthenticationAttributes(request);
// Use the DefaultSavedRequest URL
String targetUrl = savedRequest.getRedirectUrl();
// Remove the sessionTimeout flag if necessary
targetUrl = targetUrl.replace("sessionTimeout=true", "");
if (targetUrl.charAt(targetUrl.length() - 1) == '?') {
targetUrl = targetUrl.substring(0, targetUrl.length() - 1);
}
if (targetUrl.contains(successUrlParameter)) {
int successUrlPosition = targetUrl.indexOf(successUrlParameter) + successUrlParameter.length();
int nextParamPosition = targetUrl.indexOf("&", successUrlPosition);
if (nextParamPosition == -1) {
targetUrl = targetUrl.substring(successUrlPosition, targetUrl.length());
} else {
targetUrl = targetUrl.substring(successUrlPosition, nextParamPosition);
}
}
logger.debug("Redirecting to DefaultSavedRequest Url: " + targetUrl);
getRedirectStrategy().sendRedirect(request, response, targetUrl);
}
} | 0true
| admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_security_BroadleafAdminAuthenticationSuccessHandler.java |
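The index arithmetic above has two boundary cases (parameter last in the URL vs. followed by `&`); an equivalent standalone version (method name hypothetical) makes them explicit:
// Return the value of "successUrl=" inside targetUrl, or targetUrl itself
// when the parameter is absent - the same logic as the handler above.
static String extractSuccessUrl(String targetUrl) {
    String marker = "successUrl=";
    int start = targetUrl.indexOf(marker);
    if (start == -1) {
        return targetUrl;
    }
    start += marker.length();
    int end = targetUrl.indexOf('&', start);
    return end == -1 ? targetUrl.substring(start) : targetUrl.substring(start, end);
}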
889 | public class OQueryRuntimeValueMulti {
protected final OSQLFilterItemFieldMultiAbstract definition;
protected final List<OCollate> collates;
protected final Object[] values;
public OQueryRuntimeValueMulti(final OSQLFilterItemFieldMultiAbstract iDefinition, final Object[] iValues,
final List<OCollate> iCollates) {
definition = iDefinition;
values = iValues;
collates = iCollates;
}
@Override
public String toString() {
if (getValues() == null)
return "";
StringBuilder buffer = new StringBuilder();
buffer.append("[");
int i = 0;
for (Object v : getValues()) {
if (i++ > 0)
buffer.append(",");
buffer.append(v);
}
buffer.append("]");
return buffer.toString();
}
public OSQLFilterItemFieldMultiAbstract getDefinition() {
return definition;
}
public OCollate getCollate(final int iIndex) {
return collates.get(iIndex);
}
public Object[] getValues() {
return values;
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_query_OQueryRuntimeValueMulti.java |
966 | public interface BundleOrderItemFeePrice extends Serializable {
public abstract Long getId();
public abstract void setId(Long id);
public abstract BundleOrderItem getBundleOrderItem();
public abstract void setBundleOrderItem(BundleOrderItem bundleOrderItem);
public abstract Money getAmount();
public abstract void setAmount(Money amount);
public abstract String getName();
public abstract void setName(String name);
public abstract Boolean isTaxable();
public abstract void setTaxable(Boolean isTaxable);
public abstract String getReportingCode();
public abstract void setReportingCode(String reportingCode);
public BundleOrderItemFeePrice clone();
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_domain_BundleOrderItemFeePrice.java |
315 | public interface OStorageClusterConfiguration {
public int getId();
public String getName();
public String getLocation();
public int getDataSegmentId();
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_config_OStorageClusterConfiguration.java |
1,975 | @RunWith(HazelcastSerialClassRunner.class)
@Category(QuickTest.class)
public class MapLoaderTest extends HazelcastTestSupport {
//https://github.com/hazelcast/hazelcast/issues/1770
@Test
public void test1770() throws InterruptedException {
Config config = new Config();
config.getManagementCenterConfig().setEnabled(true);
config.getManagementCenterConfig().setUrl("http://www.google.com");
MapConfig mapConfig = new MapConfig("foo");
final AtomicBoolean loadAllCalled = new AtomicBoolean();
MapLoader mapLoader = new MapLoader() {
@Override
public Object load(Object key) {
return null;
}
@Override
public Map loadAll(Collection keys) {
loadAllCalled.set(true);
return new HashMap();
}
@Override
public Set loadAllKeys() {
return new HashSet(Arrays.asList(1));
}
};
MapStoreConfig mapStoreConfig = new MapStoreConfig();
mapStoreConfig.setEnabled(true);
mapStoreConfig.setImplementation(mapLoader);
mapConfig.setMapStoreConfig(mapStoreConfig);
config.addMapConfig(mapConfig);
HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
Map map = hz.getMap(mapConfig.getName());
assertTrueAllTheTime(new AssertTask() {
@Override
public void run() {
assertFalse("LoadAll should not have been called", loadAllCalled.get());
}
}, 10);
}
} | 0true
| hazelcast_src_test_java_com_hazelcast_map_mapstore_MapLoaderTest.java |
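The regression above pins down that `loadAll` must not run eagerly as a side effect of management-center config. When eager key loading is actually desired it is opt-in; a minimal sketch, assuming Hazelcast 3.x's `MapStoreConfig.InitialLoadMode`:
MapStoreConfig storeConfig = new MapStoreConfig()
        .setEnabled(true)
        .setImplementation(mapLoader)
        // EAGER blocks getMap() until loadAllKeys()/loadAll() complete;
        // LAZY defers loading until the map is first touched.
        .setInitialLoadMode(MapStoreConfig.InitialLoadMode.EAGER);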
75 | private static class ResourceElement
{
private Xid xid = null;
private XAResource resource = null;
private int status;
ResourceElement( Xid xid, XAResource resource )
{
this.xid = xid;
this.resource = resource;
status = RS_ENLISTED;
}
Xid getXid()
{
return xid;
}
XAResource getResource()
{
return resource;
}
int getStatus()
{
return status;
}
void setStatus( int status )
{
this.status = status;
}
@Override
public String toString()
{
String statusString;
switch ( status )
{
case RS_ENLISTED:
statusString = "ENLISTED";
break;
case RS_DELISTED:
statusString = "DELISTED";
break;
case RS_SUSPENDED:
statusString = "SUSPENDED";
break;
case RS_READONLY:
statusString = "READONLY";
break;
default:
statusString = "UNKNOWN";
}
return "Xid[" + xid + "] XAResource[" + resource + "] Status["
+ statusString + "]";
}
} | 0true
| community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_TransactionImpl.java |