Unnamed: 0 (int64, 0–6.45k) | func (string, lengths 37–161k) | target (class label, 2 classes) | project (string, lengths 33–167)
---|---|---|---|
1,316 |
public class SubmitReschedulingClusterInfoUpdatedJob implements Runnable {
@Override
public void run() {
if (logger.isTraceEnabled()) {
logger.trace("Submitting new rescheduling cluster info update job");
}
try {
threadPool.executor(executorName()).execute(new ClusterInfoUpdateJob(true));
} catch (EsRejectedExecutionException ex) {
if (logger.isDebugEnabled()) {
logger.debug("Couldn't re-schedule cluster info update task - node might be shutting down", ex);
}
}
}
}
| 0true
|
src_main_java_org_elasticsearch_cluster_InternalClusterInfoService.java
|
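The snippet above relies on catching the executor's rejection so that a node shutdown never surfaces as an error from the scheduling runnable. A minimal, self-contained sketch of the same pattern using plain java.util.concurrent types (EsRejectedExecutionException is the Elasticsearch-specific analogue of RejectedExecutionException):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;

public class RescheduleOnShutdownDemo {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.shutdown(); // simulate a node that is shutting down
        try {
            executor.execute(() -> System.out.println("cluster info update job"));
        } catch (RejectedExecutionException ex) {
            // Expected during shutdown: log at a low level and move on
            // instead of letting the exception propagate.
            System.out.println("Couldn't re-schedule update task: " + ex);
        }
    }
}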
110 |
class CreateTypeParameterProposal extends CorrectionProposal {
CreateTypeParameterProposal(String desc, Image image,
int offset, int length, TextFileChange change) {
super(desc, change, new Region(offset, length), image);
}
private static void addProposal(Collection<ICompletionProposal> proposals,
boolean wasNotGeneric, String def, String name, Image image,
Declaration dec, PhasedUnit unit, Tree.Declaration decNode,
int offset, String constraints) {
IFile file = getFile(unit);
TextFileChange change = new TextFileChange("Add Parameter", file);
change.setEdit(new MultiTextEdit());
IDocument doc = EditorUtil.getDocument(change);
HashSet<Declaration> decs = new HashSet<Declaration>();
CompilationUnit cu = unit.getCompilationUnit();
int il = applyImports(change, decs, cu, doc);
change.addEdit(new InsertEdit(offset, def));
if (constraints!=null) {
int loc = getConstraintLoc(decNode);
if (loc>=0) {
change.addEdit(new InsertEdit(loc, constraints));
}
}
String desc = "Add type parameter '" + name + "'" +
" to '" + dec.getName() + "'";
int off = wasNotGeneric?1:2;
proposals.add(new CreateTypeParameterProposal(desc,
image, offset+il+off, name.length(), change));
}
private static int getConstraintLoc(Tree.Declaration decNode) {
if( decNode instanceof Tree.ClassDefinition ) {
Tree.ClassDefinition classDefinition = (Tree.ClassDefinition) decNode;
return classDefinition.getClassBody().getStartIndex();
}
else if( decNode instanceof Tree.InterfaceDefinition ) {
Tree.InterfaceDefinition interfaceDefinition = (Tree.InterfaceDefinition) decNode;
return interfaceDefinition.getInterfaceBody().getStartIndex();
}
else if( decNode instanceof Tree.MethodDefinition ) {
Tree.MethodDefinition methodDefinition = (Tree.MethodDefinition) decNode;
return methodDefinition.getBlock().getStartIndex();
}
else if( decNode instanceof Tree.ClassDeclaration ) {
Tree.ClassDeclaration classDeclaration = (Tree.ClassDeclaration) decNode;
return classDeclaration.getClassSpecifier().getStartIndex();
}
else if( decNode instanceof Tree.InterfaceDeclaration ) {
Tree.InterfaceDeclaration interfaceDeclaration = (Tree.InterfaceDeclaration) decNode;
return interfaceDeclaration.getTypeSpecifier().getStartIndex();
}
else if( decNode instanceof Tree.MethodDeclaration ) {
Tree.MethodDeclaration methodDeclaration = (Tree.MethodDeclaration) decNode;
return methodDeclaration.getSpecifierExpression().getStartIndex();
}
else {
return -1;
}
}
static void addCreateTypeParameterProposal(Collection<ICompletionProposal> proposals,
IProject project, Tree.CompilationUnit cu, final Tree.BaseType node,
String brokenName) {
class FilterExtendsSatisfiesVisitor extends Visitor {
boolean filter = false;
@Override
public void visit(Tree.ExtendedType that) {
super.visit(that);
if (that.getType()==node) {
filter = true;
}
}
@Override
public void visit(Tree.SatisfiedTypes that) {
super.visit(that);
for (Tree.Type t: that.getTypes()) {
if (t==node) {
filter = true;
}
}
}
@Override
public void visit(Tree.CaseTypes that) {
super.visit(that);
for (Tree.Type t: that.getTypes()) {
if (t==node) {
filter = true;
}
}
}
}
FilterExtendsSatisfiesVisitor v = new FilterExtendsSatisfiesVisitor();
v.visit(cu);
if (v.filter) {
return;
}
Tree.Declaration decl = findDeclarationWithBody(cu, node);
Declaration d = decl==null ? null : decl.getDeclarationModel();
if (d == null || d.isActual() ||
!(d instanceof Method || d instanceof ClassOrInterface)) {
return;
}
Tree.TypeParameterList paramList = getTypeParameters(decl);
String paramDef;
int offset;
//TODO: add bounds as default type arg?
if (paramList != null) {
paramDef = ", " + brokenName;
offset = paramList.getStopIndex();
}
else {
paramDef = "<" + brokenName + ">";
offset = Nodes.getIdentifyingNode(decl).getStopIndex()+1;
}
class FindTypeParameterConstraintVisitor extends Visitor {
List<ProducedType> result;
@Override
public void visit(Tree.SimpleType that) {
super.visit(that);
TypeDeclaration dm = that.getDeclarationModel();
if (dm!=null) {
List<TypeParameter> tps = dm.getTypeParameters();
Tree.TypeArgumentList tal = that.getTypeArgumentList();
if (tal!=null) {
List<Tree.Type> tas = tal.getTypes();
for (int i=0; i<tas.size(); i++) {
if (tas.get(i)==node) {
result = tps.get(i).getSatisfiedTypes();
}
}
}
}
}
@Override
public void visit(Tree.StaticMemberOrTypeExpression that) {
super.visit(that);
Declaration d = that.getDeclaration();
if (d instanceof Generic) {
List<TypeParameter> tps = ((Generic) d).getTypeParameters();
Tree.TypeArguments tal = that.getTypeArguments();
if (tal instanceof Tree.TypeArgumentList) {
List<Tree.Type> tas = ((Tree.TypeArgumentList) tal).getTypes();
for (int i=0; i<tas.size(); i++) {
if (tas.get(i)==node) {
result = tps.get(i).getSatisfiedTypes();
}
}
}
}
}
}
FindTypeParameterConstraintVisitor ftpcv =
new FindTypeParameterConstraintVisitor();
ftpcv.visit(cu);
String constraints;
if (ftpcv.result==null) {
constraints = null;
}
else {
String bounds = CorrectionUtil.asIntersectionTypeString(ftpcv.result);
if (bounds.isEmpty()) {
constraints = null;
}
else {
constraints = "given " + brokenName +
" satisfies " + bounds + " ";
}
}
for (PhasedUnit unit : getUnits(project)) {
if (unit.getUnit().equals(cu.getUnit())) {
addProposal(proposals, paramList==null,
paramDef, brokenName, ADD_CORR,
d, unit, decl, offset, constraints);
break;
}
}
}
private static Tree.TypeParameterList getTypeParameters(Tree.Declaration decl) {
if (decl instanceof Tree.ClassOrInterface) {
return ((Tree.ClassOrInterface) decl).getTypeParameterList();
}
else if (decl instanceof Tree.AnyMethod) {
return ((Tree.AnyMethod) decl).getTypeParameterList();
}
return null;
}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_CreateTypeParameterProposal.java
|
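The FilterExtendsSatisfiesVisitor above is an instance of the flag-visitor idiom: walk the whole tree and latch a boolean when the target node appears in a disqualifying position. A minimal sketch of that idiom against a hypothetical mini-AST (the Node/Visitor types below are illustrative stand-ins, not the Ceylon Tree API):

import java.util.Arrays;
import java.util.List;

class Node {
    final String kind;
    final List<Node> children;
    Node(String kind, Node... children) {
        this.kind = kind;
        this.children = Arrays.asList(children);
    }
    void accept(Visitor v) { v.visit(this); }
}

class Visitor {
    // Default traversal: visit all children.
    void visit(Node n) {
        for (Node child : n.children) {
            child.accept(this);
        }
    }
}

public class FlagVisitorDemo {
    public static void main(String[] args) {
        Node target = new Node("BaseType");
        Node root = new Node("ClassDefinition",
                new Node("ExtendedType", target));
        // Same shape as FilterExtendsSatisfiesVisitor: set a flag when the
        // target node shows up under an "extends" clause.
        class FilterVisitor extends Visitor {
            boolean filter = false;
            @Override
            void visit(Node n) {
                super.visit(n);
                if (n.kind.equals("ExtendedType")
                        && n.children.contains(target)) {
                    filter = true;
                }
            }
        }
        FilterVisitor v = new FilterVisitor();
        root.accept(v);
        System.out.println("filter = " + v.filter); // filter = true
    }
}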
575 |
public interface OIndex<T> {
/**
* Creates the index.
*
* @param name
* Index name
* @param indexDefinition
* Definition of the index (indexed fields and key types)
* @param clusterIndexName
* Cluster name where to place the TreeMap
* @param clustersToIndex
* Names of the clusters whose records are indexed
* @param rebuild
* Whether to rebuild the index against the existing records
* @param progressListener
* Listener notified of rebuild progress
*/
OIndex<T> create(String name, OIndexDefinition indexDefinition, String clusterIndexName, Set<String> clustersToIndex,
boolean rebuild, OProgressListener progressListener);
/**
* Unloads the index freeing the resource in memory.
*/
void unload();
String getDatabaseName();
/**
* Types of the keys that the index can accept. If the index uses a composite key, the list of the types of the elements
* the key consists of is returned; otherwise a single element (the key type) is returned.
*/
OType[] getKeyTypes();
/**
* Gets the set of records associated with the passed key.
*
* @param iKey
* The key to search
* @return The Record set if found, otherwise an empty Set
*/
T get(Object iKey);
/**
* Counts the elements associated with the passed key, if any.
*
* @param iKey
* The key to count
* @return The number of records found, or 0 if the key is not found
*/
long count(Object iKey);
public long count(Object iRangeFrom, boolean iFromInclusive, Object iRangeTo, boolean iToInclusive, int maxValuesToFetch);
/**
* Tells if a key is contained in the index.
*
* @param iKey
* The key to search
* @return True if the key is contained, otherwise false
*/
boolean contains(Object iKey);
/**
* Inserts a new entry in the index. The behaviour depends on the index implementation.
*
* @param iKey
* Entry's key
* @param iValue
* Entry's value as OIdentifiable instance
* @return The index instance itself to allow in chain calls
*/
OIndex<T> put(Object iKey, OIdentifiable iValue);
/**
* Removes an entry by its key.
*
* @param key
* The entry's key to remove
* @return True if the entry has been found and removed, otherwise false
*/
boolean remove(Object key);
/**
* Removes an entry by its key and value.
*
* @param iKey
* The entry's key to remove
* @param iRID
* The entry's value to remove
* @return True if the entry has been found and removed, otherwise false
*/
boolean remove(Object iKey, OIdentifiable iRID);
/**
* Clears the index removing all the entries in one shot.
*
* @return The index instance itself to allow in chain calls
*/
OIndex<T> clear();
/**
* @return number of entries in the index.
*/
long getSize();
/**
* @return Number of keys in index
*/
long getKeySize();
/**
* For unique indexes it will throw an exception if the passed-in key is already contained in the index.
*
* @param iRecord
* @param iKey
*/
void checkEntry(OIdentifiable iRecord, Object iKey);
/**
* Flushes in-memory changes to disk.
*/
public void flush();
/**
* Delete the index.
*
* @return The index instance itself to allow in chain calls
*/
OIndex<T> delete();
void deleteWithoutIndexLoad(String indexName);
/**
* Returns the index name.
*
* @return The name of the index
*/
String getName();
/**
* Returns the type of the index as string.
*/
String getType();
/**
* Tells if the index is automatic. Automatic means it's maintained automatically by OrientDB. This is the case of indexes created
* against schema properties. Automatic indexes can always be rebuilt.
*
* @return True if the index is automatic, otherwise false
*/
boolean isAutomatic();
/**
* Rebuilds an automatic index.
*
* @return The number of entries rebuilt
*/
long rebuild();
/**
* Populates the index with all the existing records.
*/
long rebuild(OProgressListener iProgressListener);
/**
* Returns the index configuration.
*
* @return An ODocument object containing all the index properties
*/
ODocument getConfiguration();
/**
* Returns the internal index used.
*
*/
OIndexInternal<T> getInternal();
/**
* Returns the set of records whose keys are contained in the specified collection.
*
* @param iKeys
* Collection of keys
* @return
*/
Collection<OIdentifiable> getValues(Collection<?> iKeys);
void getValues(Collection<?> iKeys, IndexValuesResultListener resultListener);
/**
* Returns a set of documents whose keys are contained in the specified collection.
*
* @param iKeys
* Collection of keys
* @return
*/
Collection<ODocument> getEntries(Collection<?> iKeys);
void getEntries(Collection<?> iKeys, IndexEntriesResultListener resultListener);
OIndexDefinition getDefinition();
/**
* Returns Names of clusters that will be indexed.
*
* @return Names of clusters that will be indexed.
*/
Set<String> getClusters();
/**
* Returns an iterator to walk across all the index items from the first to the latest one.
*
* @return
*/
public Iterator<Entry<Object, T>> iterator();
/**
* Returns an iterator to walk across all the index items from the last to the first one.
*
* @return
*/
public Iterator<Entry<Object, T>> inverseIterator();
/**
* Returns an iterator to walk across all the index values from the first to the latest one.
*
* @return
*/
public Iterator<OIdentifiable> valuesIterator();
/**
* Returns an iterator to walk across all the index values from the last to the first one.
*
* @return
*/
public Iterator<OIdentifiable> valuesInverseIterator();
/**
* Returns an Iterable instance of all the keys contained in the index.
*
* @return An Iterable<Object> that lazily loads the entries once fetched
*/
public Iterable<Object> keys();
/**
* Returns a set of records with key between the range passed as parameter. Range bounds are included.
*
* In case of {@link com.orientechnologies.common.collection.OCompositeKey}s partial keys can be used as values boundaries.
*
* @param iRangeFrom
* Starting range
* @param iRangeTo
* Ending range
*
* @return a set of records with key between the range passed as parameter. Range bounds are included.
* @see com.orientechnologies.common.collection.OCompositeKey#compareTo(com.orientechnologies.common.collection.OCompositeKey)
* @see #getValuesBetween(Object, boolean, Object, boolean)
*/
public Collection<OIdentifiable> getValuesBetween(Object iRangeFrom, Object iRangeTo);
/**
* Returns a set of records with key between the range passed as parameter.
*
* In case of {@link com.orientechnologies.common.collection.OCompositeKey}s partial keys can be used as values boundaries.
*
* @param iRangeFrom
* Starting range
* @param iFromInclusive
* Indicates whether start range boundary is included in result.
* @param iRangeTo
* Ending range
* @param iToInclusive
* Indicates whether end range boundary is included in result.
*
* @return Returns a set of records with key between the range passed as parameter.
*
* @see com.orientechnologies.common.collection.OCompositeKey#compareTo(com.orientechnologies.common.collection.OCompositeKey)
*
*/
public Collection<OIdentifiable> getValuesBetween(Object iRangeFrom, boolean iFromInclusive, Object iRangeTo, boolean iToInclusive);
public void getValuesBetween(Object iRangeFrom, boolean iFromInclusive, Object iRangeTo, boolean iToInclusive,
IndexValuesResultListener resultListener);
/**
* Returns a set of records with keys greater than passed parameter.
*
* @param fromKey
* Starting key.
* @param isInclusive
* Indicates whether record with passed key will be included.
*
* @return set of records with keys greater than passed parameter.
*/
public abstract Collection<OIdentifiable> getValuesMajor(Object fromKey, boolean isInclusive);
public abstract void getValuesMajor(Object fromKey, boolean isInclusive, IndexValuesResultListener valuesResultListener);
/**
* Returns a set of records with keys less than passed parameter.
*
* @param toKey
* Ending key.
* @param isInclusive
* Indicates whether record with passed key will be included.
*
* @return set of records with keys less than passed parameter.
*/
public abstract Collection<OIdentifiable> getValuesMinor(Object toKey, boolean isInclusive);
public abstract void getValuesMinor(Object toKey, boolean isInclusive, IndexValuesResultListener valuesResultListener);
/**
* Returns a set of documents that contains fields ("key", "rid") where "key" - index key, "rid" - record id of records with keys
* greater than passed parameter.
*
* @param fromKey
* Starting key.
* @param isInclusive
* Indicates whether record with passed key will be included.
*
* @return set of records with key greater than passed parameter.
*/
public abstract Collection<ODocument> getEntriesMajor(Object fromKey, boolean isInclusive);
public abstract void getEntriesMajor(Object fromKey, boolean isInclusive, IndexEntriesResultListener entriesResultListener);
/**
* Returns a set of documents that contains fields ("key", "rid") where "key" - index key, "rid" - record id of records with keys
* less than passed parameter.
*
* @param toKey
* Ending key.
* @param isInclusive
* Indicates whether record with passed key will be included.
*
* @return set of records with key less than passed parameter.
*/
public abstract Collection<ODocument> getEntriesMinor(Object toKey, boolean isInclusive);
public abstract void getEntriesMinor(Object toKey, boolean isInclusive, IndexEntriesResultListener entriesResultListener);
/**
* Returns a set of documents with key between the range passed as parameter.
*
* @param iRangeFrom
* Starting range
* @param iRangeTo
* Ending range
* @param iInclusive
* Include from/to bounds
* @see #getEntriesBetween(Object, Object)
* @return
*/
public abstract Collection<ODocument> getEntriesBetween(final Object iRangeFrom, final Object iRangeTo, final boolean iInclusive);
public abstract void getEntriesBetween(final Object iRangeFrom, final Object iRangeTo, final boolean iInclusive,
IndexEntriesResultListener entriesResultListener);
public Collection<ODocument> getEntriesBetween(Object iRangeFrom, Object iRangeTo);
/**
* Returns the Record Identity of the index if persistent.
*
* @return Valid ORID if it's persistent, otherwise ORID(-1:-1)
*/
public ORID getIdentity();
public boolean supportsOrderedIterations();
public boolean isRebuiding();
public interface IndexValuesResultListener {
boolean addResult(OIdentifiable value);
}
public interface IndexEntriesResultListener {
boolean addResult(ODocument entry);
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_index_OIndex.java
|
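Since OIndex is pure API, a short usage sketch may help. This assumes an OrientDB 1.x-era document database with a "Person.name" index already defined; the getMetadata().getIndexManager() lookup is the usual route to an OIndex instance but sits outside this file, so treat it as an assumption:

import java.util.Collection;
import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.index.OIndex;
import com.orientechnologies.orient.core.record.impl.ODocument;

public class OIndexUsageSketch {
    public static void main(String[] args) {
        // Assumes a local database with a "Person.name" index already defined.
        ODatabaseDocumentTx db =
                new ODatabaseDocumentTx("plocal:/tmp/demo").open("admin", "admin");
        try {
            OIndex<?> index = db.getMetadata().getIndexManager()
                    .getIndex("Person.name");
            ODocument alice = new ODocument("Person").field("name", "alice");
            alice.save();
            index.put("alice", alice);                   // add an entry
            System.out.println(index.contains("alice")); // key lookup -> true
            System.out.println(index.count("alice"));    // entries per key
            Collection<OIdentifiable> range =
                    index.getValuesBetween("a", true, "b", false); // [a, b) scan
            System.out.println(range.size());
        } finally {
            db.close();
        }
    }
}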
146 |
@Entity
@Table(name = "BLC_SC_ITEM_CRITERIA")
@Inheritance(strategy=InheritanceType.JOINED)
@AdminPresentationClass(friendlyName = "StructuredContentItemCriteriaImpl_baseStructuredContentItemCriteria")
public class StructuredContentItemCriteriaImpl implements StructuredContentItemCriteria {
public static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator= "SCItemCriteriaId")
@GenericGenerator(
name="SCItemCriteriaId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="StructuredContentItemCriteriaImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.cms.page.domain.StructuredContentItemCriteriaImpl")
}
)
@Column(name = "SC_ITEM_CRITERIA_ID")
@AdminPresentation(friendlyName = "StructuredContentItemCriteriaImpl_Item_Criteria_Id", group = "StructuredContentItemCriteriaImpl_Description", visibility =VisibilityEnum.HIDDEN_ALL)
protected Long id;
@Column(name = "QUANTITY", nullable=false)
@AdminPresentation(friendlyName = "StructuredContentItemCriteriaImpl_Quantity", group = "StructuredContentItemCriteriaImpl_Description", visibility =VisibilityEnum.HIDDEN_ALL)
protected Integer quantity;
@Lob
@Type(type = "org.hibernate.type.StringClobType")
@Column(name = "ORDER_ITEM_MATCH_RULE", length = Integer.MAX_VALUE - 1)
@AdminPresentation(friendlyName = "StructuredContentItemCriteriaImpl_Order_Item_Match_Rule", group = "StructuredContentItemCriteriaImpl_Description", visibility = VisibilityEnum.HIDDEN_ALL)
protected String orderItemMatchRule;
@ManyToOne(targetEntity = StructuredContentImpl.class)
@JoinTable(name = "BLC_QUAL_CRIT_SC_XREF", joinColumns = @JoinColumn(name = "SC_ITEM_CRITERIA_ID"), inverseJoinColumns = @JoinColumn(name = "SC_ID"))
protected StructuredContent structuredContent;
/* (non-Javadoc)
* @see org.broadleafcommerce.cms.structure.domain.StructuredContentItemCriteria#getId()
*/
@Override
public Long getId() {
return id;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.cms.structure.domain.StructuredContentItemCriteria#setId(java.lang.Long)
*/
@Override
public void setId(Long id) {
this.id = id;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.cms.structure.domain.StructuredContentItemCriteria#getQuantity()
*/
@Override
public Integer getQuantity() {
return quantity;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.cms.structure.domain.StructuredContentItemCriteria#setQuantity(java.lang.Integer)
*/
@Override
public void setQuantity(Integer receiveQuantity) {
this.quantity = receiveQuantity;
}
@Override
public String getMatchRule() {
return orderItemMatchRule;
}
@Override
public void setMatchRule(String matchRule) {
this.orderItemMatchRule = matchRule;
}
@Override
public StructuredContent getStructuredContent() {
return structuredContent;
}
@Override
public void setStructuredContent(StructuredContent structuredContent) {
this.structuredContent = structuredContent;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((id == null) ? 0 : id.hashCode());
result = prime * result + ((orderItemMatchRule == null) ? 0 : orderItemMatchRule.hashCode());
result = prime * result + ((quantity == null) ? 0 : quantity.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
StructuredContentItemCriteriaImpl other = (StructuredContentItemCriteriaImpl) obj;
if (id != null && other.id != null) {
return id.equals(other.id);
}
if (orderItemMatchRule == null) {
if (other.orderItemMatchRule != null)
return false;
} else if (!orderItemMatchRule.equals(other.orderItemMatchRule))
return false;
if (quantity == null) {
if (other.quantity != null)
return false;
} else if (!quantity.equals(other.quantity))
return false;
return true;
}
@Override
public StructuredContentItemCriteria cloneEntity() {
StructuredContentItemCriteriaImpl newField = new StructuredContentItemCriteriaImpl();
newField.quantity = quantity;
newField.orderItemMatchRule = orderItemMatchRule;
return newField;
}
}
| 1no label
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_domain_StructuredContentItemCriteriaImpl.java
|
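A short sketch of the equals() contract implemented above: when both sides carry a database id, identity is decided by the id alone; only when ids are absent does the comparison fall back to the match rule and quantity fields. Minimal, assuming only this class on the classpath:

import org.broadleafcommerce.cms.structure.domain.StructuredContentItemCriteriaImpl;

public class CriteriaEqualsDemo {
    public static void main(String[] args) {
        StructuredContentItemCriteriaImpl a = new StructuredContentItemCriteriaImpl();
        StructuredContentItemCriteriaImpl b = new StructuredContentItemCriteriaImpl();
        a.setId(1L);
        b.setId(1L);
        a.setQuantity(2);
        b.setQuantity(99);               // ids match, fields differ
        System.out.println(a.equals(b)); // true: the id comparison wins
        a.setId(null);
        b.setId(null);
        System.out.println(a.equals(b)); // false: falls back to the fields
    }
}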
109 |
public static class Order {
public static final int Rules = 1000;
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_domain_PageImpl.java
|
1,600 |
public class PersistencePerspective implements Serializable {
private static final long serialVersionUID = 1L;
protected String[] additionalNonPersistentProperties = new String[]{};
protected ForeignKey[] additionalForeignKeys = new ForeignKey[]{};
protected Map<PersistencePerspectiveItemType, PersistencePerspectiveItem> persistencePerspectiveItems = new HashMap<PersistencePerspectiveItemType, PersistencePerspectiveItem>();
protected OperationTypes operationTypes = new OperationTypes();
protected Boolean populateToOneFields = false;
protected String[] excludeFields = new String[]{};
protected String[] includeFields = new String[]{};
protected String configurationKey;
protected Boolean showArchivedFields = false;
protected Boolean useServerSideInspectionCache = true;
public PersistencePerspective() {
}
public PersistencePerspective(OperationTypes operationTypes, String[] additionalNonPersistentProperties, ForeignKey[] additionalForeignKeys) {
setAdditionalNonPersistentProperties(additionalNonPersistentProperties);
setAdditionalForeignKeys(additionalForeignKeys);
this.operationTypes = operationTypes;
}
public String[] getAdditionalNonPersistentProperties() {
return additionalNonPersistentProperties;
}
public void setAdditionalNonPersistentProperties(String[] additionalNonPersistentProperties) {
this.additionalNonPersistentProperties = additionalNonPersistentProperties;
Arrays.sort(this.additionalNonPersistentProperties);
}
public ForeignKey[] getAdditionalForeignKeys() {
return additionalForeignKeys;
}
public void setAdditionalForeignKeys(ForeignKey[] additionalForeignKeys) {
this.additionalForeignKeys = additionalForeignKeys;
Arrays.sort(this.additionalForeignKeys, new Comparator<ForeignKey>() {
public int compare(ForeignKey o1, ForeignKey o2) {
return o1.getManyToField().compareTo(o2.getManyToField());
}
});
}
public OperationTypes getOperationTypes() {
return operationTypes;
}
public void setOperationTypes(OperationTypes operationTypes) {
this.operationTypes = operationTypes;
}
public void addPersistencePerspectiveItem(PersistencePerspectiveItemType type, PersistencePerspectiveItem item) {
persistencePerspectiveItems.put(type, item);
}
public Map<PersistencePerspectiveItemType, PersistencePerspectiveItem> getPersistencePerspectiveItems() {
return persistencePerspectiveItems;
}
public void setPersistencePerspectiveItems(Map<PersistencePerspectiveItemType, PersistencePerspectiveItem> persistencePerspectiveItems) {
this.persistencePerspectiveItems = persistencePerspectiveItems;
}
/**
* Retrieves whether or not ManyToOne and OneToOne field boundaries
* will be traversed when retrieving and populating entity fields.
* Implementation should use the @AdminPresentationClass annotation
* instead.
*
* @return Whether or not ManyToOne and OneToOne field boundaries will be crossed.
*/
@Deprecated
public Boolean getPopulateToOneFields() {
return populateToOneFields;
}
/**
* Sets whether or not ManyToOne and OneToOne field boundaries
* will be traversed when retrieving and populating entity fields.
* Implementations should use the @AdminPresentationClass annotation
* instead.
*
* @param populateToOneFields Whether or not ManyToOne and OneToOne field boundaries will be crossed.
*/
@Deprecated
public void setPopulateToOneFields(Boolean populateToOneFields) {
this.populateToOneFields = populateToOneFields;
}
/**
* Retrieve the list of fields to exclude from the admin presentation.
* Implementations should use the excluded property of the AdminPresentation
* annotation instead, or use an AdminPresentationOverride if re-enabling a
* Broadleaf field is desired. If multiple datasources point to the same
* entity, but different exclusion behavior is required, a custom persistence
* handler may be employed with different inspect method implementations to
* account for the variations.
*
* @return list of fields to exclude from the admin
*/
@Deprecated
public String[] getExcludeFields() {
return excludeFields;
}
/**
* Set the list of fields to exclude from the admin presentation.
* Implementations should use the excluded property of the AdminPresentation
* annotation instead, or use an AdminPresentationOverride if re-enabling a
* Broadleaf field is desired. If multiple datasources point to the same
* entity, but different exclusion behavior is required, a custom persistence
* handler may be employed with different inspect method implementations to
* account for the variations.
*
* @param excludeManyToOneFields
*/
@Deprecated
public void setExcludeFields(String[] excludeManyToOneFields) {
this.excludeFields = excludeManyToOneFields;
Arrays.sort(this.excludeFields);
}
/**
* Get the list of fields to include in the admin presentation.
* Implementations should use excludeFields instead.
*
* @return list of fields to include in the admin
*/
@Deprecated
public String[] getIncludeFields() {
return includeFields;
}
/**
* Set the list of fields to include in the admin presentation.
* Implementations should use excludeFields instead.
*
* @param includeManyToOneFields
*/
@Deprecated
public void setIncludeFields(String[] includeManyToOneFields) {
this.includeFields = includeManyToOneFields;
Arrays.sort(this.includeFields);
}
public String getConfigurationKey() {
return configurationKey;
}
public void setConfigurationKey(String configurationKey) {
this.configurationKey = configurationKey;
}
public Boolean getShowArchivedFields() {
return showArchivedFields;
}
public void setShowArchivedFields(Boolean showArchivedFields) {
this.showArchivedFields = showArchivedFields;
}
public Boolean getUseServerSideInspectionCache() {
return useServerSideInspectionCache;
}
public void setUseServerSideInspectionCache(Boolean useServerSideInspectionCache) {
this.useServerSideInspectionCache = useServerSideInspectionCache;
}
public PersistencePerspective clonePersistencePerspective() {
PersistencePerspective persistencePerspective = new PersistencePerspective();
persistencePerspective.operationTypes = operationTypes.cloneOperationTypes();
if (additionalNonPersistentProperties != null) {
persistencePerspective.additionalNonPersistentProperties = new String[additionalNonPersistentProperties.length];
System.arraycopy(additionalNonPersistentProperties, 0, persistencePerspective.additionalNonPersistentProperties, 0, additionalNonPersistentProperties.length);
}
if (additionalForeignKeys != null) {
persistencePerspective.additionalForeignKeys = new ForeignKey[additionalForeignKeys.length];
for (int j=0; j<additionalForeignKeys.length;j++){
persistencePerspective.additionalForeignKeys[j] = additionalForeignKeys[j].cloneForeignKey();
}
}
if (this.persistencePerspectiveItems != null) {
Map<PersistencePerspectiveItemType, PersistencePerspectiveItem> persistencePerspectiveItems = new HashMap<PersistencePerspectiveItemType, PersistencePerspectiveItem>(this.persistencePerspectiveItems.size());
for (Map.Entry<PersistencePerspectiveItemType, PersistencePerspectiveItem> entry : this.persistencePerspectiveItems.entrySet()) {
persistencePerspectiveItems.put(entry.getKey(), entry.getValue().clonePersistencePerspectiveItem());
}
persistencePerspective.persistencePerspectiveItems = persistencePerspectiveItems;
}
persistencePerspective.populateToOneFields = populateToOneFields;
persistencePerspective.configurationKey = configurationKey;
persistencePerspective.showArchivedFields = showArchivedFields;
persistencePerspective.useServerSideInspectionCache = useServerSideInspectionCache;
if (excludeFields != null) {
persistencePerspective.excludeFields = new String[excludeFields.length];
System.arraycopy(excludeFields, 0, persistencePerspective.excludeFields, 0, excludeFields.length);
}
if (includeFields != null) {
persistencePerspective.includeFields = new String[includeFields.length];
System.arraycopy(includeFields, 0, persistencePerspective.includeFields, 0, includeFields.length);
}
return persistencePerspective;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof PersistencePerspective)) return false;
PersistencePerspective that = (PersistencePerspective) o;
if (!Arrays.equals(additionalForeignKeys, that.additionalForeignKeys)) return false;
if (!Arrays.equals(additionalNonPersistentProperties, that.additionalNonPersistentProperties))
return false;
if (configurationKey != null ? !configurationKey.equals(that.configurationKey) : that.configurationKey != null)
return false;
if (!Arrays.equals(excludeFields, that.excludeFields)) return false;
if (!Arrays.equals(includeFields, that.includeFields)) return false;
if (operationTypes != null ? !operationTypes.equals(that.operationTypes) : that.operationTypes != null)
return false;
if (persistencePerspectiveItems != null ? !persistencePerspectiveItems.equals(that.persistencePerspectiveItems) : that.persistencePerspectiveItems != null)
return false;
if (populateToOneFields != null ? !populateToOneFields.equals(that.populateToOneFields) : that.populateToOneFields != null)
return false;
if (showArchivedFields != null ? !showArchivedFields.equals(that.showArchivedFields) : that.showArchivedFields != null)
return false;
if (useServerSideInspectionCache != null ? !useServerSideInspectionCache.equals(that.useServerSideInspectionCache) : that.useServerSideInspectionCache != null)
return false;
return true;
}
@Override
public int hashCode() {
int result = additionalNonPersistentProperties != null ? Arrays.hashCode(additionalNonPersistentProperties) : 0;
result = 31 * result + (additionalForeignKeys != null ? Arrays.hashCode(additionalForeignKeys) : 0);
result = 31 * result + (persistencePerspectiveItems != null ? persistencePerspectiveItems.hashCode() : 0);
result = 31 * result + (operationTypes != null ? operationTypes.hashCode() : 0);
result = 31 * result + (populateToOneFields != null ? populateToOneFields.hashCode() : 0);
result = 31 * result + (excludeFields != null ? Arrays.hashCode(excludeFields) : 0);
result = 31 * result + (includeFields != null ? Arrays.hashCode(includeFields) : 0);
result = 31 * result + (configurationKey != null ? configurationKey.hashCode() : 0);
result = 31 * result + (showArchivedFields != null ? showArchivedFields.hashCode() : 0);
result = 31 * result + (useServerSideInspectionCache != null ? useServerSideInspectionCache.hashCode() : 0);
return result;
}
}
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_dto_PersistencePerspective.java
|
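clonePersistencePerspective() above is a deep copy: arrays are re-allocated with System.arraycopy and nested perspective items are cloned one by one, so mutating the copy never bleeds into the original. A small sketch, using only methods shown in this file:

import java.util.Arrays;
import org.broadleafcommerce.openadmin.dto.PersistencePerspective;

public class PerspectiveCloneDemo {
    public static void main(String[] args) {
        PersistencePerspective original = new PersistencePerspective();
        original.setExcludeFields(new String[] {"b", "a"}); // setter sorts the array
        PersistencePerspective copy = original.clonePersistencePerspective();
        System.out.println(original.getExcludeFields()[0]); // "a" (sorted on set)
        // The backing arrays are distinct objects with equal content.
        System.out.println(original.getExcludeFields() == copy.getExcludeFields()); // false
        System.out.println(Arrays.equals(original.getExcludeFields(),
                copy.getExcludeFields()));                                          // true
    }
}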
2,857 |
@edu.umd.cs.findbugs.annotations.SuppressWarnings("EI_EXPOSE_REP")
public class ReplicaSyncResponse extends Operation
implements PartitionAwareOperation, BackupOperation, UrgentSystemOperation {
private byte[] data;
private long[] replicaVersions;
private boolean compressed;
public ReplicaSyncResponse() {
}
public ReplicaSyncResponse(byte[] data, long[] replicaVersions, boolean compressed) {
this.data = data;
this.replicaVersions = replicaVersions;
this.compressed = compressed;
}
@Override
public void beforeRun() throws Exception {
}
@Override
public void run() throws Exception {
NodeEngineImpl nodeEngine = (NodeEngineImpl) getNodeEngine();
InternalPartitionServiceImpl partitionService = (InternalPartitionServiceImpl) nodeEngine.getPartitionService();
SerializationService serializationService = nodeEngine.getSerializationService();
int partitionId = getPartitionId();
int replicaIndex = getReplicaIndex();
BufferObjectDataInput in = null;
try {
if (data != null && data.length > 0) {
logApplyReplicaSync(partitionId, replicaIndex);
byte[] taskData = compressed ? IOUtil.decompress(data) : data;
in = serializationService.createObjectDataInput(taskData);
int size = in.readInt();
for (int i = 0; i < size; i++) {
Operation op = (Operation) serializationService.readObject(in);
try {
ErrorLoggingResponseHandler responseHandler
= new ErrorLoggingResponseHandler(nodeEngine.getLogger(op.getClass()));
op.setNodeEngine(nodeEngine)
.setPartitionId(partitionId)
.setReplicaIndex(replicaIndex)
.setResponseHandler(responseHandler);
op.beforeRun();
op.run();
op.afterRun();
} catch (Throwable e) {
logException(op, e);
}
}
}
} finally {
closeResource(in);
partitionService.finalizeReplicaSync(partitionId, replicaVersions);
}
}
private void logException(Operation op, Throwable e) {
NodeEngineImpl nodeEngine = (NodeEngineImpl) getNodeEngine();
ILogger logger = nodeEngine.getLogger(getClass());
Level level = nodeEngine.isActive() ? Level.WARNING : Level.FINEST;
if (logger.isLoggable(level)) {
logger.log(level, "While executing " + op, e);
}
}
private void logApplyReplicaSync(int partitionId, int replicaIndex) {
NodeEngineImpl nodeEngine = (NodeEngineImpl) getNodeEngine();
ILogger logger = nodeEngine.getLogger(getClass());
if (logger.isFinestEnabled()) {
logger.finest("Applying replica sync for partition: " + partitionId + ", replica: " + replicaIndex);
}
}
@Override
public void afterRun() throws Exception {
}
@Override
public boolean returnsResponse() {
return false;
}
@Override
public Object getResponse() {
return null;
}
@Override
public boolean validatesTarget() {
return true;
}
@Override
public void logError(Throwable e) {
ReplicaErrorLogger.log(e, getLogger());
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
IOUtil.writeByteArray(out, data);
out.writeLongArray(replicaVersions);
out.writeBoolean(compressed);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
data = IOUtil.readByteArray(in);
replicaVersions = in.readLongArray();
compressed = in.readBoolean();
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("ReplicaSyncResponse");
sb.append("{partition=").append(getPartitionId());
sb.append(", replica=").append(getReplicaIndex());
sb.append(", version=").append(Arrays.toString(replicaVersions));
sb.append('}');
return sb.toString();
}
private static final class ErrorLoggingResponseHandler implements ResponseHandler {
private final ILogger logger;
private ErrorLoggingResponseHandler(ILogger logger) {
this.logger = logger;
}
@Override
public void sendResponse(final Object obj) {
if (obj instanceof Throwable) {
Throwable t = (Throwable) obj;
logger.severe(t);
}
}
@Override
public boolean isLocal() {
return true;
}
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_partition_impl_ReplicaSyncResponse.java
|
10 |
public interface DataArchive extends FeedDataArchive {
/**
* Return the level of service of this data archive.
*
* @return the level of service of this data archive
*/
public LOS getLOS();
}
| 0true
|
mctcore_src_main_java_gov_nasa_arc_mct_api_feed_DataArchive.java
|
602 |
public class OIndexFullText extends OIndexMultiValues {
private static final String CONFIG_STOP_WORDS = "stopWords";
private static final String CONFIG_SEPARATOR_CHARS = "separatorChars";
private static final String CONFIG_IGNORE_CHARS = "ignoreChars";
private static final String DEF_SEPARATOR_CHARS = " \r\n\t:;,.|+*/\\=!?[]()";
private static final String DEF_IGNORE_CHARS = "'\"";
private static final String DEF_STOP_WORDS = "the in a at as and or for his her " + "him this that what which while "
+ "up with be was is";
private final String separatorChars = DEF_SEPARATOR_CHARS;
private final String ignoreChars = DEF_IGNORE_CHARS;
private final Set<String> stopWords;
public OIndexFullText(String typeId, String algorithm, OIndexEngine<Set<OIdentifiable>> indexEngine,
String valueContainerAlgorithm) {
super(typeId, algorithm, indexEngine, valueContainerAlgorithm);
stopWords = new HashSet<String>(OStringSerializerHelper.split(DEF_STOP_WORDS, ' '));
}
/**
* Indexes a value. Splits the value into single words and indexes each one. Saving the index is the responsibility of
* the caller.
*/
@Override
public OIndexFullText put(Object key, final OIdentifiable iSingleValue) {
checkForRebuild();
if (key == null)
return this;
key = getCollatingValue(key);
modificationLock.requestModificationLock();
try {
final List<String> words = splitIntoWords(key.toString());
// FOREACH WORD CREATE THE LINK TO THE CURRENT DOCUMENT
for (final String word : words) {
acquireExclusiveLock();
try {
Set<OIdentifiable> refs;
// SEARCH FOR THE WORD
refs = indexEngine.get(word);
if (refs == null) {
// WORD NOT EXISTS: CREATE THE KEYWORD CONTAINER THE FIRST TIME THE WORD IS FOUND
if (ODefaultIndexFactory.SBTREEBONSAI_VALUE_CONTAINER.equals(valueContainerAlgorithm)) {
refs = new OIndexRIDContainer(getName());
} else {
refs = new OMVRBTreeRIDSet();
((OMVRBTreeRIDSet) refs).setAutoConvertToRecord(false);
}
}
// ADD THE CURRENT DOCUMENT AS REF FOR THAT WORD
refs.add(iSingleValue);
// SAVE THE INDEX ENTRY
indexEngine.put(word, refs);
} finally {
releaseExclusiveLock();
}
}
return this;
} finally {
modificationLock.releaseModificationLock();
}
}
@Override
protected void putInSnapshot(Object key, OIdentifiable value, Map<Object, Object> snapshot) {
if (key == null)
return;
key = getCollatingValue(key);
final List<String> words = splitIntoWords(key.toString());
// FOREACH WORD CREATE THE LINK TO THE CURRENT DOCUMENT
for (final String word : words) {
Set<OIdentifiable> refs;
final Object snapshotValue = snapshot.get(word);
if (snapshotValue == null)
refs = indexEngine.get(word);
else if (snapshotValue.equals(RemovedValue.INSTANCE))
refs = null;
else
refs = (Set<OIdentifiable>) snapshotValue;
if (refs == null) {
// WORD NOT EXISTS: CREATE THE KEYWORD CONTAINER THE FIRST TIME THE WORD IS FOUND
if (ODefaultIndexFactory.SBTREEBONSAI_VALUE_CONTAINER.equals(valueContainerAlgorithm)) {
refs = new OIndexRIDContainer(getName());
} else {
refs = new OMVRBTreeRIDSet();
((OMVRBTreeRIDSet) refs).setAutoConvertToRecord(false);
}
snapshot.put(word, refs);
}
// ADD THE CURRENT DOCUMENT AS REF FOR THAT WORD
refs.add(value.getIdentity());
}
}
/**
* Splits the passed-in key into several words and removes the records whose keys equal any item of the split result and
* whose values equal the passed-in value.
*
* @param key
* Key to remove.
* @param value
* Value to remove.
* @return <code>true</code> if at least one record is removed.
*/
@Override
public boolean remove(Object key, final OIdentifiable value) {
checkForRebuild();
key = getCollatingValue(key);
modificationLock.requestModificationLock();
try {
final List<String> words = splitIntoWords(key.toString());
boolean removed = false;
for (final String word : words) {
acquireExclusiveLock();
try {
final Set<OIdentifiable> recs = indexEngine.get(word);
if (recs != null && !recs.isEmpty()) {
if (recs.remove(value)) {
if (recs.isEmpty())
indexEngine.remove(word);
else
indexEngine.put(word, recs);
removed = true;
}
}
} finally {
releaseExclusiveLock();
}
}
return removed;
} finally {
modificationLock.releaseModificationLock();
}
}
@Override
protected void removeFromSnapshot(Object key, OIdentifiable value, Map<Object, Object> snapshot) {
key = getCollatingValue(key);
final List<String> words = splitIntoWords(key.toString());
for (final String word : words) {
final Set<OIdentifiable> recs;
final Object snapshotValue = snapshot.get(word);
if (snapshotValue == null)
recs = indexEngine.get(word);
else if (snapshotValue.equals(RemovedValue.INSTANCE))
recs = null;
else
recs = (Set<OIdentifiable>) snapshotValue;
if (recs != null && !recs.isEmpty()) {
if (recs.remove(value)) {
if (recs.isEmpty())
snapshot.put(word, RemovedValue.INSTANCE);
else
snapshot.put(word, recs);
}
}
}
}
@Override
public OIndexInternal<?> create(String name, OIndexDefinition indexDefinition, String clusterIndexName,
Set<String> clustersToIndex, boolean rebuild, OProgressListener progressListener, OStreamSerializer valueSerializer) {
if (indexDefinition.getFields().size() > 1) {
throw new OIndexException(type + " indexes cannot be used as composite ones.");
}
return super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener, valueSerializer);
}
@Override
public OIndexMultiValues create(String name, OIndexDefinition indexDefinition, String clusterIndexName,
Set<String> clustersToIndex, boolean rebuild, OProgressListener progressListener) {
if (indexDefinition.getFields().size() > 1) {
throw new OIndexException(type + " indexes cannot be used as composite ones.");
}
return super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener);
}
@Override
public ODocument updateConfiguration() {
super.updateConfiguration();
configuration.setInternalStatus(ORecordElement.STATUS.UNMARSHALLING);
try {
configuration.field(CONFIG_SEPARATOR_CHARS, separatorChars);
configuration.field(CONFIG_IGNORE_CHARS, ignoreChars);
configuration.field(CONFIG_STOP_WORDS, stopWords);
} finally {
configuration.setInternalStatus(ORecordElement.STATUS.LOADED);
}
return configuration;
}
private List<String> splitIntoWords(final String iKey) {
final List<String> result = new ArrayList<String>();
final List<String> words = (List<String>) OStringSerializerHelper.split(new ArrayList<String>(), iKey, 0, -1, separatorChars);
final StringBuilder buffer = new StringBuilder();
// FOR EACH WORD, STRIP IGNORED CHARS AND FILTER OUT STOP WORDS
char c;
boolean ignore;
for (String word : words) {
buffer.setLength(0);
for (int i = 0; i < word.length(); ++i) {
c = word.charAt(i);
ignore = false;
for (int k = 0; k < ignoreChars.length(); ++k)
if (c == ignoreChars.charAt(k)) {
ignore = true;
break;
}
if (!ignore)
buffer.append(c);
}
word = buffer.toString();
// CHECK IF IT'S A STOP WORD
if (stopWords.contains(word))
continue;
result.add(word);
}
return result;
}
public boolean canBeUsedInEqualityOperators() {
return false;
}
public boolean supportsOrderedIterations() {
return false;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_index_OIndexFullText.java
|
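The tokenization in splitIntoWords() is the core of the full-text behaviour: split on the separator characters, strip the ignore characters, drop stop words. A standalone re-implementation so the logic can be exercised without an index; StringTokenizer stands in for OStringSerializerHelper.split here, which is an assumption, not the original helper:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.StringTokenizer;

public class FullTextSplitDemo {
    static final String SEPARATORS = " \r\n\t:;,.|+*/\\=!?[]()";
    static final String IGNORE = "'\"";
    static final Set<String> STOP_WORDS = new HashSet<String>(Arrays.asList(
            ("the in a at as and or for his her him this that what which "
                    + "while up with be was is").split(" ")));

    static List<String> splitIntoWords(String key) {
        List<String> result = new ArrayList<String>();
        StringTokenizer tokenizer = new StringTokenizer(key, SEPARATORS);
        while (tokenizer.hasMoreTokens()) {
            StringBuilder buffer = new StringBuilder();
            for (char c : tokenizer.nextToken().toCharArray()) {
                if (IGNORE.indexOf(c) < 0) {  // drop quote characters
                    buffer.append(c);
                }
            }
            String word = buffer.toString();
            if (!STOP_WORDS.contains(word)) { // skip stop words
                result.add(word);
            }
        }
        return result;
    }

    public static void main(String[] args) {
        // prints [quick, brown, fox] -- "the" is a stop word
        System.out.println(splitIntoWords("the quick, brown fox"));
    }
}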
3,677 |
public class RoutingFieldMapper extends AbstractFieldMapper<String> implements InternalMapper, RootMapper {
public static final String NAME = "_routing";
public static final String CONTENT_TYPE = "_routing";
public static class Defaults extends AbstractFieldMapper.Defaults {
public static final String NAME = "_routing";
public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
static {
FIELD_TYPE.setIndexed(true);
FIELD_TYPE.setTokenized(false);
FIELD_TYPE.setStored(true);
FIELD_TYPE.setOmitNorms(true);
FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_ONLY);
FIELD_TYPE.freeze();
}
public static final boolean REQUIRED = false;
public static final String PATH = null;
}
public static class Builder extends AbstractFieldMapper.Builder<Builder, RoutingFieldMapper> {
private boolean required = Defaults.REQUIRED;
private String path = Defaults.PATH;
public Builder() {
super(Defaults.NAME, new FieldType(Defaults.FIELD_TYPE));
}
public Builder required(boolean required) {
this.required = required;
return builder;
}
public Builder path(String path) {
this.path = path;
return builder;
}
@Override
public RoutingFieldMapper build(BuilderContext context) {
return new RoutingFieldMapper(fieldType, required, path, postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());
}
}
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
RoutingFieldMapper.Builder builder = routing();
parseField(builder, builder.name, node, parserContext);
for (Map.Entry<String, Object> entry : node.entrySet()) {
String fieldName = Strings.toUnderscoreCase(entry.getKey());
Object fieldNode = entry.getValue();
if (fieldName.equals("required")) {
builder.required(nodeBooleanValue(fieldNode));
} else if (fieldName.equals("path")) {
builder.path(fieldNode.toString());
}
}
return builder;
}
}
private boolean required;
private final String path;
public RoutingFieldMapper() {
this(new FieldType(Defaults.FIELD_TYPE), Defaults.REQUIRED, Defaults.PATH, null, null, null, ImmutableSettings.EMPTY);
}
protected RoutingFieldMapper(FieldType fieldType, boolean required, String path, PostingsFormatProvider postingsProvider,
DocValuesFormatProvider docValuesProvider, @Nullable Settings fieldDataSettings, Settings indexSettings) {
super(new Names(Defaults.NAME, Defaults.NAME, Defaults.NAME, Defaults.NAME), 1.0f, fieldType, null, Lucene.KEYWORD_ANALYZER,
Lucene.KEYWORD_ANALYZER, postingsProvider, docValuesProvider, null, null, fieldDataSettings, indexSettings);
this.required = required;
this.path = path;
}
@Override
public FieldType defaultFieldType() {
return Defaults.FIELD_TYPE;
}
@Override
public FieldDataType defaultFieldDataType() {
return new FieldDataType("string");
}
@Override
public boolean hasDocValues() {
return false;
}
public void markAsRequired() {
this.required = true;
}
public boolean required() {
return this.required;
}
public String path() {
return this.path;
}
public String value(Document document) {
Field field = (Field) document.getField(names.indexName());
return field == null ? null : value(field);
}
@Override
public String value(Object value) {
if (value == null) {
return null;
}
return value.toString();
}
@Override
public void validate(ParseContext context) throws MapperParsingException {
String routing = context.sourceToParse().routing();
if (path != null && routing != null) {
// we have a path, so check that the routing value found in the doc matches the externally provided one...
String value = null;
Field field = (Field) context.doc().getField(path);
if (field != null) {
value = field.stringValue();
if (value == null) {
// maybe it's a numeric field...
if (field instanceof NumberFieldMapper.CustomNumericField) {
value = ((NumberFieldMapper.CustomNumericField) field).numericAsString();
}
}
}
if (value == null) {
value = context.ignoredValue(path);
}
if (!routing.equals(value)) {
throw new MapperParsingException("External routing [" + routing + "] and document path routing [" + value + "] mismatch");
}
}
}
@Override
public void preParse(ParseContext context) throws IOException {
super.parse(context);
}
@Override
public void postParse(ParseContext context) throws IOException {
}
@Override
public void parse(ParseContext context) throws IOException {
// no need to parse here; either the routing arrives in sourceToParse,
// in which case preParse (which is always called) processes it,
// or there is no routing at all
}
@Override
public boolean includeInObject() {
return true;
}
@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
String routing = context.sourceToParse().routing();
if (routing != null) {
if (!fieldType.indexed() && !fieldType.stored()) {
context.ignoredValue(names.indexName(), routing);
return;
}
fields.add(new Field(names.indexName(), routing, fieldType));
}
}
@Override
protected String contentType() {
return CONTENT_TYPE;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
// if all are defaults, no sense to write it at all
if (!includeDefaults && fieldType.indexed() == Defaults.FIELD_TYPE.indexed() &&
fieldType.stored() == Defaults.FIELD_TYPE.stored() && required == Defaults.REQUIRED && path == Defaults.PATH) {
return builder;
}
builder.startObject(CONTENT_TYPE);
if (includeDefaults || fieldType.indexed() != Defaults.FIELD_TYPE.indexed()) {
builder.field("index", indexTokenizeOptionToString(fieldType.indexed(), fieldType.tokenized()));
}
if (includeDefaults || fieldType.stored() != Defaults.FIELD_TYPE.stored()) {
builder.field("store", fieldType.stored());
}
if (includeDefaults || required != Defaults.REQUIRED) {
builder.field("required", required);
}
if (includeDefaults || path != Defaults.PATH) {
builder.field("path", path);
}
builder.endObject();
return builder;
}
@Override
public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
// do nothing here, no merging, but also no exception
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_mapper_internal_RoutingFieldMapper.java
|
684 |
@Entity
@Polymorphism(type = PolymorphismType.EXPLICIT)
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_CATEGORY_PRODUCT_XREF")
@AdminPresentationClass(excludeFromPolymorphism = false)
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
public class CategoryProductXrefImpl implements CategoryProductXref {
/** The Constant serialVersionUID. */
private static final long serialVersionUID = 1L;
@EmbeddedId
CategoryProductXrefPK categoryProductXref = new CategoryProductXrefPK();
public CategoryProductXrefPK getCategoryProductXref() {
return categoryProductXref;
}
public void setCategoryProductXref(CategoryProductXrefPK categoryProductXref) {
this.categoryProductXref = categoryProductXref;
}
/** The display order. */
@Column(name = "DISPLAY_ORDER")
@AdminPresentation(visibility = VisibilityEnum.HIDDEN_ALL)
protected Long displayOrder;
/* (non-Javadoc)
* @see org.broadleafcommerce.core.catalog.domain.CategoryProductXref#getDisplayOrder()
*/
public Long getDisplayOrder() {
return displayOrder;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.catalog.domain.CategoryProductXref#setDisplayOrder(java.lang.Integer)
*/
public void setDisplayOrder(Long displayOrder) {
this.displayOrder = displayOrder;
}
/**
* @return
* @see org.broadleafcommerce.core.catalog.domain.CategoryProductXrefImpl.CategoryProductXrefPK#getCategory()
*/
public Category getCategory() {
return categoryProductXref.getCategory();
}
/**
* @param category
* @see org.broadleafcommerce.core.catalog.domain.CategoryProductXrefImpl.CategoryProductXrefPK#setCategory(org.broadleafcommerce.core.catalog.domain.Category)
*/
public void setCategory(Category category) {
categoryProductXref.setCategory(category);
}
/**
* @return
* @see org.broadleafcommerce.core.catalog.domain.CategoryProductXrefImpl.CategoryProductXrefPK#getProduct()
*/
public Product getProduct() {
return categoryProductXref.getProduct();
}
/**
* @param product
* @see org.broadleafcommerce.core.catalog.domain.CategoryProductXrefImpl.CategoryProductXrefPK#setProduct(org.broadleafcommerce.core.catalog.domain.Product)
*/
public void setProduct(Product product) {
categoryProductXref.setProduct(product);
}
@Override
public boolean equals(Object o) {
if (o instanceof CategoryProductXrefImpl) {
CategoryProductXrefImpl that = (CategoryProductXrefImpl) o;
return new EqualsBuilder()
.append(categoryProductXref, that.categoryProductXref)
.build();
}
return false;
}
@Override
public int hashCode() {
int result = categoryProductXref != null ? categoryProductXref.hashCode() : 0;
return result;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_catalog_domain_CategoryProductXrefImpl.java
|
84 |
class CollectParametersProposal implements ICompletionProposal,
ICompletionProposalExtension6 {
private final CeylonEditor editor;
CollectParametersProposal(CeylonEditor editor) {
this.editor = editor;
}
@Override
public Point getSelection(IDocument doc) {
return null;
}
@Override
public Image getImage() {
return COMPOSITE_CHANGE;
}
@Override
public String getDisplayString() {
return "Collect selected parameters into new class";
}
@Override
public IContextInformation getContextInformation() {
return null;
}
@Override
public String getAdditionalProposalInfo() {
return null;
}
@Override
public void apply(IDocument doc) {
new CollectParametersRefactoringAction(editor).run();
}
@Override
public StyledString getStyledDisplayString() {
return Highlights.styleProposal(getDisplayString(), false);
}
public static void add(Collection<ICompletionProposal> proposals,
CeylonEditor editor) {
CollectParametersRefactoring cpr = new CollectParametersRefactoring(editor);
if (cpr.isEnabled()) {
proposals.add(new CollectParametersProposal(editor));
}
}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_CollectParametersProposal.java
|
967 |
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
listener.onResponse(newResponse(request, responses));
}
});
| 0true
|
src_main_java_org_elasticsearch_action_support_nodes_TransportNodesOperationAction.java
|
98 |
public class ODirectMemoryViolationException extends OException {
public ODirectMemoryViolationException(String message) {
super(message);
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_directmemory_ODirectMemoryViolationException.java
|
73 |
public interface TransactionBuilder {
/**
* Makes the transaction read only. Any writes will cause an exception.
* Read-only transactions do not have to maintain certain data structures and can hence be more efficient.
*
* @return
*/
public TransactionBuilder readOnly();
/**
* Enabling batch loading disables a number of consistency checks inside Titan to speed up the ingestion of
* data under the assumptions that inconsistencies are resolved prior to loading.
*
* @return
*/
public TransactionBuilder enableBatchLoading();
/**
* Disables batch loading by ensuring that consistency checks are applied in this transaction. This allows
* an individual transaction to use consistency checks when the graph as a whole is configured to not use them,
* which is useful when defining schema elements in a graph with batch-loading enabled.
*
* @return
*/
public TransactionBuilder disableBatchLoading();
/**
* Configures the size of the internal caches used in the transaction.
*
* @param size
* @return
*/
public TransactionBuilder setVertexCacheSize(int size);
/**
* Configures the initial size of the map of modified vertices held by this
* transaction. This is a performance hint, not a hard upper bound. The map
* will grow if the transaction ends up modifying more vertices than
* expected.
*
* @param size initial size of the transaction's dirty vertex collection
* @return
*/
public TransactionBuilder setDirtyVertexSize(int size);
/**
* Enables/disables checks that verify that each vertex actually exists in the underlying data store when it is retrieved.
* This might be useful to address common data degradation issues but has adverse impacts on performance due to
* repeated existence checks.
* <p/>
* Note, that these checks apply to vertex retrievals inside the query execution engine and not to vertex ids provided
* by the user.
*
* @param enabled
* @return
*/
public TransactionBuilder checkInternalVertexExistence(boolean enabled);
/**
* Enables/disables checking whether the vertex with a user provided id indeed exists. If the user is absolutely sure
* that the vertices for the ids provided in this transaction exist in the underlying data store, then disabling the
* vertex existence check will improve performance because it eliminates a database call.
* However, if a provided vertex id does not exist in the database and checking is disabled, Titan will assume it
* exists which can lead to data and query inconsistencies.
*
* @param enabled
* @return
*/
public TransactionBuilder checkExternalVertexExistence(boolean enabled);
/**
* Enables/disables consistency checking and locking for this transaction. Disabling consistency checks improves
* performance but requires that the user ensures consistency at the application level. Use with great care.
*
* @param enabled
* @return
*/
public TransactionBuilder consistencyChecks(boolean enabled);
/**
* Sets the timestamp for this transaction. The transaction will be recorded
* with this timestamp in those storage backends where the timestamp is
* recorded.
*
* @param timestampSinceEpoch
* number of units elapsed since the UNIX Epoch, that is,
* 00:00:00 UTC, Thursday, 1 January 1970
* @param unit
* units of the {@code timestampSinceEpoch} argument
* @return
*/
public TransactionBuilder setCommitTime(long timestampSinceEpoch, TimeUnit unit);
/**
* Sets the group name for this transaction which provides a way for gathering
* reporting on multiple transactions into one group.
*
* By setting a group one enables Metrics for this transaction, and defines what string
* should start the transaction's metric names.
* <p>
* If null, Metrics collection is totally disabled for this transaction.
* <p>
* If empty, Metrics collection is enabled, but there will be no prefix.
* Where the default setting would generate metrics names in the form
* "prefix.x.y.z", this transaction will instead use metric names in the
* form "x.y.z".
* <p>
* If nonempty, Metrics collection is enabled and the prefix will be used
* for all of this transaction's measurements.
* <p>
* Note: setting this to a non-null value only partially overrides
* {@link GraphDatabaseConfiguration#BASIC_METRICS} = false in the graph
* database configuration. When Metrics are disabled at the graph level and
* enabled at the transaction level, storage backend timings and counters
* will remain disabled.
* <p>
* The default value is
* {@link GraphDatabaseConfiguration#METRICS_PREFIX_DEFAULT}. When Metrics
* collection is enabled via {@link GraphDatabaseConfiguration#BASIC_METRICS},
* this prefix is prepended to all Titan metric names recorded for this
* transaction.
*
* @param name
* Metric name prefix for this transaction
* @return
*/
public TransactionBuilder setGroupName(String name);
/**
* Name of the log to be used for logging the mutations in this transaction. If no log identifier is set,
* then this transaction will not be logged.
*
* @param logName
* @return
*/
public TransactionBuilder setLogIdentifier(String logName);
/**
* Configures this transaction such that queries against partitioned vertices are
* restricted to the given partitions.
*
* @param partitions
* @return
*/
public TransactionBuilder setRestrictedPartitions(int[] partitions);
/**
* Configures a custom option on this transaction which will be passed through to the storage and indexing backends.
* @param k
* @param v
* @return
*/
public TransactionBuilder setCustomOption(String k, Object v);
/**
* Starts and returns the transaction built by this builder
*
* @return A new transaction configured according to this builder
*/
public TitanTransaction start();
}
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_TransactionBuilder.java
|
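A minimal usage sketch for the builder above, assuming a TitanGraph handle named graph obtained elsewhere; buildTransaction() is the standard entry point that returns this builder, and the chosen option values are illustrative:
TitanTransaction tx = graph.buildTransaction()
        .enableBatchLoading()                    // skip consistency checks during bulk ingestion
        .checkExternalVertexExistence(false)     // trust user-provided vertex ids (see caveat above)
        .setVertexCacheSize(10000)               // larger internal vertex cache
        .setGroupName("bulk-load")               // metrics recorded as "bulk-load.x.y.z"
        .start();
try {
    // ... mutate the graph through tx ...
    tx.commit();
} finally {
    if (tx.isOpen()) tx.rollback();
}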
770 |
public class IndexRequestBuilder extends ShardReplicationOperationRequestBuilder<IndexRequest, IndexResponse, IndexRequestBuilder> {
public IndexRequestBuilder(Client client) {
super((InternalClient) client, new IndexRequest());
}
public IndexRequestBuilder(Client client, @Nullable String index) {
super((InternalClient) client, new IndexRequest(index));
}
/**
* Sets the type to index the document to.
*/
public IndexRequestBuilder setType(String type) {
request.type(type);
return this;
}
/**
* Sets the id to index the document under. Optional, and if not set, one will be automatically
* generated.
*/
public IndexRequestBuilder setId(String id) {
request.id(id);
return this;
}
/**
* Controls the shard routing of the request. This value is used to compute the shard
* to route to instead of the id.
*/
public IndexRequestBuilder setRouting(String routing) {
request.routing(routing);
return this;
}
/**
* Sets the parent id of this document. If routing is not set, automatically set it as the
* routing as well.
*/
public IndexRequestBuilder setParent(String parent) {
request.parent(parent);
return this;
}
/**
* Sets the source.
*/
public IndexRequestBuilder setSource(BytesReference source, boolean unsafe) {
request.source(source, unsafe);
return this;
}
/**
* Sets the source.
*/
public IndexRequestBuilder setSource(BytesReference source) {
request.source(source, false);
return this;
}
/**
* Index the Map as a JSON.
*
* @param source The map to index
*/
public IndexRequestBuilder setSource(Map<String, Object> source) {
request.source(source);
return this;
}
/**
* Index the Map as the provided content type.
*
* @param source The map to index
*/
public IndexRequestBuilder setSource(Map<String, Object> source, XContentType contentType) {
request.source(source, contentType);
return this;
}
/**
* Sets the document source to index.
* <p/>
* <p>Note, it's preferable to set it using either {@link #setSource(org.elasticsearch.common.xcontent.XContentBuilder)}
* or {@link #setSource(byte[])}.
*/
public IndexRequestBuilder setSource(String source) {
request.source(source);
return this;
}
/**
* Sets the content source to index.
*/
public IndexRequestBuilder setSource(XContentBuilder sourceBuilder) {
request.source(sourceBuilder);
return this;
}
/**
* Sets the document to index in bytes form.
*/
public IndexRequestBuilder setSource(byte[] source) {
request.source(source);
return this;
}
/**
* Sets the document to index in bytes form (assumed to be safe to be used from different
* threads).
*
* @param source The source to index
* @param offset The offset in the byte array
* @param length The length of the data
*/
public IndexRequestBuilder setSource(byte[] source, int offset, int length) {
request.source(source, offset, length);
return this;
}
/**
* Sets the document to index in bytes form.
*
* @param source The source to index
* @param offset The offset in the byte array
* @param length The length of the data
* @param unsafe Whether the byte array is safe to be used from a different thread
*/
public IndexRequestBuilder setSource(byte[] source, int offset, int length, boolean unsafe) {
request.source(source, offset, length, unsafe);
return this;
}
/**
* Constructs a simple document with a field and a value.
*/
public IndexRequestBuilder setSource(String field1, Object value1) {
request.source(field1, value1);
return this;
}
/**
* Constructs a simple document with a field and value pairs.
*/
public IndexRequestBuilder setSource(String field1, Object value1, String field2, Object value2) {
request.source(field1, value1, field2, value2);
return this;
}
/**
* Constructs a simple document with a field and value pairs.
*/
public IndexRequestBuilder setSource(String field1, Object value1, String field2, Object value2, String field3, Object value3) {
request.source(field1, value1, field2, value2, field3, value3);
return this;
}
/**
* Constructs a simple document with a field and value pairs.
*/
public IndexRequestBuilder setSource(String field1, Object value1, String field2, Object value2, String field3, Object value3, String field4, Object value4) {
request.source(field1, value1, field2, value2, field3, value3, field4, value4);
return this;
}
/**
* Constructs a simple document from field name and value pairs.
* <b>Note: the number of objects passed to this method must be an even number.</b>
*/
public IndexRequestBuilder setSource(Object... source) {
request.source(source);
return this;
}
/**
* The content type that will be used to generate a document from user provided objects (like Map).
*/
public IndexRequestBuilder setContentType(XContentType contentType) {
request.contentType(contentType);
return this;
}
/**
* Sets the type of operation to perform.
*/
public IndexRequestBuilder setOpType(IndexRequest.OpType opType) {
request.opType(opType);
return this;
}
/**
* Sets a string representation of the {@link #setOpType(org.elasticsearch.action.index.IndexRequest.OpType)}. Can
* be either "index" or "create".
*/
public IndexRequestBuilder setOpType(String opType) {
request.opType(opType);
return this;
}
/**
* Set to <tt>true</tt> to force this index to use {@link org.elasticsearch.action.index.IndexRequest.OpType#CREATE}.
*/
public IndexRequestBuilder setCreate(boolean create) {
request.create(create);
return this;
}
/**
* Controls whether a refresh is executed after this index operation, making the document
* immediately searchable. Note, heavy indexing should not set this to <tt>true</tt>. Defaults
* to <tt>false</tt>.
*/
public IndexRequestBuilder setRefresh(boolean refresh) {
request.refresh(refresh);
return this;
}
/**
* Set the replication type for this operation.
*/
public IndexRequestBuilder setReplicationType(ReplicationType replicationType) {
request.replicationType(replicationType);
return this;
}
/**
* Sets the consistency level. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT}.
*/
public IndexRequestBuilder setConsistencyLevel(WriteConsistencyLevel consistencyLevel) {
request.consistencyLevel(consistencyLevel);
return this;
}
/**
* Set the replication type for this operation.
*/
public IndexRequestBuilder setReplicationType(String replicationType) {
request.replicationType(replicationType);
return this;
}
/**
* Sets the version, which will cause the index operation to only be performed if a matching
* version exists and no changes happened on the doc since then.
*/
public IndexRequestBuilder setVersion(long version) {
request.version(version);
return this;
}
/**
* Sets the versioning type. Defaults to {@link VersionType#INTERNAL}.
*/
public IndexRequestBuilder setVersionType(VersionType versionType) {
request.versionType(versionType);
return this;
}
/**
* Sets the timestamp either as millis since the epoch, or, in the configured date format.
*/
public IndexRequestBuilder setTimestamp(String timestamp) {
request.timestamp(timestamp);
return this;
}
/**
* Sets the relative ttl value. It must be > 0 as it makes little sense otherwise.
*/
public IndexRequestBuilder setTTL(long ttl) {
request.ttl(ttl);
return this;
}
@Override
protected void doExecute(ActionListener<IndexResponse> listener) {
((Client) client).index(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_index_IndexRequestBuilder.java
|
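A hedged usage sketch of the builder above; prepareIndex is the standard Client entry point that constructs this builder, while the index name, type, and field values are made up:
IndexResponse response = client.prepareIndex("orders", "order", "1")
        .setSource("status", "SUBMITTED", "total", 100)  // field/value pairs
        .setOpType(IndexRequest.OpType.CREATE)           // fail if the id already exists
        .setRefresh(true)                                // make the document searchable immediately
        .execute()
        .actionGet();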
605 |
public class TransportUpdateSettingsAction extends TransportMasterNodeOperationAction<UpdateSettingsRequest, UpdateSettingsResponse> {
private final MetaDataUpdateSettingsService updateSettingsService;
@Inject
public TransportUpdateSettingsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
MetaDataUpdateSettingsService updateSettingsService) {
super(settings, transportService, clusterService, threadPool);
this.updateSettingsService = updateSettingsService;
}
@Override
protected String executor() {
// we go async right away....
return ThreadPool.Names.SAME;
}
@Override
protected String transportAction() {
return UpdateSettingsAction.NAME;
}
@Override
protected UpdateSettingsRequest newRequest() {
return new UpdateSettingsRequest();
}
@Override
protected UpdateSettingsResponse newResponse() {
return new UpdateSettingsResponse();
}
@Override
protected void doExecute(UpdateSettingsRequest request, ActionListener<UpdateSettingsResponse> listener) {
request.indices(clusterService.state().metaData().concreteIndices(request.indices(), request.indicesOptions()));
super.doExecute(request, listener);
}
@Override
protected void masterOperation(final UpdateSettingsRequest request, final ClusterState state, final ActionListener<UpdateSettingsResponse> listener) throws ElasticsearchException {
UpdateSettingsClusterStateUpdateRequest clusterStateUpdateRequest = new UpdateSettingsClusterStateUpdateRequest()
.indices(request.indices())
.settings(request.settings())
.ackTimeout(request.timeout())
.masterNodeTimeout(request.masterNodeTimeout());
updateSettingsService.updateSettings(clusterStateUpdateRequest, new ClusterStateUpdateListener() {
@Override
public void onResponse(ClusterStateUpdateResponse response) {
listener.onResponse(new UpdateSettingsResponse(response.isAcknowledged()));
}
@Override
public void onFailure(Throwable t) {
logger.debug("failed to update settings on indices [{}]", t, request.indices());
listener.onFailure(t);
}
});
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_admin_indices_settings_put_TransportUpdateSettingsAction.java
|
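For context, a sketch of the client-side call that ultimately reaches the transport action above (the index name and setting are illustrative):
client.admin().indices().prepareUpdateSettings("orders")
        .setSettings(ImmutableSettings.settingsBuilder()
                .put("index.number_of_replicas", 2))  // setting to apply across the resolved indices
        .execute()
        .actionGet();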
557 |
public class TQRestriction {
protected String expression;
protected String operation;
protected Object parameter;
protected Mode joinMode;
protected List<TQRestriction> restrictions = new ArrayList<TQRestriction>();
/**
* Creates a simple restriction. As no value is associated, the operation is expected not to require
* a parameter value (for example, IS NULL).
*
* @param expression
* @param operation
*/
public TQRestriction(String expression, String operation) {
this.expression = expression;
this.operation = operation;
}
/**
* Creates a simple restriction.
*
* @param expression
* @param operation
* @param parameter
*/
public TQRestriction(String expression, String operation, Object parameter) {
this(expression, operation);
this.parameter = parameter;
}
/**
* Creates an empty restriction node with the specified join mode. It is expected that this restriction would then
* have at least 2 items in the restrictions list.
*
* @param joinMode
*/
public TQRestriction(Mode joinMode) {
this.joinMode = joinMode;
}
/**
* Adds a child restriction to the restrictions list
*
* @param r
* @return this
*/
public TQRestriction addChildRestriction(TQRestriction r) {
restrictions.add(r);
return this;
}
/**
* Recursively generates a query string representation of this restriction along with any child restrictions
* that this object may have.
*
* It also populates the paramMap with the appropriate values as it iterates through the restrictions.
*
* @param parameterName
* @param paramMap
* @return the query language string
*/
public String toQl(String parameterName, Map<String, Object> paramMap) {
StringBuilder sb = new StringBuilder("(");
if (expression != null && operation != null) {
sb.append(expression).append(" ").append(operation);
if (parameter != null) {
sb.append(' ');
String pname = ':' + parameterName;
if (operation.equals("in")) {
pname = "(" + pname + ")";
}
sb.append(pname);
paramMap.put(parameterName, parameter);
}
}
if (CollectionUtils.isNotEmpty(restrictions)) {
for (int i = 0; i < restrictions.size(); i++) {
TQRestriction r = restrictions.get(i);
String internalParamName = parameterName + "_" + i;
sb.append(r.toQl(internalParamName, paramMap));
paramMap.put(internalParamName, r.parameter);
if (restrictions.size() - 1 != i) {
sb.append(joinMode == Mode.OR ? " OR " : " AND ");
}
}
}
return sb.append(")").toString();
}
public enum Mode {
OR, AND
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_util_dao_TQRestriction.java
|
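A minimal sketch of how the restriction tree above composes into a query fragment (the expressions and values are made up):
Map<String, Object> params = new HashMap<String, Object>();
TQRestriction restriction = new TQRestriction(TQRestriction.Mode.AND)
        .addChildRestriction(new TQRestriction("o.status", "=", "SUBMITTED"))
        .addChildRestriction(new TQRestriction("o.total", ">", 100));
// yields "((o.status = :p0_0) AND (o.total > :p0_1))" and fills params with both values
String ql = restriction.toQl("p0", params);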
268 |
public class NullEmailServiceImpl implements EmailService {
@Override
public boolean sendTemplateEmail(String emailAddress, EmailInfo emailInfo, HashMap<String, Object> props) {
return true;
}
@Override
public boolean sendTemplateEmail(EmailTarget emailTarget, EmailInfo emailInfo, HashMap<String, Object> props) {
return true;
}
@Override
public boolean sendBasicEmail(EmailInfo emailInfo, EmailTarget emailTarget, HashMap<String, Object> props) {
return true;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_email_service_NullEmailServiceImpl.java
|
541 |
public interface ORecordHook {
public enum DISTRIBUTED_EXECUTION_MODE {
TARGET_NODE, SOURCE_NODE, BOTH
}
public enum HOOK_POSITION {
FIRST, EARLY, REGULAR, LATE, LAST
}
public enum TYPE {
ANY, BEFORE_CREATE, BEFORE_READ, BEFORE_UPDATE, BEFORE_DELETE, AFTER_CREATE, AFTER_READ, AFTER_UPDATE, AFTER_DELETE, CREATE_FAILED, READ_FAILED, UPDATE_FAILED, DELETE_FAILED, CREATE_REPLICATED, READ_REPLICATED, UPDATE_REPLICATED, DELETE_REPLICATED, BEFORE_REPLICA_ADD, AFTER_REPLICA_ADD, BEFORE_REPLICA_UPDATE, AFTER_REPLICA_UPDATE, BEFORE_REPLICA_DELETE, AFTER_REPLICA_DELETE, REPLICA_ADD_FAILED, REPLICA_UPDATE_FAILED, REPLICA_DELETE_FAILED
}
public enum RESULT {
RECORD_NOT_CHANGED, RECORD_CHANGED, SKIP
}
public RESULT onTrigger(TYPE iType, ORecord<?> iRecord);
public DISTRIBUTED_EXECUTION_MODE getDistributedExecutionMode();
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_hook_ORecordHook.java
|
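A hedged sketch of implementing the hook interface above; the audit logic is hypothetical, and registerHook is the usual way to attach a hook to a database instance:
public class AuditHook implements ORecordHook {
    @Override
    public RESULT onTrigger(TYPE iType, ORecord<?> iRecord) {
        if (iType == TYPE.AFTER_CREATE) {
            // hypothetical audit action on newly created records
            System.out.println("created: " + iRecord.getIdentity());
        }
        return RESULT.RECORD_NOT_CHANGED;  // leave the record untouched
    }
    @Override
    public DISTRIBUTED_EXECUTION_MODE getDistributedExecutionMode() {
        return DISTRIBUTED_EXECUTION_MODE.TARGET_NODE;  // run only where the record is stored
    }
}
// attach it: db.registerHook(new AuditHook());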
505 |
@RunWith(HazelcastSerialClassRunner.class)
@Category(NightlyTest.class)
public class AtomicLongStableReadStressTest extends StressTestSupport {
public static final int CLIENT_THREAD_COUNT = 5;
public static final int REFERENCE_COUNT = 10 * 1000;
private HazelcastInstance client;
private IAtomicLong[] references;
private StressThread[] stressThreads;
@Before
public void setUp() {
super.setUp();
ClientConfig clientConfig = new ClientConfig();
clientConfig.setRedoOperation(true);
client = HazelcastClient.newHazelcastClient(clientConfig);
references = new IAtomicLong[REFERENCE_COUNT];
for (int k = 0; k < references.length; k++) {
references[k] = client.getAtomicLong("atomicreference:" + k);
}
stressThreads = new StressThread[CLIENT_THREAD_COUNT];
for (int k = 0; k < stressThreads.length; k++) {
stressThreads[k] = new StressThread();
stressThreads[k].start();
}
}
@After
public void tearDown() {
super.tearDown();
if (client != null) {
client.shutdown();
}
}
@Test
public void testChangingCluster() {
test(true);
}
@Test
public void testFixedCluster() {
test(false);
}
public void test(boolean clusterChangeEnabled) {
setClusterChangeEnabled(clusterChangeEnabled);
initializeReferences();
startAndWaitForTestCompletion();
joinAll(stressThreads);
}
private void initializeReferences() {
System.out.println("==================================================================");
System.out.println("Initializing references");
System.out.println("==================================================================");
for (int k = 0; k < references.length; k++) {
references[k].set(k);
}
System.out.println("==================================================================");
System.out.println("Completed with initializing references");
System.out.println("==================================================================");
}
public class StressThread extends TestThread {
@Override
public void doRun() throws Exception {
while (!isStopped()) {
int key = random.nextInt(REFERENCE_COUNT);
IAtomicLong reference = references[key];
long value = reference.get();
assertEquals(format("The value for atomic reference: %s was not consistent", reference), key, value);
}
}
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_stress_AtomicLongStableReadStressTest.java
|
1,102 |
public class OSQLFunctionLast extends OSQLFunctionConfigurableAbstract {
public static final String NAME = "last";
private Object last;
public OSQLFunctionLast() {
super(NAME, 1, 1);
}
public Object execute(final OIdentifiable iCurrentRecord, Object iCurrentResult, final Object[] iParameters,
final OCommandContext iContext) {
Object value = iParameters[0];
if (value instanceof OSQLFilterItem)
value = ((OSQLFilterItem) value).getValue(iCurrentRecord, iContext);
if (OMultiValue.isMultiValue(value))
value = OMultiValue.getLastValue(value);
last = value;
return value;
}
public boolean aggregateResults() {
return configuredParameters.length == 1;
}
@Override
public Object getResult() {
return last;
}
@Override
public boolean filterResult() {
return true;
}
public String getSyntax() {
return "Syntax error: last(<field>)";
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_functions_coll_OSQLFunctionLast.java
|
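A hedged example of invoking the function above from OrientDB SQL (the class and collection field names are made up):
List<ODocument> result = db.query(new OSQLSynchQuery<ODocument>(
        "select last(addresses) from Profile"));  // last element of the 'addresses' collection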
901 |
sendExecuteFirstPhase(node, internalSearchRequest(shard, shardsIts.size(), request, filteringAliases, startTime), new SearchServiceListener<FirstResult>() {
@Override
public void onResult(FirstResult result) {
onFirstPhaseResult(shardIndex, shard, result, shardIt);
}
@Override
public void onFailure(Throwable t) {
onFirstPhaseResult(shardIndex, shard, node.id(), shardIt, t);
}
});
| 0true
|
src_main_java_org_elasticsearch_action_search_type_TransportSearchTypeAction.java
|
579 |
private ThreadLocal<IndexTxSnapshot> txSnapshot = new ThreadLocal<IndexTxSnapshot>() {
@Override
protected IndexTxSnapshot initialValue() {
return new IndexTxSnapshot();
}
};
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_index_OIndexAbstract.java
|
758 |
static final class Fields {
static final XContentBuilderString DOCS = new XContentBuilderString("docs");
static final XContentBuilderString _INDEX = new XContentBuilderString("_index");
static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
static final XContentBuilderString _ID = new XContentBuilderString("_id");
static final XContentBuilderString ERROR = new XContentBuilderString("error");
}
| 0true
|
src_main_java_org_elasticsearch_action_get_MultiGetResponse.java
|
170 |
class ValueFunctionDefinitionGenerator extends DefinitionGenerator {
private final String brokenName;
private final MemberOrTypeExpression node;
private final CompilationUnit rootNode;
private final String desc;
private final Image image;
private final ProducedType returnType;
private final LinkedHashMap<String, ProducedType> parameters;
private final Boolean isVariable;
@Override
String getBrokenName() {
return brokenName;
}
@Override
ProducedType getReturnType() {
return returnType;
}
@Override
LinkedHashMap<String, ProducedType> getParameters() {
return parameters;
}
@Override
String getDescription() {
return desc;
}
@Override
Image getImage() {
return image;
}
@Override
Tree.CompilationUnit getRootNode() {
return rootNode;
}
@Override
Node getNode() {
return node;
}
private ValueFunctionDefinitionGenerator(String brokenName,
Tree.MemberOrTypeExpression node,
Tree.CompilationUnit rootNode,
String desc,
Image image,
ProducedType returnType,
LinkedHashMap<String, ProducedType> paramTypes,
Boolean isVariable) {
this.brokenName = brokenName;
this.node = node;
this.rootNode = rootNode;
this.desc = desc;
this.image = image;
this.returnType = returnType;
this.parameters = paramTypes;
this.isVariable = isVariable;
}
String generateShared(String indent, String delim) {
return "shared " + generate(indent, delim);
}
String generate(String indent, String delim) {
StringBuffer def = new StringBuffer();
boolean isVoid = returnType==null;
Unit unit = node.getUnit();
if (parameters!=null) {
List<TypeParameter> typeParams = new ArrayList<TypeParameter>();
StringBuilder typeParamDef = new StringBuilder();
StringBuilder typeParamConstDef = new StringBuilder();
appendTypeParams(typeParams, typeParamDef, typeParamConstDef, returnType);
appendTypeParams(typeParams, typeParamDef, typeParamConstDef, parameters.values());
if (typeParamDef.length() > 0) {
typeParamDef.insert(0, "<");
typeParamDef.setLength(typeParamDef.length() - 1);
typeParamDef.append(">");
}
if (isVoid) {
def.append("void");
}
else {
if (isTypeUnknown(returnType)) {
def.append("function");
}
else {
def.append(returnType.getProducedTypeName(unit));
}
}
def.append(" ")
.append(brokenName).append(typeParamDef);
appendParameters(parameters, def);
def.append(typeParamConstDef);
if (isVoid) {
def.append(" {}");
}
else {
//removed because it's ugly for parameters:
//delim + indent + defIndent + defIndent +
def.append(" => ")
.append(defaultValue(unit, returnType))
.append(";");
}
}
else {
if(isVariable){
def.append("variable ");
}
if (isVoid) {
def.append("Anything");
}
else {
if (isTypeUnknown(returnType)) {
def.append("value");
}
else {
def.append(returnType.getProducedTypeName(unit));
}
}
def.append(" ")
.append(brokenName)
.append(" = ")
.append(defaultValue(unit, returnType))
.append(";");
}
return def.toString();
}
Set<Declaration> getImports() {
Set<Declaration> imports = new HashSet<Declaration>();
importType(imports, returnType, rootNode);
if (parameters!=null) {
importTypes(imports, parameters.values(), rootNode);
}
return imports;
}
static ValueFunctionDefinitionGenerator create(String brokenName,
Tree.MemberOrTypeExpression node,
Tree.CompilationUnit rootNode) {
boolean isUpperCase = Character.isUpperCase(brokenName.charAt(0));
if (isUpperCase) return null;
FindValueFunctionVisitor fav = new FindValueFunctionVisitor(node);
rootNode.visit(fav);
ProducedType et = fav.expectedType;
final boolean isVoid = et==null;
ProducedType returnType = isVoid ? null : node.getUnit().denotableType(et);
StringBuilder params = new StringBuilder();
LinkedHashMap<String, ProducedType> paramTypes = getParameters(fav);
if (paramTypes!=null) {
String desc = "function '" + brokenName + params + "'";
return new ValueFunctionDefinitionGenerator(brokenName, node, rootNode,
desc, LOCAL_METHOD, returnType, paramTypes, null);
}
else {
String desc = "value '" + brokenName + "'";
return new ValueFunctionDefinitionGenerator(brokenName, node, rootNode,
desc, LOCAL_ATTRIBUTE, returnType, null, fav.isVariable);
}
}
private static class FindValueFunctionVisitor extends FindArgumentsVisitor{
boolean isVariable = false;
FindValueFunctionVisitor(MemberOrTypeExpression smte) {
super(smte);
}
@Override
public void visit(AssignmentOp that) {
isVariable = ((Tree.AssignmentOp) that).getLeftTerm() == smte;
super.visit(that);
}
@Override
public void visit(UnaryOperatorExpression that) {
isVariable = ((Tree.UnaryOperatorExpression) that).getTerm() == smte;
super.visit(that);
}
@Override
public void visit(SpecifierStatement that) {
isVariable = ((Tree.SpecifierStatement) that).getBaseMemberExpression() == smte;
super.visit(that);
}
}
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_ValueFunctionDefinitionGenerator.java
|
266 |
public class ElasticsearchExceptionTests extends ElasticsearchTestCase {
@Test
public void testStatus() {
ElasticsearchException exception = new ElasticsearchException("test");
assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
exception = new ElasticsearchException("test", new RuntimeException());
assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
exception = new ElasticsearchException("test", new IndexMissingException(new Index("test")));
assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
exception = new RemoteTransportException("test", new IndexMissingException(new Index("test")));
assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND));
}
}
| 0true
|
src_test_java_org_elasticsearch_ElasticsearchExceptionTests.java
|
309 |
new Thread() {
public void run() {
map.lock(key);
lockedLatch.countDown();
}
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapLockTest.java
|
322 |
int pos = Arrays.binarySearch(explodedView, new MergeHandlerAdapter() {
@Override
public String getName() {
return parentName;
}
}, nameCompare);
| 0true
|
common_src_main_java_org_broadleafcommerce_common_extensibility_context_merge_MergeManager.java
|
3,246 |
abstract class DoubleValuesComparatorBase<T extends Number> extends NumberComparatorBase<T> {
protected final IndexNumericFieldData<?> indexFieldData;
protected final double missingValue;
protected double bottom;
protected DoubleValues readerValues;
protected final SortMode sortMode;
public DoubleValuesComparatorBase(IndexNumericFieldData<?> indexFieldData, double missingValue, SortMode sortMode) {
this.indexFieldData = indexFieldData;
this.missingValue = missingValue;
this.sortMode = sortMode;
}
@Override
public final int compareBottom(int doc) throws IOException {
final double v2 = sortMode.getRelevantValue(readerValues, doc, missingValue);
return compare(bottom, v2);
}
@Override
public final int compareDocToValue(int doc, T valueObj) throws IOException {
final double value = valueObj.doubleValue();
final double docValue = sortMode.getRelevantValue(readerValues, doc, missingValue);
return compare(docValue, value);
}
@Override
public final FieldComparator<T> setNextReader(AtomicReaderContext context) throws IOException {
readerValues = indexFieldData.load(context).getDoubleValues();
return this;
}
@Override
public int compareBottomMissing() {
return compare(bottom, missingValue);
}
static final int compare(double left, double right) {
return Double.compare(left, right);
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_fielddata_fieldcomparator_DoubleValuesComparatorBase.java
|
5,088 |
transportService.sendRequest(node, SearchDfsTransportHandler.ACTION, request, new BaseTransportResponseHandler<DfsSearchResult>() {
@Override
public DfsSearchResult newInstance() {
return new DfsSearchResult();
}
@Override
public void handleResponse(DfsSearchResult response) {
listener.onResult(response);
}
@Override
public void handleException(TransportException exp) {
listener.onFailure(exp);
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
});
| 1no label
|
src_main_java_org_elasticsearch_search_action_SearchServiceTransportAction.java
|
1,228 |
THREAD_LOCAL {
@Override
<T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
return threadLocal(dequeFactory(c, limit / estimatedThreadPoolSize));
}
},
| 0true
|
src_main_java_org_elasticsearch_cache_recycler_PageCacheRecycler.java
|
1,371 |
public static class Builder {
private Set<ClusterBlock> global = Sets.newHashSet();
private Map<String, Set<ClusterBlock>> indices = Maps.newHashMap();
public Builder() {
}
public Builder blocks(ClusterBlocks blocks) {
global.addAll(blocks.global());
for (Map.Entry<String, ImmutableSet<ClusterBlock>> entry : blocks.indices().entrySet()) {
if (!indices.containsKey(entry.getKey())) {
indices.put(entry.getKey(), Sets.<ClusterBlock>newHashSet());
}
indices.get(entry.getKey()).addAll(entry.getValue());
}
return this;
}
public Builder addBlocks(IndexMetaData indexMetaData) {
if (indexMetaData.state() == IndexMetaData.State.CLOSE) {
addIndexBlock(indexMetaData.index(), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
}
if (indexMetaData.settings().getAsBoolean(IndexMetaData.SETTING_READ_ONLY, false)) {
addIndexBlock(indexMetaData.index(), IndexMetaData.INDEX_READ_ONLY_BLOCK);
}
if (indexMetaData.settings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_READ, false)) {
addIndexBlock(indexMetaData.index(), IndexMetaData.INDEX_READ_BLOCK);
}
if (indexMetaData.settings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_WRITE, false)) {
addIndexBlock(indexMetaData.index(), IndexMetaData.INDEX_WRITE_BLOCK);
}
if (indexMetaData.settings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_METADATA, false)) {
addIndexBlock(indexMetaData.index(), IndexMetaData.INDEX_METADATA_BLOCK);
}
return this;
}
public Builder addGlobalBlock(ClusterBlock block) {
global.add(block);
return this;
}
public Builder removeGlobalBlock(ClusterBlock block) {
global.remove(block);
return this;
}
public Builder addIndexBlock(String index, ClusterBlock block) {
if (!indices.containsKey(index)) {
indices.put(index, Sets.<ClusterBlock>newHashSet());
}
indices.get(index).add(block);
return this;
}
public Builder removeIndexBlocks(String index) {
if (!indices.containsKey(index)) {
return this;
}
indices.remove(index);
return this;
}
public Builder removeIndexBlock(String index, ClusterBlock block) {
if (!indices.containsKey(index)) {
return this;
}
indices.get(index).remove(block);
if (indices.get(index).isEmpty()) {
indices.remove(index);
}
return this;
}
public ClusterBlocks build() {
ImmutableMap.Builder<String, ImmutableSet<ClusterBlock>> indicesBuilder = ImmutableMap.builder();
for (Map.Entry<String, Set<ClusterBlock>> entry : indices.entrySet()) {
indicesBuilder.put(entry.getKey(), ImmutableSet.copyOf(entry.getValue()));
}
return new ClusterBlocks(ImmutableSet.copyOf(global), indicesBuilder.build());
}
public static ClusterBlocks readClusterBlocks(StreamInput in) throws IOException {
ImmutableSet<ClusterBlock> global = readBlockSet(in);
ImmutableMap.Builder<String, ImmutableSet<ClusterBlock>> indicesBuilder = ImmutableMap.builder();
int size = in.readVInt();
for (int j = 0; j < size; j++) {
indicesBuilder.put(in.readString().intern(), readBlockSet(in));
}
return new ClusterBlocks(global, indicesBuilder.build());
}
public static void writeClusterBlocks(ClusterBlocks blocks, StreamOutput out) throws IOException {
writeBlockSet(blocks.global(), out);
out.writeVInt(blocks.indices().size());
for (Map.Entry<String, ImmutableSet<ClusterBlock>> entry : blocks.indices().entrySet()) {
out.writeString(entry.getKey());
writeBlockSet(entry.getValue(), out);
}
}
private static void writeBlockSet(ImmutableSet<ClusterBlock> blocks, StreamOutput out) throws IOException {
out.writeVInt(blocks.size());
for (ClusterBlock block : blocks) {
block.writeTo(out);
}
}
private static ImmutableSet<ClusterBlock> readBlockSet(StreamInput in) throws IOException {
ImmutableSet.Builder<ClusterBlock> builder = ImmutableSet.builder();
int size = in.readVInt();
for (int i = 0; i < size; i++) {
builder.add(ClusterBlock.readClusterBlock(in));
}
return builder.build();
}
}
| 0true
|
src_main_java_org_elasticsearch_cluster_block_ClusterBlocks.java
|
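A minimal sketch of composing blocks with the builder above, using the index-level block constants referenced in addBlocks (the index name is illustrative):
ClusterBlocks blocks = new ClusterBlocks.Builder()
        .addIndexBlock("orders", IndexMetaData.INDEX_READ_ONLY_BLOCK)  // rejects writes and metadata changes
        .addIndexBlock("orders", IndexMetaData.INDEX_METADATA_BLOCK)
        .build();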
1,320 |
public class FieldType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, FieldType> TYPES = new LinkedHashMap<String, FieldType>();
public static final FieldType ID = new FieldType("id", "ID");
public static final FieldType CATEGORY = new FieldType("category", "Category");
public static final FieldType INT = new FieldType("i", "Integer");
public static final FieldType INTS = new FieldType("is", "Integer (Multi)");
public static final FieldType STRING = new FieldType("s", "String");
public static final FieldType STRINGS = new FieldType("ss", "String (Multi)");
public static final FieldType LONG = new FieldType("l", "Long");
public static final FieldType LONGS = new FieldType("ls", "Long (Multi)");
public static final FieldType TEXT = new FieldType("t", "Text");
public static final FieldType TEXTS = new FieldType("txt", "Text (Multi)");
public static final FieldType BOOLEAN = new FieldType("b", "Boolean");
public static final FieldType BOOLEANS = new FieldType("bs", "Boolean (Multi)");
public static final FieldType DOUBLE = new FieldType("d", "Double");
public static final FieldType DOUBLES = new FieldType("ds", "Double (Multi)");
public static final FieldType PRICE = new FieldType("p", "Price");
public static final FieldType DATE = new FieldType("dt", "Date");
public static final FieldType DATES = new FieldType("dts", "Date (Multi)");
public static final FieldType TRIEINT = new FieldType("ti", "Trie Integer");
public static final FieldType TRIELONG = new FieldType("tl", "Trie Long");
public static final FieldType TRIEDOUBLE = new FieldType("td", "Trie Double");
public static final FieldType TRIEDATE = new FieldType("tdt", "Trie Date");
public static FieldType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public FieldType() {
//do nothing
}
public FieldType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
@Override
public String getType() {
return type;
}
@Override
public String getFriendlyType() {
return friendlyType;
}
private void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
FieldType other = (FieldType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_search_domain_solr_FieldType.java
|
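A short sketch of the extensible-enum pattern above in use; registration happens in the constructor via setType, and lookup goes through getInstance (the custom type is hypothetical):
FieldType multiString = FieldType.getInstance("ss");     // resolves to FieldType.STRINGS
FieldType custom = new FieldType("geo", "Geolocation");  // hypothetical extension; self-registers in TYPES
assert FieldType.getInstance("geo") == custom;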
5,083 |
transportService.sendRequest(node, SearchFreeContextTransportHandler.ACTION, new SearchFreeContextRequest(request, contextId), new TransportResponseHandler<TransportResponse>() {
@Override
public TransportResponse newInstance() {
return TransportResponse.Empty.INSTANCE;
}
@Override
public void handleResponse(TransportResponse response) {
actionListener.onResponse(true);
}
@Override
public void handleException(TransportException exp) {
actionListener.onFailure(exp);
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
});
| 1no label
|
src_main_java_org_elasticsearch_search_action_SearchServiceTransportAction.java
|
110 |
private class TxHook implements javax.transaction.Synchronization
{
boolean gotBefore = false;
boolean gotAfter = false;
int statusBefore = -1;
int statusAfter = -1;
Transaction txBefore = null;
Transaction txAfter = null;
public void beforeCompletion()
{
try
{
statusBefore = tm.getStatus();
txBefore = tm.getTransaction();
gotBefore = true;
}
catch ( Exception e )
{
throw new RuntimeException( "" + e );
}
}
public void afterCompletion( int status )
{
try
{
statusAfter = status;
txAfter = tm.getTransaction();
assertTrue( status == tm.getStatus() );
gotAfter = true;
}
catch ( Exception e )
{
throw new RuntimeException( "" + e );
}
}
}
| 0true
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestJtaCompliance.java
|
113 |
{
@Override
public Object doWork( Void state )
{
try
{
tm.begin();
tm.getTransaction().registerSynchronization( hook );
return null;
}
catch ( Exception e )
{
throw new RuntimeException( e );
}
}
};
| 0true
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestJtaCompliance.java
|
297 |
public interface RootCauseAccessor {
public Throwable getRootCause();
public String getRootCauseMessage();
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_exception_RootCauseAccessor.java
|
312 |
static final class Fields {
static final XContentBuilderString STATUS = new XContentBuilderString("status");
static final XContentBuilderString NUMBER_OF_SHARDS = new XContentBuilderString("number_of_shards");
static final XContentBuilderString NUMBER_OF_REPLICAS = new XContentBuilderString("number_of_replicas");
static final XContentBuilderString ACTIVE_PRIMARY_SHARDS = new XContentBuilderString("active_primary_shards");
static final XContentBuilderString ACTIVE_SHARDS = new XContentBuilderString("active_shards");
static final XContentBuilderString RELOCATING_SHARDS = new XContentBuilderString("relocating_shards");
static final XContentBuilderString INITIALIZING_SHARDS = new XContentBuilderString("initializing_shards");
static final XContentBuilderString UNASSIGNED_SHARDS = new XContentBuilderString("unassigned_shards");
static final XContentBuilderString VALIDATION_FAILURES = new XContentBuilderString("validation_failures");
static final XContentBuilderString SHARDS = new XContentBuilderString("shards");
static final XContentBuilderString PRIMARY_ACTIVE = new XContentBuilderString("primary_active");
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_health_ClusterIndexHealth.java
|
356 |
public class NodesStatsAction extends ClusterAction<NodesStatsRequest, NodesStatsResponse, NodesStatsRequestBuilder> {
public static final NodesStatsAction INSTANCE = new NodesStatsAction();
public static final String NAME = "cluster/nodes/stats";
private NodesStatsAction() {
super(NAME);
}
@Override
public NodesStatsResponse newResponse() {
return new NodesStatsResponse();
}
@Override
public NodesStatsRequestBuilder newRequestBuilder(ClusterAdminClient client) {
return new NodesStatsRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_stats_NodesStatsAction.java
|
37 |
public class TitanSpecificBlueprintsTestSuite extends TestSuite {
public TitanSpecificBlueprintsTestSuite(final GraphTest graphTest) {
super(graphTest);
}
public void testVertexReattachment() {
TransactionalGraph graph = (TransactionalGraph) graphTest.generateGraph();
Vertex a = graph.addVertex(null);
Vertex b = graph.addVertex(null);
Edge e = graph.addEdge(null, a, b, "friend");
graph.commit();
a = graph.getVertex(a);
Assert.assertNotNull(a);
Assert.assertEquals(1, BaseTest.count(a.getVertices(Direction.OUT)));
graph.shutdown();
}
}
| 0true
|
titan-test_src_main_java_com_thinkaurelius_titan_blueprints_TitanSpecificBlueprintsTestSuite.java
|
87 |
class ConvertIfElseToThenElse extends CorrectionProposal {
ConvertIfElseToThenElse(int offset, TextChange change) {
super("Convert to then-else", change, new Region(offset, 0));
}
static void addConvertToThenElseProposal(CompilationUnit cu, IDocument doc,
Collection<ICompletionProposal> proposals, IFile file,
Statement statement) {
TextChange change = createTextChange(cu, doc, statement, file);
if (change != null) {
proposals.add(new ConvertIfElseToThenElse(change.getEdit().getOffset(), change));
}
}
static TextChange createTextChange(CompilationUnit cu,
IDocument doc, Statement statement, IFile file) {
if (! (statement instanceof Tree.IfStatement)) {
return null;
}
IfStatement ifStmt = (IfStatement) statement;
if (ifStmt.getElseClause() == null) {
return null;
}
Block ifBlock = ifStmt.getIfClause().getBlock();
if (ifBlock.getStatements().size() != 1) {
return null;
}
Block elseBlock = ifStmt.getElseClause().getBlock();
if (elseBlock.getStatements().size() != 1) {
return null;
}
Node ifBlockNode = ifBlock.getStatements().get(0);
Node elseBlockNode = elseBlock.getStatements().get(0);
List<Condition> conditions = ifStmt.getIfClause()
.getConditionList().getConditions();
if (conditions.size()!=1) {
return null;
}
Condition condition = conditions.get(0);
Integer replaceFrom = statement.getStartIndex();
String test = removeEnclosingParentesis(getTerm(doc, condition));
String thenStr = null;
String elseStr = null;
String attributeIdentifier = null;
String operator = null;
String action;
if (ifBlockNode instanceof Tree.Return) {
Tree.Return ifReturn = (Tree.Return) ifBlockNode;
if (! (elseBlockNode instanceof Tree.Return)) {
return null;
}
Tree.Return elseReturn = (Tree.Return) elseBlockNode;
action = "return ";
thenStr = getOperands(doc, ifReturn.getExpression());
elseStr = getOperands(doc, elseReturn.getExpression());
} else if (ifBlockNode instanceof Tree.SpecifierStatement) {
SpecifierStatement ifSpecifierStmt = (Tree.SpecifierStatement) ifBlockNode;
attributeIdentifier = getTerm(doc, ifSpecifierStmt.getBaseMemberExpression());
operator = " = ";
action = attributeIdentifier + operator;
if (!(elseBlockNode instanceof Tree.SpecifierStatement)) {
return null;
}
String elseId = getTerm(doc, ((Tree.SpecifierStatement)elseBlockNode).getBaseMemberExpression());
if (!attributeIdentifier.equals(elseId)) {
return null;
}
thenStr = getOperands(doc, ifSpecifierStmt.getSpecifierExpression().getExpression().getTerm());
elseStr = getOperands(doc, ((Tree.SpecifierStatement) elseBlockNode).getSpecifierExpression().getExpression().getTerm());
} else if (ifBlockNode instanceof Tree.ExpressionStatement) {
if (!(elseBlockNode instanceof Tree.ExpressionStatement)) {
return null;
}
Term ifOperator = ((Tree.ExpressionStatement) ifBlockNode).getExpression().getTerm();
if (!(ifOperator instanceof AssignOp)) {
return null;
}
Term elseOperator = ((Tree.ExpressionStatement) elseBlockNode).getExpression().getTerm();
if (!(elseOperator instanceof AssignOp)) {
return null;
}
AssignOp ifAssign = (AssignOp) ifOperator;
AssignOp elseAssign = (AssignOp) elseOperator;
attributeIdentifier = getTerm(doc, ifAssign.getLeftTerm());
String elseId = getTerm(doc, elseAssign.getLeftTerm());
if (!attributeIdentifier.equals(elseId)) {
return null;
}
operator = " = ";
action = attributeIdentifier + operator;
thenStr = getOperands(doc, ifAssign.getRightTerm());
elseStr = getOperands(doc, elseAssign.getRightTerm());
} else {
return null;
}
if (attributeIdentifier != null) {
Statement prevStatement = findPreviousStatement(cu, doc, statement);
if (prevStatement != null) {
if (prevStatement instanceof AttributeDeclaration) {
AttributeDeclaration attrDecl = (AttributeDeclaration) prevStatement;
if (attributeIdentifier.equals(getTerm(doc, attrDecl.getIdentifier()))) {
action = removeSemiColon(getTerm(doc, attrDecl)) + operator;
replaceFrom = attrDecl.getStartIndex();
}
}
}
}
if (condition instanceof Tree.ExistsCondition) {
Tree.ExistsCondition existsCond = (Tree.ExistsCondition) condition;
Variable variable = existsCond.getVariable();
if (thenStr.equals(getTerm(doc, variable.getIdentifier()))) {
Expression existsExpr = variable.getSpecifierExpression().getExpression();
test = getTerm(doc, existsExpr);
thenStr = null;
} else {
return null; //Disabling because type narrowing does not work with then.
}
} else if (! (condition instanceof Tree.BooleanCondition)) {
return null; //Disabling because type narrowing does not work with then.
} else if (((BooleanCondition)condition).getExpression().getTerm() instanceof IsOp){
return null; //Disabling because type narrowing does not work with then.
}
StringBuilder replace = new StringBuilder();
replace.append(action).append(test);
if (thenStr != null) {
replace.append(" then ").append(thenStr);
}
if (!elseStr.equals("null")) {
replace.append(" else ").append(elseStr);
}
replace.append(";");
TextChange change = new TextFileChange("Convert to then-else", file);
// TextChange change = new DocumentChange("Convert to then-else", doc);
change.setEdit(new ReplaceEdit(replaceFrom,
statement.getStopIndex() - replaceFrom + 1,
replace.toString()));
return change;
}
private static String getOperands(IDocument doc, Term operand) {
String term = getTerm(doc, operand);
if (hasLowerPrecedenceThenElse(operand)) {
return "(" + term + ")";
}
return term;
}
private static boolean hasLowerPrecedenceThenElse(Term operand) {
Term node;
if (operand instanceof Tree.Expression) {
Tree.Expression exp = (Tree.Expression) operand;
node = exp.getTerm();
} else {
node = operand;
}
return node instanceof Tree.DefaultOp ||
node instanceof ThenOp ||
node instanceof AssignOp;
}
private static String removeSemiColon(String term) {
if (term.endsWith(";")) {
return term.substring(0, term.length() - 1);
}
return term;
}
private static Statement findPreviousStatement(CompilationUnit cu, IDocument doc,
Statement statement) {
try {
int previousLineNo = doc.getLineOfOffset(statement.getStartIndex());
while (previousLineNo > 1) {
previousLineNo--;
IRegion lineInfo = doc.getLineInformation(previousLineNo);
String prevLine = doc.get(lineInfo.getOffset(), lineInfo.getLength());
Matcher m = Pattern.compile("(\\s*)\\w+").matcher(prevLine);
if (m.find()) {
int whitespaceLen = m.group(1).length();
Node node = Nodes.findNode(cu, lineInfo.getOffset() + whitespaceLen,
lineInfo.getOffset() + whitespaceLen + 1);
return Nodes.findStatement(cu, node);
}
}
} catch (BadLocationException e) {
e.printStackTrace();
}
return null;
}
private static String removeEnclosingParentesis(String s) {
if (s.charAt(0) == '(' && s.charAt(s.length() - 1) == ')') {
return s.substring(1, s.length() - 1);
}
return s;
}
private static String getTerm(IDocument doc, Node node) {
try {
return doc.get(node.getStartIndex(),
node.getStopIndex() - node.getStartIndex() + 1);
} catch (BadLocationException e) {
throw new RuntimeException(e);
}
}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_ConvertIfElseToThenElse.java
|
457 |
public class PendingClusterTasksRequestBuilder extends MasterNodeReadOperationRequestBuilder<PendingClusterTasksRequest, PendingClusterTasksResponse, PendingClusterTasksRequestBuilder> {
public PendingClusterTasksRequestBuilder(ClusterAdminClient client) {
super((InternalClusterAdminClient) client, new PendingClusterTasksRequest());
}
@Override
protected void doExecute(ActionListener<PendingClusterTasksResponse> listener) {
((InternalClusterAdminClient) client).pendingClusterTasks(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_tasks_PendingClusterTasksRequestBuilder.java
|
560 |
public class RandomLB extends AbstractLoadBalancer {
private final Random random = new Random();
@Override
public Member next() {
Member[] members = getMembers();
if (members == null || members.length == 0) {
return null;
}
int index = random.nextInt(members.length);
return members[index];
}
}
| 0true
|
hazelcast-client_src_main_java_com_hazelcast_client_util_RandomLB.java
|
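A hedged sketch of plugging the load balancer above into a client (standard ClientConfig wiring; cluster connection details omitted):
ClientConfig config = new ClientConfig();
config.setLoadBalancer(new RandomLB());  // pick a random cluster member for each invocation
HazelcastInstance client = HazelcastClient.newHazelcastClient(config);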
445 |
String id = queue.addItemListener(new ItemListener() {
public void itemAdded(ItemEvent itemEvent) {
itemAddedLatch.countDown();
}
public void itemRemoved(ItemEvent item) {
itemRemovedLatch.countDown();
}
}, true);
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_queue_ClientQueueTest.java
|
330 |
map.addEntryListener(new EntryAdapter<String, String>() {
public void entryEvicted(EntryEvent<String, String> event) {
latch.countDown();
}
}, true);
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTest.java
|
1,505 |
public class DeadNodesAllocationTests extends ElasticsearchAllocationTestCase {
private final ESLogger logger = Loggers.getLogger(DeadNodesAllocationTests.class);
@Test
public void simpleDeadNodeOnStartedPrimaryShard() {
AllocationService allocation = createAllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put("cluster.routing.allocation.allow_rebalance", "always")
.build());
logger.info("--> building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("--> adding 2 nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
).build();
RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
// starting primaries
rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
// starting replicas
rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
logger.info("--> verifying all is allocated");
assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
logger.info("--> fail node with primary");
String nodeIdToFail = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
String nodeIdRemaining = nodeIdToFail.equals("node1") ? "node2" : "node1";
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode(nodeIdRemaining))
).build();
rerouteResult = allocation.reroute(clusterState);
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
assertThat(clusterState.routingNodes().node(nodeIdRemaining).get(0).primary(), equalTo(true));
assertThat(clusterState.routingNodes().node(nodeIdRemaining).get(0).state(), equalTo(STARTED));
}
@Test
public void deadNodeWhileRelocatingOnToNode() {
AllocationService allocation = createAllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put("cluster.routing.allocation.allow_rebalance", "always")
.build());
logger.info("--> building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("--> adding 2 nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
).build();
RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
// starting primaries
rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
// starting replicas
rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
logger.info("--> verifying all is allocated");
assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
logger.info("--> adding additional node");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3"))
).build();
rerouteResult = allocation.reroute(clusterState);
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
String origPrimaryNodeId = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
String origReplicaNodeId = clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId();
logger.info("--> moving primary shard to node3");
rerouteResult = allocation.reroute(clusterState, new AllocationCommands(
new MoveAllocationCommand(clusterState.routingTable().index("test").shard(0).primaryShard().shardId(), clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3"))
);
assertThat(rerouteResult.changed(), equalTo(true));
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(RELOCATING));
assertThat(clusterState.routingNodes().node("node3").get(0).state(), equalTo(INITIALIZING));
logger.info("--> fail primary shard recovering instance on node3 being initialized by killing node3");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode(origPrimaryNodeId))
.put(newNode(origReplicaNodeId))
).build();
rerouteResult = allocation.reroute(clusterState);
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(STARTED));
assertThat(clusterState.routingNodes().node(origReplicaNodeId).get(0).state(), equalTo(STARTED));
}
@Test
public void deadNodeWhileRelocatingOnFromNode() {
AllocationService allocation = createAllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put("cluster.routing.allocation.allow_rebalance", "always")
.build());
logger.info("--> building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("--> adding 2 nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
).build();
RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
// starting primaries
rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
// starting replicas
rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
logger.info("--> verifying all is allocated");
assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
logger.info("--> adding additional node");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3"))
).build();
rerouteResult = allocation.reroute(clusterState);
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
String origPrimaryNodeId = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
String origReplicaNodeId = clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId();
logger.info("--> moving primary shard to node3");
rerouteResult = allocation.reroute(clusterState, new AllocationCommands(
new MoveAllocationCommand(clusterState.routingTable().index("test").shard(0).primaryShard().shardId(), clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3"))
);
assertThat(rerouteResult.changed(), equalTo(true));
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(RELOCATING));
assertThat(clusterState.routingNodes().node("node3").get(0).state(), equalTo(INITIALIZING));
logger.info("--> fail primary shard recovering instance on 'origPrimaryNodeId' being relocated");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node3"))
.put(newNode(origReplicaNodeId))
).build();
rerouteResult = allocation.reroute(clusterState);
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
assertThat(clusterState.routingNodes().node(origReplicaNodeId).get(0).state(), equalTo(STARTED));
assertThat(clusterState.routingNodes().node("node3").get(0).state(), equalTo(INITIALIZING));
}
}
| 0true
|
src_test_java_org_elasticsearch_cluster_routing_allocation_DeadNodesAllocationTests.java
|
664 |
public interface SkuDao {
/**
* Retrieve a {@code Sku} instance by its primary key
*
* @param skuId the primary key of the sku
* @return the sku at the primary key
*/
public Sku readSkuById(Long skuId);
/**
* Persist a {@code Sku} instance to the datastore
*
* @param sku the sku to persist
* @return the saved state of the passed in sku
*/
public Sku save(Sku sku);
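/**
 * Persist a {@code SkuFee} instance to the datastore
 *
 * @param fee the sku fee to persist
 * @return the saved state of the passed in sku fee
 */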
public SkuFee saveSkuFee(SkuFee fee);
/**
* Retrieve the {@code Sku} instance whose primary key is the smallest
* of all skus in the datastore
*
* @return the sku with the smallest primary key
*/
public Sku readFirstSku();
/**
* Retrieve all {@code Sku} instances from the datastore
*
* @return the list of all skus
*/
public List<Sku> readAllSkus();
/**
* Find all the {@code Sku} instances whose primary key matches
* one of the values from the passed in list
*
* @param ids the list of primary key values
* @return the list of skus that match the list of primary key values
*/
public List<Sku> readSkusById(List<Long> ids);
/**
* Remove the {@code Sku} instance from the datastore
*
* @param sku the sku to remove
*/
public void delete(Sku sku);
/**
* Create a new {@code Sku} instance. The system will use the configuration in
* {@code /BroadleafCommerce/core/BroadleafCommerceFramework/src/main/resources/bl-framework-applicationContext-entity.xml}
* to determine which polymorphic version of {@code Sku} to instantiate. To make Broadleaf instantiate your
* extension of {@code Sku} by default, include an entity configuration bean in your application context xml similar to:
* <p>
* {@code
* <bean id="blEntityConfiguration" class="org.broadleafcommerce.common.persistence.EntityConfiguration">
* <property name="entityContexts">
* <list>
* <value>classpath:myCompany-applicationContext-entity.xml</value>
* </list>
* </property>
* </bean>
* }
* </p>
* Declare the same key for your desired entity in your entity xml that is used in the Broadleaf entity xml, but change the value to the fully
* qualified classname of your entity extension.
*
* @return a {@code Sku} instance based on the Broadleaf entity configuration.
*/
public Sku create();
}
| 0true
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_catalog_dao_SkuDao.java
|
3,859 |
public class HasChildQueryParser implements QueryParser {
public static final String NAME = "has_child";
@Inject
public HasChildQueryParser() {
}
@Override
public String[] names() {
return new String[]{NAME, Strings.toCamelCase(NAME)};
}
@Override
public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
Query innerQuery = null;
boolean queryFound = false;
float boost = 1.0f;
String childType = null;
ScoreType scoreType = null;
int shortCircuitParentDocSet = 8192;
String queryName = null;
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("query".equals(currentFieldName)) {
// TODO we need to set the type, but, `query` can come before `type`... (see HasChildFilterParser)
// since we switch types, make sure we change the context
String[] origTypes = QueryParseContext.setTypesWithPrevious(childType == null ? null : new String[]{childType});
try {
innerQuery = parseContext.parseInnerQuery();
queryFound = true;
} finally {
QueryParseContext.setTypes(origTypes);
}
} else {
throw new QueryParsingException(parseContext.index(), "[has_child] query does not support [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if ("type".equals(currentFieldName) || "child_type".equals(currentFieldName) || "childType".equals(currentFieldName)) {
childType = parser.text();
} else if ("_scope".equals(currentFieldName)) {
throw new QueryParsingException(parseContext.index(), "the [_scope] support in [has_child] query has been removed, use a filter as a facet_filter in the relevant global facet");
} else if ("score_type".equals(currentFieldName) || "scoreType".equals(currentFieldName)) {
String scoreTypeValue = parser.text();
if (!"none".equals(scoreTypeValue)) {
scoreType = ScoreType.fromString(scoreTypeValue);
}
} else if ("score_mode".equals(currentFieldName) || "scoreMode".equals(currentFieldName)) {
String scoreModeValue = parser.text();
if (!"none".equals(scoreModeValue)) {
scoreType = ScoreType.fromString(scoreModeValue);
}
} else if ("boost".equals(currentFieldName)) {
boost = parser.floatValue();
} else if ("short_circuit_cutoff".equals(currentFieldName)) {
shortCircuitParentDocSet = parser.intValue();
} else if ("_name".equals(currentFieldName)) {
queryName = parser.text();
} else {
throw new QueryParsingException(parseContext.index(), "[has_child] query does not support [" + currentFieldName + "]");
}
}
}
if (!queryFound) {
throw new QueryParsingException(parseContext.index(), "[has_child] requires 'query' field");
}
if (innerQuery == null) {
return null;
}
if (childType == null) {
throw new QueryParsingException(parseContext.index(), "[has_child] requires 'type' field");
}
innerQuery.setBoost(boost);
DocumentMapper childDocMapper = parseContext.mapperService().documentMapper(childType);
if (childDocMapper == null) {
throw new QueryParsingException(parseContext.index(), "[has_child] No mapping for type [" + childType + "]");
}
if (!childDocMapper.parentFieldMapper().active()) {
throw new QueryParsingException(parseContext.index(), "[has_child] Type [" + childType + "] does not have parent mapping");
}
String parentType = childDocMapper.parentFieldMapper().type();
DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType);
if (parentDocMapper == null) {
throw new QueryParsingException(parseContext.index(), "[has_child] Type [" + childType + "] points to a non existent parent type [" + parentType + "]");
}
Filter nonNestedDocsFilter = null;
if (parentDocMapper.hasNestedObjects()) {
nonNestedDocsFilter = parseContext.cacheFilter(NonNestedDocsFilter.INSTANCE, null);
}
// wrap the query with type query
innerQuery = new XFilteredQuery(innerQuery, parseContext.cacheFilter(childDocMapper.typeFilter(), null));
boolean deleteByQuery = "delete_by_query".equals(SearchContext.current().source());
Query query;
Filter parentFilter = parseContext.cacheFilter(parentDocMapper.typeFilter(), null);
if (!deleteByQuery && scoreType != null) {
query = new ChildrenQuery(parentType, childType, parentFilter, innerQuery, scoreType, shortCircuitParentDocSet, nonNestedDocsFilter);
} else {
query = new ChildrenConstantScoreQuery(innerQuery, parentType, childType, parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter);
if (deleteByQuery) {
query = new XConstantScoreQuery(new DeleteByQueryWrappingFilter(query));
}
}
if (queryName != null) {
parseContext.addNamedFilter(queryName, new CustomQueryWrappingFilter(query));
}
query.setBoost(boost);
return query;
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_query_HasChildQueryParser.java
|
335 |
public class LiberalNodeReplaceInsert extends NodeReplaceInsert {
protected boolean checkNode(List<Node> usedNodes, Node[] primaryNodes, Node node) {
//find matching nodes based on id
if (replaceNode(primaryNodes, node, "id", usedNodes)) {
return true;
}
//find matching nodes based on name
if (replaceNode(primaryNodes, node, "name", usedNodes)) {
return true;
}
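//find matching nodes based on class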
if (replaceNode(primaryNodes, node, "class", usedNodes)) {
usedNodes.add(node);
return true;
}
//check if this same node already exists
if (exactNodeExists(primaryNodes, node, usedNodes)) {
return true;
}
return false;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_extensibility_context_merge_handlers_LiberalNodeReplaceInsert.java
|
1,128 |
public class NativeNaiveTFIDFScoreScript extends AbstractSearchScript {
public static final String NATIVE_NAIVE_TFIDF_SCRIPT_SCORE = "native_naive_tfidf_script_score";
String field = null;
String[] terms = null;
public static class Factory implements NativeScriptFactory {
@Override
public ExecutableScript newScript(@Nullable Map<String, Object> params) {
return new NativeNaiveTFIDFScoreScript(params);
}
}
private NativeNaiveTFIDFScoreScript(Map<String, Object> params) {
// the single map entry maps the field name to the list of query terms
field = params.keySet().iterator().next();
ArrayList<String> arrayList = (ArrayList<String>) params.get(field);
terms = arrayList.toArray(new String[arrayList.size()]);
}
@Override
public Object run() {
float score = 0;
IndexField indexField = indexLookup().get(field);
for (int i = 0; i < terms.length; i++) {
IndexFieldTerm indexFieldTerm = indexField.get(terms[i]);
try {
if (indexFieldTerm.tf() != 0) {
score += indexFieldTerm.tf() * indexField.docCount() / indexFieldTerm.df();
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
return score;
}
}
| 0true
|
src_test_java_org_elasticsearch_benchmark_scripts_score_script_NativeNaiveTFIDFScoreScript.java
|
318 |
new Thread() {
public void run() {
map.lock(key);
lockedLatch.countDown();
}
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapLockTest.java
|
364 |
public class DeleteRepositoryRequestBuilder extends AcknowledgedRequestBuilder<DeleteRepositoryRequest, DeleteRepositoryResponse, DeleteRepositoryRequestBuilder> {
/**
* Constructs unregister repository request builder
*
* @param clusterAdminClient cluster admin client
*/
public DeleteRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient) {
super((InternalClusterAdminClient) clusterAdminClient, new DeleteRepositoryRequest());
}
/**
* Constructs unregister repository request builder with specified repository name
*
* @param clusterAdminClient cluster admin client
*/
public DeleteRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient, String name) {
super((InternalClusterAdminClient) clusterAdminClient, new DeleteRepositoryRequest(name));
}
/**
* Sets the repository name
*
* @param name the repository name
*/
public DeleteRepositoryRequestBuilder setName(String name) {
request.name(name);
return this;
}
@Override
protected void doExecute(ActionListener<DeleteRepositoryResponse> listener) {
((ClusterAdminClient) client).deleteRepository(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_repositories_delete_DeleteRepositoryRequestBuilder.java
|
51 |
public class InstanceId implements Externalizable, Comparable<InstanceId>
{
private int serverId;
public InstanceId()
{}
public InstanceId( int serverId )
{
this.serverId = serverId;
}
@Override
public int compareTo( InstanceId o )
{
return Integer.compare( serverId, o.serverId );
}
@Override
public int hashCode()
{
return serverId;
}
@Override
public String toString()
{
return Integer.toString( serverId );
}
@Override
public boolean equals( Object o )
{
if ( this == o )
{
return true;
}
if ( o == null || getClass() != o.getClass() )
{
return false;
}
InstanceId instanceId1 = (InstanceId) o;
if ( serverId != instanceId1.serverId )
{
return false;
}
return true;
}
@Override
public void writeExternal( ObjectOutput out ) throws IOException
{
out.writeInt( serverId );
}
@Override
public void readExternal( ObjectInput in ) throws IOException, ClassNotFoundException
{
serverId = in.readInt();
}
public int toIntegerIndex()
{
return serverId;
}
}
| 1no label
|
enterprise_cluster_src_main_java_org_neo4j_cluster_InstanceId.java
|
296 |
new Thread() {
public void run() {
try {
if (!l.tryLock(2, TimeUnit.SECONDS)) {
latch.countDown();
}
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_lock_ClientLockTest.java
|
318 |
public class NodesHotThreadsRequest extends NodesOperationRequest<NodesHotThreadsRequest> {
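// defaults: sample the 3 hottest threads by cpu usage, taking 10 snapshots at 500ms intervals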
int threads = 3;
String type = "cpu";
TimeValue interval = new TimeValue(500, TimeUnit.MILLISECONDS);
int snapshots = 10;
/**
* Get hot threads from nodes based on the node ids specified. If none are passed, hot
* threads for all nodes are returned.
*/
public NodesHotThreadsRequest(String... nodesIds) {
super(nodesIds);
}
public int threads() {
return this.threads;
}
public NodesHotThreadsRequest threads(int threads) {
this.threads = threads;
return this;
}
public NodesHotThreadsRequest type(String type) {
this.type = type;
return this;
}
public String type() {
return this.type;
}
public NodesHotThreadsRequest interval(TimeValue interval) {
this.interval = interval;
return this;
}
public TimeValue interval() {
return this.interval;
}
public int snapshots() {
return this.snapshots;
}
public NodesHotThreadsRequest snapshots(int snapshots) {
this.snapshots = snapshots;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
threads = in.readInt();
type = in.readString();
interval = TimeValue.readTimeValue(in);
snapshots = in.readInt();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeInt(threads);
out.writeString(type);
interval.writeTo(out);
out.writeInt(snapshots);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_hotthreads_NodesHotThreadsRequest.java
|
3,327 |
public abstract class FloatArrayAtomicFieldData extends AbstractAtomicNumericFieldData {
public static FloatArrayAtomicFieldData empty(int numDocs) {
return new Empty(numDocs);
}
private final int numDocs;
protected long size = -1;
public FloatArrayAtomicFieldData(int numDocs) {
super(true);
this.numDocs = numDocs;
}
@Override
public void close() {
}
@Override
public int getNumDocs() {
return numDocs;
}
static class Empty extends FloatArrayAtomicFieldData {
Empty(int numDocs) {
super(numDocs);
}
@Override
public LongValues getLongValues() {
return LongValues.EMPTY;
}
@Override
public DoubleValues getDoubleValues() {
return DoubleValues.EMPTY;
}
@Override
public boolean isMultiValued() {
return false;
}
@Override
public long getNumberUniqueValues() {
return 0;
}
@Override
public boolean isValuesOrdered() {
return false;
}
@Override
public long getMemorySizeInBytes() {
return 0;
}
@Override
public BytesValues getBytesValues(boolean needsHashes) {
return BytesValues.EMPTY;
}
@Override
public ScriptDocValues getScriptValues() {
return ScriptDocValues.EMPTY;
}
}
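/**
 * Ordinal-based case: doc values are resolved through an {@link Ordinals} structure
 * into a shared array of unique values.
 */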
public static class WithOrdinals extends FloatArrayAtomicFieldData {
private final Ordinals ordinals;
private final BigFloatArrayList values;
public WithOrdinals(BigFloatArrayList values, int numDocs, Ordinals ordinals) {
super(numDocs);
this.values = values;
this.ordinals = ordinals;
}
@Override
public boolean isMultiValued() {
return ordinals.isMultiValued();
}
@Override
public boolean isValuesOrdered() {
return true;
}
@Override
public long getNumberUniqueValues() {
return ordinals.getNumOrds();
}
@Override
public long getMemorySizeInBytes() {
if (size == -1) {
size = RamUsageEstimator.NUM_BYTES_INT/*size*/ + RamUsageEstimator.NUM_BYTES_INT/*numDocs*/ + values.sizeInBytes() + ordinals.getMemorySizeInBytes();
}
return size;
}
@Override
public LongValues getLongValues() {
return new LongValues(values, ordinals.ordinals());
}
@Override
public DoubleValues getDoubleValues() {
return new DoubleValues(values, ordinals.ordinals());
}
static class LongValues extends org.elasticsearch.index.fielddata.LongValues.WithOrdinals {
private final BigFloatArrayList values;
LongValues(BigFloatArrayList values, Ordinals.Docs ordinals) {
super(ordinals);
this.values = values;
}
@Override
public long getValueByOrd(long ord) {
assert ord != Ordinals.MISSING_ORDINAL;
return (long) values.get(ord);
}
}
static class DoubleValues extends org.elasticsearch.index.fielddata.DoubleValues.WithOrdinals {
private final BigFloatArrayList values;
DoubleValues(BigFloatArrayList values, Ordinals.Docs ordinals) {
super(ordinals);
this.values = values;
}
@Override
public double getValueByOrd(long ord) {
return values.get(ord);
}
}
}
/**
* A single valued case, where not all values are "set", so we have a FixedBitSet that
* indicates which values have an actual value.
*/
public static class SingleFixedSet extends FloatArrayAtomicFieldData {
private final BigFloatArrayList values;
private final FixedBitSet set;
private final long numOrd;
public SingleFixedSet(BigFloatArrayList values, int numDocs, FixedBitSet set, long numOrd) {
super(numDocs);
this.values = values;
this.set = set;
this.numOrd = numOrd;
}
@Override
public boolean isMultiValued() {
return false;
}
@Override
public boolean isValuesOrdered() {
return false;
}
@Override
public long getNumberUniqueValues() {
return numOrd;
}
@Override
public long getMemorySizeInBytes() {
if (size == -1) {
size = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + values.sizeInBytes() + RamUsageEstimator.sizeOf(set.getBits());
}
return size;
}
@Override
public LongValues getLongValues() {
return new LongValues(values, set);
}
@Override
public DoubleValues getDoubleValues() {
return new DoubleValues(values, set);
}
static class LongValues extends org.elasticsearch.index.fielddata.LongValues {
private final BigFloatArrayList values;
private final FixedBitSet set;
LongValues(BigFloatArrayList values, FixedBitSet set) {
super(false);
this.values = values;
this.set = set;
}
@Override
public int setDocument(int docId) {
this.docId = docId;
return set.get(docId) ? 1 : 0;
}
@Override
public long nextValue() {
return (long) values.get(docId);
}
}
static class DoubleValues extends org.elasticsearch.index.fielddata.DoubleValues {
private final BigFloatArrayList values;
private final FixedBitSet set;
DoubleValues(BigFloatArrayList values, FixedBitSet set) {
super(false);
this.values = values;
this.set = set;
}
@Override
public int setDocument(int docId) {
this.docId = docId;
return set.get(docId) ? 1 : 0;
}
@Override
public double nextValue() {
return values.get(docId);
}
}
}
/**
* Assumes all the values are "set", and docId is used as the index to the value array.
*/
public static class Single extends FloatArrayAtomicFieldData {
private final BigFloatArrayList values;
private final long numOrd;
/**
* Note, here, we assume that there is no offset by 1 from docId, so position 0
* is the value for docId 0.
*/
public Single(BigFloatArrayList values, int numDocs, long numOrd) {
super(numDocs);
this.values = values;
this.numOrd = numOrd;
}
@Override
public boolean isMultiValued() {
return false;
}
@Override
public boolean isValuesOrdered() {
return false;
}
@Override
public long getNumberUniqueValues() {
return numOrd;
}
@Override
public long getMemorySizeInBytes() {
if (size == -1) {
size = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + values.sizeInBytes();
}
return size;
}
@Override
public LongValues getLongValues() {
return new LongValues(values);
}
@Override
public DoubleValues getDoubleValues() {
return new DoubleValues(values);
}
static class LongValues extends DenseLongValues {
private final BigFloatArrayList values;
LongValues(BigFloatArrayList values) {
super(false);
this.values = values;
}
@Override
public long nextValue() {
return (long) values.get(docId);
}
}
static class DoubleValues extends DenseDoubleValues {
private final BigFloatArrayList values;
DoubleValues(BigFloatArrayList values) {
super(false);
this.values = values;
}
@Override
public double nextValue() {
return values.get(docId);
}
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_fielddata_plain_FloatArrayAtomicFieldData.java
|
430 |
trackedMap.addChangeListener(new OMultiValueChangeListener<Object, String>() {
public void onAfterRecordChanged(final OMultiValueChangeEvent<Object, String> event) {
firedEvents.add(event);
}
});
| 0true
|
core_src_test_java_com_orientechnologies_orient_core_db_record_TrackedMapTest.java
|
553 |
public class WeightUnitOfMeasureType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, WeightUnitOfMeasureType> TYPES = new LinkedHashMap<String, WeightUnitOfMeasureType>();
public static final WeightUnitOfMeasureType POUNDS = new WeightUnitOfMeasureType("POUNDS", "Pounds");
public static final WeightUnitOfMeasureType KILOGRAMS = new WeightUnitOfMeasureType("KILOGRAMS", "Kilograms");
public static WeightUnitOfMeasureType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public WeightUnitOfMeasureType() {
//do nothing
}
public WeightUnitOfMeasureType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
private void setType(final String type) {
this.type = type;
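// register this instance so that getInstance(type) can resolve it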
if (!TYPES.containsKey(type)){
TYPES.put(type, this);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
WeightUnitOfMeasureType other = (WeightUnitOfMeasureType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1no label
|
common_src_main_java_org_broadleafcommerce_common_util_WeightUnitOfMeasureType.java
|
26 |
final class SubMapEntryIterator extends SubMapIterator<Map.Entry<K, V>> {
SubMapEntryIterator(final OMVRBTreeEntryPosition<K, V> first, final OMVRBTreeEntryPosition<K, V> fence) {
super(first, fence);
}
public Map.Entry<K, V> next() {
final Map.Entry<K, V> e = OMVRBTree.exportEntry(next);
nextEntry();
return e;
}
public void remove() {
removeAscending();
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_collection_OMVRBTree.java
|
342 |
Comparator<Node> idCompare = new Comparator<Node>() {
public int compare(Node arg0, Node arg1) {
Node id1 = arg0.getAttributes().getNamedItem(attribute);
Node id2 = arg1.getAttributes().getNamedItem(attribute);
String idVal1 = id1.getNodeValue();
String idVal2 = id2.getNodeValue();
return idVal1.compareTo(idVal2);
}
};
| 0true
|
common_src_main_java_org_broadleafcommerce_common_extensibility_context_merge_handlers_NodeReplaceInsert.java
|
842 |
searchServiceTransportAction.sendFreeContext(node, target.v2(), request, new ActionListener<Boolean>() {
@Override
public void onResponse(Boolean success) {
onFreedContext();
}
@Override
public void onFailure(Throwable e) {
onFailedFreedContext(e, node);
}
});
| 1no label
|
src_main_java_org_elasticsearch_action_search_TransportClearScrollAction.java
|
3,765 |
public class LogDocMergePolicyProvider extends AbstractMergePolicyProvider<LogDocMergePolicy> {
private final IndexSettingsService indexSettingsService;
private volatile int minMergeDocs;
private volatile int maxMergeDocs;
private volatile int mergeFactor;
private final boolean calibrateSizeByDeletes;
private boolean asyncMerge;
private final Set<CustomLogDocMergePolicy> policies = new CopyOnWriteArraySet<CustomLogDocMergePolicy>();
private final ApplySettings applySettings = new ApplySettings();
@Inject
public LogDocMergePolicyProvider(Store store, IndexSettingsService indexSettingsService) {
super(store);
Preconditions.checkNotNull(store, "Store must be provided to merge policy");
this.indexSettingsService = indexSettingsService;
this.minMergeDocs = componentSettings.getAsInt("min_merge_docs", LogDocMergePolicy.DEFAULT_MIN_MERGE_DOCS);
this.maxMergeDocs = componentSettings.getAsInt("max_merge_docs", LogDocMergePolicy.DEFAULT_MAX_MERGE_DOCS);
this.mergeFactor = componentSettings.getAsInt("merge_factor", LogDocMergePolicy.DEFAULT_MERGE_FACTOR);
this.calibrateSizeByDeletes = componentSettings.getAsBoolean("calibrate_size_by_deletes", true);
this.asyncMerge = indexSettings.getAsBoolean("index.merge.async", true);
logger.debug("using [log_doc] merge policy with merge_factor[{}], min_merge_docs[{}], max_merge_docs[{}], calibrate_size_by_deletes[{}], async_merge[{}]",
mergeFactor, minMergeDocs, maxMergeDocs, calibrateSizeByDeletes, asyncMerge);
indexSettingsService.addListener(applySettings);
}
@Override
public void close() throws ElasticsearchException {
indexSettingsService.removeListener(applySettings);
}
@Override
public LogDocMergePolicy newMergePolicy() {
CustomLogDocMergePolicy mergePolicy;
if (asyncMerge) {
mergePolicy = new EnableMergeLogDocMergePolicy(this);
} else {
mergePolicy = new CustomLogDocMergePolicy(this);
}
mergePolicy.setMinMergeDocs(minMergeDocs);
mergePolicy.setMaxMergeDocs(maxMergeDocs);
mergePolicy.setMergeFactor(mergeFactor);
mergePolicy.setCalibrateSizeByDeletes(calibrateSizeByDeletes);
mergePolicy.setNoCFSRatio(noCFSRatio);
policies.add(mergePolicy);
return mergePolicy;
}
public static final String INDEX_MERGE_POLICY_MIN_MERGE_DOCS = "index.merge.policy.min_merge_docs";
public static final String INDEX_MERGE_POLICY_MAX_MERGE_DOCS = "index.merge.policy.max_merge_docs";
public static final String INDEX_MERGE_POLICY_MERGE_FACTOR = "index.merge.policy.merge_factor";
class ApplySettings implements IndexSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
int minMergeDocs = settings.getAsInt(INDEX_MERGE_POLICY_MIN_MERGE_DOCS, LogDocMergePolicyProvider.this.minMergeDocs);
if (minMergeDocs != LogDocMergePolicyProvider.this.minMergeDocs) {
logger.info("updating min_merge_docs from [{}] to [{}]", LogDocMergePolicyProvider.this.minMergeDocs, minMergeDocs);
LogDocMergePolicyProvider.this.minMergeDocs = minMergeDocs;
for (CustomLogDocMergePolicy policy : policies) {
policy.setMinMergeDocs(minMergeDocs);
}
}
int maxMergeDocs = settings.getAsInt(INDEX_MERGE_POLICY_MAX_MERGE_DOCS, LogDocMergePolicyProvider.this.maxMergeDocs);
if (maxMergeDocs != LogDocMergePolicyProvider.this.maxMergeDocs) {
logger.info("updating max_merge_docs from [{}] to [{}]", LogDocMergePolicyProvider.this.maxMergeDocs, maxMergeDocs);
LogDocMergePolicyProvider.this.maxMergeDocs = maxMergeDocs;
for (CustomLogDocMergePolicy policy : policies) {
policy.setMaxMergeDocs(maxMergeDocs);
}
}
int mergeFactor = settings.getAsInt(INDEX_MERGE_POLICY_MERGE_FACTOR, LogDocMergePolicyProvider.this.mergeFactor);
if (mergeFactor != LogDocMergePolicyProvider.this.mergeFactor) {
logger.info("updating merge_factor from [{}] to [{}]", LogDocMergePolicyProvider.this.mergeFactor, mergeFactor);
LogDocMergePolicyProvider.this.mergeFactor = mergeFactor;
for (CustomLogDocMergePolicy policy : policies) {
policy.setMergeFactor(mergeFactor);
}
}
final double noCFSRatio = parseNoCFSRatio(settings.get(INDEX_COMPOUND_FORMAT, Double.toString(LogDocMergePolicyProvider.this.noCFSRatio)));
if (noCFSRatio != LogDocMergePolicyProvider.this.noCFSRatio) {
logger.info("updating index.compound_format from [{}] to [{}]", formatNoCFSRatio(LogDocMergePolicyProvider.this.noCFSRatio), formatNoCFSRatio(noCFSRatio));
LogDocMergePolicyProvider.this.noCFSRatio = noCFSRatio;
for (CustomLogDocMergePolicy policy : policies) {
policy.setNoCFSRatio(noCFSRatio);
}
}
}
}
public static class CustomLogDocMergePolicy extends LogDocMergePolicy {
private final LogDocMergePolicyProvider provider;
public CustomLogDocMergePolicy(LogDocMergePolicyProvider provider) {
super();
this.provider = provider;
}
@Override
public void close() {
super.close();
provider.policies.remove(this);
}
}
public static class EnableMergeLogDocMergePolicy extends CustomLogDocMergePolicy {
public EnableMergeLogDocMergePolicy(LogDocMergePolicyProvider provider) {
super(provider);
}
@Override
public MergeSpecification findMerges(MergeTrigger trigger, SegmentInfos infos) throws IOException {
// we don't enable merges while indexing documents, we do them in the background
if (trigger == MergeTrigger.SEGMENT_FLUSH) {
return null;
}
return super.findMerges(trigger, infos);
}
@Override
public MergePolicy clone() {
// Lucene IW makes a clone internally but since we hold on to this instance
// the clone will just be the identity.
return this;
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_merge_policy_LogDocMergePolicyProvider.java
|
7 |
@Component("blCustomerPasswordCustomPersistenceHandler")
public class CustomerPasswordCustomPersistenceHandler extends CustomPersistenceHandlerAdapter {
@Resource(name="blCustomerService")
protected CustomerService customerService;
@Override
public Boolean canHandleUpdate(PersistencePackage persistencePackage) {
String[] customCriteria = persistencePackage.getCustomCriteria();
return customCriteria != null && customCriteria.length > 0 && customCriteria[0].equals("passwordUpdate");
}
@Override
public Entity update(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException {
Entity entity = persistencePackage.getEntity();
Customer customer = customerService.readCustomerByUsername(entity.findProperty("username").getValue());
if (StringUtils.isEmpty(customer.getEmailAddress())) {
throw new ServiceException("Unable to update password because an email address is not available for this customer. An email address is required to send the customer the new system generated password.");
}
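// build a reset request; the system generates a new password and emails it to the customer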
PasswordReset passwordReset = new PasswordReset();
passwordReset.setUsername(entity.findProperty("username").getValue());
passwordReset.setPasswordChangeRequired(false);
passwordReset.setEmail(customer.getEmailAddress());
passwordReset.setPasswordLength(22);
passwordReset.setSendResetEmailReliableAsync(false);
customer = customerService.resetPassword(passwordReset);
return entity;
}
}
| 0true
|
admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_server_service_handler_CustomerPasswordCustomPersistenceHandler.java
|
355 |
public interface OUserObject2RecordHandler {
/**
* Returns the record associated to a user object. If iCreateIfNotAvailable is true, then a new record instance will be created
* transparently.
*
* @param iUserObject
* User object
* @param iCreateIfNotAvailable
* Create the record if not available
* @return The record associated
*/
public ORecordInternal<?> getRecordByUserObject(Object iUserObject, boolean iCreateIfNotAvailable);
/**
* Returns the user object associated to a record. If the record is not loaded yet, iFetchPlan will be used as fetch plan.
*
* @param iRecord
* Record
* @param iFetchPlan
* If the record is not loaded yet, use this as fetch plan
* @return The user object associated
*/
public Object getUserObjectByRecord(OIdentifiable iRecord, String iFetchPlan);
/**
* Tells if a user object exists for a certain RecordId.
*/
public boolean existsUserObjectByRID(ORID iRID);
/**
* Registers the association between a user object and a record.
*
* @param iUserObject
* User object
* @param iRecord
* record
*/
public void registerUserObject(final Object iUserObject, final ORecordInternal<?> iRecord);
/**
* Registers the saved linked record. Needed only to make the old object database implementation work.
*
* @param iRecord
* record
*/
public void registerUserObjectAfterLinkSave(final ORecordInternal<?> iRecord);
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_db_OUserObject2RecordHandler.java
|
57 |
public class HighAvailabilityConsoleLogger
implements ClusterMemberListener, ClusterListener, AvailabilityGuard.AvailabilityListener
{
private ConsoleLogger console;
private InstanceId myId;
private URI myUri;
public HighAvailabilityConsoleLogger( ConsoleLogger console, InstanceId myId )
{
this.console = console;
this.myId = myId;
}
// Cluster events
/**
* Logged when the instance itself joins or rejoins a cluster
*
* @param clusterConfiguration
*/
@Override
public void enteredCluster( ClusterConfiguration clusterConfiguration )
{
myUri = clusterConfiguration.getUriForId( myId );
console.log( String.format( "Instance %s joined the cluster", printId( myId, myUri )) );
}
/**
* Logged when the instance itself leaves the cluster
*/
@Override
public void leftCluster()
{
console.log( String.format( "Instance %s left the cluster", printId( myId, myUri ) ) );
}
/**
* Logged when another instance joins the cluster
*
* @param instanceId
* @param member
*/
@Override
public void joinedCluster( InstanceId instanceId, URI member )
{
console.log( "Instance " + printId(instanceId, member) + " joined the cluster" );
}
/**
* Logged when another instance leaves the cluster
*
* @param instanceId
*/
@Override
public void leftCluster( InstanceId instanceId )
{
console.log( "Instance " + instanceId + " has left the cluster" );
}
/**
* Logged when an instance is elected for a role, such as coordinator of a cluster.
*
* @param role
* @param instanceId
* @param electedMember
*/
@Override
public void elected( String role, InstanceId instanceId, URI electedMember )
{
console.log( "Instance " + printId( instanceId, electedMember ) + "was elected as " + role );
}
/**
* Logged when an instance is demoted from a role.
*
* @param role
* @param instanceId
* @param electedMember
*/
@Override
public void unelected( String role, InstanceId instanceId, URI electedMember )
{
console.log( "Instance " + printId( instanceId, electedMember ) + "was demoted as " + role );
}
// HA events
@Override
public void coordinatorIsElected( InstanceId coordinatorId )
{
}
/**
* Logged when a member becomes available as a role, such as MASTER or SLAVE.
*
* @param role
* @param availableId the role connection information for the new role holder
* @param atUri the URI at which the instance is available at
*/
@Override
public void memberIsAvailable( String role, InstanceId availableId, URI atUri )
{
console.log( "Instance " + printId( availableId, atUri ) + "is available as " + role + " at " + atUri.toASCIIString() );
}
/**
* Logged when a member becomes unavailable as a role, such as MASTER or SLAVE.
*
* @param role The role for which the member is unavailable
* @param unavailableId The id of the member which became unavailable for that role
*/
@Override
public void memberIsUnavailable( String role, InstanceId unavailableId )
{
console.log( "Instance " + printId( unavailableId, null ) + "is unavailable as " + role );
}
/**
* Logged when another instance is detected as being failed.
*
* @param instanceId
*/
@Override
public void memberIsFailed( InstanceId instanceId )
{
console.log( "Instance " + printId( instanceId, null ) + "has failed" );
}
/**
* Logged when another instance is detected as being alive again.
*
* @param instanceId
*/
@Override
public void memberIsAlive( InstanceId instanceId )
{
console.log( "Instance " + printId( instanceId, null ) + "is alive" );
}
// InstanceAccessGuard events
/**
* Logged when users are allowed to access the database for transactions.
*/
@Override
public void available()
{
console.log( "Database available for write transactions" );
}
/**
* Logged when users are not allowed to access the database for transactions.
*/
@Override
public void unavailable()
{
console.log( "Write transactions to database disabled" );
}
private String printId( InstanceId id, URI member )
{
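// prefer the memberName URI parameter when present, otherwise fall back to the numeric id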
String memberName = member == null ? null : parameter( "memberName" ).apply( member );
String memberNameOrId = memberName == null ? id.toString() : memberName;
return memberNameOrId + (id.equals( myId ) ? " (this server) " : " ");
}
}
| 1no label
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_HighAvailabilityConsoleLogger.java
|
337 |
public class NodesRestartResponse extends NodesOperationResponse<NodesRestartResponse.NodeRestartResponse> {
NodesRestartResponse() {
}
public NodesRestartResponse(ClusterName clusterName, NodeRestartResponse[] nodes) {
super(clusterName, nodes);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
nodes = new NodeRestartResponse[in.readVInt()];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = NodeRestartResponse.readNodeRestartResponse(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(nodes.length);
for (NodeRestartResponse node : nodes) {
node.writeTo(out);
}
}
public static class NodeRestartResponse extends NodeOperationResponse {
NodeRestartResponse() {
}
public NodeRestartResponse(DiscoveryNode node) {
super(node);
}
public static NodeRestartResponse readNodeRestartResponse(StreamInput in) throws IOException {
NodeRestartResponse res = new NodeRestartResponse();
res.readFrom(in);
return res;
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_restart_NodesRestartResponse.java
|
38 |
new Visitor() {
@Override
public void visit(Tree.StaticMemberOrTypeExpression that) {
Tree.TypeArguments tal = that.getTypeArguments();
Integer startIndex = tal==null ? null : tal.getStartIndex();
if (startIndex!=null && startIndex2!=null &&
startIndex.intValue()==startIndex2.intValue()) {
addMemberNameProposal(offset, "", that, result);
}
super.visit(that);
}
public void visit(Tree.SimpleType that) {
Tree.TypeArgumentList tal = that.getTypeArgumentList();
Integer startIndex = tal==null ? null : tal.getStartIndex();
if (startIndex!=null && startIndex2!=null &&
startIndex.intValue()==startIndex2.intValue()) {
addMemberNameProposal(offset, "", that, result);
}
super.visit(that);
}
}.visit(cpc.getRootNode());
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_MemberNameCompletions.java
|
1,439 |
public class RepositoryMetaData {
private final String name;
private final String type;
private final Settings settings;
/**
* Constructs new repository metadata
*
* @param name repository name
* @param type repository type
* @param settings repository settings
*/
public RepositoryMetaData(String name, String type, Settings settings) {
this.name = name;
this.type = type;
this.settings = settings;
}
/**
* Returns repository name
*
* @return repository name
*/
public String name() {
return this.name;
}
/**
* Returns repository type
*
* @return repository type
*/
public String type() {
return this.type;
}
/**
* Returns repository settings
*
* @return repository settings
*/
public Settings settings() {
return this.settings;
}
/**
* Reads repository metadata from stream input
*
* @param in stream input
* @return repository metadata
* @throws IOException
*/
public static RepositoryMetaData readFrom(StreamInput in) throws IOException {
String name = in.readString();
String type = in.readString();
Settings settings = ImmutableSettings.readSettingsFromStream(in);
return new RepositoryMetaData(name, type, settings);
}
/**
* Writes repository metadata to stream output
*
* @param out stream output
* @throws IOException
*/
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
out.writeString(type);
ImmutableSettings.writeSettingsToStream(settings, out);
}
}
| 0true
|
src_main_java_org_elasticsearch_cluster_metadata_RepositoryMetaData.java
|
274 |
public interface JMSEmailServiceProducer extends EmailServiceProducer {
/**
* @return the emailServiceTemplate
*/
public JmsTemplate getEmailServiceTemplate();
/**
* @param emailServiceTemplate the emailServiceTemplate to set
*/
public void setEmailServiceTemplate(JmsTemplate emailServiceTemplate);
/**
* @return the emailServiceDestination
*/
public Destination getEmailServiceDestination();
/**
* @param emailServiceDestination the emailServiceDestination to set
*/
public void setEmailServiceDestination(Destination emailServiceDestination);
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_email_service_jms_JMSEmailServiceProducer.java
|
776 |
public class ORecordIteratorClusters<REC extends ORecordInternal<?>> extends OIdentifiableIterator<REC> {
protected int[] clusterIds;
protected int currentClusterIdx;
protected ORecord<?> currentRecord;
protected ORID beginRange;
protected ORID endRange;
public ORecordIteratorClusters(final ODatabaseRecord iDatabase, final ODatabaseRecord iLowLevelDatabase, final int[] iClusterIds,
final boolean iUseCache, final boolean iterateThroughTombstones) {
super(iDatabase, iLowLevelDatabase, iUseCache, iterateThroughTombstones);
clusterIds = iClusterIds;
config();
}
protected ORecordIteratorClusters(final ODatabaseRecord iDatabase, final ODatabaseRecord iLowLevelDatabase,
final boolean iUseCache, final boolean iterateThroughTombstones) {
super(iDatabase, iLowLevelDatabase, iUseCache, iterateThroughTombstones);
}
public ORecordIteratorClusters<REC> setRange(final ORID iBegin, final ORID iEnd) {
beginRange = iBegin;
endRange = iEnd;
if (currentRecord != null && outsideOfTheRange(currentRecord.getIdentity())) {
currentRecord = null;
}
return this;
}
@Override
public boolean hasPrevious() {
checkDirection(false);
if (currentRecord != null)
return true;
if (limit > -1 && browsedRecords >= limit)
// LIMIT REACHED
return false;
if (browsedRecords >= totalAvailableRecords)
return false;
if (liveUpdated)
updateClusterRange();
ORecordInternal<?> record = getRecord();
// ITERATE UNTIL THE PREVIOUS GOOD RECORD
while (currentClusterIdx > -1) {
while (prevPosition()) {
currentRecord = readCurrentRecord(record, 0);
if (currentRecord != null)
if (include(currentRecord))
// FOUND
return true;
}
// CLUSTER EXHAUSTED, TRY WITH THE PREVIOUS ONE
currentClusterIdx--;
updateClusterRange();
}
if (txEntries != null && txEntries.size() - (currentTxEntryPosition + 1) > 0)
return true;
currentRecord = null;
return false;
}
public boolean hasNext() {
checkDirection(true);
if (Thread.interrupted())
// INTERRUPTED
return false;
if (currentRecord != null)
return true;
if (limit > -1 && browsedRecords >= limit)
// LIMIT REACHED
return false;
if (browsedRecords >= totalAvailableRecords)
return false;
// COMPUTE THE NUMBER OF RECORDS TO BROWSE
if (liveUpdated)
updateClusterRange();
ORecordInternal<?> record = getRecord();
// ITERATE UNTIL THE NEXT GOOD RECORD
while (currentClusterIdx < clusterIds.length) {
while (nextPosition()) {
if (outsideOfTheRange(current))
continue;
currentRecord = readCurrentRecord(record, 0);
if (currentRecord != null)
if (include(currentRecord))
// FOUND
return true;
}
// CLUSTER EXHAUSTED, TRY WITH THE NEXT ONE
currentClusterIdx++;
if (currentClusterIdx >= clusterIds.length)
break;
updateClusterRange();
}
// CHECK IN TX IF ANY
if (txEntries != null && txEntries.size() - (currentTxEntryPosition + 1) > 0)
return true;
currentRecord = null;
return false;
}
private boolean outsideOfTheRange(ORID orid) {
if (beginRange != null && orid.compareTo(beginRange) < 0)
return true;
if (endRange != null && orid.compareTo(endRange) > 0)
return true;
return false;
}
/**
* Return the element at the current position and move forward the cursor to the next position available.
*
* @return the next record found, otherwise the NoSuchElementException exception is thrown when no more records are found.
*/
@SuppressWarnings("unchecked")
public REC next() {
checkDirection(true);
if (currentRecord != null)
try {
// RETURN LAST LOADED RECORD
return (REC) currentRecord;
} finally {
currentRecord = null;
}
ORecordInternal<?> record;
// MOVE FORWARD IN THE CURRENT CLUSTER
while (hasNext()) {
if (currentRecord != null)
try {
// RETURN LAST LOADED RECORD
return (REC) currentRecord;
} finally {
currentRecord = null;
}
record = getTransactionEntry();
if (record == null)
record = readCurrentRecord(null, +1);
if (record != null)
// FOUND
if (include(record))
return (REC) record;
}
record = getTransactionEntry();
if (record != null)
return (REC) record;
throw new NoSuchElementException("Direction: forward, last position was: " + current + ", range: " + beginRange + "-"
+ endRange);
}
/**
* Return the element at the current position and move backward the cursor to the previous position available.
*
* @return the previous record found, otherwise the NoSuchElementException exception is thrown when no more records are found.
*/
@SuppressWarnings("unchecked")
@Override
public REC previous() {
checkDirection(false);
if (currentRecord != null)
try {
// RETURN LAST LOADED RECORD
return (REC) currentRecord;
} finally {
currentRecord = null;
}
ORecordInternal<?> record = getRecord();
// MOVE BACKWARD IN THE CURRENT CLUSTER
while (hasPrevious()) {
if (currentRecord != null)
try {
// RETURN LAST LOADED RECORD
return (REC) currentRecord;
} finally {
currentRecord = null;
}
if (record == null)
record = readCurrentRecord(null, -1);
if (record != null)
// FOUND
if (include(record))
return (REC) record;
}
record = getTransactionEntry();
if (record != null)
return (REC) record;
return null;
}
protected boolean include(final ORecord<?> iRecord) {
return true;
}
/**
* Move the iterator to the begin of the range. If no range was specified move to the first record of the cluster.
*
* @return The object itself
*/
@Override
public ORecordIteratorClusters<REC> begin() {
currentClusterIdx = 0;
current.clusterId = clusterIds[currentClusterIdx];
if (liveUpdated)
updateClusterRange();
resetCurrentPosition();
nextPosition();
final ORecordInternal<?> record = getRecord();
currentRecord = readCurrentRecord(record, 0);
if (currentRecord != null && !include(currentRecord)) {
currentRecord = null;
hasNext();
}
return this;
}
/**
* Move the iterator to the end of the range. If no range was specified move to the last record of the cluster.
*
* @return The object itself
*/
@Override
public ORecordIteratorClusters<REC> last() {
currentClusterIdx = clusterIds.length - 1;
if (liveUpdated)
updateClusterRange();
current.clusterId = clusterIds[currentClusterIdx];
resetCurrentPosition();
prevPosition();
final ORecordInternal<?> record = getRecord();
currentRecord = readCurrentRecord(record, 0);
if (currentRecord != null && !include(currentRecord)) {
currentRecord = null;
hasPrevious();
}
return this;
}
/**
* Tell to the iterator that the upper limit must be checked at every cycle. Useful when concurrent deletes or additions change
* the size of the cluster while you're browsing it. Default is false.
*
* @param iLiveUpdated
* True to activate it, otherwise false (default)
* @see #isLiveUpdated()
*/
@Override
public ORecordIteratorClusters<REC> setLiveUpdated(boolean iLiveUpdated) {
super.setLiveUpdated(iLiveUpdated);
if (iLiveUpdated) {
firstClusterEntry = OClusterPositionFactory.INSTANCE.valueOf(0);
lastClusterEntry = OClusterPositionFactory.INSTANCE.getMaxValue();
} else {
updateClusterRange();
}
return this;
}
protected void updateClusterRange() {
current.clusterId = clusterIds[currentClusterIdx];
final OClusterPosition[] range = database.getStorage().getClusterDataRange(current.clusterId);
firstClusterEntry = range[0];
lastClusterEntry = range[1];
resetCurrentPosition();
}
protected void config() {
if (clusterIds.length == 0)
return;
currentClusterIdx = 0; // START FROM THE FIRST CLUSTER
updateClusterRange();
totalAvailableRecords = database.countClusterElements(clusterIds, isIterateThroughTombstones());
txEntries = database.getTransaction().getNewRecordEntriesByClusterIds(clusterIds);
if (txEntries != null)
// ADJUST TOTAL ELEMENT BASED ON CURRENT TRANSACTION'S ENTRIES
for (ORecordOperation entry : txEntries) {
if (entry.getRecord().getIdentity().isTemporary() && entry.type != ORecordOperation.DELETED)
totalAvailableRecords++;
else if (entry.type == ORecordOperation.DELETED)
totalAvailableRecords--;
}
begin();
}
@Override
public String toString() {
return String.format("ORecordIteratorCluster.clusters(%s).currentRecord(%s).range(%s-%s)", Arrays.toString(clusterIds),
currentRecord, beginRange, endRange);
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_iterator_ORecordIteratorClusters.java
|
194 |
public class TruncateTokenFilterTests extends ElasticsearchTestCase {
@Test
public void simpleTest() throws IOException {
Analyzer analyzer = new Analyzer() {
@Override
protected TokenStreamComponents createComponents(String fieldName,
Reader reader) {
Tokenizer t = new WhitespaceTokenizer(Lucene.VERSION, reader);
return new TokenStreamComponents(t, new TruncateTokenFilter(t, 3));
}
};
TokenStream test = analyzer.tokenStream("test", "a bb ccc dddd eeeee");
test.reset();
CharTermAttribute termAttribute = test.addAttribute(CharTermAttribute.class);
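// tokens longer than 3 characters are truncated to their first 3 characters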
assertThat(test.incrementToken(), equalTo(true));
assertThat(termAttribute.toString(), equalTo("a"));
assertThat(test.incrementToken(), equalTo(true));
assertThat(termAttribute.toString(), equalTo("bb"));
assertThat(test.incrementToken(), equalTo(true));
assertThat(termAttribute.toString(), equalTo("ccc"));
assertThat(test.incrementToken(), equalTo(true));
assertThat(termAttribute.toString(), equalTo("ddd"));
assertThat(test.incrementToken(), equalTo(true));
assertThat(termAttribute.toString(), equalTo("eee"));
assertThat(test.incrementToken(), equalTo(false));
test.end();
test.close();
}
}
| 0true
|
src_test_java_org_apache_lucene_analysis_miscellaneous_TruncateTokenFilterTests.java
|
369 |
public class GetRepositoriesRequest extends MasterNodeReadOperationRequest<GetRepositoriesRequest> {
private String[] repositories = Strings.EMPTY_ARRAY;
GetRepositoriesRequest() {
}
/**
* Constructs a new get repositories request with a list of repositories.
* <p/>
* If the list of repositories is empty or it contains a single element "_all", all registered repositories
* are returned.
*
* @param repositories list of repositories
*/
public GetRepositoriesRequest(String[] repositories) {
this.repositories = repositories;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (repositories == null) {
validationException = addValidationError("repositories is null", validationException);
}
return validationException;
}
/**
* The names of the repositories.
*
* @return list of repositories
*/
public String[] repositories() {
return this.repositories;
}
/**
* Sets the list of repositories.
* <p/>
* If the list of repositories is empty or it contains a single element "_all", all registered repositories
* are returned.
*
* @param repositories list of repositories
* @return this request
*/
public GetRepositoriesRequest repositories(String[] repositories) {
this.repositories = repositories;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
repositories = in.readStringArray();
readLocal(in, Version.V_1_0_0_RC2);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArray(repositories);
writeLocal(out, Version.V_1_0_0_RC2);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_repositories_get_GetRepositoriesRequest.java
|
317 |
public class NodesHotThreadsAction extends ClusterAction<NodesHotThreadsRequest, NodesHotThreadsResponse, NodesHotThreadsRequestBuilder> {
public static final NodesHotThreadsAction INSTANCE = new NodesHotThreadsAction();
public static final String NAME = "cluster/nodes/hot_threads";
private NodesHotThreadsAction() {
super(NAME);
}
@Override
public NodesHotThreadsResponse newResponse() {
return new NodesHotThreadsResponse();
}
@Override
public NodesHotThreadsRequestBuilder newRequestBuilder(ClusterAdminClient client) {
return new NodesHotThreadsRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_hotthreads_NodesHotThreadsAction.java
|
1,967 |
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_CHALLENGE_QUESTION")
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@AdminPresentationClass(friendlyName = "ChallengeQuestionImpl_baseChallengeQuestion")
public class ChallengeQuestionImpl implements ChallengeQuestion {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "ChallengeQuestionId")
@GenericGenerator(
name="ChallengeQuestionId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="ChallengeQuestionImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.profile.core.domain.ChallengeQuestionImpl")
}
)
@Column(name = "QUESTION_ID")
protected Long id;
@Column(name = "QUESTION", nullable=false)
@AdminPresentation(friendlyName = "ChallengeQuestionImpl_Challenge_Question", group = "ChallengeQuestionImpl_Customer")
protected String question;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public String getQuestion() {
return question;
}
@Override
public void setQuestion(String question) {
this.question = question;
}
@Override
public String toString() {
return question;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((question == null) ? 0 : question.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
ChallengeQuestionImpl other = (ChallengeQuestionImpl) obj;
if (id != null && other.id != null) {
return id.equals(other.id);
}
if (question == null) {
if (other.question != null)
return false;
} else if (!question.equals(other.question))
return false;
return true;
}
}
| 1no label
|
core_broadleaf-profile_src_main_java_org_broadleafcommerce_profile_core_domain_ChallengeQuestionImpl.java
|
295 |
public abstract class OTraverseAbstractProcess<T> extends OCommandProcess<OTraverse, T, OIdentifiable> {
public OTraverseAbstractProcess(final OTraverse iCommand, final T iTarget) {
super(iCommand, iTarget);
command.getContext().push(this);
}
public abstract String getStatus();
public OIdentifiable drop() {
command.getContext().pop();
return null;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_command_traverse_OTraverseAbstractProcess.java
|
534 |
@Deprecated
public class GatewaySnapshotResponse extends BroadcastOperationResponse {
GatewaySnapshotResponse() {
}
GatewaySnapshotResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
super(totalShards, successfulShards, failedShards, shardFailures);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_gateway_snapshot_GatewaySnapshotResponse.java
|
797 |
public class OFunction {
public static final String CLASS_NAME = "OFunction";
protected ODocument document;
/**
* Creates a new function.
*/
public OFunction() {
document = new ODocument(CLASS_NAME);
setLanguage("SQL");
}
/**
* Creates a new function wrapping the saved document.
*
* @param iDocument
* Document to assign
*/
public OFunction(final ODocument iDocument) {
document = iDocument;
}
/**
* Loads a function.
*
* @param iRid
* RID of the function to load
*/
public OFunction(final ORecordId iRid) {
document = ODatabaseRecordThreadLocal.INSTANCE.get().load(iRid);
}
public String getName() {
return document.field("name");
}
public OFunction setName(final String iName) {
document.field("name", iName);
return this;
}
public String getCode() {
return document.field("code");
}
public OFunction setCode(final String iCode) {
document.field("code", iCode);
saveChanges();
return this;
}
public String getLanguage() {
return document.field("language");
}
public OFunction setLanguage(final String iLanguage) {
document.field("language", iLanguage);
return this;
}
public List<String> getParameters() {
return document.field("parameters");
}
public OFunction setParameters(final List<String> iParameters) {
document.field("parameters", iParameters);
return this;
}
public boolean isIdempotent() {
final Boolean idempotent = document.field("idempotent");
return idempotent != null && idempotent;
}
public OFunction setIdempotent(final boolean iIdempotent) {
document.field("idempotent", iIdempotent);
saveChanges();
return this;
}
public Object execute(final Object... iArgs) {
return executeInContext(null, iArgs);
}
public Object executeInContext(final OCommandContext iContext, final Object... iArgs) {
final OCommandExecutorFunction command = new OCommandExecutorFunction();
command.parse(new OCommandFunction(getName()));
final List<String> params = getParameters();
// CONVERT PARAMETERS IN A MAP
Map<Object, Object> args = null;
if (iArgs.length > 0) {
args = new LinkedHashMap<Object, Object>();
for (int i = 0; i < iArgs.length; ++i) {
// final Object argValue = ORecordSerializerStringAbstract.getTypeValue(iArgs[i].toString());
final Object argValue = iArgs[i];
if (params != null && i < params.size())
args.put(params.get(i), argValue);
else
args.put("param" + i, argValue);
}
}
return command.executeInContext(iContext, args);
}
public Object execute(final Map<Object, Object> iArgs) {
final long start = Orient.instance().getProfiler().startChrono();
final OCommandExecutorScript command = new OCommandExecutorScript();
command.parse(new OCommandScript(getLanguage(), getCode()));
final Object result = command.execute(iArgs);
if (Orient.instance().getProfiler().isRecording())
Orient
.instance()
.getProfiler()
.stopChrono("db." + ODatabaseRecordThreadLocal.INSTANCE.get().getName() + ".function.execute",
"Time to execute a function", start, "db.*.function.execute");
return result;
}
public ORID getId() {
return document.getIdentity();
}
@Override
public String toString() {
return getName();
}
/**
* Save pending changes if any.
*/
private void saveChanges() {
document.save();
}
}
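// Usage sketch (illustrative addition, not part of the original source). It assumes an
// ODatabaseDocument is already open and bound to the current thread, since both
// setCode() (via saveChanges()) and execute() rely on ODatabaseRecordThreadLocal.
class OFunctionUsageExample {
  Object defineAndRun() {
    final OFunction sum = new OFunction()
        .setName("sum")
        .setLanguage("javascript")
        .setParameters(java.util.Arrays.asList("a", "b"))
        .setCode("return a + b;"); // setCode() also persists the backing document
    return sum.execute(3, 4); // positional args are mapped to parameters "a" and "b"
  }
}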
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_metadata_function_OFunction.java
|
3,433 |
private static class NoResponseHandler implements ResponseHandler {
@Override
public void sendResponse(final Object obj) {
}
@Override
public boolean isLocal() {
return false;
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_spi_impl_ResponseHandlerFactory.java
|
869 |
threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
}
});
| 0true
|
src_main_java_org_elasticsearch_action_search_type_TransportSearchDfsQueryThenFetchAction.java
|
232 |
assertTrueEventually(new AssertTask() {
public void run() throws Exception {
assertTrue(map.containsKey(member1.getUuid()));
assertTrue(map.containsKey(member3.getUuid()));
}
});
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_executor_ClientExecutorServiceExecuteTest.java
|
286 |
public class PassthroughEncryptionModule implements EncryptionModule {
protected static final Logger LOG = LogManager.getLogger(PassthroughEncryptionModule.class);
protected RuntimeEnvironmentKeyResolver keyResolver = new SystemPropertyRuntimeEnvironmentKeyResolver();
public PassthroughEncryptionModule() {
if ("production".equals(keyResolver.resolveRuntimeEnvironmentKey())) {
LOG.warn("This passthrough encryption module provides NO ENCRYPTION and should NOT be used in production.");
}
}
public String decrypt(String cipherText) {
return cipherText;
}
public String encrypt(String plainText) {
return plainText;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_encryption_PassthroughEncryptionModule.java
|
2,051 |
public class EvictOperation extends LockAwareOperation implements BackupAwareOperation {
boolean evicted = false;
boolean asyncBackup = false;
public EvictOperation(String name, Data dataKey, boolean asyncBackup) {
super(name, dataKey);
this.asyncBackup = asyncBackup;
}
public EvictOperation() {
}
public void run() {
dataValue = mapService.toData(recordStore.evict(dataKey));
evicted = dataValue != null;
}
@Override
public Object getResponse() {
return evicted;
}
@Override
public void onWaitExpire() {
getResponseHandler().sendResponse(false);
}
public Operation getBackupOperation() {
return new RemoveBackupOperation(name, dataKey);
}
public int getAsyncBackupCount() {
if (asyncBackup) {
return mapService.getMapContainer(name).getTotalBackupCount();
} else {
return mapService.getMapContainer(name).getAsyncBackupCount();
}
}
public int getSyncBackupCount() {
if (asyncBackup) {
return 0;
} else {
return mapService.getMapContainer(name).getBackupCount();
}
}
public boolean shouldBackup() {
return evicted;
}
public void afterRun() {
if (evicted) {
mapService.interceptAfterRemove(name, dataValue);
EntryEventType eventType = EntryEventType.EVICTED;
mapService.publishEvent(getCallerAddress(), name, eventType, dataKey, dataValue, null);
invalidateNearCaches();
}
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeBoolean(asyncBackup);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
asyncBackup = in.readBoolean();
}
@Override
public String toString() {
return "EvictOperation{" + name + "}";
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_operation_EvictOperation.java
|
1,227 |
SOFT_THREAD_LOCAL {
@Override
<T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
return threadLocal(softFactory(dequeFactory(c, limit / estimatedThreadPoolSize)));
}
},
| 0true
|
src_main_java_org_elasticsearch_cache_recycler_PageCacheRecycler.java
|
343 |
public class NodeValueMerge extends BaseHandler {
protected String delimiter = " ";
protected String regex = "[\\s\\n\\r]+";
public Node[] merge(List<Node> nodeList1, List<Node> nodeList2, List<Node> exhaustedNodes) {
if (CollectionUtils.isEmpty(nodeList1) || CollectionUtils.isEmpty(nodeList2)) {
return null;
}
Node node1 = nodeList1.get(0);
Node node2 = nodeList2.get(0);
String[] items1 = node1.getNodeValue().split(getRegEx());
String[] items2 = node2.getNodeValue().split(getRegEx());
Set<String> finalItems = new LinkedHashSet<String>();
for (String anItems1 : items1) {
finalItems.add(anItems1.trim());
}
for (String anItems2 : items2) {
finalItems.add(anItems2.trim());
}
StringBuilder sb = new StringBuilder();
Iterator<String> itr = finalItems.iterator();
while (itr.hasNext()) {
sb.append(itr.next());
if (itr.hasNext()) {
sb.append(getDelimiter());
}
}
node1.setNodeValue(sb.toString());
node2.setNodeValue(sb.toString());
Node[] response = new Node[nodeList2.size()];
for (int j = 0; j < response.length; j++) {
response[j] = nodeList2.get(j);
}
return response;
}
public String getDelimiter() {
return delimiter;
}
public String getRegEx() {
return regex;
}
}
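// Worked example (illustrative): merging node values "a b c" and "b c d" splits both on the
// whitespace regex, unions the tokens in encounter order via the LinkedHashSet, and writes
// "a b c d" back into both nodes; the returned array mirrors nodeList2.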
| 0true
|
common_src_main_java_org_broadleafcommerce_common_extensibility_context_merge_handlers_NodeValueMerge.java
|
1,348 |
private final class LogSegment implements Comparable<LogSegment> {
private final RandomAccessFile rndFile;
private final File file;
private long filledUpTo;
private final long order;
private final int maxPagesCacheSize;
private boolean closed;
private OWALPage currentPage;
private final ConcurrentLinkedQueue<OWALPage> pagesCache = new ConcurrentLinkedQueue<OWALPage>();
private long nextPositionToFlush;
private long flushId;
private final ScheduledExecutorService commitExecutor = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
@Override
public Thread newThread(Runnable r) {
Thread thread = new Thread(r);
thread.setDaemon(true);
thread.setName("WAL Flush Task");
return thread;
}
});
private OLogSequenceNumber last = null;
private volatile boolean flushNewData = true;
private LogSegment(File file, int maxPagesCacheSize) throws IOException {
this.file = file;
this.maxPagesCacheSize = maxPagesCacheSize;
order = extractOrder(file.getName());
closed = false;
rndFile = new RandomAccessFile(file, "rw");
}
public void startFlush() {
if (commitDelay > 0)
commitExecutor.scheduleAtFixedRate(new FlushTask(), commitDelay, commitDelay, TimeUnit.MILLISECONDS);
}
public void stopFlush(boolean flush) {
if (flush)
flush();
if (!commitExecutor.isShutdown()) {
commitExecutor.shutdown();
try {
if (!commitExecutor
.awaitTermination(OGlobalConfiguration.WAL_SHUTDOWN_TIMEOUT.getValueAsInteger(), TimeUnit.MILLISECONDS))
throw new OStorageException("WAL flush task for " + getPath() + " segment can not be stopped.");
} catch (InterruptedException e) {
OLogManager.instance().error(this, "Can not shutdown background WAL commit thread.");
}
}
}
public long getOrder() {
return order;
}
public void init() throws IOException {
selfCheck();
initPageCache();
initLastPage();
}
private void initLastPage() throws IOException {
synchronized (rndFile) {
long pagesCount = rndFile.length() / OWALPage.PAGE_SIZE;
long currentPage = pagesCount - 1;
if (currentPage < 0)
return;
do {
rndFile.seek(currentPage * OWALPage.PAGE_SIZE);
byte[] content = new byte[OWALPage.PAGE_SIZE];
rndFile.readFully(content);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(content);
try {
final OWALPage page = new OWALPage(pointer, false);
int lastPosition = findLastRecord(page, true);
if (lastPosition > -1) {
last = new OLogSequenceNumber(order, currentPage * OWALPage.PAGE_SIZE + lastPosition);
return;
}
currentPage--;
} finally {
pointer.free();
}
} while (currentPage >= 0);
}
}
private void initPageCache() throws IOException {
synchronized (rndFile) {
long pagesCount = rndFile.length() / OWALPage.PAGE_SIZE;
if (pagesCount == 0)
return;
rndFile.seek((pagesCount - 1) * OWALPage.PAGE_SIZE);
byte[] content = new byte[OWALPage.PAGE_SIZE];
rndFile.readFully(content);
flushId = OLongSerializer.INSTANCE.deserializeNative(content, OWALPage.FLUSH_ID_OFFSET);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(content);
currentPage = new OWALPage(pointer, false);
filledUpTo = (pagesCount - 1) * OWALPage.PAGE_SIZE + currentPage.getFilledUpTo();
nextPositionToFlush = (pagesCount - 1) * OWALPage.PAGE_SIZE;
pagesCache.add(currentPage);
}
}
private long extractOrder(String name) {
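// e.g. a segment file named "storage.42.wal" yields order 42: the token between
// the first two dots is parsed as a long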
int walOrderStartIndex = name.indexOf('.') + 1;
int walOrderEndIndex = name.indexOf('.', walOrderStartIndex);
String walOrder = name.substring(walOrderStartIndex, walOrderEndIndex);
try {
return Long.parseLong(walOrder);
} catch (NumberFormatException e) {
// should never happen: segment file names always carry a numeric order token
throw new IllegalStateException(e);
}
}
@Override
public int compareTo(LogSegment other) {
final long otherOrder = other.order;
if (order > otherOrder)
return 1;
else if (order < otherOrder)
return -1;
return 0;
}
public long filledUpTo() throws IOException {
return filledUpTo;
}
public OLogSequenceNumber begin() throws IOException {
if (!pagesCache.isEmpty())
return new OLogSequenceNumber(order, OWALPage.RECORDS_OFFSET);
if (rndFile.length() > 0)
return new OLogSequenceNumber(order, OWALPage.RECORDS_OFFSET);
return null;
}
public OLogSequenceNumber end() {
return last;
}
private int findLastRecord(OWALPage page, boolean skipTailRecords) {
int prevOffset = OWALPage.RECORDS_OFFSET;
int pageOffset = OWALPage.RECORDS_OFFSET;
int maxOffset = page.getFilledUpTo();
while (pageOffset < maxOffset) {
prevOffset = pageOffset;
pageOffset += page.getSerializedRecordSize(pageOffset);
}
if (skipTailRecords && page.recordTail(prevOffset))
return -1;
return prevOffset;
}
public void delete(boolean flush) throws IOException {
close(flush);
boolean deleted = file.delete();
while (!deleted) {
OMemoryWatchDog.freeMemoryForResourceCleanup(100);
deleted = !file.exists() || file.delete();
}
}
public String getPath() {
return file.getAbsolutePath();
}
public OLogSequenceNumber logRecord(byte[] record) throws IOException {
flushNewData = true;
int pageOffset = (int) (filledUpTo % OWALPage.PAGE_SIZE);
long pageIndex = filledUpTo / OWALPage.PAGE_SIZE;
if (pageOffset == 0 && pageIndex > 0)
pageIndex--;
int pos = 0;
boolean firstChunk = true;
OLogSequenceNumber lsn = null;
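// Records larger than the remaining page space are split into chunks: each loop
// iteration appends one chunk, allocating a fresh page when free space runs out.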
while (pos < record.length) {
if (currentPage == null) {
ODirectMemoryPointer pointer = new ODirectMemoryPointer(OWALPage.PAGE_SIZE);
currentPage = new OWALPage(pointer, true);
pagesCache.add(currentPage);
filledUpTo += OWALPage.RECORDS_OFFSET;
}
int freeSpace = currentPage.getFreeSpace();
if (freeSpace < OWALPage.MIN_RECORD_SIZE) {
filledUpTo += freeSpace + OWALPage.RECORDS_OFFSET;
ODirectMemoryPointer pointer = new ODirectMemoryPointer(OWALPage.PAGE_SIZE);
currentPage = new OWALPage(pointer, true);
pagesCache.add(currentPage);
pageIndex++;
freeSpace = currentPage.getFreeSpace();
}
final OWALPage walPage = currentPage;
synchronized (walPage) {
final int entrySize = OWALPage.calculateSerializedSize(record.length - pos);
int addedChunkOffset;
if (entrySize <= freeSpace) {
if (pos == 0)
addedChunkOffset = walPage.appendRecord(record, false, !firstChunk);
else
addedChunkOffset = walPage.appendRecord(Arrays.copyOfRange(record, pos, record.length), false, !firstChunk);
pos = record.length;
} else {
int chunkSize = OWALPage.calculateRecordSize(freeSpace);
if (chunkSize > record.length - pos)
chunkSize = record.length - pos;
addedChunkOffset = walPage.appendRecord(Arrays.copyOfRange(record, pos, pos + chunkSize), true, !firstChunk);
pos += chunkSize;
}
if (firstChunk) {
lsn = new OLogSequenceNumber(order, pageIndex * OWALPage.PAGE_SIZE + addedChunkOffset);
}
int spaceDiff = freeSpace - walPage.getFreeSpace();
filledUpTo += spaceDiff;
firstChunk = false;
}
}
if (pagesCache.size() > maxPagesCacheSize) {
OLogManager.instance().info(this, "Max cache limit is reached (%d vs. %d), sync flush is performed.", maxPagesCacheSize,
pagesCache.size());
flush();
}
last = lsn;
return last;
}
public byte[] readRecord(OLogSequenceNumber lsn) throws IOException {
assert lsn.getSegment() == order;
if (lsn.getPosition() >= filledUpTo)
return null;
if (flushedLsn == null || flushedLsn.compareTo(lsn) < 0)
flush();
byte[] record = null;
long pageIndex = lsn.getPosition() / OWALPage.PAGE_SIZE;
int pageOffset = (int) (lsn.getPosition() % OWALPage.PAGE_SIZE);
long pageCount = (filledUpTo + OWALPage.PAGE_SIZE - 1) / OWALPage.PAGE_SIZE;
while (pageIndex < pageCount) {
synchronized (rndFile) {
byte[] pageContent = new byte[OWALPage.PAGE_SIZE];
rndFile.seek(pageIndex * OWALPage.PAGE_SIZE);
rndFile.readFully(pageContent);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(pageContent);
try {
OWALPage page = new OWALPage(pointer, false);
byte[] content = page.getRecord(pageOffset);
if (record == null)
record = content;
else {
byte[] oldRecord = record;
record = new byte[record.length + content.length];
System.arraycopy(oldRecord, 0, record, 0, oldRecord.length);
System.arraycopy(content, 0, record, oldRecord.length, record.length - oldRecord.length);
}
if (page.mergeWithNextPage(pageOffset)) {
pageOffset = OWALPage.RECORDS_OFFSET;
pageIndex++;
} else
break;
} finally {
pointer.free();
}
}
}
return record;
}
public OLogSequenceNumber getNextLSN(OLogSequenceNumber lsn) throws IOException {
final byte[] record = readRecord(lsn);
if (record == null)
return null;
long pos = lsn.getPosition();
long pageIndex = pos / OWALPage.PAGE_SIZE;
int pageOffset = (int) (pos - pageIndex * OWALPage.PAGE_SIZE);
int restOfRecord = record.length;
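// Walk forward over the serialized chunks of this record, accumulating page and
// header overhead, until the position just past the record is reached.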
while (restOfRecord > 0) {
int entrySize = OWALPage.calculateSerializedSize(restOfRecord);
if (entrySize + pageOffset < OWALPage.PAGE_SIZE) {
if (entrySize + pageOffset <= OWALPage.PAGE_SIZE - OWALPage.MIN_RECORD_SIZE)
pos += entrySize;
else
pos += OWALPage.PAGE_SIZE - pageOffset + OWALPage.RECORDS_OFFSET;
break;
} else if (entrySize + pageOffset == OWALPage.PAGE_SIZE) {
pos += entrySize + OWALPage.RECORDS_OFFSET;
break;
} else {
int chunkSize = OWALPage.calculateRecordSize(OWALPage.PAGE_SIZE - pageOffset);
restOfRecord -= chunkSize;
pos += OWALPage.PAGE_SIZE - pageOffset + OWALPage.RECORDS_OFFSET;
pageOffset = OWALPage.RECORDS_OFFSET;
}
}
if (pos >= filledUpTo)
return null;
return new OLogSequenceNumber(order, pos);
}
public void close(boolean flush) throws IOException {
if (!closed) {
stopFlush(flush);
rndFile.close();
closed = true;
if (!pagesCache.isEmpty()) {
for (OWALPage page : pagesCache)
page.getPagePointer().free();
}
currentPage = null;
}
}
private void selfCheck() throws IOException {
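// Verifies the segment tail: pages are CRC-checked from the end backwards and any
// partially written or corrupted pages are truncated away before the WAL is reused.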
if (!pagesCache.isEmpty())
throw new IllegalStateException("WAL cache is not empty, we can not verify WAL after it was started to be used");
synchronized (rndFile) {
long pagesCount = rndFile.length() / OWALPage.PAGE_SIZE;
if (rndFile.length() % OWALPage.PAGE_SIZE > 0) {
OLogManager.instance().error(this, "Last WAL page was written partially, auto fix.");
rndFile.setLength(OWALPage.PAGE_SIZE * pagesCount);
}
long currentPage = pagesCount - 1;
CRC32 crc32 = new CRC32();
while (currentPage >= 0) {
crc32.reset();
byte[] content = new byte[OWALPage.PAGE_SIZE];
rndFile.seek(currentPage * OWALPage.PAGE_SIZE);
rndFile.readFully(content);
int pageCRC = OIntegerSerializer.INSTANCE.deserializeNative(content, 0);
crc32.update(content, OIntegerSerializer.INT_SIZE, OWALPage.PAGE_SIZE - OIntegerSerializer.INT_SIZE);
int calculatedCRC = (int) crc32.getValue();
if (pageCRC != calculatedCRC) {
OLogManager.instance().error(this, "%d WAL page has been broken and will be truncated.", currentPage);
currentPage--;
pagesCount = currentPage + 1;
rndFile.setLength(pagesCount * OWALPage.PAGE_SIZE);
} else
break;
}
if (currentPage < 0)
return;
byte[] content = new byte[OWALPage.PAGE_SIZE];
rndFile.seek(currentPage * OWALPage.PAGE_SIZE);
rndFile.readFully(content);
currentPage--;
long initialFlushId = OLongSerializer.INSTANCE.deserializeNative(content, OWALPage.FLUSH_ID_OFFSET);
long loadedFlushId = initialFlushId;
int flushedPagesCount = 1;
while (currentPage >= 0) {
content = new byte[OWALPage.PAGE_SIZE];
rndFile.seek(currentPage * OWALPage.PAGE_SIZE);
rndFile.readFully(content);
crc32.reset();
crc32.update(content, OIntegerSerializer.INT_SIZE, OWALPage.PAGE_SIZE - OIntegerSerializer.INT_SIZE);
int calculatedCRC = (int) crc32.getValue();
int pageCRC = OIntegerSerializer.INSTANCE.deserializeNative(content, 0);
if (pageCRC != calculatedCRC) {
OLogManager.instance().error(this, "%d WAL page has been broken and will be truncated.", currentPage);
currentPage--;
pagesCount = currentPage + 1;
rndFile.setLength(pagesCount * OWALPage.PAGE_SIZE);
flushedPagesCount = 0;
} else {
loadedFlushId = OLongSerializer.INSTANCE.deserializeNative(content, OWALPage.FLUSH_ID_OFFSET);
if (loadedFlushId == initialFlushId) {
flushedPagesCount++;
currentPage--;
} else
break;
}
}
if (flushedPagesCount != 0) {
content = new byte[OWALPage.PAGE_SIZE];
rndFile.seek((currentPage + 1) * OWALPage.PAGE_SIZE);
rndFile.readFully(content);
final int firstFlushIndex = OIntegerSerializer.INSTANCE.deserializeNative(content, OWALPage.FLUSH_INDEX_OFFSET);
if (firstFlushIndex != 0) {
OLogManager.instance().error(this, "%d WAL page has been broken and will be truncated.", currentPage + 1);
pagesCount = currentPage + 1;
rndFile.setLength(pagesCount * OWALPage.PAGE_SIZE);
flushedPagesCount = 0;
}
}
currentPage += flushedPagesCount;
while (currentPage >= 0) {
rndFile.seek(currentPage * OWALPage.PAGE_SIZE);
rndFile.readFully(content);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(content);
try {
OWALPage page = new OWALPage(pointer, false);
int pageOffset = findLastRecord(page, false);
if (pageOffset >= 0) {
if (page.mergeWithNextPage(pageOffset)) {
page.truncateTill(pageOffset);
rndFile.seek(currentPage * OWALPage.PAGE_SIZE);
content = pointer.get(0, OWALPage.PAGE_SIZE);
rndFile.write(content);
if (page.isEmpty()) {
OLogManager.instance().error(this, "%d WAL page has been broken and will be truncated.", currentPage);
currentPage--;
pagesCount = currentPage + 1;
rndFile.setLength(pagesCount * OWALPage.PAGE_SIZE);
} else
break;
} else
break;
} else
break;
} finally {
pointer.free();
}
}
rndFile.getFD().sync();
}
}
public OLogSequenceNumber readFlushedLSN() throws IOException {
long pages = rndFile.length() / OWALPage.PAGE_SIZE;
if (pages == 0)
return null;
long pageIndex = pages - 1;
while (true) {
rndFile.seek(pageIndex * OWALPage.PAGE_SIZE);
byte[] pageContent = new byte[OWALPage.PAGE_SIZE];
rndFile.readFully(pageContent);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(pageContent);
try {
OWALPage page = new OWALPage(pointer, false);
int pageOffset = findLastRecord(page, true);
if (pageOffset < 0) {
pageIndex--;
if (pageIndex < 0)
return null;
continue;
}
return new OLogSequenceNumber(order, pageIndex * OWALPage.PAGE_SIZE + pageOffset);
} finally {
pointer.free();
}
}
}
public void flush() {
if (!commitExecutor.isShutdown()) {
try {
commitExecutor.submit(new FlushTask()).get();
} catch (InterruptedException e) {
Thread.currentThread().interrupt(); // restore the interrupt flag before surfacing the failure
throw new OStorageException("Thread was interrupted during flush", e);
} catch (ExecutionException e) {
throw new OStorageException("Error during WAL segment " + getPath() + " flush.");
}
} else {
new FlushTask().run();
}
}
private final class FlushTask implements Runnable {
private FlushTask() {
}
@Override
public void run() {
try {
commit();
} catch (Throwable e) {
OLogManager.instance().error(this, "Error during WAL background flush", e);
}
}
private void commit() throws IOException {
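// Writes every cached page to disk; the last (still active) page is flushed from a
// copy so that concurrent appends to it are not blocked during the I/O.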
if (pagesCache.isEmpty())
return;
if (!flushNewData)
return;
flushNewData = false;
final int maxSize = pagesCache.size();
ODirectMemoryPointer[] pagesToFlush = new ODirectMemoryPointer[maxSize];
long filePointer = nextPositionToFlush;
int lastRecordOffset = -1;
long lastPageIndex = -1;
int flushedPages = 0;
Iterator<OWALPage> pageIterator = pagesCache.iterator();
while (flushedPages < maxSize) {
final OWALPage page = pageIterator.next();
synchronized (page) {
ODirectMemoryPointer dataPointer;
if (flushedPages == maxSize - 1) {
dataPointer = new ODirectMemoryPointer(OWALPage.PAGE_SIZE);
page.getPagePointer().moveData(0, dataPointer, 0, OWALPage.PAGE_SIZE);
} else {
dataPointer = page.getPagePointer();
}
pagesToFlush[flushedPages] = dataPointer;
int recordOffset = findLastRecord(page, true);
if (recordOffset >= 0) {
lastRecordOffset = recordOffset;
lastPageIndex = flushedPages;
}
}
flushedPages++;
}
flushId++;
synchronized (rndFile) {
rndFile.seek(filePointer);
for (int i = 0; i < pagesToFlush.length; i++) {
ODirectMemoryPointer dataPointer = pagesToFlush[i];
byte[] pageContent = dataPointer.get(0, OWALPage.PAGE_SIZE);
if (i == pagesToFlush.length - 1)
dataPointer.free();
OLongSerializer.INSTANCE.serializeNative(flushId, pageContent, OWALPage.FLUSH_ID_OFFSET);
OIntegerSerializer.INSTANCE.serializeNative(i, pageContent, OWALPage.FLUSH_INDEX_OFFSET);
flushPage(pageContent);
filePointer += OWALPage.PAGE_SIZE;
}
rndFile.getFD().sync();
}
long oldPositionToFlush = nextPositionToFlush;
nextPositionToFlush = filePointer - OWALPage.PAGE_SIZE;
if (lastRecordOffset >= 0)
flushedLsn = new OLogSequenceNumber(order, oldPositionToFlush + lastPageIndex * OWALPage.PAGE_SIZE + lastRecordOffset);
for (int i = 0; i < flushedPages - 1; i++) {
OWALPage page = pagesCache.poll();
page.getPagePointer().free();
}
assert !pagesCache.isEmpty();
}
private void flushPage(byte[] content) throws IOException {
CRC32 crc32 = new CRC32();
crc32.update(content, OIntegerSerializer.INT_SIZE, OWALPage.PAGE_SIZE - OIntegerSerializer.INT_SIZE);
OIntegerSerializer.INSTANCE.serializeNative((int) crc32.getValue(), content, 0);
rndFile.write(content);
}
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_paginated_wal_OWriteAheadLog.java
|
5,067 |
public class SearchService extends AbstractLifecycleComponent<SearchService> {
public static final String NORMS_LOADING_KEY = "index.norms.loading";
private final ThreadPool threadPool;
private final ClusterService clusterService;
private final IndicesService indicesService;
private final IndicesWarmer indicesWarmer;
private final ScriptService scriptService;
private final CacheRecycler cacheRecycler;
private final PageCacheRecycler pageCacheRecycler;
private final DfsPhase dfsPhase;
private final QueryPhase queryPhase;
private final FetchPhase fetchPhase;
private final long defaultKeepAlive;
private final ScheduledFuture<?> keepAliveReaper;
private final AtomicLong idGenerator = new AtomicLong();
private final ConcurrentMapLong<SearchContext> activeContexts = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency();
private final ImmutableMap<String, SearchParseElement> elementParsers;
@Inject
public SearchService(Settings settings, ClusterService clusterService, IndicesService indicesService, IndicesLifecycle indicesLifecycle, IndicesWarmer indicesWarmer, ThreadPool threadPool,
ScriptService scriptService, CacheRecycler cacheRecycler, PageCacheRecycler pageCacheRecycler, DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase) {
super(settings);
this.threadPool = threadPool;
this.clusterService = clusterService;
this.indicesService = indicesService;
this.indicesWarmer = indicesWarmer;
this.scriptService = scriptService;
this.cacheRecycler = cacheRecycler;
this.pageCacheRecycler = pageCacheRecycler;
this.dfsPhase = dfsPhase;
this.queryPhase = queryPhase;
this.fetchPhase = fetchPhase;
TimeValue keepAliveInterval = componentSettings.getAsTime("keep_alive_interval", timeValueMinutes(1));
// a 5 minute default is safe here, since contexts are also cleaned up by search requests and when a shard/index closes
this.defaultKeepAlive = componentSettings.getAsTime("default_keep_alive", timeValueMinutes(5)).millis();
Map<String, SearchParseElement> elementParsers = new HashMap<String, SearchParseElement>();
elementParsers.putAll(dfsPhase.parseElements());
elementParsers.putAll(queryPhase.parseElements());
elementParsers.putAll(fetchPhase.parseElements());
elementParsers.put("stats", new StatsGroupsParseElement());
this.elementParsers = ImmutableMap.copyOf(elementParsers);
this.keepAliveReaper = threadPool.scheduleWithFixedDelay(new Reaper(), keepAliveInterval);
this.indicesWarmer.addListener(new NormsWarmer());
this.indicesWarmer.addListener(new FieldDataWarmer());
this.indicesWarmer.addListener(new SearchWarmer());
}
@Override
protected void doStart() throws ElasticsearchException {
}
@Override
protected void doStop() throws ElasticsearchException {
for (SearchContext context : activeContexts.values()) {
freeContext(context);
}
activeContexts.clear();
}
@Override
protected void doClose() throws ElasticsearchException {
keepAliveReaper.cancel(false);
}
public DfsSearchResult executeDfsPhase(ShardSearchRequest request) throws ElasticsearchException {
SearchContext context = createAndPutContext(request);
try {
contextProcessing(context);
dfsPhase.execute(context);
contextProcessedSuccessfully(context);
return context.dfsResult();
} catch (Throwable e) {
logger.trace("Dfs phase failed", e);
freeContext(context);
throw ExceptionsHelper.convertToRuntime(e);
} finally {
cleanContext(context);
}
}
public QuerySearchResult executeScan(ShardSearchRequest request) throws ElasticsearchException {
SearchContext context = createAndPutContext(request);
assert context.searchType() == SearchType.SCAN;
context.searchType(SearchType.COUNT); // move to COUNT, and then, when scrolling, move to SCAN
assert context.searchType() == SearchType.COUNT;
try {
if (context.scroll() == null) {
throw new ElasticsearchException("Scroll must be provided when scanning...");
}
contextProcessing(context);
queryPhase.execute(context);
contextProcessedSuccessfully(context);
return context.queryResult();
} catch (Throwable e) {
logger.trace("Scan phase failed", e);
freeContext(context);
throw ExceptionsHelper.convertToRuntime(e);
} finally {
cleanContext(context);
}
}
public ScrollQueryFetchSearchResult executeScan(InternalScrollSearchRequest request) throws ElasticsearchException {
SearchContext context = findContext(request.id());
contextProcessing(context);
try {
processScroll(request, context);
if (context.searchType() == SearchType.COUNT) {
// first scanning, reset the from to 0
context.searchType(SearchType.SCAN);
context.from(0);
}
queryPhase.execute(context);
shortcutDocIdsToLoadForScanning(context);
fetchPhase.execute(context);
if (context.scroll() == null || context.fetchResult().hits().hits().length < context.size()) {
freeContext(request.id());
} else {
contextProcessedSuccessfully(context);
}
return new ScrollQueryFetchSearchResult(new QueryFetchSearchResult(context.queryResult(), context.fetchResult()), context.shardTarget());
} catch (Throwable e) {
logger.trace("Scan phase failed", e);
freeContext(context);
throw ExceptionsHelper.convertToRuntime(e);
} finally {
cleanContext(context);
}
}
public QuerySearchResult executeQueryPhase(ShardSearchRequest request) throws ElasticsearchException {
SearchContext context = createAndPutContext(request);
try {
context.indexShard().searchService().onPreQueryPhase(context);
long time = System.nanoTime();
contextProcessing(context);
queryPhase.execute(context);
if (context.searchType() == SearchType.COUNT) {
freeContext(context.id());
} else {
contextProcessedSuccessfully(context);
}
context.indexShard().searchService().onQueryPhase(context, System.nanoTime() - time);
return context.queryResult();
} catch (Throwable e) {
context.indexShard().searchService().onFailedQueryPhase(context);
logger.trace("Query phase failed", e);
freeContext(context);
throw ExceptionsHelper.convertToRuntime(e);
} finally {
cleanContext(context);
}
}
public ScrollQuerySearchResult executeQueryPhase(InternalScrollSearchRequest request) throws ElasticsearchException {
SearchContext context = findContext(request.id());
try {
context.indexShard().searchService().onPreQueryPhase(context);
long time = System.nanoTime();
contextProcessing(context);
processScroll(request, context);
queryPhase.execute(context);
contextProcessedSuccessfully(context);
context.indexShard().searchService().onQueryPhase(context, System.nanoTime() - time);
return new ScrollQuerySearchResult(context.queryResult(), context.shardTarget());
} catch (Throwable e) {
context.indexShard().searchService().onFailedQueryPhase(context);
logger.trace("Query phase failed", e);
freeContext(context);
throw ExceptionsHelper.convertToRuntime(e);
} finally {
cleanContext(context);
}
}
public QuerySearchResult executeQueryPhase(QuerySearchRequest request) throws ElasticsearchException {
SearchContext context = findContext(request.id());
contextProcessing(context);
try {
context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity()));
} catch (Throwable e) {
freeContext(context);
cleanContext(context);
throw new QueryPhaseExecutionException(context, "Failed to set aggregated df", e);
}
try {
context.indexShard().searchService().onPreQueryPhase(context);
long time = System.nanoTime();
queryPhase.execute(context);
contextProcessedSuccessfully(context);
context.indexShard().searchService().onQueryPhase(context, System.nanoTime() - time);
return context.queryResult();
} catch (Throwable e) {
context.indexShard().searchService().onFailedQueryPhase(context);
logger.trace("Query phase failed", e);
freeContext(context);
throw ExceptionsHelper.convertToRuntime(e);
} finally {
cleanContext(context);
}
}
public QueryFetchSearchResult executeFetchPhase(ShardSearchRequest request) throws ElasticsearchException {
SearchContext context = createAndPutContext(request);
contextProcessing(context);
try {
context.indexShard().searchService().onPreQueryPhase(context);
long time = System.nanoTime();
try {
queryPhase.execute(context);
} catch (Throwable e) {
context.indexShard().searchService().onFailedQueryPhase(context);
throw ExceptionsHelper.convertToRuntime(e);
}
long time2 = System.nanoTime();
context.indexShard().searchService().onQueryPhase(context, time2 - time);
context.indexShard().searchService().onPreFetchPhase(context);
try {
shortcutDocIdsToLoad(context);
fetchPhase.execute(context);
if (context.scroll() == null) {
freeContext(context.id());
} else {
contextProcessedSuccessfully(context);
}
} catch (Throwable e) {
context.indexShard().searchService().onFailedFetchPhase(context);
throw ExceptionsHelper.convertToRuntime(e);
}
context.indexShard().searchService().onFetchPhase(context, System.nanoTime() - time2);
return new QueryFetchSearchResult(context.queryResult(), context.fetchResult());
} catch (Throwable e) {
logger.trace("Fetch phase failed", e);
freeContext(context);
throw ExceptionsHelper.convertToRuntime(e);
} finally {
cleanContext(context);
}
}
public QueryFetchSearchResult executeFetchPhase(QuerySearchRequest request) throws ElasticsearchException {
SearchContext context = findContext(request.id());
contextProcessing(context);
try {
context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity()));
} catch (Throwable e) {
freeContext(context);
cleanContext(context);
throw new QueryPhaseExecutionException(context, "Failed to set aggregated df", e);
}
try {
context.indexShard().searchService().onPreQueryPhase(context);
long time = System.nanoTime();
try {
queryPhase.execute(context);
} catch (Throwable e) {
context.indexShard().searchService().onFailedQueryPhase(context);
throw ExceptionsHelper.convertToRuntime(e);
}
long time2 = System.nanoTime();
context.indexShard().searchService().onQueryPhase(context, time2 - time);
context.indexShard().searchService().onPreFetchPhase(context);
try {
shortcutDocIdsToLoad(context);
fetchPhase.execute(context);
if (context.scroll() == null) {
freeContext(request.id());
} else {
contextProcessedSuccessfully(context);
}
} catch (Throwable e) {
context.indexShard().searchService().onFailedFetchPhase(context);
throw ExceptionsHelper.convertToRuntime(e);
}
context.indexShard().searchService().onFetchPhase(context, System.nanoTime() - time2);
return new QueryFetchSearchResult(context.queryResult(), context.fetchResult());
} catch (Throwable e) {
logger.trace("Fetch phase failed", e);
freeContext(context);
throw ExceptionsHelper.convertToRuntime(e);
} finally {
cleanContext(context);
}
}
public ScrollQueryFetchSearchResult executeFetchPhase(InternalScrollSearchRequest request) throws ElasticsearchException {
SearchContext context = findContext(request.id());
contextProcessing(context);
try {
processScroll(request, context);
context.indexShard().searchService().onPreQueryPhase(context);
long time = System.nanoTime();
try {
queryPhase.execute(context);
} catch (Throwable e) {
context.indexShard().searchService().onFailedQueryPhase(context);
throw ExceptionsHelper.convertToRuntime(e);
}
long time2 = System.nanoTime();
context.indexShard().searchService().onQueryPhase(context, time2 - time);
context.indexShard().searchService().onPreFetchPhase(context);
try {
shortcutDocIdsToLoad(context);
fetchPhase.execute(context);
if (context.scroll() == null) {
freeContext(request.id());
} else {
contextProcessedSuccessfully(context);
}
} catch (Throwable e) {
context.indexShard().searchService().onFailedFetchPhase(context);
throw ExceptionsHelper.convertToRuntime(e);
}
context.indexShard().searchService().onFetchPhase(context, System.nanoTime() - time2);
return new ScrollQueryFetchSearchResult(new QueryFetchSearchResult(context.queryResult(), context.fetchResult()), context.shardTarget());
} catch (Throwable e) {
logger.trace("Fetch phase failed", e);
freeContext(context);
throw ExceptionsHelper.convertToRuntime(e);
} finally {
cleanContext(context);
}
}
public FetchSearchResult executeFetchPhase(FetchSearchRequest request) throws ElasticsearchException {
SearchContext context = findContext(request.id());
contextProcessing(context);
try {
context.docIdsToLoad(request.docIds(), 0, request.docIdsSize());
context.indexShard().searchService().onPreFetchPhase(context);
long time = System.nanoTime();
fetchPhase.execute(context);
if (context.scroll() == null) {
freeContext(request.id());
} else {
contextProcessedSuccessfully(context);
}
context.indexShard().searchService().onFetchPhase(context, System.nanoTime() - time);
return context.fetchResult();
} catch (Throwable e) {
context.indexShard().searchService().onFailedFetchPhase(context);
logger.trace("Fetch phase failed", e);
freeContext(context); // we just try to make sure this is freed - rethrow orig exception.
throw ExceptionsHelper.convertToRuntime(e);
} finally {
cleanContext(context);
}
}
private SearchContext findContext(long id) throws SearchContextMissingException {
SearchContext context = activeContexts.get(id);
if (context == null) {
throw new SearchContextMissingException(id);
}
SearchContext.setCurrent(context);
return context;
}
SearchContext createAndPutContext(ShardSearchRequest request) throws ElasticsearchException {
SearchContext context = createContext(request);
activeContexts.put(context.id(), context);
context.indexShard().searchService().onNewContext(context);
return context;
}
SearchContext createContext(ShardSearchRequest request) throws ElasticsearchException {
return createContext(request, null);
}
SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) throws ElasticsearchException {
IndexService indexService = indicesService.indexServiceSafe(request.index());
IndexShard indexShard = indexService.shardSafe(request.shardId());
SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), request.index(), request.shardId());
Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;
SearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, cacheRecycler, pageCacheRecycler);
SearchContext.setCurrent(context);
try {
context.scroll(request.scroll());
parseSource(context, request.source());
parseSource(context, request.extraSource());
// if the from and size are still not set, default them
if (context.from() == -1) {
context.from(0);
}
if (context.size() == -1) {
context.size(10);
}
// pre process
dfsPhase.preProcess(context);
queryPhase.preProcess(context);
fetchPhase.preProcess(context);
// compute the context keep alive
long keepAlive = defaultKeepAlive;
if (request.scroll() != null && request.scroll().keepAlive() != null) {
keepAlive = request.scroll().keepAlive().millis();
}
context.keepAlive(keepAlive);
} catch (Throwable e) {
context.release();
throw ExceptionsHelper.convertToRuntime(e);
}
return context;
}
public void freeContext(long id) {
SearchContext context = activeContexts.remove(id);
if (context == null) {
return;
}
context.indexShard().searchService().onFreeContext(context);
context.release();
}
private void freeContext(SearchContext context) {
SearchContext removed = activeContexts.remove(context.id());
if (removed != null) {
removed.indexShard().searchService().onFreeContext(removed);
}
context.release();
}
public void freeAllScrollContexts() {
for (SearchContext searchContext : activeContexts.values()) {
if (searchContext.scroll() != null) {
freeContext(searchContext);
}
}
}
private void contextProcessing(SearchContext context) {
// disable timeout while executing a search
context.accessed(-1);
}
private void contextProcessedSuccessfully(SearchContext context) {
context.accessed(threadPool.estimatedTimeInMillis());
}
private void cleanContext(SearchContext context) {
SearchContext.removeCurrent();
}
private void parseSource(SearchContext context, BytesReference source) throws SearchParseException {
// nothing to parse...
if (source == null || source.length() == 0) {
return;
}
XContentParser parser = null;
try {
parser = XContentFactory.xContent(source).createParser(source);
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
String fieldName = parser.currentName();
parser.nextToken();
SearchParseElement element = elementParsers.get(fieldName);
if (element == null) {
throw new SearchParseException(context, "No parser for element [" + fieldName + "]");
}
element.parse(parser, context);
} else if (token == null) {
break;
}
}
} catch (Throwable e) {
String sSource = "_na_";
try {
sSource = XContentHelper.convertToJson(source, false);
} catch (Throwable e1) {
// ignore
}
throw new SearchParseException(context, "Failed to parse source [" + sSource + "]", e);
} finally {
if (parser != null) {
parser.close();
}
}
}
private static final int[] EMPTY_DOC_IDS = new int[0];
/**
 * Shortcuts the doc ids to load: only hits from "from" up to "from + size" are loaded.
 * The phase controller handles this as well, since the result is always size * shards for Q_A_F.
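 * For example (illustrative): with from=10, size=5 and 12 scored docs, only the doc ids at
 * positions 10 and 11 are marked to load.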
*/
private void shortcutDocIdsToLoad(SearchContext context) {
TopDocs topDocs = context.queryResult().topDocs();
if (topDocs.scoreDocs.length < context.from()) {
// no more docs...
context.docIdsToLoad(EMPTY_DOC_IDS, 0, 0);
return;
}
int totalSize = context.from() + context.size();
int[] docIdsToLoad = new int[Math.min(topDocs.scoreDocs.length - context.from(), context.size())];
int counter = 0;
for (int i = context.from(); i < totalSize; i++) {
if (i < topDocs.scoreDocs.length) {
docIdsToLoad[counter] = topDocs.scoreDocs[i].doc;
} else {
break;
}
counter++;
}
context.docIdsToLoad(docIdsToLoad, 0, counter);
}
private void shortcutDocIdsToLoadForScanning(SearchContext context) {
TopDocs topDocs = context.queryResult().topDocs();
if (topDocs.scoreDocs.length == 0) {
// no more docs...
context.docIdsToLoad(EMPTY_DOC_IDS, 0, 0);
return;
}
int[] docIdsToLoad = new int[topDocs.scoreDocs.length];
for (int i = 0; i < docIdsToLoad.length; i++) {
docIdsToLoad[i] = topDocs.scoreDocs[i].doc;
}
context.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length);
}
private void processScroll(InternalScrollSearchRequest request, SearchContext context) {
// process scroll
context.from(context.from() + context.size());
context.scroll(request.scroll());
// update the context keep alive based on the new scroll value
if (request.scroll() != null && request.scroll().keepAlive() != null) {
context.keepAlive(request.scroll().keepAlive().millis());
}
}
static class NormsWarmer extends IndicesWarmer.Listener {
@Override
public TerminationHandle warm(final IndexShard indexShard, IndexMetaData indexMetaData, final WarmerContext context, ThreadPool threadPool) {
final Loading defaultLoading = Loading.parse(indexMetaData.settings().get(NORMS_LOADING_KEY), Loading.LAZY);
final MapperService mapperService = indexShard.mapperService();
final ObjectSet<String> warmUp = new ObjectOpenHashSet<String>();
for (DocumentMapper docMapper : mapperService) {
for (FieldMapper<?> fieldMapper : docMapper.mappers().mappers()) {
final String indexName = fieldMapper.names().indexName();
if (fieldMapper.fieldType().indexed() && !fieldMapper.fieldType().omitNorms() && fieldMapper.normsLoading(defaultLoading) == Loading.EAGER) {
warmUp.add(indexName);
}
}
}
final CountDownLatch latch = new CountDownLatch(1);
// Norms loading may be I/O intensive but is not CPU intensive, so we execute it in a single task
threadPool.executor(executor()).execute(new Runnable() {
@Override
public void run() {
try {
for (Iterator<ObjectCursor<String>> it = warmUp.iterator(); it.hasNext(); ) {
final String indexName = it.next().value;
final long start = System.nanoTime();
for (final AtomicReaderContext ctx : context.newSearcher().reader().leaves()) {
final NumericDocValues values = ctx.reader().getNormValues(indexName);
if (values != null) {
values.get(0);
}
}
if (indexShard.warmerService().logger().isTraceEnabled()) {
indexShard.warmerService().logger().trace("warmed norms for [{}], took [{}]", indexName, TimeValue.timeValueNanos(System.nanoTime() - start));
}
}
} catch (Throwable t) {
indexShard.warmerService().logger().warn("failed to warm-up norms", t);
} finally {
latch.countDown();
}
}
});
return new TerminationHandle() {
@Override
public void awaitTermination() throws InterruptedException {
latch.await();
}
};
}
}
static class FieldDataWarmer extends IndicesWarmer.Listener {
@Override
public TerminationHandle warm(final IndexShard indexShard, IndexMetaData indexMetaData, final WarmerContext context, ThreadPool threadPool) {
final MapperService mapperService = indexShard.mapperService();
final Map<String, FieldMapper<?>> warmUp = new HashMap<String, FieldMapper<?>>();
boolean parentChild = false;
for (DocumentMapper docMapper : mapperService) {
for (FieldMapper<?> fieldMapper : docMapper.mappers().mappers()) {
if (fieldMapper instanceof ParentFieldMapper) {
ParentFieldMapper parentFieldMapper = (ParentFieldMapper) fieldMapper;
if (parentFieldMapper.active()) {
parentChild = true;
}
}
final FieldDataType fieldDataType = fieldMapper.fieldDataType();
if (fieldDataType == null) {
continue;
}
if (fieldDataType.getLoading() != Loading.EAGER) {
continue;
}
final String indexName = fieldMapper.names().indexName();
if (warmUp.containsKey(indexName)) {
continue;
}
warmUp.put(indexName, fieldMapper);
}
}
final IndexFieldDataService indexFieldDataService = indexShard.indexFieldDataService();
final Executor executor = threadPool.executor(executor());
final CountDownLatch latch = new CountDownLatch(context.newSearcher().reader().leaves().size() * warmUp.size() + (parentChild ? 1 : 0));
for (final AtomicReaderContext ctx : context.newSearcher().reader().leaves()) {
for (final FieldMapper<?> fieldMapper : warmUp.values()) {
executor.execute(new Runnable() {
@Override
public void run() {
try {
final long start = System.nanoTime();
indexFieldDataService.getForField(fieldMapper).load(ctx);
if (indexShard.warmerService().logger().isTraceEnabled()) {
indexShard.warmerService().logger().trace("warmed fielddata for [{}], took [{}]", fieldMapper.names().name(), TimeValue.timeValueNanos(System.nanoTime() - start));
}
} catch (Throwable t) {
indexShard.warmerService().logger().warn("failed to warm-up fielddata for [{}]", t, fieldMapper.names().name());
} finally {
latch.countDown();
}
}
});
}
}
if (parentChild) {
executor.execute(new Runnable() {
@Override
public void run() {
try {
final long start = System.nanoTime();
indexShard.indexService().cache().idCache().refresh(context.newSearcher().reader().leaves());
if (indexShard.warmerService().logger().isTraceEnabled()) {
indexShard.warmerService().logger().trace("warmed id_cache, took [{}]", TimeValue.timeValueNanos(System.nanoTime() - start));
}
} catch (Throwable t) {
indexShard.warmerService().logger().warn("failed to warm-up id cache", t);
} finally {
latch.countDown();
}
}
});
}
return new TerminationHandle() {
@Override
public void awaitTermination() throws InterruptedException {
latch.await();
}
};
}
}
class SearchWarmer extends IndicesWarmer.Listener {
@Override
public TerminationHandle warm(final IndexShard indexShard, final IndexMetaData indexMetaData, final IndicesWarmer.WarmerContext warmerContext, ThreadPool threadPool) {
IndexWarmersMetaData custom = indexMetaData.custom(IndexWarmersMetaData.TYPE);
if (custom == null) {
return TerminationHandle.NO_WAIT;
}
final Executor executor = threadPool.executor(executor());
final CountDownLatch latch = new CountDownLatch(custom.entries().size());
for (final IndexWarmersMetaData.Entry entry : custom.entries()) {
executor.execute(new Runnable() {
@Override
public void run() {
SearchContext context = null;
try {
long now = System.nanoTime();
ShardSearchRequest request = new ShardSearchRequest(indexShard.shardId().index().name(), indexShard.shardId().id(), indexMetaData.numberOfShards(),
SearchType.QUERY_THEN_FETCH /* COUNT is deliberately avoided so that sorting and any other warming logic also kick in */)
.source(entry.source())
.types(entry.types());
context = createContext(request, warmerContext.newSearcher());
queryPhase.execute(context);
long took = System.nanoTime() - now;
if (indexShard.warmerService().logger().isTraceEnabled()) {
indexShard.warmerService().logger().trace("warmed [{}], took [{}]", entry.name(), TimeValue.timeValueNanos(took));
}
} catch (Throwable t) {
indexShard.warmerService().logger().warn("warmer [{}] failed", t, entry.name());
} finally {
try {
if (context != null) {
freeContext(context);
cleanContext(context);
}
} finally {
latch.countDown();
}
}
}
});
}
return new TerminationHandle() {
@Override
public void awaitTermination() throws InterruptedException {
latch.await();
}
};
}
}
class Reaper implements Runnable {
@Override
public void run() {
long time = threadPool.estimatedTimeInMillis();
for (SearchContext context : activeContexts.values()) {
if (context.lastAccessTime() == -1) { // its being processed or timeout is disabled
continue;
}
if ((time - context.lastAccessTime() > context.keepAlive())) {
freeContext(context);
}
}
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_SearchService.java
|
3 |
{
@Override
public void enteredCluster( ClusterConfiguration clusterConfiguration )
{
clusterClient.performRoleElections();
clusterClient.removeClusterListener( this );
}
});
| 1no label
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_backup_HaBackupProvider.java
|
485 |
static final class Fields {
static final XContentBuilderString TOKENS = new XContentBuilderString("tokens");
static final XContentBuilderString TOKEN = new XContentBuilderString("token");
static final XContentBuilderString START_OFFSET = new XContentBuilderString("start_offset");
static final XContentBuilderString END_OFFSET = new XContentBuilderString("end_offset");
static final XContentBuilderString TYPE = new XContentBuilderString("type");
static final XContentBuilderString POSITION = new XContentBuilderString("position");
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_analyze_AnalyzeResponse.java
|
45 |
@Component("blPendingSandBoxItemCustomPersistenceHandler")
public class PendingSandBoxItemCustomPersistenceHandler extends SandBoxItemCustomPersistenceHandler {
private final Log LOG = LogFactory.getLog(PendingSandBoxItemCustomPersistenceHandler.class);
@Override
public Boolean canHandleFetch(PersistencePackage persistencePackage) {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
boolean isSandboxItem = SandBoxItem.class.getName().equals(ceilingEntityFullyQualifiedClassname);
if (isSandboxItem) {
String[] criteria = persistencePackage.getCustomCriteria();
return criteria != null && criteria.length > 4 && "pending".equals(criteria[4]);
}
return false;
}
@Override
public DynamicResultSet fetch(PersistencePackage persistencePackage, CriteriaTransferObject cto, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
String[] customCriteria = persistencePackage.getCustomCriteria();
if (ArrayUtils.isEmpty(customCriteria) || customCriteria.length != 5) {
ServiceException e = new ServiceException("Invalid request for entity: " + ceilingEntityFullyQualifiedClassname);
LOG.error("Invalid request for entity: " + ceilingEntityFullyQualifiedClassname, e);
throw e;
}
AdminUser adminUser = adminRemoteSecurityService.getPersistentAdminUser();
if (adminUser == null) {
throw new ServiceException("Unable to determine the login status of the current user");
}
try {
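// customCriteria layout (as used here): [1] = operation name (releaseAll, releaseSelected,
// reclaimAll or reclaimSelected), [2] = comma-separated SandBoxItem ids, [4] = "pending"
// (checked in canHandleFetch)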
String operation = customCriteria[1];
List<Long> targets = new ArrayList<Long>();
if (!StringUtils.isEmpty(customCriteria[2])) {
String[] parts = customCriteria[2].split(",");
for (String part : parts) {
try {
targets.add(Long.valueOf(part));
} catch (NumberFormatException e) {
// skip ids that cannot be parsed as longs
}
}
}
String requiredPermission = "PERMISSION_ALL_USER_SANDBOX";
boolean allowOperation = false;
for (AdminRole role : adminUser.getAllRoles()) {
for (AdminPermission permission : role.getAllPermissions()) {
if (permission.getName().equals(requiredPermission)) {
allowOperation = true;
break;
}
}
}
if (!allowOperation) {
ServiceException e = new ServiceException("Current user does not have permission to perform operation");
LOG.error("Current user does not have permission to perform operation", e);
throw e;
}
SandBox mySandBox = sandBoxService.retrieveUserSandBox(null, adminUser);
SandBox approvalSandBox = sandBoxService.retrieveApprovalSandBox(mySandBox);
if (operation.equals("releaseAll")) {
sandBoxService.revertAllSandBoxItems(mySandBox, approvalSandBox);
} else if (operation.equals("releaseSelected")) {
List<SandBoxItem> items = retrieveSandBoxItems(targets, dynamicEntityDao, mySandBox);
sandBoxService.revertSelectedSandBoxItems(approvalSandBox, items);
} else if (operation.equals("reclaimAll")) {
sandBoxService.rejectAllSandBoxItems(mySandBox, approvalSandBox, "reclaiming sandbox items");
} else if (operation.equals("reclaimSelected")) {
List<SandBoxItem> items = retrieveSandBoxItems(targets, dynamicEntityDao, mySandBox);
sandBoxService.rejectSelectedSandBoxItems(approvalSandBox, "reclaiming sandbox item", items);
}
PersistencePerspective persistencePerspective = persistencePackage.getPersistencePerspective();
Map<String, FieldMetadata> originalProps = helper.getSimpleMergedProperties(SandBoxItem.class.getName(), persistencePerspective);
cto.get("originalSandBoxId").setFilterValue(mySandBox.getId().toString());
cto.get("archivedFlag").setFilterValue(Boolean.FALSE.toString());
List<FilterMapping> filterMappings = helper.getFilterMappings(persistencePerspective, cto, SandBoxItem.class.getName(), originalProps);
List<Serializable> records = helper.getPersistentRecords(SandBoxItem.class.getName(), filterMappings, cto.getFirstResult(), cto.getMaxResults());
Entity[] results = helper.getRecords(originalProps, records);
int totalRecords = helper.getTotalRecords(StringUtils.isEmpty(persistencePackage.getFetchTypeFullyQualifiedClassname()) ?
persistencePackage.getCeilingEntityFullyQualifiedClassname() : persistencePackage.getFetchTypeFullyQualifiedClassname(),
filterMappings);
DynamicResultSet response = new DynamicResultSet(results, totalRecords);
return response;
} catch (Exception e) {
throw new ServiceException("Unable to execute persistence activity for entity: "+ceilingEntityFullyQualifiedClassname, e);
}
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_admin_server_handler_PendingSandBoxItemCustomPersistenceHandler.java
|
326 |
public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder<NodesInfoRequest, NodesInfoResponse, NodesInfoRequestBuilder> {
public NodesInfoRequestBuilder(ClusterAdminClient clusterClient) {
super((InternalClusterAdminClient) clusterClient, new NodesInfoRequest());
}
/**
* Clears all info flags.
*/
public NodesInfoRequestBuilder clear() {
request.clear();
return this;
}
/**
 * Sets the request to return all the data.
*/
public NodesInfoRequestBuilder all() {
request.all();
return this;
}
/**
* Should the node settings be returned.
*/
public NodesInfoRequestBuilder setSettings(boolean settings) {
request.settings(settings);
return this;
}
/**
* Should the node OS info be returned.
*/
public NodesInfoRequestBuilder setOs(boolean os) {
request.os(os);
return this;
}
/**
 * Should the node process info be returned.
*/
public NodesInfoRequestBuilder setProcess(boolean process) {
request.process(process);
return this;
}
/**
* Should the node JVM info be returned.
*/
public NodesInfoRequestBuilder setJvm(boolean jvm) {
request.jvm(jvm);
return this;
}
/**
* Should the node thread pool info be returned.
*/
public NodesInfoRequestBuilder setThreadPool(boolean threadPool) {
request.threadPool(threadPool);
return this;
}
/**
* Should the node Network info be returned.
*/
public NodesInfoRequestBuilder setNetwork(boolean network) {
request.network(network);
return this;
}
/**
* Should the node Transport info be returned.
*/
public NodesInfoRequestBuilder setTransport(boolean transport) {
request.transport(transport);
return this;
}
/**
* Should the node HTTP info be returned.
*/
public NodesInfoRequestBuilder setHttp(boolean http) {
request.http(http);
return this;
}
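/**
 * Should the node plugins info be returned.
 */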
public NodesInfoRequestBuilder setPlugin(boolean plugin) {
request().plugin(plugin);
return this;
}
@Override
protected void doExecute(ActionListener<NodesInfoResponse> listener) {
((ClusterAdminClient) client).nodesInfo(request, listener);
}
}
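// Usage sketch (illustrative, not part of the original source): the builder is normally
// obtained from the cluster admin client and narrowed to the sections of interest, e.g.
//
//   NodesInfoResponse resp = client.admin().cluster().prepareNodesInfo()
//       .clear()
//       .setOs(true)
//       .setJvm(true)
//       .execute().actionGet();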
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_info_NodesInfoRequestBuilder.java
|
315 |
public interface OStorageClusterConfiguration {
public int getId();
public String getName();
public String getLocation();
public int getDataSegmentId();
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_config_OStorageClusterConfiguration.java
|
1,697 |
public class Imports {
private static final List<String> imports = new ArrayList<String>();
static {
// titan
imports.add("com.thinkaurelius.titan.core.*");
imports.add("com.thinkaurelius.titan.core.attribute.*");
imports.add("com.thinkaurelius.titan.core.log.*");
imports.add("com.thinkaurelius.titan.core.olap.*");
imports.add("com.thinkaurelius.titan.core.schema.*");
imports.add("com.thinkaurelius.titan.core.util.*");
imports.add("com.thinkaurelius.titan.example.*");
imports.add("org.apache.commons.configuration.*");
imports.add("static " + Geo.class.getName() + ".*");
imports.add("static " + Text.class.getName() + ".*");
// todo: remove with Gremlin 2.3.1+
imports.add("static " + Query.Compare.class.getName() + ".*");
}
public static List<String> getImports() {
return Imports.imports;
}
}
| 1no label
|
titan-core_src_main_java_com_thinkaurelius_titan_tinkerpop_gremlin_Imports.java
|
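A hedged sketch of how an import list like this is commonly wired into a Groovy shell; the split on the "static " prefix mirrors the entries built above, but the surrounding method is invented for illustration:
import org.codehaus.groovy.control.CompilerConfiguration;
import org.codehaus.groovy.control.customizers.ImportCustomizer;
public static CompilerConfiguration configurationWithImports() {
    ImportCustomizer customizer = new ImportCustomizer();
    for (String imp : Imports.getImports()) {
        if (imp.startsWith("static ")) {
            // "static com.foo.Bar.*" becomes a static star import on com.foo.Bar
            customizer.addStaticStars(imp.substring("static ".length(), imp.lastIndexOf(".*")));
        } else {
            // "com.foo.*" becomes a star import on the package com.foo
            customizer.addStarImports(imp.substring(0, imp.lastIndexOf(".*")));
        }
    }
    CompilerConfiguration configuration = new CompilerConfiguration();
    configuration.addCompilationCustomizers(customizer);
    return configuration;
}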
642 |
public abstract class AbstractGeneratedResourceHandler {
protected Cache generatedResourceCache;
/**
* @param path
* @return boolean determining whether this handler is able to handle the given request
*/
public abstract boolean canHandle(String path);
/**
* @param path
* @param locations
* @return the Resource representing this file
*/
public abstract Resource getFileContents(String path, List<Resource> locations);
/**
* @param cachedResource
* @param path
* @param locations
* @return whether or not the given cachedResource needs to be regenerated
*/
public abstract boolean isCachedResourceExpired(GeneratedResource cachedResource, String path, List<Resource> locations);
/**
* Attempts to retrieve the requested resource from cache. If not cached, generates the resource, caches it,
* and then returns it
*
* @param path
* @param locations
* @return the generated resource
*/
public Resource getResource(String path, List<Resource> locations) {
Element e = getGeneratedResourceCache().get(path);
Resource r = null;
boolean shouldGenerate = false;
if (e == null || e.getObjectValue() == null) {
shouldGenerate = true;
} else if (e.getObjectValue() instanceof GeneratedResource
&& isCachedResourceExpired((GeneratedResource) e.getObjectValue(), path, locations)) {
shouldGenerate = true;
} else {
r = (Resource) e.getObjectValue();
}
if (shouldGenerate) {
r = getFileContents(path, locations);
e = new Element(path, r);
getGeneratedResourceCache().put(e);
}
return r;
}
protected Cache getGeneratedResourceCache() {
if (generatedResourceCache == null) {
generatedResourceCache = CacheManager.getInstance().getCache("generatedResourceCache");
}
return generatedResourceCache;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_web_resource_AbstractGeneratedResourceHandler.java
|
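To make the contract concrete, a hypothetical subclass; the class name, path, and contents are invented, and it assumes GeneratedResource accepts the raw bytes plus a description:
public class EnvironmentJsHandler extends AbstractGeneratedResourceHandler {
    @Override
    public boolean canHandle(String path) {
        // Claim only the single generated file this handler knows about
        return "js/env.js".equals(path);
    }
    @Override
    public Resource getFileContents(String path, List<Resource> locations) {
        // Build the file body; getResource() will cache the result under `path`
        String contents = "var APP_ENV = 'production';";
        return new GeneratedResource(contents.getBytes(), path);
    }
    @Override
    public boolean isCachedResourceExpired(GeneratedResource cachedResource, String path, List<Resource> locations) {
        // Static contents never change, so a cached copy is always fresh
        return false;
    }
}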
1,442 |
public static class Factory implements MetaData.Custom.Factory<RestoreMetaData> {
/**
* {@inheritDoc}
*/
@Override
public String type() {
return TYPE;
}
/**
* {@inheritDoc}
*/
@Override
public RestoreMetaData readFrom(StreamInput in) throws IOException {
Entry[] entries = new Entry[in.readVInt()];
for (int i = 0; i < entries.length; i++) {
SnapshotId snapshotId = SnapshotId.readSnapshotId(in);
State state = State.fromValue(in.readByte());
int indices = in.readVInt();
ImmutableList.Builder<String> indexBuilder = ImmutableList.builder();
for (int j = 0; j < indices; j++) {
indexBuilder.add(in.readString());
}
ImmutableMap.Builder<ShardId, ShardRestoreStatus> builder = ImmutableMap.<ShardId, ShardRestoreStatus>builder();
int shards = in.readVInt();
for (int j = 0; j < shards; j++) {
ShardId shardId = ShardId.readShardId(in);
ShardRestoreStatus shardState = ShardRestoreStatus.readShardRestoreStatus(in);
builder.put(shardId, shardState);
}
entries[i] = new Entry(snapshotId, state, indexBuilder.build(), builder.build());
}
return new RestoreMetaData(entries);
}
/**
* {@inheritDoc}
*/
@Override
public void writeTo(RestoreMetaData repositories, StreamOutput out) throws IOException {
out.writeVInt(repositories.entries().size());
for (Entry entry : repositories.entries()) {
entry.snapshotId().writeTo(out);
out.writeByte(entry.state().value());
out.writeVInt(entry.indices().size());
for (String index : entry.indices()) {
out.writeString(index);
}
out.writeVInt(entry.shards().size());
for (Map.Entry<ShardId, ShardRestoreStatus> shardEntry : entry.shards().entrySet()) {
shardEntry.getKey().writeTo(out);
shardEntry.getValue().writeTo(out);
}
}
}
/**
* {@inheritDoc}
*/
@Override
public RestoreMetaData fromXContent(XContentParser parser) throws IOException {
throw new UnsupportedOperationException();
}
/**
* {@inheritDoc}
*/
@Override
public void toXContent(RestoreMetaData customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startArray("snapshots");
for (Entry entry : customIndexMetaData.entries()) {
toXContent(entry, builder, params);
}
builder.endArray();
}
/**
* Serializes single restore operation
*
* @param entry restore operation metadata
* @param builder XContent builder
* @param params serialization parameters
* @throws IOException
*/
public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject();
builder.field("snapshot", entry.snapshotId().getSnapshot());
builder.field("repository", entry.snapshotId().getRepository());
builder.field("state", entry.state());
builder.startArray("indices");
{
for (String index : entry.indices()) {
builder.value(index);
}
}
builder.endArray();
builder.startArray("shards");
{
for (Map.Entry<ShardId, ShardRestoreStatus> shardEntry : entry.shards.entrySet()) {
ShardId shardId = shardEntry.getKey();
ShardRestoreStatus status = shardEntry.getValue();
builder.startObject();
{
builder.field("index", shardId.getIndex());
builder.field("shard", shardId.getId());
builder.field("state", status.state());
}
builder.endObject();
}
}
builder.endArray();
builder.endObject();
}
/**
* {@inheritDoc}
*/
@Override
public boolean isPersistent() {
return false;
}
}
| 0true
|
src_main_java_org_elasticsearch_cluster_metadata_RestoreMetaData.java
|
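Read together with the toXContent methods above, a single restore entry serializes roughly as follows; the snapshot, repository, and index names here are invented:
"snapshots" : [ {
    "snapshot" : "snapshot_1",
    "repository" : "my_backup",
    "state" : "STARTED",
    "indices" : [ "index_1" ],
    "shards" : [ { "index" : "index_1", "shard" : 0, "state" : "STARTED" } ]
} ]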
1,187 |
public class Elasticsearch extends Bootstrap {
public static void close(String[] args) {
Bootstrap.close(args);
}
public static void main(String[] args) {
Bootstrap.main(args);
}
}
| 0true
|
src_main_java_org_elasticsearch_bootstrap_Elasticsearch.java
|
712 |
public class CountRequest extends BroadcastOperationRequest<CountRequest> {
private static final XContentType contentType = Requests.CONTENT_TYPE;
public static final float DEFAULT_MIN_SCORE = -1f;
private float minScore = DEFAULT_MIN_SCORE;
@Nullable
protected String routing;
@Nullable
private String preference;
private BytesReference source;
private boolean sourceUnsafe;
private String[] types = Strings.EMPTY_ARRAY;
long nowInMillis;
CountRequest() {
}
/**
* Constructs a new count request against the provided indices. No indices provided means it will
* run against all indices.
*/
public CountRequest(String... indices) {
super(indices);
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = super.validate();
return validationException;
}
@Override
protected void beforeStart() {
if (sourceUnsafe) {
source = source.copyBytesArray();
sourceUnsafe = false;
}
}
/**
* The minimum score of the documents to include in the count.
*/
float minScore() {
return minScore;
}
/**
* The minimum score of the documents to include in the count. Defaults to <tt>-1</tt> which means all
* documents will be included in the count.
*/
public CountRequest minScore(float minScore) {
this.minScore = minScore;
return this;
}
/**
* The source to execute.
*/
BytesReference source() {
return source;
}
/**
* The source to execute.
*/
public CountRequest source(QuerySourceBuilder sourceBuilder) {
this.source = sourceBuilder.buildAsBytes(contentType);
this.sourceUnsafe = false;
return this;
}
/**
* The source to execute in the form of a map.
*/
public CountRequest source(Map querySource) {
try {
XContentBuilder builder = XContentFactory.contentBuilder(contentType);
builder.map(querySource);
return source(builder);
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to generate [" + querySource + "]", e);
}
}
public CountRequest source(XContentBuilder builder) {
this.source = builder.bytes();
this.sourceUnsafe = false;
return this;
}
/**
* The source to execute. It is preferable to use either {@link #source(byte[])}
* or {@link #source(QuerySourceBuilder)}.
*/
public CountRequest source(String querySource) {
this.source = new BytesArray(querySource);
this.sourceUnsafe = false;
return this;
}
/**
* The source to execute.
*/
public CountRequest source(byte[] querySource) {
return source(querySource, 0, querySource.length, false);
}
/**
* The source to execute.
*/
public CountRequest source(byte[] querySource, int offset, int length, boolean unsafe) {
return source(new BytesArray(querySource, offset, length), unsafe);
}
public CountRequest source(BytesReference querySource, boolean unsafe) {
this.source = querySource;
this.sourceUnsafe = unsafe;
return this;
}
/**
* The types of documents the query will run against. Defaults to all types.
*/
public String[] types() {
return this.types;
}
/**
* The types of documents the query will run against. Defaults to all types.
*/
public CountRequest types(String... types) {
this.types = types;
return this;
}
/**
* A comma separated list of routing values to control the shards the search will be executed on.
*/
public String routing() {
return this.routing;
}
/**
* A comma separated list of routing values to control the shards the search will be executed on.
*/
public CountRequest routing(String routing) {
this.routing = routing;
return this;
}
/**
* The routing values to control the shards that the search will be executed on.
*/
public CountRequest routing(String... routings) {
this.routing = Strings.arrayToCommaDelimitedString(routings);
return this;
}
public CountRequest preference(String preference) {
this.preference = preference;
return this;
}
public String preference() {
return this.preference;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
minScore = in.readFloat();
routing = in.readOptionalString();
preference = in.readOptionalString();
sourceUnsafe = false;
source = in.readBytesReference();
types = in.readStringArray();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeFloat(minScore);
out.writeOptionalString(routing);
out.writeOptionalString(preference);
out.writeBytesReference(source);
out.writeStringArray(types);
}
@Override
public String toString() {
String sSource = "_na_";
try {
sSource = XContentHelper.convertToJson(source, false);
} catch (Exception e) {
// ignore
}
return "[" + Arrays.toString(indices) + "]" + Arrays.toString(types) + ", source[" + sSource + "]";
}
}
| 0true
|
src_main_java_org_elasticsearch_action_count_CountRequest.java
|
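A small sketch composing a request with the fluent setters above; the index, type, and routing values are placeholders:
CountRequest request = new CountRequest("orders", "customers") // count across two indices
        .types("order")         // restrict to one mapping type
        .minScore(0.5f)         // exclude low-scoring matches from the count
        .routing("user_42")     // route to the shards owning this key
        .preference("_local");  // prefer locally allocated shard copies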
156 |
public class TransactionInfo implements Comparable<TransactionInfo>
{
private final int identifier;
private final boolean trueForOnePhase;
private final long txId;
private final int master;
private final long checksum;
public TransactionInfo( int identifier, boolean trueForOnePhase, long txId, int master, long checksum )
{
super();
this.identifier = identifier;
this.trueForOnePhase = trueForOnePhase;
this.txId = txId;
this.master = master;
this.checksum = checksum;
}
public int getIdentifier()
{
return identifier;
}
public boolean isOnePhase()
{
return trueForOnePhase;
}
public long getTxId()
{
return txId;
}
public int getMaster()
{
return master;
}
public long getChecksum()
{
return checksum;
}
@Override
public int hashCode()
{
return identifier;
}
@Override
public boolean equals( Object obj )
{
return obj instanceof TransactionInfo && ((TransactionInfo)obj).identifier == identifier;
}
@Override
public int compareTo( TransactionInfo o )
{
return Long.valueOf( txId ).compareTo( Long.valueOf( o.txId ) );
}
}
| 0true
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_TransactionInfo.java
|
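Because compareTo orders by txId alone, a collection sorts into transaction-id order regardless of identifier; a brief sketch with invented values:
List<TransactionInfo> txs = new ArrayList<TransactionInfo>();
txs.add(new TransactionInfo(2, true, 101L, 1, 77L));
txs.add(new TransactionInfo(1, false, 100L, 1, 42L));
Collections.sort(txs); // txId 100 sorts before 101; identifiers play no part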
956 |
public abstract class TransportMasterNodeReadOperationAction<Request extends MasterNodeReadOperationRequest, Response extends ActionResponse> extends TransportMasterNodeOperationAction<Request, Response> {
public static final String FORCE_LOCAL_SETTING = "action.master.force_local";
private Boolean forceLocal;
protected TransportMasterNodeReadOperationAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
super(settings, transportService, clusterService, threadPool);
this.forceLocal = settings.getAsBoolean(FORCE_LOCAL_SETTING, null);
}
protected final boolean localExecute(Request request) {
if (forceLocal != null) {
return forceLocal;
}
return request.local();
}
}
| 0true
|
src_main_java_org_elasticsearch_action_support_master_TransportMasterNodeReadOperationAction.java
|
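The FORCE_LOCAL_SETTING override is driven purely by node settings; a hedged sketch of opting in at node construction time, assuming the 0.90-era ImmutableSettings builder API:
Settings settings = ImmutableSettings.settingsBuilder()
        .put("action.master.force_local", true) // read operations always execute locally
        .build();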
459 |
private static class TreeKeyIterator implements Iterator<OIdentifiable> {
private final boolean autoConvertToRecord;
private OSBTreeMapEntryIterator<OIdentifiable, Boolean> entryIterator;
public TreeKeyIterator(OTreeInternal<OIdentifiable, Boolean> tree, boolean autoConvertToRecord) {
entryIterator = new OSBTreeMapEntryIterator<OIdentifiable, Boolean>(tree);
this.autoConvertToRecord = autoConvertToRecord;
}
@Override
public boolean hasNext() {
return entryIterator.hasNext();
}
@Override
public OIdentifiable next() {
final OIdentifiable identifiable = entryIterator.next().getKey();
if (autoConvertToRecord)
return identifiable.getRecord();
else
return identifiable;
}
@Override
public void remove() {
entryIterator.remove();
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_db_record_ridset_sbtree_OSBTreeRIDSet.java
|