Unnamed: 0 (int64, 0-6.45k) | func (string, lengths 37-161k) | target (class label, 2 classes) | project (string, lengths 33-167)
---|---|---|---|
1,200 |
objectFloatMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<ObjectFloatOpenHashMap>() {
@Override
public ObjectFloatOpenHashMap newInstance(int sizing) {
return new ObjectFloatOpenHashMap(size(sizing));
}
@Override
public void clear(ObjectFloatOpenHashMap value) {
value.clear();
}
});
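// Illustrative note (not part of the original source): Recycler.C is the factory/cleaner
// contract used by the recycler. newInstance(sizing) builds a fresh map sized via
// size(sizing), and clear(value) resets an instance before it is handed out again.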
| 0 (true)
|
src_main_java_org_elasticsearch_cache_recycler_CacheRecycler.java
|
183 |
public class OCollections {
/**
* This method is used to find an item in a collection using the passed-in comparator. Only a 0 value (requested
* object was found) returned by the comparator is taken into account; all other values are ignored.
*
* @param list
* List in which the value should be found.
* @param object
* Object to find.
* @param comparator
* Comparator used for the search.
* @param <T>
* Type of collection elements.
* @return Index of the found item, or <code>-1</code> otherwise.
*/
public static <T> int indexOf(final List<T> list, final T object, final Comparator<T> comparator) {
int i = 0;
for (final T item : list) {
if (comparator.compare(item, object) == 0)
return i;
i++;
}
return -1;
}
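// Illustrative usage sketch (the Person type and its accessor are assumptions, not part
// of this class): find the first person with a matching last name.
//
//   List<Person> people = loadPeople();
//   int idx = OCollections.indexOf(people, probe, new Comparator<Person>() {
//       @Override
//       public int compare(Person a, Person b) {
//           return a.getLastName().compareTo(b.getLastName());
//       }
//   });
//   // idx is the position of the first match, or -1 if no item compared equal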
/**
* Creates a string representation of all objects in the given Iterable, e.g. [value1,value2,value3].
*
* @param iterable
* @return String
*/
public static String toString(Iterable<?> iterable) {
final StringBuilder builder = new StringBuilder();
builder.append('[');
int cnt = 0;
final Iterator<?> ite = iterable.iterator();
while (ite.hasNext()) {
if (cnt != 0) {
builder.append(',');
}
cnt++;
final Object obj = ite.next();
builder.append(obj);
}
builder.append(']');
return builder.toString();
}
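// Illustrative: toString(Arrays.asList(1, 2, 3)) would return "[1,2,3]".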
}
| 0 (true)
|
commons_src_main_java_com_orientechnologies_common_util_OCollections.java
|
1,427 |
clusterService.submitStateUpdateTask("put-mapping [" + request.type() + "]", Priority.HIGH, new AckedClusterStateUpdateTask() {
@Override
public boolean mustAck(DiscoveryNode discoveryNode) {
return true;
}
@Override
public void onAllNodesAcked(@Nullable Throwable t) {
listener.onResponse(new ClusterStateUpdateResponse(true));
}
@Override
public void onAckTimeout() {
listener.onResponse(new ClusterStateUpdateResponse(false));
}
@Override
public TimeValue ackTimeout() {
return request.ackTimeout();
}
@Override
public TimeValue timeout() {
return request.masterNodeTimeout();
}
@Override
public void onFailure(String source, Throwable t) {
listener.onFailure(t);
}
@Override
public ClusterState execute(final ClusterState currentState) throws Exception {
List<String> indicesToClose = Lists.newArrayList();
try {
for (String index : request.indices()) {
if (!currentState.metaData().hasIndex(index)) {
throw new IndexMissingException(new Index(index));
}
}
// pre create indices here and add mappings to them so we can merge the mappings here if needed
for (String index : request.indices()) {
if (indicesService.hasIndex(index)) {
continue;
}
final IndexMetaData indexMetaData = currentState.metaData().index(index);
IndexService indexService = indicesService.createIndex(indexMetaData.index(), indexMetaData.settings(), clusterService.localNode().id());
indicesToClose.add(indexMetaData.index());
// make sure to add custom default mapping if exists
if (indexMetaData.mappings().containsKey(MapperService.DEFAULT_MAPPING)) {
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.mappings().get(MapperService.DEFAULT_MAPPING).source(), false);
}
// only add the current relevant mapping (if exists)
if (indexMetaData.mappings().containsKey(request.type())) {
indexService.mapperService().merge(request.type(), indexMetaData.mappings().get(request.type()).source(), false);
}
}
Map<String, DocumentMapper> newMappers = newHashMap();
Map<String, DocumentMapper> existingMappers = newHashMap();
for (String index : request.indices()) {
IndexService indexService = indicesService.indexService(index);
if (indexService != null) {
// try and parse it (no need to add it here) so we can bail early in case of parsing exception
DocumentMapper newMapper;
DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
// _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
newMapper = indexService.mapperService().parse(request.type(), new CompressedString(request.source()), false);
} else {
newMapper = indexService.mapperService().parse(request.type(), new CompressedString(request.source()));
if (existingMapper != null) {
// first, simulate
DocumentMapper.MergeResult mergeResult = existingMapper.merge(newMapper, mergeFlags().simulate(true));
// if we have conflicts, and we are not supposed to ignore them, throw an exception
if (!request.ignoreConflicts() && mergeResult.hasConflicts()) {
throw new MergeMappingException(mergeResult.conflicts());
}
}
}
newMappers.put(index, newMapper);
if (existingMapper != null) {
existingMappers.put(index, existingMapper);
}
} else {
throw new IndexMissingException(new Index(index));
}
}
String mappingType = request.type();
if (mappingType == null) {
mappingType = newMappers.values().iterator().next().type();
} else if (!mappingType.equals(newMappers.values().iterator().next().type())) {
throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
}
if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
}
final Map<String, MappingMetaData> mappings = newHashMap();
for (Map.Entry<String, DocumentMapper> entry : newMappers.entrySet()) {
String index = entry.getKey();
// do the actual merge here on the master, and update the mapping source
DocumentMapper newMapper = entry.getValue();
IndexService indexService = indicesService.indexService(index);
CompressedString existingSource = null;
if (existingMappers.containsKey(entry.getKey())) {
existingSource = existingMappers.get(entry.getKey()).mappingSource();
}
DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false);
CompressedString updatedSource = mergedMapper.mappingSource();
if (existingSource != null) {
if (existingSource.equals(updatedSource)) {
// same source, no changes, ignore it
} else {
// use the merged mapping source
mappings.put(index, new MappingMetaData(mergedMapper));
if (logger.isDebugEnabled()) {
logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
} else if (logger.isInfoEnabled()) {
logger.info("[{}] update_mapping [{}]", index, mergedMapper.type());
}
}
} else {
mappings.put(index, new MappingMetaData(mergedMapper));
if (logger.isDebugEnabled()) {
logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource);
} else if (logger.isInfoEnabled()) {
logger.info("[{}] create_mapping [{}]", index, newMapper.type());
}
}
}
if (mappings.isEmpty()) {
// no changes, return
return currentState;
}
MetaData.Builder builder = MetaData.builder(currentState.metaData());
for (String indexName : request.indices()) {
IndexMetaData indexMetaData = currentState.metaData().index(indexName);
if (indexMetaData == null) {
throw new IndexMissingException(new Index(indexName));
}
MappingMetaData mappingMd = mappings.get(indexName);
if (mappingMd != null) {
builder.put(IndexMetaData.builder(indexMetaData).putMapping(mappingMd));
}
}
return ClusterState.builder(currentState).metaData(builder).build();
} finally {
for (String index : indicesToClose) {
indicesService.removeIndex(index, "created for mapping processing");
}
}
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
}
});
| 0 (true)
|
src_main_java_org_elasticsearch_cluster_metadata_MetaDataMappingService.java
|
1,853 |
public abstract class AdminAbstractController extends BroadleafAbstractController {
public static final String FILTER_VALUE_SEPARATOR = "|";
public static final String FILTER_VALUE_SEPARATOR_REGEX = "\\|";
// ***********************
// RESOURCE DECLARATIONS *
// ***********************
@Resource(name = "blAdminEntityService")
protected AdminEntityService service;
@Resource(name = "blFormBuilderService")
protected FormBuilderService formService;
@Resource(name = "blAdminNavigationService")
protected AdminNavigationService adminNavigationService;
@Resource(name = "blEntityConfiguration")
protected EntityConfiguration entityConfiguration;
@Resource(name = "blEntityFormValidator")
protected EntityFormValidator entityFormValidator;
@Resource(name="blAdminSecurityRemoteService")
protected SecurityVerifier adminRemoteSecurityService;
@Resource(name = "blMainEntityActionsExtensionManager")
protected MainEntityActionsExtensionManager mainEntityActionsExtensionManager;
// *********************************************************
// UNBOUND CONTROLLER METHODS (USED BY DIFFERENT SECTIONS) *
// *********************************************************
/**
* Returns a partial representing a dynamic form. An example of this is the dynamic fields that render
* on structured content, which are determined by the currently selected structured content type. This
* method is typically only invoked through Javascript and used to replace the current dynamic form with
* the one for the newly selected type.
*
* @param request
* @param response
* @param model
* @param pathVars
* @param info
* @return the return view path
* @throws Exception
*/
protected String getDynamicForm(HttpServletRequest request, HttpServletResponse response, Model model,
Map<String, String> pathVars,
DynamicEntityFormInfo info) throws Exception {
String sectionKey = getSectionKey(pathVars);
EntityForm blankFormContainer = new EntityForm();
EntityForm dynamicForm = getBlankDynamicFieldTemplateForm(info);
blankFormContainer.putDynamicForm(info.getPropertyName(), dynamicForm);
model.addAttribute("entityForm", blankFormContainer);
model.addAttribute("dynamicPropertyName", info.getPropertyName());
String reqUrl = request.getRequestURL().toString();
reqUrl = reqUrl.substring(0, reqUrl.indexOf("/dynamicForm"));
model.addAttribute("currentUrl", reqUrl);
setModelAttributes(model, sectionKey);
return "views/dynamicFormPartial";
}
// **********************************
// HELPER METHODS FOR BUILDING DTOS *
// **********************************
/**
* Convenience method for obtaining a ListGrid DTO object for a collection. Note that if no <b>criteria</b> are
* available, the criteria should be null (or empty).
*
* @param mainMetadata class metadata for the root entity that this <b>collectionProperty</b> relates to
* @param entity the root entity that holds the foreign key for <b>collectionProperty</b>
* @param collectionProperty property that this collection should be based on from the root entity
* @param requestParams the bound request parameters containing the filter criteria
* @param sectionKey the current main section key
* @return the list grid
* @throws ServiceException
*/
protected ListGrid getCollectionListGrid(ClassMetadata mainMetadata, Entity entity, Property collectionProperty,
MultiValueMap<String, String> requestParams, String sectionKey)
throws ServiceException {
DynamicResultSet drs = service.getRecordsForCollection(mainMetadata, entity, collectionProperty,
getCriteria(requestParams), getStartIndex(requestParams), getMaxIndex(requestParams));
String idProperty = service.getIdProperty(mainMetadata);
ListGrid listGrid = formService.buildCollectionListGrid(entity.findProperty(idProperty).getValue(), drs,
collectionProperty, sectionKey);
listGrid.setListGridType(ListGrid.Type.INLINE);
return listGrid;
}
/**
* @see #getBlankDynamicFieldTemplateForm(DynamicEntityFormInfo, EntityForm)
* @param info
* @throws ServiceException
*/
protected EntityForm getBlankDynamicFieldTemplateForm(DynamicEntityFormInfo info) throws ServiceException {
return getBlankDynamicFieldTemplateForm(info, null);
}
/**
* Convenience method for obtaining a blank dynamic field template form. For example, if the main entity form should
* render different fields depending on the value of a specific field in that main form itself, the "dynamic" fields
* are generated by this method. Because this is invoked when a new value is chosen, the form generated by this method
* will never have values set.
*
* @param info
* @return the entity form
* @throws ServiceException
*/
protected EntityForm getBlankDynamicFieldTemplateForm(DynamicEntityFormInfo info, EntityForm dynamicFormOverride)
throws ServiceException {
// We need to inspect with the second custom criteria set to the id of
// the desired structured content type
PersistencePackageRequest ppr = PersistencePackageRequest.standard()
.withCeilingEntityClassname(info.getCeilingClassName())
.withCustomCriteria(new String[] { info.getCriteriaName(), null, info.getPropertyName(), info.getPropertyValue() });
ClassMetadata cmd = service.getClassMetadata(ppr);
EntityForm dynamicForm = formService.createEntityForm(cmd);
dynamicForm.clearFieldsMap();
if (dynamicFormOverride != null) {
dynamicFormOverride.clearFieldsMap();
Map<String, Field> fieldOverrides = dynamicFormOverride.getFields();
for (Entry<String, Field> override : fieldOverrides.entrySet()) {
if (dynamicForm.getFields().containsKey(override.getKey())) {
dynamicForm.getFields().get(override.getKey()).setValue(override.getValue().getValue());
}
}
}
// Set the specialized name for these fields - we need to handle them separately
dynamicForm.clearFieldsMap();
for (Tab tab : dynamicForm.getTabs()) {
for (FieldGroup group : tab.getFieldGroups()) {
for (Field field : group.getFields()) {
field.setName(info.getPropertyName() + DynamicEntityFormInfo.FIELD_SEPARATOR + field.getName());
}
}
}
return dynamicForm;
}
/**
* Convenience method for obtaining a dynamic field template form for a particular entity. This method differs from
* {@link #getBlankDynamicFieldTemplateForm(DynamicEntityFormInfo)} in that it will fill out the current values for
* the fields in this dynamic form from the database. This method is invoked when the initial view of a page containing
* a dynamic form is triggered.
*
* Optionally, you can pass in a pre-existing dynamic form to this method that already has updated values. Example usage
* would be for after validation has failed and you do not want to lookup old values from the database again.
*
* @param info
* @param entityId
* @param dynamicForm optional dynamic form that already has values to fill out
* @return the entity form
* @throws ServiceException
*/
protected EntityForm getDynamicFieldTemplateForm(DynamicEntityFormInfo info, String entityId, EntityForm dynamicFormOverride)
throws ServiceException {
PersistencePackageRequest ppr = PersistencePackageRequest.standard()
.withCeilingEntityClassname(info.getCeilingClassName())
.withCustomCriteria(new String[] { info.getCriteriaName(), entityId, info.getPropertyName(), info.getPropertyValue() });
ClassMetadata cmd = service.getClassMetadata(ppr);
Entity entity = service.getRecord(ppr, entityId, cmd, true);
List<Field> fieldsToMove = new ArrayList<Field>();
// override the results of the entity with the dynamic form passed in
if (dynamicFormOverride != null) {
dynamicFormOverride.clearFieldsMap();
Map<String, Field> fieldOverrides = dynamicFormOverride.getFields();
for (Entry<String, Field> override : fieldOverrides.entrySet()) {
if (entity.getPMap().containsKey(override.getKey())) {
entity.getPMap().get(override.getKey()).setValue(override.getValue().getValue());
} else {
fieldsToMove.add(override.getValue());
}
}
}
// Assemble the dynamic form for structured content type
EntityForm dynamicForm = formService.createEntityForm(cmd, entity);
for (Field field : fieldsToMove) {
FieldMetadata fmd = cmd.getPMap().get(field.getName()).getMetadata();
if (fmd instanceof BasicFieldMetadata) {
BasicFieldMetadata bfmd = (BasicFieldMetadata) fmd;
field.setFieldType(bfmd.getFieldType().toString());
field.setFriendlyName(bfmd.getFriendlyName());
field.setRequired(bfmd.getRequired());
}
dynamicForm.addField(field);
}
// Set the specialized name for these fields - we need to handle them separately
dynamicForm.clearFieldsMap();
for (Tab tab : dynamicForm.getTabs()) {
for (FieldGroup group : tab.getFieldGroups()) {
for (Field field : group.getFields()) {
field.setName(info.getPropertyName() + DynamicEntityFormInfo.FIELD_SEPARATOR + field.getName());
}
}
}
return dynamicForm;
}
/**
* This method will scan the entityForm for all dynamic form fields and pull them out
* as appropriate.
*
* @param entityForm
*/
protected void extractDynamicFormFields(EntityForm entityForm) {
Map<String, Field> dynamicFields = new HashMap<String, Field>();
// Find all of the dynamic form fields
for (Entry<String, Field> entry : entityForm.getFields().entrySet()) {
if (entry.getKey().contains(DynamicEntityFormInfo.FIELD_SEPARATOR)) {
dynamicFields.put(entry.getKey(), entry.getValue());
}
}
// Remove the dynamic form fields from the main entity - they are persisted separately
for (Entry<String, Field> entry : dynamicFields.entrySet()) {
entityForm.removeField(entry.getKey());
}
// Create the entity form for the dynamic form, as it needs to be persisted separately
for (Entry<String, Field> entry : dynamicFields.entrySet()) {
String[] fieldName = entry.getKey().split("\\" + DynamicEntityFormInfo.FIELD_SEPARATOR);
DynamicEntityFormInfo info = entityForm.getDynamicFormInfo(fieldName[0]);
EntityForm dynamicForm = entityForm.getDynamicForm(fieldName[0]);
if (dynamicForm == null) {
dynamicForm = new EntityForm();
dynamicForm.setCeilingEntityClassname(info.getCeilingClassName());
entityForm.putDynamicForm(fieldName[0], dynamicForm);
}
entry.getValue().setName(fieldName[1]);
dynamicForm.addField(entry.getValue());
}
}
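// Illustrative sketch (the separator value is an assumption): if FIELD_SEPARATOR were
// "|", a submitted field named "contentType|title" would be removed from the main form
// and re-added as a field named "title" on the dynamic form registered under
// "contentType", creating that dynamic form on first use.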
// ***********************************************
// HELPER METHODS FOR SECTION-SPECIFIC OVERRIDES *
// ***********************************************
/**
* This method is used to determine the current section key. For this default implementation, the sectionKey is pulled
* from the pathVariable, {sectionKey}, as defined by the request mapping on this controller. To support controller
* inheritance and allow more specialized controllers to delegate some methods to this basic controller, overridden
* implementations of this method could return a hardcoded value instead of reading the map
*
* @param pathVars - the map of all currently bound path variables for this request
* @return the sectionKey for this request
*/
protected String getSectionKey(Map<String, String> pathVars) {
return pathVars.get("sectionKey");
}
/**
* <p>Helper method to return an array of {@link org.broadleafcommerce.openadmin.dto.FilterAndSortCriteria} based on a map of propertyName -> list of criteria
* value. This will also grab the sorts off of the request parameters, if any.</p>
*
* <p>The multi-valued map allows users to specify multiple criteria values per property, as well as multiple sort
* properties and sort directions. For multiple sort properties and sort directions, these would usually come in as
* request parameters like:
* <br />
* <br />
* ....?sortProperty=defaultSku.name&sortProperty=manufacturer&sortDirection=ASCENDING&sortDirection=DESCENDING
* <br />
* <br />
* This would attach criteria such that defaultSku.name was sorted ascending, and manufacturer was sorted descending</p>
*
* @param requestParams usually a {@link MultiValueMap} that has been bound by a controller to receive all of the
* request parameters that are not explicitly named
* @return the final array of {@link org.broadleafcommerce.openadmin.dto.FilterAndSortCriteria} to pass to the fetch
*
* @see {@link #getSortPropertyNames(Map)}
* @see {@link #getSortDirections(Map)}
*/
protected FilterAndSortCriteria[] getCriteria(Map<String, List<String>> requestParams) {
if (requestParams == null || requestParams.isEmpty()) {
return null;
}
List<FilterAndSortCriteria> result = new ArrayList<FilterAndSortCriteria>();
for (Entry<String, List<String>> entry : requestParams.entrySet()) {
if (!entry.getKey().equals(FilterAndSortCriteria.SORT_PROPERTY_PARAMETER) &&
!entry.getKey().equals(FilterAndSortCriteria.SORT_DIRECTION_PARAMETER)) {
List<String> values = entry.getValue();
List<String> collapsedValues = new ArrayList<String>();
for (String value : values) {
if (value.contains(FILTER_VALUE_SEPARATOR)) {
String[] vs = value.split(FILTER_VALUE_SEPARATOR_REGEX);
for (String v : vs) {
collapsedValues.add(v);
}
} else {
collapsedValues.add(value);
}
}
FilterAndSortCriteria fasCriteria = new FilterAndSortCriteria(entry.getKey(), collapsedValues);
result.add(fasCriteria);
}
}
List<String> sortProperties = getSortPropertyNames(requestParams);
List<String> sortDirections = getSortDirections(requestParams);
if (CollectionUtils.isNotEmpty(sortProperties)) {
//set up a map to determine if there is already some criteria set for the sort property
Map<String, FilterAndSortCriteria> fasMap = BLCMapUtils.keyedMap(result, new TypedClosure<String, FilterAndSortCriteria>() {
@Override
public String getKey(FilterAndSortCriteria value) {
return value.getPropertyId();
}
});
for (int i = 0; i < sortProperties.size(); i++) {
boolean sortAscending = SortDirection.ASCENDING.toString().equals(sortDirections.get(i));
FilterAndSortCriteria propertyCriteria = fasMap.get(sortProperties.get(i));
//If there is already criteria for this property, attach the sort to that. Otherwise, create some new
//FilterAndSortCriteria for the sort
if (propertyCriteria != null) {
propertyCriteria.setSortAscending(sortAscending);
} else {
FilterAndSortCriteria fasc = new FilterAndSortCriteria(sortProperties.get(i));
fasc.setSortAscending(sortAscending);
result.add(fasc);
}
}
}
return result.toArray(new FilterAndSortCriteria[result.size()]);
}
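// Illustrative sketch: for bound request parameters such as
//   name=foo|bar&sortProperty=name&sortDirection=ASCENDING
// this method would produce a FilterAndSortCriteria for "name" with the collapsed
// filter values ["foo", "bar"] and an ascending sort attached to that same instance,
// since the sort property matches an existing criteria entry.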
/**
* Obtains the list of sort directions from the bound request parameters. Note that these should appear in the same
* relative order as {@link #getSortPropertyNames(Map)}
*
* @param requestParams
* @return
*/
protected List<String> getSortDirections(Map<String, List<String>> requestParams) {
List<String> sortTypes = requestParams.get(FilterAndSortCriteria.SORT_DIRECTION_PARAMETER);
return sortTypes;
}
/**
* Obtains the list of property names to sort on from the bound request parameters. Note that these should appear in the
* same relative order as {@link #getSortDirections(Map)}.
*
* @param requestParams
* @return
*/
protected List<String> getSortPropertyNames(Map<String, List<String>> requestParams) {
return requestParams.get(FilterAndSortCriteria.SORT_PROPERTY_PARAMETER);
}
/**
* Gets the fully qualified ceiling entity classname for this section. If this section is not explicitly defined in
* the database, will return the value passed into this function. For example, if there is a mapping from "/myentity" to
* "com.mycompany.myentity", both "http://localhost/myentity" and "http://localhost/com.mycompany.myentity" are valid
* request paths.
*
* @param sectionKey
* @return the className for this sectionKey if found in the database or the sectionKey if not
*/
protected String getClassNameForSection(String sectionKey) {
AdminSection section = adminNavigationService.findAdminSectionByURI("/" + sectionKey);
return (section == null) ? sectionKey : section.getCeilingEntity();
}
/**
* If there are certain types of entities that should not be allowed to be created, an override of this method would be
* able to specify that. It could also add additional types if desired.
*
* @param classTree
* @return a List<ClassTree> representing all potentially available entity types to create
*/
protected List<ClassTree> getAddEntityTypes(ClassTree classTree) {
return classTree.getCollapsedClassTrees();
}
/**
* This method is called when attempting to add new entities that have a polymorphic tree.
*
* If this method returns null, there is no default type set for this particular entity type, and the user will be
* presented with a selection of possible types to utilize.
*
* If it returns a non-null value, the returned fullyQualifiedClassname will be used and will bypass the selection step.
*
* @return null if there is no default type, otherwise the default type
*/
protected String getDefaultEntityType() {
return null;
}
/**
* This method is invoked for every request for this controller. By default, we do not want to specify a custom
* criteria, but specialized controllers may want to.
*
* @return the custom criteria for this section for all requests, if any
*/
protected String[] getSectionCustomCriteria() {
return null;
}
/**
* A hook method that is invoked every time the {@link #getSectionPersistencePackageRequest(String)} method is invoked.
* This allows specialized controllers to hook into every request and manipulate the persistence package request as
* desired.
*
* @param ppr
*/
protected void attachSectionSpecificInfo(PersistencePackageRequest ppr) {
}
/**
* Obtains the requested start index parameter
*
* @param requestParams
* @return
*/
protected Integer getStartIndex(Map<String, List<String>> requestParams) {
if (requestParams == null || requestParams.isEmpty()) {
return null;
}
List<String> startIndex = requestParams.get(FilterAndSortCriteria.START_INDEX_PARAMETER);
return CollectionUtils.isEmpty(startIndex) ? null : Integer.parseInt(startIndex.get(0));
}
/**
* Obtains the requested max index parameter
*
* @param requestParams
* @return
*/
protected Integer getMaxIndex(Map<String, List<String>> requestParams) {
if (requestParams == null || requestParams.isEmpty()) {
return null;
}
List<String> maxIndex = requestParams.get(FilterAndSortCriteria.MAX_INDEX_PARAMETER);
return CollectionUtils.isEmpty(maxIndex) ? null : Integer.parseInt(maxIndex.get(0));
}
// ************************
// GENERIC HELPER METHODS *
// ************************
/**
* Attributes to add to the model on every request
*
* @param model
* @param sectionKey
*/
protected void setModelAttributes(Model model, String sectionKey) {
AdminSection section = adminNavigationService.findAdminSectionByURI("/" + sectionKey);
if (section != null) {
model.addAttribute("sectionKey", sectionKey);
model.addAttribute(AdminNavigationHandlerMapping.CURRENT_ADMIN_MODULE_ATTRIBUTE_NAME, section.getModule());
model.addAttribute(AdminNavigationHandlerMapping.CURRENT_ADMIN_SECTION_ATTRIBUTE_NAME, section);
}
}
/**
* Returns a PersistencePackageRequest for the given sectionClassName. Will also invoke the
* {@link #getSectionCustomCriteria()} and {@link #attachSectionSpecificInfo(PersistencePackageRequest)} to allow
* specialized controllers to manipulate the request for every action in this controller.
*
* @param sectionClassName
* @return the PersistencePackageRequest
*/
protected PersistencePackageRequest getSectionPersistencePackageRequest(String sectionClassName) {
PersistencePackageRequest ppr = PersistencePackageRequest.standard()
.withCeilingEntityClassname(sectionClassName)
.withCustomCriteria(getSectionCustomCriteria());
attachSectionSpecificInfo(ppr);
return ppr;
}
/**
* Returns the result of a call to {@link #getSectionPersistencePackageRequest(String)} with the additional filter
* and sort criteria attached.
*
* @param sectionClassName
* @param filterAndSortCriteria
* @return the PersistencePackageRequest
*/
protected PersistencePackageRequest getSectionPersistencePackageRequest(String sectionClassName,
MultiValueMap<String, String> requestParams) {
FilterAndSortCriteria[] fascs = getCriteria(requestParams);
return getSectionPersistencePackageRequest(sectionClassName)
.withFilterAndSortCriteria(fascs)
.withStartIndex(getStartIndex(requestParams))
.withMaxIndex(getMaxIndex(requestParams));
}
}
| 1 (no label)
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_web_controller_AdminAbstractController.java
|
123 |
public interface DefaultSchemaMaker {
/**
* Creates a new edge label with default settings against the provided {@link EdgeLabelMaker}.
*
* @param factory EdgeLabelMaker through which the edge label is created
* @return A new edge label
* @throws IllegalArgumentException if the name is already in use or if other configured values are invalid.
*/
public EdgeLabel makeEdgeLabel(EdgeLabelMaker factory);
/**
* Creates a new property key with default settings against the provided {@link PropertyKeyMaker}.
*
* @param factory PropertyKeyMaker through which the property key is created
* @return A new property key
* @throws IllegalArgumentException if the name is already in use or if other configured values are invalid.
*/
public PropertyKey makePropertyKey(PropertyKeyMaker factory);
/**
* Creates a new vertex label with the default settings against the provided {@link VertexLabelMaker}.
*
* @param factory VertexLabelMaker through which the vertex label is created
* @return A new vertex label
* @throws IllegalArgumentException if the name is already in use or if other configured values are invalid.
*/
public VertexLabel makeVertexLabel(VertexLabelMaker factory);
/**
* Whether to ignore undefined types occurring in a query.
* <p/>
* If this method returns true, then undefined types referred to in a {@link com.thinkaurelius.titan.core.TitanVertexQuery} will be silently
* ignored and an empty result set will be returned. If this method returns false, then usage of undefined types
* in queries results in an {@link IllegalArgumentException}.
*/
public boolean ignoreUndefinedQueryTypes();
}
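// A minimal implementing sketch (an assumption for illustration, not part of Titan's
// sources): reject all implicit schema creation and treat undefined query types as errors.
//
//   public class StrictSchemaMaker implements DefaultSchemaMaker {
//       @Override
//       public EdgeLabel makeEdgeLabel(EdgeLabelMaker factory) {
//           throw new IllegalArgumentException("Edge label must be defined explicitly");
//       }
//       @Override
//       public PropertyKey makePropertyKey(PropertyKeyMaker factory) {
//           throw new IllegalArgumentException("Property key must be defined explicitly");
//       }
//       @Override
//       public VertexLabel makeVertexLabel(VertexLabelMaker factory) {
//           throw new IllegalArgumentException("Vertex label must be defined explicitly");
//       }
//       @Override
//       public boolean ignoreUndefinedQueryTypes() {
//           return false; // undefined types in queries raise IllegalArgumentException
//       }
//   }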
| 0 (true)
|
titan-core_src_main_java_com_thinkaurelius_titan_core_schema_DefaultSchemaMaker.java
|
479 |
register(serviceName, new ClientProxyFactory() {
@Override
public ClientProxy create(String id) {
String instanceName = client.getName();
return instantiateClientProxy(proxyType, instanceName, serviceName, id);
}
});
| 1 (no label)
|
hazelcast-client_src_main_java_com_hazelcast_client_spi_ProxyManager.java
|
891 |
searchService.sendExecuteFetch(node, fetchSearchRequest, new SearchServiceListener<FetchSearchResult>() {
@Override
public void onResult(FetchSearchResult result) {
result.shardTarget(querySearchResult.shardTarget());
fetchResults.set(entry.index, result);
if (counter.decrementAndGet() == 0) {
finishHim();
}
}
@Override
public void onFailure(Throwable t) {
if (logger.isDebugEnabled()) {
logger.debug("Failed to execute fetch phase", t);
}
successfulOps.decrementAndGet();
if (counter.decrementAndGet() == 0) {
finishHim();
}
}
});
| 0 (true)
|
src_main_java_org_elasticsearch_action_search_type_TransportSearchScrollQueryThenFetchAction.java
|
859 |
private class AsyncAction extends BaseAsyncAction<DfsSearchResult> {
private final AtomicArray<QueryFetchSearchResult> queryFetchResults;
private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
super(request, listener);
queryFetchResults = new AtomicArray<QueryFetchSearchResult>(firstResults.length());
}
@Override
protected String firstPhaseName() {
return "dfs";
}
@Override
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchRequest request, SearchServiceListener<DfsSearchResult> listener) {
searchService.sendExecuteDfs(node, request, listener);
}
@Override
protected void moveToSecondPhase() {
final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults);
final AtomicInteger counter = new AtomicInteger(firstResults.asList().size());
int localOperations = 0;
for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
DfsSearchResult dfsResult = entry.value;
DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
if (node.id().equals(nodes.localNodeId())) {
localOperations++;
} else {
QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest);
}
}
if (localOperations > 0) {
if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) {
threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
DfsSearchResult dfsResult = entry.value;
DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
if (node.id().equals(nodes.localNodeId())) {
QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest);
}
}
}
});
} else {
boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD;
for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
final DfsSearchResult dfsResult = entry.value;
final DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
if (node.id().equals(nodes.localNodeId())) {
final QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
try {
if (localAsync) {
threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest);
}
});
} else {
executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest);
}
} catch (Throwable t) {
onSecondPhaseFailure(t, querySearchRequest, entry.index, dfsResult, counter);
}
}
}
}
}
}
void executeSecondPhase(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter, DiscoveryNode node, final QuerySearchRequest querySearchRequest) {
searchService.sendExecuteFetch(node, querySearchRequest, new SearchServiceListener<QueryFetchSearchResult>() {
@Override
public void onResult(QueryFetchSearchResult result) {
result.shardTarget(dfsResult.shardTarget());
queryFetchResults.set(shardIndex, result);
if (counter.decrementAndGet() == 0) {
finishHim();
}
}
@Override
public void onFailure(Throwable t) {
onSecondPhaseFailure(t, querySearchRequest, shardIndex, dfsResult, counter);
}
});
}
void onSecondPhaseFailure(Throwable t, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult, AtomicInteger counter) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute query phase", t, querySearchRequest.id());
}
this.addShardFailure(shardIndex, dfsResult.shardTarget(), t);
successfulOps.decrementAndGet();
if (counter.decrementAndGet() == 0) {
finishHim();
}
}
void finishHim() {
try {
innerFinishHim();
} catch (Throwable e) {
ReduceSearchPhaseException failure = new ReduceSearchPhaseException("query_fetch", "", e, buildShardFailures());
if (logger.isDebugEnabled()) {
logger.debug("failed to reduce search", failure);
}
listener.onFailure(failure);
} finally {
//
}
}
void innerFinishHim() throws Exception {
sortedShardList = searchPhaseController.sortDocs(queryFetchResults);
final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults, queryFetchResults);
String scrollId = null;
if (request.scroll() != null) {
scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
}
listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures()));
}
}
| 0 (true)
|
src_main_java_org_elasticsearch_action_search_type_TransportSearchDfsQueryAndFetchAction.java
|
358 |
future.andThen(new ExecutionCallback<Integer>() {
@Override
public void onResponse(Integer response) {
result[0] = response.intValue();
semaphore.release();
}
@Override
public void onFailure(Throwable t) {
semaphore.release();
}
});
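// Illustrative note (the acquiring side is outside this snippet): the test thread is
// assumed to block on semaphore.acquire(), so releasing in both callbacks wakes it
// whether the computation succeeds or fails; result[0] is written only on success.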
| 0 (true)
|
hazelcast-client_src_test_java_com_hazelcast_client_mapreduce_ClientMapReduceTest.java
|
658 |
public class OMemoryHashMapIndexEngine<V> implements OIndexEngine<V> {
private final ConcurrentMap<Object, V> concurrentHashMap = new ConcurrentHashMap<Object, V>();
private volatile ORID identity;
@Override
public void init() {
}
@Override
public void flush() {
}
@Override
public void create(String indexName, OIndexDefinition indexDefinition, String clusterIndexName,
OStreamSerializer valueSerializer, boolean isAutomatic) {
final ODatabaseRecord database = getDatabase();
final ORecordBytes identityRecord = new ORecordBytes();
database.save(identityRecord, clusterIndexName);
identity = identityRecord.getIdentity();
}
@Override
public void delete() {
}
@Override
public void deleteWithoutLoad(String indexName) {
}
@Override
public void load(ORID indexRid, String indexName, OIndexDefinition indexDefinition, boolean isAutomatic) {
}
@Override
public boolean contains(Object key) {
return concurrentHashMap.containsKey(key);
}
@Override
public boolean remove(Object key) {
return concurrentHashMap.remove(key) != null;
}
@Override
public ORID getIdentity() {
return identity;
}
@Override
public void clear() {
concurrentHashMap.clear();
}
@Override
public Iterator<Map.Entry<Object, V>> iterator() {
return concurrentHashMap.entrySet().iterator();
}
@Override
public Iterator<Map.Entry<Object, V>> inverseIterator() {
throw new UnsupportedOperationException("inverseIterator");
}
@Override
public Iterator<V> valuesIterator() {
throw new UnsupportedOperationException("valuesIterator");
}
@Override
public Iterator<V> inverseValuesIterator() {
throw new UnsupportedOperationException("inverseValuesIterator");
}
@Override
public Iterable<Object> keys() {
return concurrentHashMap.keySet();
}
@Override
public void unload() {
}
@Override
public void startTransaction() {
}
@Override
public void stopTransaction() {
}
@Override
public void afterTxRollback() {
}
@Override
public void afterTxCommit() {
}
@Override
public void closeDb() {
}
@Override
public void close() {
}
@Override
public void beforeTxBegin() {
}
@Override
public V get(Object key) {
return concurrentHashMap.get(key);
}
@Override
public void put(Object key, V value) {
concurrentHashMap.put(key, value);
}
@Override
public void getValuesBetween(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive,
ValuesTransformer<V> transformer, ValuesResultListener valuesResultListener) {
throw new UnsupportedOperationException("getValuesBetween");
}
@Override
public void getValuesMajor(Object fromKey, boolean isInclusive, ValuesTransformer<V> transformer,
ValuesResultListener valuesResultListener) {
throw new UnsupportedOperationException("getValuesMajor");
}
@Override
public void getValuesMinor(Object toKey, boolean isInclusive, ValuesTransformer<V> transformer,
ValuesResultListener valuesResultListener) {
throw new UnsupportedOperationException("getValuesMinor");
}
@Override
public void getEntriesMajor(Object fromKey, boolean isInclusive, ValuesTransformer<V> transformer,
EntriesResultListener entriesResultListener) {
throw new UnsupportedOperationException("getEntriesMajor");
}
@Override
public void getEntriesMinor(Object toKey, boolean isInclusive, ValuesTransformer<V> transformer,
EntriesResultListener entriesResultListener) {
throw new UnsupportedOperationException("getEntriesMinor");
}
@Override
public void getEntriesBetween(Object iRangeFrom, Object iRangeTo, boolean iInclusive, ValuesTransformer<V> transformer,
EntriesResultListener entriesResultListener) {
throw new UnsupportedOperationException("getEntriesBetween");
}
@Override
public long size(ValuesTransformer<V> transformer) {
if (transformer == null)
return concurrentHashMap.size();
else {
long counter = 0;
for (V value : concurrentHashMap.values()) {
counter += transformer.transformFromValue(value).size();
}
return counter;
}
}
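// Illustrative: with a transformer that maps each stored value to a collection of
// records, size(transformer) counts individual records rather than distinct keys.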
@Override
public long count(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive, int maxValuesToFetch,
ValuesTransformer<V> transformer) {
throw new UnsupportedOperationException("count");
}
@Override
public boolean hasRangeQuerySupport() {
return false;
}
private ODatabaseRecord getDatabase() {
return ODatabaseRecordThreadLocal.INSTANCE.get();
}
}
| 1 (no label)
|
core_src_main_java_com_orientechnologies_orient_core_index_engine_OMemoryHashMapIndexEngine.java
|
635 |
public class IndicesStatusRequestBuilder extends BroadcastOperationRequestBuilder<IndicesStatusRequest, IndicesStatusResponse, IndicesStatusRequestBuilder> {
public IndicesStatusRequestBuilder(IndicesAdminClient indicesClient) {
super((InternalIndicesAdminClient) indicesClient, new IndicesStatusRequest());
}
/**
* Should the status include recovery information. Defaults to <tt>false</tt>.
*/
public IndicesStatusRequestBuilder setRecovery(boolean recovery) {
request.recovery(recovery);
return this;
}
/**
* Should the status include snapshot information. Defaults to <tt>false</tt>.
*/
public IndicesStatusRequestBuilder setSnapshot(boolean snapshot) {
request.snapshot(snapshot);
return this;
}
@Override
protected void doExecute(ActionListener<IndicesStatusResponse> listener) {
((IndicesAdminClient) client).status(request, listener);
}
}
| 0 (true)
|
src_main_java_org_elasticsearch_action_admin_indices_status_IndicesStatusRequestBuilder.java
|
1,255 |
public static interface NodeCallback<T> {
T doWithNode(DiscoveryNode node) throws ElasticsearchException;
}
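// Illustrative usage sketch (the response type and helper are assumptions, not part of
// this file): the service runs the callback against one connected node at a time,
// moving on to another node if it throws.
//
//   NodeCallback<NodesInfoResponse> callback = new NodeCallback<NodesInfoResponse>() {
//       @Override
//       public NodesInfoResponse doWithNode(DiscoveryNode node) throws ElasticsearchException {
//           return requestInfoFrom(node); // hypothetical helper
//       }
//   };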
| 0 (true)
|
src_main_java_org_elasticsearch_client_transport_TransportClientNodesService.java
|
2,825 |
private class MigrateTask implements Runnable {
final MigrationInfo migrationInfo;
final BackupMigrationTask backupTask;
MigrateTask(MigrationInfo migrationInfo, BackupMigrationTask backupTask) {
this.migrationInfo = migrationInfo;
this.backupTask = backupTask;
final MemberImpl masterMember = getMasterMember();
if (masterMember != null) {
migrationInfo.setMasterUuid(masterMember.getUuid());
migrationInfo.setMaster(masterMember.getAddress());
}
}
@Override
public void run() {
if (!node.isActive() || !node.isMaster()) {
return;
}
final MigrationRequestOperation migrationRequestOp = new MigrationRequestOperation(migrationInfo);
try {
MigrationInfo info = migrationInfo;
InternalPartitionImpl partition = partitions[info.getPartitionId()];
Address owner = partition.getOwnerOrNull();
if (owner == null) {
logger.severe("ERROR: partition owner is not set! -> "
+ partition + " -VS- " + info);
return;
}
if (!owner.equals(info.getSource())) {
logger.severe("ERROR: partition owner is not the source of migration! -> "
+ partition + " -VS- " + info + " found owner: " + owner);
return;
}
sendMigrationEvent(migrationInfo, MigrationStatus.STARTED);
Boolean result = Boolean.FALSE;
MemberImpl fromMember = getMember(migrationInfo.getSource());
if (logger.isFinestEnabled()) {
logger.finest("Started Migration : " + migrationInfo);
}
systemLogService.logPartition("Started Migration : " + migrationInfo);
if (fromMember == null) {
// Partition is lost! Assign new owner and exit.
logger.warning("Partition is lost! Assign new owner and exit...");
result = Boolean.TRUE;
} else {
Future future = nodeEngine.getOperationService().createInvocationBuilder(SERVICE_NAME,
migrationRequestOp, migrationInfo.getSource()).setTryPauseMillis(1000).invoke();
try {
Object response = future.get(partitionMigrationTimeout, TimeUnit.SECONDS);
result = (Boolean) nodeEngine.toObject(response);
} catch (Throwable e) {
final Level level = node.isActive() && migrationInfo.isValid() ? Level.WARNING : Level.FINEST;
logger.log(level, "Failed migrating from " + fromMember, e);
}
}
if (Boolean.TRUE.equals(result)) {
String message = "Finished Migration: " + migrationInfo;
if (logger.isFinestEnabled()) {
logger.finest(message);
}
systemLogService.logPartition(message);
processMigrationResult();
} else {
final Level level = migrationInfo.isValid() ? Level.WARNING : Level.FINEST;
logger.log(level, "Migration failed: " + migrationInfo);
migrationTaskFailed();
}
} catch (Throwable t) {
final Level level = migrationInfo.isValid() ? Level.WARNING : Level.FINEST;
logger.log(level, "Error [" + t.getClass() + ": " + t.getMessage() + "] while executing " + migrationRequestOp);
logger.finest(t);
migrationTaskFailed();
}
}
private void migrationTaskFailed() {
systemLogService.logPartition("Migration failed: " + migrationInfo);
lock.lock();
try {
addCompletedMigration(migrationInfo);
finalizeActiveMigration(migrationInfo);
syncPartitionRuntimeState();
} finally {
lock.unlock();
}
sendMigrationEvent(migrationInfo, MigrationStatus.FAILED);
// migration failed, clear current pending migration tasks and re-execute RepartitioningTask
migrationQueue.clear();
migrationQueue.add(new RepartitioningTask());
}
private void processMigrationResult() {
lock.lock();
try {
final int partitionId = migrationInfo.getPartitionId();
Address newOwner = migrationInfo.getDestination();
InternalPartitionImpl partition = partitions[partitionId];
partition.setOwner(newOwner);
addCompletedMigration(migrationInfo);
finalizeActiveMigration(migrationInfo);
if (backupTask != null) {
backupTask.run();
}
syncPartitionRuntimeState();
} finally {
lock.unlock();
}
sendMigrationEvent(migrationInfo, MigrationStatus.COMPLETED);
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("MigrateTask{");
sb.append("migrationInfo=").append(migrationInfo);
sb.append('}');
return sb.toString();
}
}
| 1 (no label)
|
hazelcast_src_main_java_com_hazelcast_partition_impl_InternalPartitionServiceImpl.java
|
592 |
public class CompositeStatusHandler implements StatusHandler {
protected List<StatusHandler> handlers = new ArrayList<StatusHandler>();
public void handleStatus(String serviceName, ServiceStatusType status) {
for (StatusHandler statusHandler : handlers) {
statusHandler.handleStatus(serviceName, status);
}
}
public List<StatusHandler> getHandlers() {
return handlers;
}
public void setHandlers(List<StatusHandler> handlers) {
this.handlers = handlers;
}
}
| 0 (true)
|
common_src_main_java_org_broadleafcommerce_common_vendor_service_monitor_handler_CompositeStatusHandler.java
|
102 |
public class TestTransactionImpl
{
@Test
public void shouldBeAbleToAccessAllExceptionsOccurringInSynchronizationsBeforeCompletion()
throws IllegalStateException, RollbackException
{
TxManager mockedTxManager = mock( TxManager.class );
TransactionImpl tx = new TransactionImpl( getNewGlobalId( DEFAULT_SEED, 0 ), mockedTxManager, ForceMode.forced,
TransactionStateFactory.noStateFactory( new DevNullLoggingService() ),
new SystemOutLogging().getMessagesLog( TxManager.class ) );
// Evil synchronizations
final RuntimeException firstException = new RuntimeException( "Ex1" );
Synchronization meanSync1 = new Synchronization()
{
@Override
public void beforeCompletion()
{
throw firstException;
}
@Override
public void afterCompletion( int status )
{
}
};
final RuntimeException secondException = new RuntimeException( "Ex2" );
Synchronization meanSync2 = new Synchronization()
{
@Override
public void beforeCompletion()
{
throw secondException;
}
@Override
public void afterCompletion( int status )
{
}
};
tx.registerSynchronization( meanSync1 );
tx.registerSynchronization( meanSync2 );
tx.doBeforeCompletion();
assertThat( tx.getRollbackCause(),
is( instanceOf( MultipleCauseException.class ) ) );
MultipleCauseException error = (MultipleCauseException) tx.getRollbackCause();
assertThat( error.getCause(), is( (Throwable) firstException ) );
assertThat( error.getCauses().size(), is( 2 ) );
assertThat( error.getCauses().get( 0 ), is( (Throwable) firstException ) );
assertThat( error.getCauses().get( 1 ), is( (Throwable) secondException ) );
}
@Test
public void shouldNotThrowMultipleCauseIfOnlyOneErrorOccursInBeforeCompletion() throws IllegalStateException,
RollbackException
{
TxManager mockedTxManager = mock( TxManager.class );
TransactionImpl tx = new TransactionImpl( getNewGlobalId( DEFAULT_SEED, 0 ), mockedTxManager, ForceMode.forced,
TransactionStateFactory.noStateFactory( new DevNullLoggingService() ),
new SystemOutLogging().getMessagesLog( TxManager.class ) );
// Evil synchronizations
final RuntimeException firstException = new RuntimeException( "Ex1" );
Synchronization meanSync1 = new Synchronization()
{
@Override
public void beforeCompletion()
{
throw firstException;
}
@Override
public void afterCompletion( int status )
{
}
};
tx.registerSynchronization( meanSync1 );
tx.doBeforeCompletion();
assertThat( tx.getRollbackCause(), is( (Throwable) firstException ) );
}
}
| 0 (true)
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestTransactionImpl.java
|
516 |
@RunWith(HazelcastParallelClassRunner.class)
@Category(QuickTest.class)
public class ClientTopicTest {
static HazelcastInstance client;
static HazelcastInstance server;
@BeforeClass
public static void init(){
server = Hazelcast.newHazelcastInstance();
client = HazelcastClient.newHazelcastClient(null);
}
@AfterClass
public static void stop(){
HazelcastClient.shutdownAll();
Hazelcast.shutdownAll();
}
@Test
public void testListener() throws InterruptedException{
ITopic topic = client.getTopic(randomString());
final CountDownLatch latch = new CountDownLatch(10);
MessageListener listener = new MessageListener() {
public void onMessage(Message message) {
latch.countDown();
}
};
topic.addMessageListener(listener);
for (int i=0; i<10; i++){
topic.publish(i);
}
assertTrue(latch.await(20, TimeUnit.SECONDS));
}
@Test
public void testRemoveListener() {
ITopic topic = client.getTopic(randomString());
MessageListener listener = new MessageListener() {
public void onMessage(Message message) {
}
};
String id = topic.addMessageListener(listener);
assertTrue(topic.removeMessageListener(id));
}
@Test(expected = UnsupportedOperationException.class)
public void testGetLocalTopicStats() throws Exception {
ITopic topic = client.getTopic(randomString());
topic.getLocalTopicStats();
}
}
| 0 (true)
|
hazelcast-client_src_test_java_com_hazelcast_client_topic_ClientTopicTest.java
|
4,207 |
public class Store extends AbstractIndexShardComponent implements CloseableIndexComponent {
static final String CHECKSUMS_PREFIX = "_checksums-";
public static final boolean isChecksum(String name) {
return name.startsWith(CHECKSUMS_PREFIX);
}
private final IndexStore indexStore;
final CodecService codecService;
private final DirectoryService directoryService;
private final StoreDirectory directory;
private volatile ImmutableOpenMap<String, StoreFileMetaData> filesMetadata = ImmutableOpenMap.of();
private volatile String[] files = Strings.EMPTY_ARRAY;
private final Object mutex = new Object();
private final boolean sync;
@Inject
public Store(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore, CodecService codecService, DirectoryService directoryService, Distributor distributor) throws IOException {
super(shardId, indexSettings);
this.indexStore = indexStore;
this.codecService = codecService;
this.directoryService = directoryService;
this.sync = componentSettings.getAsBoolean("sync", true); // TODO we don't really need to fsync when using shared gateway...
this.directory = new StoreDirectory(distributor);
}
public IndexStore indexStore() {
return this.indexStore;
}
public Directory directory() {
return directory;
}
public ImmutableMap<String, StoreFileMetaData> list() throws IOException {
ImmutableMap.Builder<String, StoreFileMetaData> builder = ImmutableMap.builder();
for (String name : files) {
StoreFileMetaData md = metaData(name);
if (md != null) {
builder.put(md.name(), md);
}
}
return builder.build();
}
public StoreFileMetaData metaData(String name) throws IOException {
StoreFileMetaData md = filesMetadata.get(name);
if (md == null) {
return null;
}
// IndexOutput not closed, so the file does not exist yet
if (md.length() == -1) {
return null;
}
return md;
}
/**
* Deletes the content of a shard store. Be careful calling this!
*/
public void deleteContent() throws IOException {
String[] files = directory.listAll();
IOException lastException = null;
for (String file : files) {
if (isChecksum(file)) {
try {
directory.deleteFileChecksum(file);
} catch (IOException e) {
lastException = e;
}
} else {
try {
directory.deleteFile(file);
} catch (FileNotFoundException e) {
// ignore
} catch (IOException e) {
lastException = e;
}
}
}
if (lastException != null) {
throw lastException;
}
}
public StoreStats stats() throws IOException {
return new StoreStats(Directories.estimateSize(directory), directoryService.throttleTimeInNanos());
}
public ByteSizeValue estimateSize() throws IOException {
return new ByteSizeValue(Directories.estimateSize(directory));
}
public void renameFile(String from, String to) throws IOException {
synchronized (mutex) {
StoreFileMetaData fromMetaData = filesMetadata.get(from); // we should always find this one
if (fromMetaData == null) {
throw new FileNotFoundException(from);
}
directoryService.renameFile(fromMetaData.directory(), from, to);
StoreFileMetaData toMetaData = new StoreFileMetaData(to, fromMetaData.length(), fromMetaData.checksum(), fromMetaData.directory());
filesMetadata = ImmutableOpenMap.builder(filesMetadata).fRemove(from).fPut(to, toMetaData).build();
files = filesMetadata.keys().toArray(String.class);
}
}
public static Map<String, String> readChecksums(File[] locations) throws IOException {
Directory[] dirs = new Directory[locations.length];
try {
for (int i = 0; i < locations.length; i++) {
dirs[i] = new SimpleFSDirectory(locations[i]);
}
return readChecksums(dirs, null);
} finally {
for (Directory dir : dirs) {
if (dir != null) {
try {
dir.close();
} catch (IOException e) {
// ignore
}
}
}
}
}
static Map<String, String> readChecksums(Directory[] dirs, Map<String, String> defaultValue) throws IOException {
long lastFound = -1;
Directory lastDir = null;
for (Directory dir : dirs) {
for (String name : dir.listAll()) {
if (!isChecksum(name)) {
continue;
}
long current = Long.parseLong(name.substring(CHECKSUMS_PREFIX.length()));
if (current > lastFound) {
lastFound = current;
lastDir = dir;
}
}
}
if (lastFound == -1) {
return defaultValue;
}
IndexInput indexInput = lastDir.openInput(CHECKSUMS_PREFIX + lastFound, IOContext.READONCE);
try {
indexInput.readInt(); // version
return indexInput.readStringStringMap();
} catch (Throwable e) {
// failed to load checksums, ignore and return an empty map
return defaultValue;
} finally {
indexInput.close();
}
}
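// Illustrative note: with "_checksums-100" and "_checksums-250" present across the
// directories, the loop above selects "_checksums-250" (the highest numeric suffix)
// and returns its name-to-checksum map, falling back to defaultValue if nothing is
// found or the file cannot be read.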
public void writeChecksums() throws IOException {
ImmutableMap<String, StoreFileMetaData> files = list();
String checksumName = CHECKSUMS_PREFIX + System.currentTimeMillis();
synchronized (mutex) {
Map<String, String> checksums = new HashMap<String, String>();
for (StoreFileMetaData metaData : files.values()) {
if (metaData.checksum() != null) {
checksums.put(metaData.name(), metaData.checksum());
}
}
while (directory.fileExists(checksumName)) {
checksumName = CHECKSUMS_PREFIX + System.currentTimeMillis();
}
IndexOutput output = directory.createOutput(checksumName, IOContext.DEFAULT, true);
try {
output.writeInt(0); // version
output.writeStringStringMap(checksums);
} finally {
output.close();
}
}
for (StoreFileMetaData metaData : files.values()) {
if (metaData.name().startsWith(CHECKSUMS_PREFIX) && !checksumName.equals(metaData.name())) {
try {
directory.deleteFileChecksum(metaData.name());
} catch (Throwable e) {
// ignore
}
}
}
}
/**
* Returns <tt>false</tt> by default.
*/
public boolean suggestUseCompoundFile() {
return false;
}
public void close() {
try {
directory.close();
} catch (IOException e) {
logger.debug("failed to close directory", e);
}
}
/**
* Creates a raw output: no checksum is computed, and no compression is applied even if enabled.
*/
public IndexOutput createOutputRaw(String name) throws IOException {
return directory.createOutput(name, IOContext.DEFAULT, true);
}
/**
* Opens an index input in raw form, e.g. without decompression.
*/
public IndexInput openInputRaw(String name, IOContext context) throws IOException {
StoreFileMetaData metaData = filesMetadata.get(name);
if (metaData == null) {
throw new FileNotFoundException(name);
}
return metaData.directory().openInput(name, context);
}
public void writeChecksum(String name, String checksum) throws IOException {
// update the metadata to include the checksum and write a new checksums file
synchronized (mutex) {
StoreFileMetaData metaData = filesMetadata.get(name);
metaData = new StoreFileMetaData(metaData.name(), metaData.length(), checksum, metaData.directory());
filesMetadata = ImmutableOpenMap.builder(filesMetadata).fPut(name, metaData).build();
writeChecksums();
}
}
public void writeChecksums(Map<String, String> checksums) throws IOException {
// update the metadata to include the checksum and write a new checksums file
synchronized (mutex) {
for (Map.Entry<String, String> entry : checksums.entrySet()) {
StoreFileMetaData metaData = filesMetadata.get(entry.getKey());
metaData = new StoreFileMetaData(metaData.name(), metaData.length(), entry.getValue(), metaData.directory());
filesMetadata = ImmutableOpenMap.builder(filesMetadata).fPut(entry.getKey(), metaData).build();
}
writeChecksums();
}
}
/**
* The idea of the store directory is to cache file level meta data, as well as md5 of it
*/
public class StoreDirectory extends BaseDirectory implements ForceSyncDirectory {
private final Distributor distributor;
StoreDirectory(Distributor distributor) throws IOException {
this.distributor = distributor;
synchronized (mutex) {
ImmutableOpenMap.Builder<String, StoreFileMetaData> builder = ImmutableOpenMap.builder();
Map<String, String> checksums = readChecksums(distributor.all(), new HashMap<String, String>());
for (Directory delegate : distributor.all()) {
for (String file : delegate.listAll()) {
String checksum = checksums.get(file);
builder.put(file, new StoreFileMetaData(file, delegate.fileLength(file), checksum, delegate));
}
}
filesMetadata = builder.build();
files = filesMetadata.keys().toArray(String.class);
}
}
public ShardId shardId() {
return Store.this.shardId();
}
public Settings settings() {
return Store.this.indexSettings();
}
@Nullable
public CodecService codecService() {
return Store.this.codecService;
}
public Directory[] delegates() {
return distributor.all();
}
@Override
public void copy(Directory to, String src, String dest, IOContext context) throws IOException {
ensureOpen();
// lets the default implementation happen, so we properly open an input and create an output
super.copy(to, src, dest, context);
}
@Override
public String[] listAll() throws IOException {
ensureOpen();
return files;
}
@Override
public boolean fileExists(String name) throws IOException {
ensureOpen();
return filesMetadata.containsKey(name);
}
public void deleteFileChecksum(String name) throws IOException {
ensureOpen();
StoreFileMetaData metaData = filesMetadata.get(name);
if (metaData != null) {
try {
metaData.directory().deleteFile(name);
} catch (IOException e) {
if (metaData.directory().fileExists(name)) {
throw e;
}
}
}
synchronized (mutex) {
filesMetadata = ImmutableOpenMap.builder(filesMetadata).fRemove(name).build();
files = filesMetadata.keys().toArray(String.class);
}
}
@Override
public void deleteFile(String name) throws IOException {
ensureOpen();
// we don't allow to delete the checksums files, only using the deleteChecksum method
if (isChecksum(name)) {
return;
}
StoreFileMetaData metaData = filesMetadata.get(name);
if (metaData != null) {
try {
metaData.directory().deleteFile(name);
} catch (IOException e) {
if (metaData.directory().fileExists(name)) {
throw e;
}
}
}
synchronized (mutex) {
filesMetadata = ImmutableOpenMap.builder(filesMetadata).fRemove(name).build();
files = filesMetadata.keys().toArray(String.class);
}
}
/**
* Returns the *actual* file length, not the uncompressed one when compression is enabled. This
* messes things up when using the compound file format, but that shouldn't be used in any case...
*/
@Override
public long fileLength(String name) throws IOException {
ensureOpen();
StoreFileMetaData metaData = filesMetadata.get(name);
if (metaData == null) {
throw new FileNotFoundException(name);
}
// a length of -1 means the length is not set yet (IndexOutput not closed); fall back to the directory
if (metaData.length() != -1) {
return metaData.length();
}
return metaData.directory().fileLength(name);
}
@Override
public IndexOutput createOutput(String name, IOContext context) throws IOException {
return createOutput(name, context, false);
}
public IndexOutput createOutput(String name, IOContext context, boolean raw) throws IOException {
ensureOpen();
Directory directory;
// we want to write the segments gen file to the same directory *all* the time
// to make sure we don't create multiple copies of it
if (isChecksum(name) || IndexFileNames.SEGMENTS_GEN.equals(name)) {
directory = distributor.primary();
} else {
directory = distributor.any();
}
IndexOutput out = directory.createOutput(name, context);
boolean success = false;
try {
synchronized (mutex) {
StoreFileMetaData metaData = new StoreFileMetaData(name, -1, null, directory);
filesMetadata = ImmutableOpenMap.builder(filesMetadata).fPut(name, metaData).build();
files = filesMetadata.keys().toArray(String.class);
boolean computeChecksum = !raw;
if (computeChecksum) {
// don't compute checksum for segment based files
if (IndexFileNames.SEGMENTS_GEN.equals(name) || name.startsWith(IndexFileNames.SEGMENTS)) {
computeChecksum = false;
}
}
if (computeChecksum) {
out = new BufferedChecksumIndexOutput(out, new Adler32());
}
final StoreIndexOutput storeIndexOutput = new StoreIndexOutput(metaData, out, name);
success = true;
return storeIndexOutput;
}
} finally {
if (!success) {
IOUtils.closeWhileHandlingException(out);
}
}
}
@Override
public IndexInput openInput(String name, IOContext context) throws IOException {
ensureOpen();
StoreFileMetaData metaData = filesMetadata.get(name);
if (metaData == null) {
throw new FileNotFoundException(name);
}
IndexInput in = metaData.directory().openInput(name, context);
boolean success = false;
try {
// Only for backward compatibility, since we now use Lucene codec compression
if (name.endsWith(".fdt") || name.endsWith(".tvf")) {
Compressor compressor = CompressorFactory.compressor(in);
if (compressor != null) {
in = compressor.indexInput(in);
}
}
success = true;
} finally {
if (!success) {
IOUtils.closeWhileHandlingException(in);
}
}
return in;
}
@Override
public IndexInputSlicer createSlicer(String name, IOContext context) throws IOException {
ensureOpen();
StoreFileMetaData metaData = filesMetadata.get(name);
if (metaData == null) {
throw new FileNotFoundException(name);
}
// Only for backward compatibility, since we now use Lucene codec compression
if (name.endsWith(".fdt") || name.endsWith(".tvf")) {
// rely on the slicer from the base class that uses an input, since they might be compressed...
// note, it seems like slicers are only used in compound file format..., so not relevant for now
return super.createSlicer(name, context);
}
return metaData.directory().createSlicer(name, context);
}
@Override
public synchronized void close() throws IOException {
isOpen = false;
for (Directory delegate : distributor.all()) {
delegate.close();
}
synchronized (mutex) {
filesMetadata = ImmutableOpenMap.of();
files = Strings.EMPTY_ARRAY;
}
}
@Override
public Lock makeLock(String name) {
return distributor.primary().makeLock(name);
}
@Override
public void clearLock(String name) throws IOException {
distributor.primary().clearLock(name);
}
@Override
public void setLockFactory(LockFactory lockFactory) throws IOException {
distributor.primary().setLockFactory(lockFactory);
}
@Override
public LockFactory getLockFactory() {
return distributor.primary().getLockFactory();
}
@Override
public String getLockID() {
return distributor.primary().getLockID();
}
@Override
public void sync(Collection<String> names) throws IOException {
ensureOpen();
if (sync) {
Map<Directory, Collection<String>> map = Maps.newHashMap();
for (String name : names) {
StoreFileMetaData metaData = filesMetadata.get(name);
if (metaData == null) {
throw new FileNotFoundException(name);
}
Collection<String> dirNames = map.get(metaData.directory());
if (dirNames == null) {
dirNames = new ArrayList<String>();
map.put(metaData.directory(), dirNames);
}
dirNames.add(name);
}
for (Map.Entry<Directory, Collection<String>> entry : map.entrySet()) {
entry.getKey().sync(entry.getValue());
}
}
for (String name : names) {
// write the checksums file when we sync on the segments file (committed)
if (!name.equals(IndexFileNames.SEGMENTS_GEN) && name.startsWith(IndexFileNames.SEGMENTS)) {
writeChecksums();
break;
}
}
}
@Override
public void forceSync(String name) throws IOException {
sync(ImmutableList.of(name));
}
@Override
public String toString() {
return "store(" + distributor.toString() + ")";
}
}
class StoreIndexOutput extends IndexOutput {
private final StoreFileMetaData metaData;
private final IndexOutput out;
private final String name;
StoreIndexOutput(StoreFileMetaData metaData, IndexOutput delegate, String name) {
this.metaData = metaData;
this.out = delegate;
this.name = name;
}
@Override
public void close() throws IOException {
out.close();
String checksum = null;
IndexOutput underlying = out;
if (underlying instanceof BufferedChecksumIndexOutput) {
checksum = Long.toString(((BufferedChecksumIndexOutput) underlying).digest().getValue(), Character.MAX_RADIX);
} else if (underlying instanceof ChecksumIndexOutput) {
checksum = Long.toString(((ChecksumIndexOutput) underlying).digest().getValue(), Character.MAX_RADIX);
}
synchronized (mutex) {
StoreFileMetaData md = new StoreFileMetaData(name, metaData.directory().fileLength(name), checksum, metaData.directory());
filesMetadata = ImmutableOpenMap.builder(filesMetadata).fPut(name, md).build();
files = filesMetadata.keys().toArray(String.class);
}
}
@Override
public void copyBytes(DataInput input, long numBytes) throws IOException {
out.copyBytes(input, numBytes);
}
@Override
public long getFilePointer() {
return out.getFilePointer();
}
@Override
public void writeByte(byte b) throws IOException {
out.writeByte(b);
}
@Override
public void writeBytes(byte[] b, int offset, int length) throws IOException {
out.writeBytes(b, offset, length);
}
@Override
public void flush() throws IOException {
out.flush();
}
@Override
public void seek(long pos) throws IOException {
out.seek(pos);
}
@Override
public long length() throws IOException {
return out.length();
}
@Override
public void setLength(long length) throws IOException {
out.setLength(length);
}
@Override
public String toString() {
return out.toString();
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_store_Store.java
|
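The store above computes a per-file Adler32 checksum through BufferedChecksumIndexOutput and encodes the digest as a radix-36 string (Character.MAX_RADIX). A minimal standalone sketch of that encoding, not taken from the Elasticsearch source, reading a plain file rather than a Lucene directory:

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.zip.Adler32;

public class ChecksumSketch {
    public static String adler32String(Path file) throws IOException {
        Adler32 digest = new Adler32();
        byte[] buffer = new byte[8192];
        try (InputStream in = Files.newInputStream(file)) {
            int read;
            while ((read = in.read(buffer)) != -1) {
                digest.update(buffer, 0, read); // same digest type the store uses
            }
        }
        // Character.MAX_RADIX == 36, matching the store's checksum encoding
        return Long.toString(digest.getValue(), Character.MAX_RADIX);
    }

    public static void main(String[] args) throws IOException {
        System.out.println(adler32String(Paths.get(args[0])));
    }
}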
147 |
public abstract class InvocationClientRequest extends ClientRequest {
@Override
final void process() throws Exception {
invoke();
}
protected abstract void invoke();
protected final InvocationBuilder createInvocationBuilder(String serviceName, Operation op, int partitionId) {
return clientEngine.createInvocationBuilder(serviceName, op, partitionId);
}
protected final InvocationBuilder createInvocationBuilder(String serviceName, Operation op, Address target) {
return clientEngine.createInvocationBuilder(serviceName, op, target);
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_client_InvocationClientRequest.java
|
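InvocationClientRequest above is a small template method: process() is final and concrete requests only supply invoke(). A self-contained, plain-Java analogue of that shape (no Hazelcast types; the class and method names here are illustrative):

public abstract class TemplateRequest {
    final void process() { // fixed pipeline, mirroring the final process() above
        beforeInvoke();
        invoke();
    }

    private void beforeInvoke() {
        System.out.println("common bookkeeping");
    }

    protected abstract void invoke(); // the only hook subclasses implement

    public static void main(String[] args) {
        new TemplateRequest() {
            @Override
            protected void invoke() {
                System.out.println("concrete invocation");
            }
        }.process();
    }
}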
648 |
indexTemplateService.removeTemplates(new MetaDataIndexTemplateService.RemoveRequest(request.name()).masterTimeout(request.masterNodeTimeout()), new MetaDataIndexTemplateService.RemoveListener() {
@Override
public void onResponse(MetaDataIndexTemplateService.RemoveResponse response) {
listener.onResponse(new DeleteIndexTemplateResponse(response.acknowledged()));
}
@Override
public void onFailure(Throwable t) {
logger.debug("failed to delete templates [{}]", t, request.name());
listener.onFailure(t);
}
});
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_template_delete_TransportDeleteIndexTemplateAction.java
|
400 |
@XmlAccessorType(XmlAccessType.FIELD)
public class Money implements Serializable, Cloneable, Comparable<Money>, Externalizable {
private static final long serialVersionUID = 1L;
@XmlElement
@XmlJavaTypeAdapter(value = BigDecimalRoundingAdapter.class)
private BigDecimal amount;
@XmlElement
@XmlJavaTypeAdapter(CurrencyAdapter.class)
private final Currency currency;
public static final Money ZERO = new Money(BigDecimal.ZERO);
protected static String getCurrencyCode(BroadleafCurrency blCurrency) {
if (blCurrency != null) {
return blCurrency.getCurrencyCode();
} else {
return defaultCurrency().getCurrencyCode();
}
}
public Money(Currency currency) {
this(BankersRounding.zeroAmount(), currency);
}
public Money(BroadleafCurrency blCurrency) {
this(0, Currency.getInstance(getCurrencyCode(blCurrency)));
}
public Money(BigDecimal amount, BroadleafCurrency blCurrency) {
this(amount, Currency.getInstance(getCurrencyCode(blCurrency)));
}
public Money(BigDecimal amount, BroadleafCurrency blCurrency, int scale) {
this(amount, Currency.getInstance(getCurrencyCode(blCurrency)), scale);
}
public Money() {
this(BankersRounding.zeroAmount(), defaultCurrency());
}
public Money(BigDecimal amount) {
this(amount, defaultCurrency());
}
public Money(double amount) {
this(valueOf(amount), defaultCurrency());
}
public Money(int amount) {
this(BigDecimal.valueOf(amount).setScale(BankersRounding.getScaleForCurrency(defaultCurrency()),
RoundingMode.HALF_EVEN), defaultCurrency());
}
public Money(long amount) {
this(BigDecimal.valueOf(amount).setScale(BankersRounding.getScaleForCurrency(defaultCurrency()),
RoundingMode.HALF_EVEN), defaultCurrency());
}
public Money(String amount) {
this(valueOf(amount), defaultCurrency());
}
public Money(BigDecimal amount, String currencyCode) {
this(amount, Currency.getInstance(currencyCode));
}
public Money(double amount, Currency currency) {
this(valueOf(amount), currency);
}
public Money(double amount, String currencyCode) {
this(valueOf(amount), Currency.getInstance(currencyCode));
}
public Money(int amount, Currency currency) {
this(BigDecimal.valueOf(amount).setScale(BankersRounding.getScaleForCurrency(currency), RoundingMode.HALF_EVEN), currency);
}
public Money(int amount, String currencyCode) {
this(BigDecimal.valueOf(amount).setScale(BankersRounding.getScaleForCurrency(Currency.getInstance(currencyCode)), RoundingMode.HALF_EVEN), Currency.getInstance(currencyCode));
}
public Money(long amount, Currency currency) {
this(BigDecimal.valueOf(amount).setScale(BankersRounding.getScaleForCurrency(currency), RoundingMode.HALF_EVEN), currency);
}
public Money(long amount, String currencyCode) {
this(BigDecimal.valueOf(amount).setScale(BankersRounding.getScaleForCurrency(Currency.getInstance(currencyCode)), RoundingMode.HALF_EVEN), Currency.getInstance(currencyCode));
}
public Money(String amount, Currency currency) {
this(valueOf(amount), currency);
}
public Money(String amount, String currencyCode) {
this(valueOf(amount), Currency.getInstance(currencyCode));
}
public Money(BigDecimal amount, Currency currency) {
if (currency == null) {
throw new IllegalArgumentException("currency cannot be null");
}
this.currency = currency;
this.amount = BankersRounding.setScale(BankersRounding.getScaleForCurrency(currency), amount);
}
public Money(BigDecimal amount, Currency currency, int scale) {
if (currency == null) {
throw new IllegalArgumentException("currency cannot be null");
}
this.currency = currency;
this.amount = BankersRounding.setScale(amount, scale);
}
public BigDecimal getAmount() {
return amount;
}
public Currency getCurrency() {
return currency;
}
public Money add(Money other) {
if (!other.getCurrency().equals(getCurrency())) {
if (
CurrencyConversionContext.getCurrencyConversionContext() != null &&
CurrencyConversionContext.getCurrencyConversionContext().size() > 0 &&
CurrencyConversionContext.getCurrencyConversionService() != null
) {
other = CurrencyConversionContext.getCurrencyConversionService().convertCurrency(other, getCurrency(), amount.scale());
} else {
throw new UnsupportedOperationException("No currency conversion service is registered, cannot add different currency " +
"types together (" + getCurrency().getCurrencyCode() + " " + other.getCurrency().getCurrencyCode() + ")");
}
}
return new Money(amount.add(other.amount), currency, amount.scale() == 0 ? BankersRounding.getScaleForCurrency(currency) : amount.scale());
}
public Money subtract(Money other) {
if (!other.getCurrency().equals(getCurrency())) {
if (
CurrencyConversionContext.getCurrencyConversionContext() != null &&
CurrencyConversionContext.getCurrencyConversionContext().size() > 0 &&
CurrencyConversionContext.getCurrencyConversionService() != null
) {
other = CurrencyConversionContext.getCurrencyConversionService().convertCurrency(other, getCurrency(), amount.scale());
} else {
throw new UnsupportedOperationException("No currency conversion service is registered, cannot subtract different currency " +
"types (" + getCurrency().getCurrencyCode() + ", " + other.getCurrency().getCurrencyCode() + ")");
}
}
return new Money(amount.subtract(other.amount), currency, amount.scale() == 0 ? BankersRounding.getScaleForCurrency(currency) : amount.scale());
}
public Money multiply(double amount) {
return multiply(valueOf(amount));
}
public Money multiply(int amount) {
BigDecimal value = BigDecimal.valueOf(amount);
value = value.setScale(BankersRounding.getScaleForCurrency(currency), RoundingMode.HALF_EVEN);
return multiply(value);
}
public Money multiply(BigDecimal multiplier) {
return new Money(amount.multiply(multiplier), currency, amount.scale() == 0 ? BankersRounding.getScaleForCurrency(currency) : amount.scale());
}
public Money divide(double amount) {
return this.divide(amount, RoundingMode.HALF_EVEN);
}
public Money divide(double amount, RoundingMode roundingMode) {
return divide(valueOf(amount), roundingMode);
}
public Money divide(int amount) {
return this.divide(amount, RoundingMode.HALF_EVEN);
}
public Money divide(int amount, RoundingMode roundingMode) {
BigDecimal value = BigDecimal.valueOf(amount);
value = value.setScale(BankersRounding.getScaleForCurrency(currency), RoundingMode.HALF_EVEN);
return divide(value, roundingMode);
}
public Money divide(BigDecimal divisor) {
return this.divide(divisor, RoundingMode.HALF_EVEN);
}
public Money divide(BigDecimal divisor, RoundingMode roundingMode) {
return new Money(amount.divide(divisor, amount.scale(), roundingMode), currency, amount.scale() == 0 ? BankersRounding.getScaleForCurrency(currency) : amount.scale());
}
public Money abs() {
return new Money(amount.abs(), currency);
}
public Money min(Money other) {
if (other == null) { return this; }
return lessThan(other) ? this : other;
}
public Money max(Money other) {
if (other == null) { return this; }
return greaterThan(other) ? this : other;
}
public Money negate() {
return new Money(amount.negate(), currency);
}
public boolean isZero() {
return amount.compareTo(BankersRounding.zeroAmount()) == 0;
}
public Money zero() {
return Money.zero(currency);
}
public boolean lessThan(Money other) {
return compareTo(other) < 0;
}
public boolean lessThan(BigDecimal value) {
return amount.compareTo(value) < 0;
}
public boolean lessThanOrEqual(Money other) {
return compareTo(other) <= 0;
}
public boolean lessThanOrEqual(BigDecimal value) {
return amount.compareTo(value) <= 0;
}
public boolean greaterThan(Money other) {
return compareTo(other) > 0;
}
public boolean greaterThan(BigDecimal value) {
return amount.compareTo(value) > 0;
}
public boolean greaterThanOrEqual(Money other) {
return compareTo(other) >= 0;
}
public boolean greaterThanOrEqual(BigDecimal value) {
return amount.compareTo(value) >= 0;
}
@Override
public int compareTo(Money other) {
return amount.compareTo(other.amount);
}
public int compareTo(BigDecimal value) {
return amount.compareTo(value);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Money)) {
return false;
}
Money money = (Money) o;
if (amount != null ? !amount.equals(money.amount) : money.amount != null) {
return false;
}
if (isZero()) {
return true;
}
if (currency != null ? !currency.equals(money.currency) : money.currency != null) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = amount != null ? amount.hashCode() : 0;
result = 31 * result + (currency != null ? currency.hashCode() : 0);
return result;
}
@Override
public Object clone() {
return new Money(amount, currency);
}
@Override
public String toString() {
return amount.toString();
}
public double doubleValue() {
try {
return amount.doubleValue();
} catch (NumberFormatException e) {
// HotSpot bug in JVM < 1.4.2_06.
if (e.getMessage().equals("For input string: \"0.00null\"")) {
return amount.doubleValue();
} else {
throw e;
}
}
}
public String stringValue() {
return amount.toString() + " " + currency.getCurrencyCode();
}
public static Money zero(String currencyCode) {
return zero(Currency.getInstance(currencyCode));
}
public static Money zero(Currency currency) {
return new Money(BankersRounding.zeroAmount(), currency);
}
public static Money abs(Money money) {
return new Money(money.amount.abs(), money.currency);
}
public static Money min(Money left, Money right) {
return left.min(right);
}
public static Money max(Money left, Money right) {
return left.max(right);
}
public static BigDecimal toAmount(Money money) {
return ((money == null) ? null : money.amount);
}
public static Currency toCurrency(Money money) {
return ((money == null) ? null : money.currency);
}
/**
* Ensures predictable results by converting the double into a string then calling the BigDecimal string constructor.
* @param amount The amount
* @return BigDecimal a big decimal with a predictable value
*/
private static BigDecimal valueOf(double amount) {
return valueOf(String.valueOf(amount));
}
private static BigDecimal valueOf(String amount) {
BigDecimal value = new BigDecimal(amount);
if (value.scale() < 2) {
value = value.setScale(BankersRounding.getScaleForCurrency(defaultCurrency()), RoundingMode.HALF_EVEN);
}
return value;
}
/**
* Attempts to load a default currency by using the default locale. {@link Currency#getInstance(Locale)} uses the country component of the locale to resolve the currency. In some instances, the locale may not have a country component, in which case the default currency can be controlled with a
* system property.
* @return The default currency to use when none is specified
*/
public static Currency defaultCurrency() {
if (
CurrencyConsiderationContext.getCurrencyConsiderationContext() != null &&
CurrencyConsiderationContext.getCurrencyConsiderationContext().size() > 0 &&
CurrencyConsiderationContext.getCurrencyDeterminationService() != null
) {
return Currency.getInstance(CurrencyConsiderationContext.getCurrencyDeterminationService().getCurrencyCode(CurrencyConsiderationContext.getCurrencyConsiderationContext()));
}
// Check the BLC Thread
BroadleafRequestContext brc = BroadleafRequestContext.getBroadleafRequestContext();
if (brc != null && brc.getBroadleafCurrency() != null) {
assert brc.getBroadleafCurrency().getCurrencyCode()!=null;
return Currency.getInstance(brc.getBroadleafCurrency().getCurrencyCode());
}
if (System.getProperty("currency.default") != null) {
return Currency.getInstance(System.getProperty("currency.default"));
}
Locale locale = Locale.getDefault();
if (locale.getCountry() != null && locale.getCountry().length() == 2) {
return Currency.getInstance(locale);
}
return Currency.getInstance("USD");
}
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    // Restore the amount from the serialized float; this may lose precision, and the currency is not restored.
    amount = new BigDecimal(in.readFloat());
}
@Override
public void writeExternal(ObjectOutput out) throws IOException {
// Write out the amount as a float; the currency is not serialized (see the commented-out line below).
out.writeFloat(amount.floatValue());
// out.writeObject(currency);
}
}
| 1no label
|
common_src_main_java_org_broadleafcommerce_common_money_Money.java
|
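A runnable sketch, independent of the Broadleaf classes, illustrating the two conventions Money relies on above: valueOf(double) goes through String.valueOf to avoid binary floating-point artifacts, and amounts are scaled with RoundingMode.HALF_EVEN (banker's rounding):

import java.math.BigDecimal;
import java.math.RoundingMode;

public class MoneyRoundingDemo {
    public static void main(String[] args) {
        // Constructing directly from a double exposes binary floating-point error:
        System.out.println(new BigDecimal(0.1));
        // prints 0.1000000000000000055511151231257827021181583404541015625
        // The String route gives the value Money expects:
        System.out.println(new BigDecimal(String.valueOf(0.1))); // prints 0.1
        // HALF_EVEN rounds ties to the even neighbor:
        System.out.println(new BigDecimal("2.345").setScale(2, RoundingMode.HALF_EVEN)); // 2.34
        System.out.println(new BigDecimal("2.355").setScale(2, RoundingMode.HALF_EVEN)); // 2.36
    }
}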
313 |
LOG_CONSOLE_LEVEL("log.console.level", "Console logging level", String.class, "info", new OConfigurationChangeCallback() {
public void change(final Object iCurrentValue, final Object iNewValue) {
OLogManager.instance().setLevel((String) iNewValue, ConsoleHandler.class);
}
}),
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_config_OGlobalConfiguration.java
|
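The entry above registers an OConfigurationChangeCallback so the console logger level is updated whenever the setting changes. A plain-Java sketch of that change-callback idea (ObservableSetting is a made-up name, not an OrientDB type):

import java.util.function.BiConsumer;

public class ObservableSetting<T> {
    private T value;
    private final BiConsumer<T, T> onChange; // (oldValue, newValue)

    public ObservableSetting(T initial, BiConsumer<T, T> onChange) {
        this.value = initial;
        this.onChange = onChange;
    }

    public void set(T newValue) {
        T old = this.value;
        this.value = newValue;
        onChange.accept(old, newValue); // mirrors OConfigurationChangeCallback.change
    }

    public static void main(String[] args) {
        ObservableSetting<String> level = new ObservableSetting<>("info",
                (oldV, newV) -> System.out.println("log level: " + oldV + " -> " + newV));
        level.set("fine");
    }
}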
607 |
public class UpdateSettingsAction extends IndicesAction<UpdateSettingsRequest, UpdateSettingsResponse, UpdateSettingsRequestBuilder> {
public static final UpdateSettingsAction INSTANCE = new UpdateSettingsAction();
public static final String NAME = "indices/settings/update";
private UpdateSettingsAction() {
super(NAME);
}
@Override
public UpdateSettingsResponse newResponse() {
return new UpdateSettingsResponse();
}
@Override
public UpdateSettingsRequestBuilder newRequestBuilder(IndicesAdminClient client) {
return new UpdateSettingsRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_settings_put_UpdateSettingsAction.java
|
207 |
private class ClientPacketProcessor implements Runnable {
final ClientPacket packet;
ClientPacketProcessor(ClientPacket packet) {
this.packet = packet;
}
@Override
public void run() {
final ClientConnection conn = (ClientConnection) packet.getConn();
final ClientResponse clientResponse = getSerializationService().toObject(packet.getData());
final int callId = clientResponse.getCallId();
final Data response = clientResponse.getResponse();
if (clientResponse.isEvent()) {
handleEvent(response, callId, conn);
} else {
handlePacket(response, clientResponse.isError(), callId, conn);
}
conn.decrementPacketCount();
}
private void handlePacket(Object response, boolean isError, int callId, ClientConnection conn) {
final ClientCallFuture future = conn.deRegisterCallId(callId);
if (future == null) {
LOGGER.warning("No call for callId: " + callId + ", response: " + response);
return;
}
if (isError) {
response = getSerializationService().toObject(response);
}
future.notify(response);
}
private void handleEvent(Data event, int callId, ClientConnection conn) {
final EventHandler eventHandler = conn.getEventHandler(callId);
final Object eventObject = getSerializationService().toObject(event);
if (eventHandler == null) {
LOGGER.warning("No eventHandler for callId: " + callId + ", event: " + eventObject + ", conn: " + conn);
return;
}
eventHandler.handle(eventObject);
}
}
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_connection_nio_ClientConnectionManagerImpl.java
|
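The processor above demultiplexes responses by callId: each response deregisters and completes the pending call, and unknown callIds are logged and dropped. A self-contained sketch of the same pattern using CompletableFuture (CallTable is a hypothetical class, not Hazelcast's implementation):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class CallTable {
    private final ConcurrentMap<Integer, CompletableFuture<Object>> pending =
            new ConcurrentHashMap<>();

    public CompletableFuture<Object> register(int callId) {
        CompletableFuture<Object> f = new CompletableFuture<>();
        pending.put(callId, f);
        return f;
    }

    public void handleResponse(int callId, Object response, boolean isError) {
        CompletableFuture<Object> f = pending.remove(callId); // deregister, like deRegisterCallId
        if (f == null) {
            System.err.println("No call for callId: " + callId + ", response: " + response);
            return;
        }
        if (isError) {
            f.completeExceptionally(new RuntimeException(String.valueOf(response)));
        } else {
            f.complete(response);
        }
    }

    public static void main(String[] args) throws Exception {
        CallTable table = new CallTable();
        CompletableFuture<Object> f = table.register(42);
        table.handleResponse(42, "ok", false);
        System.out.println(f.get()); // ok
    }
}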
257 |
public static enum Type {
NONE,
MERGE,
ALL;
public static Type fromString(String type) throws ElasticsearchIllegalArgumentException {
if ("none".equalsIgnoreCase(type)) {
return NONE;
} else if ("merge".equalsIgnoreCase(type)) {
return MERGE;
} else if ("all".equalsIgnoreCase(type)) {
return ALL;
}
throw new ElasticsearchIllegalArgumentException("rate limiting type [" + type + "] not valid, can be one of [all|merge|none]");
}
}
| 0true
|
src_main_java_org_apache_lucene_store_StoreRateLimiting.java
|
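A self-contained sketch of the same case-insensitive parsing via Enum.valueOf on the uppercased name; for the three valid, non-null inputs it behaves like fromString above, and it rethrows with the same style of message otherwise:

import java.util.Locale;

public class EnumParseDemo {
    enum Type { NONE, MERGE, ALL }

    static Type fromString(String type) {
        try {
            return Type.valueOf(type.toUpperCase(Locale.ROOT));
        } catch (IllegalArgumentException e) {
            throw new IllegalArgumentException(
                    "rate limiting type [" + type + "] not valid, can be one of [all|merge|none]");
        }
    }

    public static void main(String[] args) {
        System.out.println(fromString("merge")); // MERGE
    }
}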
233 |
public abstract class OAbstractMapCache<T extends Map<ORID, ORecordInternal<?>>> implements OCache {
protected final OSharedResourceAdaptiveExternal lock = new OSharedResourceAdaptiveExternal(
OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean(), 0,
true);
protected final T cache;
private final AtomicBoolean enabled = new AtomicBoolean(false);
public OAbstractMapCache(T cache) {
this.cache = cache;
}
@Override
public void startup() {
enable();
}
@Override
public void shutdown() {
disable();
}
@Override
public boolean isEnabled() {
return enabled.get();
}
@Override
public boolean enable() {
return enabled.compareAndSet(false, true);
}
@Override
public boolean disable() {
clear();
return enabled.compareAndSet(true, false);
}
@Override
public ORecordInternal<?> get(final ORID id) {
if (!isEnabled())
return null;
lock.acquireExclusiveLock();
try {
return cache.get(id);
} finally {
lock.releaseExclusiveLock();
}
}
@Override
public ORecordInternal<?> put(final ORecordInternal<?> record) {
if (!isEnabled())
return null;
lock.acquireExclusiveLock();
try {
return cache.put(record.getIdentity(), record);
} finally {
lock.releaseExclusiveLock();
}
}
@Override
public ORecordInternal<?> remove(final ORID id) {
if (!isEnabled())
return null;
lock.acquireExclusiveLock();
try {
return cache.remove(id);
} finally {
lock.releaseExclusiveLock();
}
}
@Override
public void clear() {
if (!isEnabled())
return;
lock.acquireExclusiveLock();
try {
cache.clear();
} finally {
lock.releaseExclusiveLock();
}
}
@Override
public int size() {
lock.acquireSharedLock();
try {
return cache.size();
} finally {
lock.releaseSharedLock();
}
}
@Override
public Collection<ORID> keys() {
lock.acquireExclusiveLock();
try {
return new ArrayList<ORID>(cache.keySet());
} finally {
lock.releaseExclusiveLock();
}
}
@Override
public void lock(final ORID id) {
lock.acquireExclusiveLock();
}
@Override
public void unlock(final ORID id) {
lock.releaseExclusiveLock();
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_cache_OAbstractMapCache.java
|
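A plain-Java analogue (no OrientDB types) of the lock-guarded cache above, using ReentrantReadWriteLock. One difference worth noting: the original acquires the exclusive lock even for reads such as get(), while this sketch takes the cheaper read lock:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class GuardedCache<K, V> {
    private final Map<K, V> cache = new HashMap<>();
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

    public V get(K key) {
        lock.readLock().lock(); // shared lock suffices for reads
        try {
            return cache.get(key);
        } finally {
            lock.readLock().unlock();
        }
    }

    public V put(K key, V value) {
        lock.writeLock().lock(); // exclusive lock for mutation
        try {
            return cache.put(key, value);
        } finally {
            lock.writeLock().unlock();
        }
    }

    public static void main(String[] args) {
        GuardedCache<String, Integer> c = new GuardedCache<>();
        c.put("a", 1);
        System.out.println(c.get("a")); // 1
    }
}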
570 |
@RunWith(HazelcastSerialClassRunner.class)
@Category(NightlyTest.class)
//TODO:
public class ClusterJoinTest {
@Before
@After
public void killAllHazelcastInstances() throws IOException {
Hazelcast.shutdownAll();
}
@Test(timeout = 120000)
@Category(ProblematicTest.class)
public void testTcpIp1() throws Exception {
Config c = new Config();
c.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
c.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true);
c.getNetworkConfig().getInterfaces().setEnabled(true);
c.getNetworkConfig().getJoin().getTcpIpConfig().addMember("127.0.0.1");
c.getNetworkConfig().getInterfaces().addInterface("127.0.0.1");
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c);
assertEquals(1, h1.getCluster().getMembers().size());
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c);
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
h1.shutdown();
h1 = Hazelcast.newHazelcastInstance(c);
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
}
@Test(timeout = 120000)
@Category(ProblematicTest.class)
public void testTcpIp2() throws Exception {
Config c = new Config();
c.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
c.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true);
c.getNetworkConfig().getInterfaces().setEnabled(true);
c.getNetworkConfig().getJoin().getTcpIpConfig()
.addMember("127.0.0.1:5701")
.addMember("127.0.0.1:5702");
c.getNetworkConfig().getInterfaces().addInterface("127.0.0.1");
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c);
assertEquals(1, h1.getCluster().getMembers().size());
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c);
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
h1.shutdown();
h1 = Hazelcast.newHazelcastInstance(c);
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
}
@Test(timeout = 120000)
@Category(ProblematicTest.class)
public void testTcpIp3() throws Exception {
Config c = new Config();
c.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
c.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true);
c.getNetworkConfig().getInterfaces().setEnabled(true);
c.getNetworkConfig().getJoin().getTcpIpConfig()
.addMember("127.0.0.1:5701")
.addMember("127.0.0.1:5702");
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c);
assertEquals(1, h1.getCluster().getMembers().size());
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c);
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
h1.getLifecycleService().shutdown();
h1 = Hazelcast.newHazelcastInstance(c);
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
}
@Test(timeout = 120000)
@Category(ProblematicTest.class)
public void testMulticast() throws Exception {
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(new Config());
assertEquals(1, h1.getCluster().getMembers().size());
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(new Config());
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
}
@Test(timeout = 120000)
@Category(ProblematicTest.class)
public void testTcpIpWithDifferentBuildNumber() throws Exception {
System.setProperty("hazelcast.build", "1");
try {
Config c = new Config();
c.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
c.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true);
c.getNetworkConfig().getInterfaces().setEnabled(true);
c.getNetworkConfig().getJoin().getTcpIpConfig().addMember("127.0.0.1:5701");
c.getNetworkConfig().getInterfaces().addInterface("127.0.0.1");
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c);
assertEquals(1, h1.getCluster().getMembers().size());
System.setProperty("hazelcast.build", "2");
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c);
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
} finally {
System.clearProperty("hazelcast.build");
}
}
@Test(timeout = 120000)
@Category(ProblematicTest.class)
public void testMulticastWithDifferentBuildNumber() throws Exception {
System.setProperty("hazelcast.build", "1");
try {
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(new Config());
assertEquals(1, h1.getCluster().getMembers().size());
System.setProperty("hazelcast.build", "2");
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(new Config());
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
} finally {
System.clearProperty("hazelcast.build");
}
}
/**
* Test for the issue 184
* <p/>
* Hazelcast.newHazelcastInstance(new Config()) doesn't join the cluster.
* new Config() should be enough as the default config.
*/
@Test(timeout = 240000)
@Category(ProblematicTest.class)
public void testDefaultConfigCluster() {
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(new Config());
assertEquals(1, h1.getCluster().getMembers().size());
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(new Config());
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
}
@Test
public void unresolvableHostName() {
Config config = new Config();
config.getGroupConfig().setName("abc");
config.getGroupConfig().setPassword("def");
JoinConfig join = config.getNetworkConfig().getJoin();
join.getMulticastConfig().setEnabled(false);
join.getTcpIpConfig().setEnabled(true);
join.getTcpIpConfig().setMembers(Arrays.asList(new String[]{"localhost", "nonexistinghost"}));
HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
assertEquals(1, hz.getCluster().getMembers().size());
}
@Test
public void testNewInstanceByName() {
Config config = new Config();
config.setInstanceName("test");
HazelcastInstance hc1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance hc2 = Hazelcast.getHazelcastInstanceByName("test");
HazelcastInstance hc3 = Hazelcast.getHazelcastInstanceByName(hc1.getName());
assertTrue(hc1 == hc2);
assertTrue(hc1 == hc3);
}
@Test(expected = DuplicateInstanceNameException.class)
public void testNewInstanceByNameFail() {
Config config = new Config();
config.setInstanceName("test");
Hazelcast.newHazelcastInstance(config);
Hazelcast.newHazelcastInstance(config);
}
@Test
public void testMulticastJoinWithIncompatibleGroups() throws Exception {
Config config1 = new Config();
config1.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(true);
config1.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(false);
config1.getGroupConfig().setName("group1");
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config1);
Config config2 = new Config();
config2.getGroupConfig().setName("group2");
config2.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(true);
config2.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(false);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config2);
final int s1 = h1.getCluster().getMembers().size();
final int s2 = h2.getCluster().getMembers().size();
assertEquals(1, s1);
assertEquals(1, s2);
}
@Test
public void testTcpIpJoinWithIncompatibleGroups() throws Exception {
Config config1 = new Config();
config1.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
config1.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true).addMember("127.0.0.1");
config1.getGroupConfig().setName("group1");
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config1);
Config config2 = new Config();
config2.getGroupConfig().setName("group2");
config2.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
config2.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true).addMember("127.0.0.1");
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config2);
final int s1 = h1.getCluster().getMembers().size();
final int s2 = h2.getCluster().getMembers().size();
assertEquals(1, s1);
assertEquals(1, s2);
}
@Test
public void testMulticastJoinWithIncompatiblePasswords() throws Exception {
Config config1 = new Config();
config1.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(true);
config1.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(false);
config1.getGroupConfig().setPassword("pass1");
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config1);
Config config2 = new Config();
config2.getGroupConfig().setPassword("pass2");
config2.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(true);
config2.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(false);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config2);
final int s1 = h1.getCluster().getMembers().size();
final int s2 = h2.getCluster().getMembers().size();
assertEquals(1, s1);
assertEquals(1, s2);
}
@Test
public void testTcpIpJoinWithIncompatiblePasswords() throws Exception {
Config config1 = new Config();
config1.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
config1.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true).addMember("127.0.0.1");
config1.getGroupConfig().setPassword("pass1");
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config1);
Config config2 = new Config();
config2.getGroupConfig().setPassword("pass2");
config2.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
config2.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true).addMember("127.0.0.1");
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config2);
final int s1 = h1.getCluster().getMembers().size();
final int s2 = h2.getCluster().getMembers().size();
assertEquals(1, s1);
assertEquals(1, s2);
}
@Test
@Category(ProblematicTest.class)
public void testJoinWithIncompatibleJoiners() throws Exception {
Config config1 = new Config();
config1.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(false);
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config1);
Config config2 = new Config();
config2.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
config2.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true).addMember("127.0.0.1");
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config2);
final int s1 = h1.getCluster().getMembers().size();
final int s2 = h2.getCluster().getMembers().size();
assertEquals(1, s1);
assertEquals(1, s2);
}
@Test
@Category(ProblematicTest.class)
public void testMulticastJoinWithIncompatiblePartitionGroups() throws Exception {
Config config1 = new Config();
config1.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(true);
config1.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(false);
config1.getPartitionGroupConfig().setEnabled(true).setGroupType(PartitionGroupConfig.MemberGroupType.HOST_AWARE);
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config1);
Config config2 = new Config();
config2.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(true);
config2.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(false);
config2.getPartitionGroupConfig().setEnabled(false);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config2);
final int s1 = h1.getCluster().getMembers().size();
final int s2 = h2.getCluster().getMembers().size();
assertEquals(1, s1);
assertEquals(1, s2);
}
@Test
public void testTcpIpJoinWithIncompatiblePartitionGroups() throws Exception {
Config config1 = new Config();
config1.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
config1.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true).addMember("127.0.0.1");
config1.getPartitionGroupConfig().setEnabled(true).setGroupType(PartitionGroupConfig.MemberGroupType.CUSTOM);
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config1);
Config config2 = new Config();
config2.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
config2.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true).addMember("127.0.0.1");
config2.getPartitionGroupConfig().setEnabled(true).setGroupType(PartitionGroupConfig.MemberGroupType.HOST_AWARE);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config2);
final int s1 = h1.getCluster().getMembers().size();
final int s2 = h2.getCluster().getMembers().size();
assertEquals(1, s1);
assertEquals(1, s2);
}
@Test
public void testMulticastJoinDuringSplitBrainHandlerRunning() throws InterruptedException {
Properties props = new Properties();
props.setProperty(GroupProperties.PROP_WAIT_SECONDS_BEFORE_JOIN, "5");
props.setProperty(GroupProperties.PROP_MERGE_FIRST_RUN_DELAY_SECONDS, "0");
props.setProperty(GroupProperties.PROP_MERGE_NEXT_RUN_DELAY_SECONDS, "0");
final CountDownLatch latch = new CountDownLatch(1);
Config config1 = new Config();
config1.getNetworkConfig().setPort(5901); // bigger port to make sure the address.hashCode() check passes during merge!
config1.setProperties(props);
config1.addListenerConfig(new ListenerConfig(new LifecycleListener() {
public void stateChanged(final LifecycleEvent event) {
switch (event.getState()) {
case MERGING:
case MERGED:
latch.countDown();
default:
break;
}
}
}));
Hazelcast.newHazelcastInstance(config1);
Thread.sleep(5000);
Config config2 = new Config();
config2.getNetworkConfig().setPort(5701);
config2.setProperties(props);
Hazelcast.newHazelcastInstance(config2);
assertFalse("Latch should not be countdown!", latch.await(3, TimeUnit.SECONDS));
}
}
| 0true
|
hazelcast_src_test_java_com_hazelcast_cluster_ClusterJoinTest.java
|
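A minimal sketch of the TCP/IP join configuration these tests exercise, using only the Config calls that appear above; the port and interface values are illustrative:

import com.hazelcast.config.Config;
import com.hazelcast.config.JoinConfig;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;

public class TcpIpJoinDemo {
    public static void main(String[] args) {
        Config config = new Config();
        JoinConfig join = config.getNetworkConfig().getJoin();
        join.getMulticastConfig().setEnabled(false);            // disable multicast discovery
        join.getTcpIpConfig().setEnabled(true).addMember("127.0.0.1"); // explicit member list
        config.getNetworkConfig().getInterfaces().setEnabled(true);
        config.getNetworkConfig().getInterfaces().addInterface("127.0.0.1");
        HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
        System.out.println(hz.getCluster().getMembers().size()); // 1 for the first member
    }
}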
5,835 |
public class SourceScoreOrderFragmentsBuilder extends ScoreOrderFragmentsBuilder {
private final FieldMapper<?> mapper;
private final SearchContext searchContext;
public SourceScoreOrderFragmentsBuilder(FieldMapper<?> mapper, SearchContext searchContext,
String[] preTags, String[] postTags, BoundaryScanner boundaryScanner) {
super(preTags, postTags, boundaryScanner);
this.mapper = mapper;
this.searchContext = searchContext;
}
@Override
protected Field[] getFields(IndexReader reader, int docId, String fieldName) throws IOException {
// we know its low level reader, and matching docId, since that's how we call the highlighter with
SearchLookup lookup = searchContext.lookup();
lookup.setNextReader((AtomicReaderContext) reader.getContext());
lookup.setNextDocId(docId);
List<Object> values = lookup.source().extractRawValues(mapper.names().sourcePath());
Field[] fields = new Field[values.size()];
for (int i = 0; i < values.size(); i++) {
fields[i] = new Field(mapper.names().indexName(), values.get(i).toString(), TextField.TYPE_NOT_STORED);
}
return fields;
}
protected String makeFragment(StringBuilder buffer, int[] index, Field[] values, WeightedFragInfo fragInfo,
String[] preTags, String[] postTags, Encoder encoder) {
return super.makeFragment(buffer, index, values, FragmentBuilderHelper.fixWeightedFragInfo(mapper, values, fragInfo), preTags, postTags, encoder);
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_highlight_vectorhighlight_SourceScoreOrderFragmentsBuilder.java
|
260 |
.around(new NoInstanceHooksOverridesRule() {
@Override
protected boolean verify(Method key) {
String name = key.getName();
return !(name.equals("setUp") || name.equals("tearDown"));
}
})
| 0true
|
src_test_java_org_apache_lucene_util_AbstractRandomizedTest.java
|
381 |
public class PutItemsThread extends Thread{
public static final int MAX_ITEMS = 1000;
public final MyEntryListener listener = new MyEntryListener();
public HazelcastInstance hzInstance;
public MultiMap mm;
public String id;
public PutItemsThread(HazelcastInstance hzInstance){
this.id = randomString();
this.hzInstance = hzInstance;
this.mm = hzInstance.getMultiMap(MAP_NAME);
mm.addEntryListener(listener, true);
}
public void run(){
for(int i=0; i< MAX_ITEMS; i++){
mm.put(id+i, id+i);
}
}
public void assertResult(int target){
System.out.println("listener "+id+" add events received "+listener.add.get());
assertEquals(target, listener.add.get());
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_multimap_ClientMultiMapListenerStressTest.java
|
1,727 |
public class DefaultRecordStore implements RecordStore {
private static final long DEFAULT_TTL = -1L;
/**
* Number of reads before clean up.
*/
private static final byte POST_READ_CHECK_POINT = 0x3F;
private final String name;
private final int partitionId;
private final ConcurrentMap<Data, Record> records = new ConcurrentHashMap<Data, Record>(1000);
private final MapContainer mapContainer;
private final MapService mapService;
private final LockStore lockStore;
private final RecordFactory recordFactory;
private final ILogger logger;
private final SizeEstimator sizeEstimator;
private final AtomicBoolean loaded = new AtomicBoolean(false);
private final WriteBehindQueue<DelayedEntry> writeBehindQueue;
private long lastEvictionTime;
/**
* If puts have not triggered a clean-up for some time,
* count the number of gets and start eviction once a threshold is reached.
*/
private byte readCountBeforeCleanUp;
/**
* Tracks keys that have a pending (delayed) delete operation.
*/
private final Set<Data> writeBehindWaitingDeletions;
/**
* used for lru eviction.
*/
private long lruAccessSequenceNumber;
public DefaultRecordStore(String name, MapService mapService, int partitionId) {
this.name = name;
this.partitionId = partitionId;
this.mapService = mapService;
this.mapContainer = mapService.getMapContainer(name);
this.logger = mapService.getNodeEngine().getLogger(this.getName());
this.recordFactory = mapContainer.getRecordFactory();
this.writeBehindQueue = WriteBehindQueues.createDefaultWriteBehindQueue(mapContainer.isWriteBehindMapStoreEnabled());
this.writeBehindWaitingDeletions = mapContainer.isWriteBehindMapStoreEnabled()
? Collections.newSetFromMap(new ConcurrentHashMap()) : (Set<Data>) Collections.EMPTY_SET;
NodeEngine nodeEngine = mapService.getNodeEngine();
final LockService lockService = nodeEngine.getSharedService(LockService.SERVICE_NAME);
this.lockStore = lockService == null ? null
: lockService.createLockStore(partitionId, new DefaultObjectNamespace(MapService.SERVICE_NAME, name));
this.sizeEstimator = SizeEstimators.createMapSizeEstimator();
loadFromMapStore(nodeEngine);
}
public boolean isLoaded() {
return loaded.get();
}
public void setLoaded(boolean isLoaded) {
loaded.set(isLoaded);
}
public void checkIfLoaded() {
if (mapContainer.getStore() != null && !loaded.get()) {
throw ExceptionUtil.rethrow(new RetryableHazelcastException("Map is not ready!!!"));
}
}
public String getName() {
return name;
}
public void flush() {
checkIfLoaded();
final Collection<Data> processedKeys
= mapContainer.getWriteBehindManager().flush(writeBehindQueue);
for (Data pkey : processedKeys) {
final Record record = records.get(pkey);
if (record != null) {
record.onStore();
}
}
}
public MapContainer getMapContainer() {
return mapContainer;
}
public Record getRecord(Data key) {
return records.get(key);
}
public void putForReplication(Data key, Record record) {
records.put(key, record);
updateSizeEstimator(calculateRecordSize(record));
removeFromWriteBehindWaitingDeletions(key);
}
public Record putBackup(Data key, Object value) {
return putBackup(key, value, DEFAULT_TTL);
}
public Record putBackup(Data key, Object value, long ttl) {
earlyWriteCleanup();
Record record = records.get(key);
if (record == null) {
record = mapService.createRecord(name, key, value, ttl);
records.put(key, record);
updateSizeEstimator(calculateRecordSize(record));
} else {
updateSizeEstimator(-calculateRecordSize(record));
setRecordValue(record, value);
updateSizeEstimator(calculateRecordSize(record));
}
removeFromWriteBehindWaitingDeletions(key);
addToDelayedStore(key, record.getValue());
return record;
}
public void deleteRecord(Data key) {
Record record = records.remove(key);
if (record != null) {
record.invalidate();
}
}
public Map<Data, Record> getReadonlyRecordMap() {
return Collections.unmodifiableMap(records);
}
public void clearPartition() {
final LockService lockService = mapService.getNodeEngine().getSharedService(LockService.SERVICE_NAME);
if (lockService != null) {
lockService.clearLockStore(partitionId, new DefaultObjectNamespace(MapService.SERVICE_NAME, name));
}
final IndexService indexService = mapContainer.getIndexService();
if (indexService.hasIndex()) {
for (Data key : records.keySet()) {
indexService.removeEntryIndex(key);
}
}
clearRecordsMap(Collections.<Data, Record>emptyMap());
resetSizeEstimator();
resetAccessSequenceNumber();
}
private void clearRecordsMap(Map<Data, Record> excludeRecords) {
InMemoryFormat inMemoryFormat = recordFactory.getStorageFormat();
switch (inMemoryFormat) {
case BINARY:
case OBJECT:
records.clear();
if (excludeRecords != null && !excludeRecords.isEmpty()) {
records.putAll(excludeRecords);
}
return;
case OFFHEAP:
Iterator<Record> iter = records.values().iterator();
while (iter.hasNext()) {
Record record = iter.next();
if (excludeRecords == null || !excludeRecords.containsKey(record.getKey())) {
record.invalidate();
iter.remove();
}
}
return;
default:
throw new IllegalArgumentException("Unknown storage format: " + inMemoryFormat);
}
}
/**
* Size may not reflect the precise size at a specific moment
* due to the expiration logic, but it is eventually correct.
*
* @return record store size.
*/
public int size() {
// do not add checkIfLoaded(), size() is also used internally
return records.size();
}
public boolean isEmpty() {
checkIfLoaded();
return records.isEmpty();
}
@Override
public WriteBehindQueue<DelayedEntry> getWriteBehindQueue() {
return writeBehindQueue;
}
@Override
public List findUnlockedExpiredRecords() {
checkIfLoaded();
final long nowInNanos = nowInNanos();
List<Object> expiredKeyValueSequence = Collections.emptyList();
boolean createLazy = true;
for (Map.Entry<Data, Record> entry : records.entrySet()) {
final Data key = entry.getKey();
if (isLocked(key)) {
continue;
}
final Record record = entry.getValue();
if (isReachable(record, nowInNanos)) {
continue;
}
final Object value = record.getValue();
evict(key);
if (createLazy) {
expiredKeyValueSequence = new ArrayList<Object>();
createLazy = false;
}
expiredKeyValueSequence.add(key);
expiredKeyValueSequence.add(value);
}
return expiredKeyValueSequence;
}
@Override
public boolean containsValue(Object value) {
checkIfLoaded();
for (Record record : records.values()) {
if (nullIfExpired(record) == null) {
continue;
}
if (mapService.compare(name, value, record.getValue())) {
return true;
}
}
postReadCleanUp();
return false;
}
public boolean lock(Data key, String caller, long threadId, long ttl) {
checkIfLoaded();
return lockStore != null && lockStore.lock(key, caller, threadId, ttl);
}
public boolean txnLock(Data key, String caller, long threadId, long ttl) {
checkIfLoaded();
return lockStore != null && lockStore.txnLock(key, caller, threadId, ttl);
}
public boolean extendLock(Data key, String caller, long threadId, long ttl) {
checkIfLoaded();
return lockStore != null && lockStore.extendLeaseTime(key, caller, threadId, ttl);
}
public boolean unlock(Data key, String caller, long threadId) {
checkIfLoaded();
return lockStore != null && lockStore.unlock(key, caller, threadId);
}
public boolean forceUnlock(Data dataKey) {
return lockStore != null && lockStore.forceUnlock(dataKey);
}
@Override
public long getHeapCost() {
return getSizeEstimator().getSize();
}
public boolean isLocked(Data dataKey) {
return lockStore != null && lockStore.isLocked(dataKey);
}
public boolean canAcquireLock(Data key, String caller, long threadId) {
return lockStore == null || lockStore.canAcquireLock(key, caller, threadId);
}
public String getLockOwnerInfo(Data key) {
return lockStore != null ? lockStore.getOwnerInfo(key) : null;
}
public Set<Map.Entry<Data, Data>> entrySetData() {
checkIfLoaded();
Map<Data, Data> temp = new HashMap<Data, Data>(records.size());
for (Data key : records.keySet()) {
temp.put(key, mapService.toData(records.get(key).getValue()));
}
return temp.entrySet();
}
public Map.Entry<Data, Object> getMapEntry(Data dataKey) {
checkIfLoaded();
Record record = records.get(dataKey);
if (record == null) {
record = getRecordInternal(dataKey, true);
} else {
accessRecord(record);
}
final Object data = record != null ? record.getValue() : null;
return new AbstractMap.SimpleImmutableEntry<Data, Object>(dataKey, data);
}
public Map.Entry<Data, Object> getMapEntryForBackup(Data dataKey) {
checkIfLoaded();
Record record = records.get(dataKey);
if (record == null) {
record = getRecordInternal(dataKey, false);
} else {
accessRecord(record);
}
final Object data = record != null ? record.getValue() : null;
return new AbstractMap.SimpleImmutableEntry<Data, Object>(dataKey, data);
}
private Record getRecordInternal(Data dataKey, boolean enableIndex) {
Record record = null;
if (mapContainer.getStore() != null) {
final Object value = mapContainer.getStore().load(mapService.toObject(dataKey));
if (value != null) {
record = mapService.createRecord(name, dataKey, value, DEFAULT_TTL);
records.put(dataKey, record);
if (enableIndex) {
saveIndex(record);
}
updateSizeEstimator(calculateRecordSize(record));
}
}
return record;
}
public Set<Data> keySet() {
checkIfLoaded();
Set<Data> keySet = new HashSet<Data>(records.size());
for (Data data : records.keySet()) {
keySet.add(data);
}
return keySet;
}
public Collection<Data> valuesData() {
checkIfLoaded();
Collection<Data> values = new ArrayList<Data>(records.size());
for (Record record : records.values()) {
values.add(mapService.toData(record.getValue()));
}
return values;
}
public void clear() {
checkIfLoaded();
resetSizeEstimator();
final Collection<Data> lockedKeys = lockStore != null ? lockStore.getLockedKeys() : Collections.<Data>emptySet();
final Map<Data, Record> lockedRecords = new HashMap<Data, Record>(lockedKeys.size());
// Locked records should not be removed!
for (Data key : lockedKeys) {
Record record = records.get(key);
if (record != null) {
lockedRecords.put(key, record);
updateSizeEstimator(calculateRecordSize(record));
}
}
Set<Data> keysToDelete = records.keySet();
keysToDelete.removeAll(lockedRecords.keySet());
final MapStoreWrapper store = mapContainer.getStore();
if (store != null) {
// Use an ArrayList so that we don't trigger calls to equals or hashCode on the key objects
Collection<Object> keysObject = new ArrayList<Object>(keysToDelete.size());
for (Data key : keysToDelete) {
keysObject.add(mapService.toObject(key));
}
store.deleteAll(keysObject);
}
removeIndex(keysToDelete);
clearRecordsMap(lockedRecords);
resetAccessSequenceNumber();
writeBehindQueue.clear();
}
public void reset() {
checkIfLoaded();
clearRecordsMap(Collections.<Data, Record>emptyMap());
resetSizeEstimator();
resetAccessSequenceNumber();
writeBehindQueue.clear();
writeBehindWaitingDeletions.clear();
}
private void resetAccessSequenceNumber() {
lruAccessSequenceNumber = 0L;
}
public Object evict(Data dataKey) {
checkIfLoaded();
Record record = records.get(dataKey);
Object oldValue = null;
if (record != null) {
mapService.interceptRemove(name, record.getValue());
oldValue = record.getValue();
updateSizeEstimator(-calculateRecordSize(record));
deleteRecord(dataKey);
removeIndex(dataKey);
}
return oldValue;
}
@Override
public void removeBackup(Data key) {
earlyWriteCleanup();
final Record record = records.get(key);
if (record == null) {
return;
}
// reduce size
updateSizeEstimator(-calculateRecordSize(record));
deleteRecord(key);
addToWriteBehindWaitingDeletions(key);
addToDelayedStore(key, null);
}
@Override
public boolean remove(Data dataKey, Object testValue) {
checkIfLoaded();
earlyWriteCleanup();
Record record = records.get(dataKey);
Object oldValue = null;
boolean removed = false;
if (record == null) {
if (mapContainer.getStore() != null) {
oldValue = mapContainer.getStore().load(mapService.toObject(dataKey));
}
if (oldValue == null) {
return false;
}
} else {
oldValue = record.getValue();
}
if (mapService.compare(name, testValue, oldValue)) {
mapService.interceptRemove(name, oldValue);
removeIndex(dataKey);
mapStoreDelete(record, dataKey);
// reduce size
updateSizeEstimator(-calculateRecordSize(record));
deleteRecord(dataKey);
removed = true;
}
return removed;
}
@Override
public Object remove(Data dataKey) {
checkIfLoaded();
earlyWriteCleanup();
Record record = records.get(dataKey);
Object oldValue = null;
if (record == null) {
if (mapContainer.getStore() != null) {
oldValue = mapContainer.getStore().load(mapService.toObject(dataKey));
if (oldValue != null) {
removeIndex(dataKey);
mapStoreDelete(null, dataKey);
}
}
} else {
oldValue = record.getValue();
oldValue = mapService.interceptRemove(name, oldValue);
if (oldValue != null) {
removeIndex(dataKey);
mapStoreDelete(record, dataKey);
}
// reduce size
updateSizeEstimator(-calculateRecordSize(record));
deleteRecord(dataKey);
}
return oldValue;
}
@Override
public Object get(Data key) {
checkIfLoaded();
if (hasWaitingWriteBehindDeleteOperation(key)) {
// not reachable record.
return null;
}
Record record = records.get(key);
record = nullIfExpired(record);
Object value = null;
if (record == null) {
if (mapContainer.getStore() != null) {
value = mapContainer.getStore().load(mapService.toObject(key));
if (value != null) {
record = mapService.createRecord(name, key, value, DEFAULT_TTL);
records.put(key, record);
saveIndex(record);
updateSizeEstimator(calculateRecordSize(record));
}
}
} else {
accessRecord(record);
value = record.getValue();
}
value = mapService.interceptGet(name, value);
postReadCleanUp();
return value;
}
@Override
public MapEntrySet getAll(Set<Data> keySet) {
checkIfLoaded();
final MapEntrySet mapEntrySet = new MapEntrySet();
Map<Object, Data> keyMapForLoader = Collections.emptyMap();
if (mapContainer.getStore() != null) {
keyMapForLoader = new HashMap<Object, Data>();
}
for (Data dataKey : keySet) {
Record record = records.get(dataKey);
if (hasWaitingWriteBehindDeleteOperation(dataKey)) {
continue;
}
if (record == null) {
if (mapContainer.getStore() != null) {
keyMapForLoader.put(mapService.toObject(dataKey), dataKey);
}
} else {
accessRecord(record);
Object value = record.getValue();
value = mapService.interceptGet(name, value);
if (value != null) {
mapEntrySet.add(new AbstractMap.SimpleImmutableEntry(dataKey, mapService.toData(value)));
}
}
}
if (mapContainer.getStore() == null || keyMapForLoader.size() == 0) {
return mapEntrySet;
}
final Map<Object, Object> loadedKeys = mapContainer.getStore().loadAll(keyMapForLoader.keySet());
for (Map.Entry entry : loadedKeys.entrySet()) {
final Object objectKey = entry.getKey();
Object value = entry.getValue();
Data dataKey = keyMapForLoader.get(objectKey);
if (hasWaitingWriteBehindDeleteOperation(dataKey)) {
continue;
}
if (value != null) {
Record record = mapService.createRecord(name, dataKey, value, DEFAULT_TTL);
records.put(dataKey, record);
saveIndex(record);
updateSizeEstimator(calculateRecordSize(record));
}
value = mapService.interceptGet(name, value);
if (value != null) {
mapEntrySet.add(new AbstractMap.SimpleImmutableEntry(dataKey, mapService.toData(value)));
}
}
return mapEntrySet;
}
@Override
public boolean containsKey(Data key) {
checkIfLoaded();
Record record = records.get(key);
if (hasWaitingWriteBehindDeleteOperation(key)) {
// a write-behind delete is pending for this key, so the record is not reachable.
return false;
}
record = nullIfExpired(record);
if (record == null) {
if (mapContainer.getStore() != null) {
Object value = mapContainer.getStore().load(mapService.toObject(key));
if (value != null) {
record = mapService.createRecord(name, key, value, DEFAULT_TTL);
records.put(key, record);
updateSizeEstimator(calculateRecordSize(record));
}
}
}
boolean contains = record != null;
if (contains) {
accessRecord(record);
}
postReadCleanUp();
return contains;
}
@Override
public void put(Map.Entry<Data, Object> entry) {
checkIfLoaded();
earlyWriteCleanup();
Data key = entry.getKey();
Object value = entry.getValue();
Record record = records.get(key);
if (record == null) {
value = mapService.interceptPut(name, null, value);
value = mapStoreWrite(key, value, null);
record = mapService.createRecord(name, key, value, DEFAULT_TTL);
records.put(key, record);
// increase size.
updateSizeEstimator(calculateRecordSize(record));
saveIndex(record);
} else {
final Object oldValue = record.getValue();
value = mapService.interceptPut(name, oldValue, value);
value = mapStoreWrite(key, value, record);
// if key exists before, first reduce size
updateSizeEstimator(-calculateRecordSize(record));
setRecordValue(record, value);
// then increase size
updateSizeEstimator(calculateRecordSize(record));
saveIndex(record);
}
removeFromWriteBehindWaitingDeletions(key);
}
public Object put(Data key, Object value, long ttl) {
checkIfLoaded();
earlyWriteCleanup();
Record record = records.get(key);
Object oldValue = null;
if (record == null) {
if (mapContainer.getStore() != null) {
oldValue = mapContainer.getStore().load(mapService.toObject(key));
}
value = mapService.interceptPut(name, null, value);
value = mapStoreWrite(key, value, null);
record = mapService.createRecord(name, key, value, ttl);
records.put(key, record);
updateSizeEstimator(calculateRecordSize(record));
saveIndex(record);
} else {
oldValue = record.getValue();
value = mapService.interceptPut(name, oldValue, value);
value = mapStoreWrite(key, value, record);
// if key exists before, first reduce size
updateSizeEstimator(-calculateRecordSize(record));
setRecordValue(record, value);
// then increase size.
updateSizeEstimator(calculateRecordSize(record));
updateTtl(record, ttl);
saveIndex(record);
}
removeFromWriteBehindWaitingDeletions(key);
return oldValue;
}
public boolean set(Data dataKey, Object value, long ttl) {
checkIfLoaded();
earlyWriteCleanup();
Record record = records.get(dataKey);
boolean newRecord = false;
if (record == null) {
value = mapService.interceptPut(name, null, value);
value = mapStoreWrite(dataKey, value, null);
record = mapService.createRecord(name, dataKey, value, ttl);
records.put(dataKey, record);
updateSizeEstimator(calculateRecordSize(record));
newRecord = true;
} else {
value = mapService.interceptPut(name, record.getValue(), value);
value = mapStoreWrite(dataKey, value, record);
// if key exists before, first reduce size
updateSizeEstimator(-calculateRecordSize(record));
setRecordValue(record, value);
// then increase size.
updateSizeEstimator(calculateRecordSize(record));
updateTtl(record, ttl);
}
saveIndex(record);
return newRecord;
}
public boolean merge(Data dataKey, EntryView mergingEntry, MapMergePolicy mergePolicy) {
checkIfLoaded();
earlyWriteCleanup();
Record record = records.get(dataKey);
Object newValue;
if (record == null) {
newValue = mergingEntry.getValue();
newValue = mapStoreWrite(dataKey, newValue, null);
record = mapService.createRecord(name, dataKey, newValue, DEFAULT_TTL);
records.put(dataKey, record);
updateSizeEstimator(calculateRecordSize(record));
} else {
Object oldValue = record.getValue();
EntryView existingEntry = mapService.createSimpleEntryView(mapService.toObject(record.getKey()),
mapService.toObject(record.getValue()), record);
newValue = mergePolicy.merge(name, mergingEntry, existingEntry);
// existing entry will be removed
if (newValue == null) {
removeIndex(dataKey);
mapStoreDelete(record, dataKey);
// reduce size.
updateSizeEstimator(-calculateRecordSize(record));
//remove from map & invalidate.
deleteRecord(dataKey);
return true;
}
// the new value equals the existing one, so skip the map-store and update operations.
if (mapService.compare(name, newValue, oldValue)) {
return true;
}
newValue = mapStoreWrite(dataKey, newValue, record);
updateSizeEstimator(-calculateRecordSize(record));
recordFactory.setValue(record, newValue);
updateSizeEstimator(calculateRecordSize(record));
}
saveIndex(record);
return newValue != null;
}
public Object replace(Data dataKey, Object value) {
checkIfLoaded();
earlyWriteCleanup();
Record record = records.get(dataKey);
Object oldValue;
if (record != null && record.getValue() != null) {
oldValue = record.getValue();
value = mapService.interceptPut(name, oldValue, value);
value = mapStoreWrite(dataKey, value, record);
updateSizeEstimator(-calculateRecordSize(record));
setRecordValue(record, value);
updateSizeEstimator(calculateRecordSize(record));
} else {
return null;
}
saveIndex(record);
return oldValue;
}
public boolean replace(Data dataKey, Object testValue, Object newValue) {
checkIfLoaded();
earlyWriteCleanup();
Record record = records.get(dataKey);
if (record == null) {
return false;
}
if (mapService.compare(name, record.getValue(), testValue)) {
newValue = mapService.interceptPut(name, record.getValue(), newValue);
newValue = mapStoreWrite(dataKey, newValue, record);
updateSizeEstimator(-calculateRecordSize(record));
setRecordValue(record, newValue);
updateSizeEstimator(calculateRecordSize(record));
} else {
return false;
}
saveIndex(record);
return true;
}
public void putTransient(Data key, Object value, long ttl) {
checkIfLoaded();
earlyWriteCleanup();
Record record = records.get(key);
if (record == null) {
value = mapService.interceptPut(name, null, value);
record = mapService.createRecord(name, key, value, ttl);
records.put(key, record);
updateSizeEstimator(calculateRecordSize(record));
} else {
value = mapService.interceptPut(name, record.getValue(), value);
updateSizeEstimator(-calculateRecordSize(record));
setRecordValue(record, value);
updateSizeEstimator(calculateRecordSize(record));
updateTtl(record, ttl);
}
saveIndex(record);
removeFromWriteBehindWaitingDeletions(key);
}
public void putFromLoad(Data key, Object value, long ttl) {
Record record = records.get(key);
earlyWriteCleanup();
if (record == null) {
value = mapService.interceptPut(name, null, value);
record = mapService.createRecord(name, key, value, ttl);
records.put(key, record);
updateSizeEstimator(calculateRecordSize(record));
} else {
value = mapService.interceptPut(name, record.getValue(), value);
updateSizeEstimator(-calculateRecordSize(record));
setRecordValue(record, value);
updateSizeEstimator(calculateRecordSize(record));
updateTtl(record, ttl);
}
saveIndex(record);
removeFromWriteBehindWaitingDeletions(key);
}
public boolean tryPut(Data key, Object value, long ttl) {
checkIfLoaded();
earlyWriteCleanup();
Record record = records.get(key);
if (record == null) {
value = mapService.interceptPut(name, null, value);
value = mapStoreWrite(key, value, null);
record = mapService.createRecord(name, key, value, ttl);
records.put(key, record);
updateSizeEstimator(calculateRecordSize(record));
} else {
value = mapService.interceptPut(name, record.getValue(), value);
value = mapStoreWrite(key, value, record);
updateSizeEstimator(-calculateRecordSize(record));
setRecordValue(record, value);
updateSizeEstimator(calculateRecordSize(record));
updateTtl(record, ttl);
}
saveIndex(record);
removeFromWriteBehindWaitingDeletions(key);
return true;
}
public Object putIfAbsent(Data key, Object value, long ttl) {
checkIfLoaded();
earlyWriteCleanup();
Record record = records.get(key);
Object oldValue = null;
if (record == null) {
if (mapContainer.getStore() != null) {
oldValue = mapContainer.getStore().load(mapService.toObject(key));
if (oldValue != null) {
record = mapService.createRecord(name, key, oldValue, DEFAULT_TTL);
records.put(key, record);
updateSizeEstimator(calculateRecordSize(record));
}
}
} else {
accessRecord(record);
oldValue = record.getValue();
}
if (oldValue == null) {
value = mapService.interceptPut(name, null, value);
value = mapStoreWrite(key, value, record);
record = mapService.createRecord(name, key, value, ttl);
records.put(key, record);
updateSizeEstimator(calculateRecordSize(record));
updateTtl(record, ttl);
}
saveIndex(record);
removeFromWriteBehindWaitingDeletions(key);
return oldValue;
}
private void loadFromMapStore(NodeEngine nodeEngine) {
final AtomicBoolean loadOccurred = loaded;
if (!mapContainer.isMapStoreEnabled() || loadOccurred.get()) {
return;
}
final Address partitionOwner = nodeEngine.getPartitionService().getPartitionOwner(partitionId);
final boolean isOwner = nodeEngine.getThisAddress().equals(partitionOwner);
if (!isOwner) {
loadOccurred.set(true);
return;
}
final Map<Data, Object> loadedKeys = mapContainer.getInitialKeys();
if (loadedKeys == null || loadedKeys.isEmpty()) {
loadOccurred.set(true);
return;
}
doChunkedLoad(loadedKeys, nodeEngine);
}
private void doChunkedLoad(Map<Data, Object> loadedKeys, NodeEngine nodeEngine) {
final int mapLoadChunkSize = nodeEngine.getGroupProperties().MAP_LOAD_CHUNK_SIZE.getInteger();
final Queue<Map> chunks = new LinkedList<Map>();
Map<Data, Object> partitionKeys = new HashMap<Data, Object>();
Iterator<Map.Entry<Data, Object>> iterator = loadedKeys.entrySet().iterator();
while (iterator.hasNext()) {
final Map.Entry<Data, Object> entry = iterator.next();
final Data data = entry.getKey();
if (partitionId == nodeEngine.getPartitionService().getPartitionId(data)) {
partitionKeys.put(data, entry.getValue());
//split into chunks
if (partitionKeys.size() >= mapLoadChunkSize) {
chunks.add(partitionKeys);
partitionKeys = new HashMap<Data, Object>();
}
iterator.remove();
}
}
if (!partitionKeys.isEmpty()) {
chunks.add(partitionKeys);
}
if (chunks.isEmpty()) {
loaded.set(true);
return;
}
try {
final AtomicInteger checkIfMapLoaded = new AtomicInteger(chunks.size());
Map<Data, Object> chunkedKeys;
while ((chunkedKeys = chunks.poll()) != null) {
nodeEngine.getExecutionService().submit("hz:map-load", new MapLoadAllTask(chunkedKeys, checkIfMapLoaded));
}
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
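// Worked example (illustrative numbers): with MAP_LOAD_CHUNK_SIZE = 1000 and 2500 keys
// owned by this partition, the loop above queues chunks of 1000, 1000 and 500 keys.
// checkIfMapLoaded then starts at 3, each MapLoadAllTask decrements it once,
// and loaded flips to true only after the last chunk's PutAllOperation responds.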
/**
 * TODO: make checkEvictable fast by moving the threshold logic to the partition.
 * This cleanup adds some latency to write operations,
 * but it sweeps records much more effectively under high write loads.
 */
private void earlyWriteCleanup() {
if (!mapContainer.isEvictionEnabled()) {
return;
}
cleanUp();
}
/**
 * If puts have not triggered a clean-up for a while,
 * periodically try to clean up from gets instead.
 */
private void postReadCleanUp() {
if (!mapContainer.isEvictionEnabled()) {
return;
}
readCountBeforeCleanUp++;
if ((readCountBeforeCleanUp & POST_READ_CHECK_POINT) == 0) {
cleanUp();
}
}
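// Illustrative note: assuming POST_READ_CHECK_POINT is a power-of-two-minus-one mask
// (e.g. 0x0FFF), the bitwise AND above is a cheap "every N reads" checkpoint,
// equivalent to readCountBeforeCleanUp % 4096 == 0; read number 4096 gives
// 4096 & 0x0FFF == 0, so cleanUp() runs on that read.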
private void cleanUp() {
final long now = System.currentTimeMillis();
final int evictAfterMs = 1000;
if (now - lastEvictionTime <= evictAfterMs) {
return;
}
final boolean evictable = EvictionHelper.checkEvictable(mapContainer);
if (!evictable) {
return;
}
EvictionHelper.removeEvictableRecords(DefaultRecordStore.this,
mapContainer.getMapConfig(), mapService);
lastEvictionTime = now;
readCountBeforeCleanUp = 0;
}
private Record nullIfExpired(Record record) {
return evictIfNotReachable(record);
}
private void addToWriteBehindWaitingDeletions(Data key) {
if (!mapContainer.isWriteBehindMapStoreEnabled()) {
return;
}
writeBehindWaitingDeletions.add(key);
}
@Override
public void removeFromWriteBehindWaitingDeletions(Data key) {
if (!mapContainer.isWriteBehindMapStoreEnabled()) {
return;
}
writeBehindWaitingDeletions.remove(key);
}
private boolean hasWaitingWriteBehindDeleteOperation(Data key) {
if (!mapContainer.isWriteBehindMapStoreEnabled()) {
return false;
}
return writeBehindWaitingDeletions.contains(key);
}
/**
 * Checks whether a record is still reachable according to its TTL and idle times.
 * If it is not reachable, the record is evicted and null is returned.
 *
 * @param record {@link com.hazelcast.map.record.Record}
 * @return the record if reachable, otherwise null.
 */
private Record evictIfNotReachable(Record record) {
if (record == null) {
return null;
}
if (isReachable(record)) {
return record;
}
final Data key = record.getKey();
final Object value = record.getValue();
evict(key);
doPostEvictionOperations(key, value);
return null;
}
private boolean isReachable(Record record) {
return isReachable(record, nowInNanos());
}
private boolean isReachable(Record record, long timeInNanos) {
final Record result = mapContainer.getReachabilityHandlerChain().isReachable(record,
-1L, timeInNanos);
return result != null;
}
private void doPostEvictionOperations(Data key, Object value) {
mapService.interceptAfterRemove(name, value);
if (mapService.isNearCacheAndInvalidationEnabled(name)) {
mapService.invalidateAllNearCaches(name, key);
}
EvictionHelper.fireEvent(key, value, name, mapService);
}
private void accessRecord(Record record) {
increaseRecordEvictionCriteriaNumber(record, mapContainer.getMapConfig().getEvictionPolicy());
record.onAccess();
}
private void increaseRecordEvictionCriteriaNumber(Record record, MapConfig.EvictionPolicy evictionPolicy) {
switch (evictionPolicy) {
case LRU:
++lruAccessSequenceNumber;
record.setEvictionCriteriaNumber(lruAccessSequenceNumber);
break;
case LFU:
record.setEvictionCriteriaNumber(record.getEvictionCriteriaNumber() + 1L);
break;
case NONE:
break;
default:
throw new IllegalArgumentException("Not an appropriate eviction policy [" + evictionPolicy + ']');
}
}
private void saveIndex(Record record) {
Data dataKey = record.getKey();
final IndexService indexService = mapContainer.getIndexService();
if (indexService.hasIndex()) {
SerializationService ss = mapService.getSerializationService();
QueryableEntry queryableEntry = new QueryEntry(ss, dataKey, dataKey, record.getValue());
indexService.saveEntryIndex(queryableEntry);
}
}
private Object mapStoreWrite(Data dataKey, Object recordValue, Record record) {
final MapStoreWrapper store = mapContainer.getStore();
if (store == null) {
return recordValue;
}
if (mapContainer.getWriteDelayMillis() < 1L) {
Object objectValue = mapService.toObject(recordValue);
store.store(mapService.toObject(dataKey), objectValue);
if (record != null) {
record.onStore();
}
// if store is not a post-processing map-store, then avoid extra de-serialization phase.
return store.isPostProcessingMapStore() ? objectValue : recordValue;
}
addToDelayedStore(dataKey, recordValue);
return recordValue;
}
private void mapStoreDelete(Record record, Data key) {
final MapStoreWrapper store = mapContainer.getStore();
if (store == null) {
return;
}
final long writeDelayMillis = mapContainer.getWriteDelayMillis();
if (writeDelayMillis < 1L) {
store.delete(mapService.toObject(key));
// TODO: the record is about to be deleted, so calling onStore() here looks unnecessary.
if (record != null) {
record.onStore();
}
return;
}
addToDelayedStore(key, null);
}
/**
* Constructs and adds a {@link com.hazelcast.map.writebehind.DelayedEntry}
* instance to the write-behind queue.
*
* @param dataKey
* @param recordValue
*/
private void addToDelayedStore(Data dataKey, Object recordValue) {
if (!mapContainer.isWriteBehindMapStoreEnabled()) {
return;
}
final long writeDelayMillis = mapContainer.getWriteDelayMillis();
final DelayedEntry<Data, Object> delayedEntry
= mapService.constructDelayedEntry(dataKey, recordValue,
partitionId, writeDelayMillis);
// if value is null this is a delete operation.
if (recordValue == null) {
addToWriteBehindWaitingDeletions(dataKey);
}
writeBehindQueue.offer(delayedEntry);
}
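// Worked example (illustrative): with a write-behind delay of 5000 ms, a put reaches
// addToDelayedStore(key, value) and queues a DelayedEntry due at now + 5000 for store();
// a remove calls addToDelayedStore(key, null), which queues a delete and also parks the
// key in writeBehindWaitingDeletions so reads treat it as gone until the delete is flushed.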
public SizeEstimator getSizeEstimator() {
return sizeEstimator;
}
private void updateTtl(Record record, long ttlInMillis) {
if (ttlInMillis < 0L) {
return;
}
final long ttlInNanos = TimeUnit.MILLISECONDS.toNanos(ttlInMillis);
record.setTtl(ttlInNanos);
if (record.getStatistics() != null) {
record.getStatistics().setExpirationTime(System.nanoTime() + ttlInNanos);
}
}
private static long nowInNanos() {
return System.nanoTime();
}
private void updateSizeEstimator(long recordSize) {
sizeEstimator.add(recordSize);
}
private long calculateRecordSize(Record record) {
return sizeEstimator.getCost(record);
}
private void resetSizeEstimator() {
sizeEstimator.reset();
}
private void setRecordValue(Record record, Object value) {
accessRecord(record);
record.onUpdate();
recordFactory.setValue(record, value);
}
private void removeIndex(Data key) {
final IndexService indexService = mapContainer.getIndexService();
if (indexService.hasIndex()) {
indexService.removeEntryIndex(key);
}
}
private void removeIndex(Set<Data> keys) {
final IndexService indexService = mapContainer.getIndexService();
if (indexService.hasIndex()) {
for (Data key : keys) {
indexService.removeEntryIndex(key);
}
}
}
private final class MapLoadAllTask implements Runnable {
private Map<Data, Object> keys;
private AtomicInteger checkIfMapLoaded;
private MapLoadAllTask(Map<Data, Object> keys, AtomicInteger checkIfMapLoaded) {
this.keys = keys;
this.checkIfMapLoaded = checkIfMapLoaded;
}
public void run() {
final NodeEngine nodeEngine = mapService.getNodeEngine();
try {
Map values = mapContainer.getStore().loadAll(keys.values());
if (values == null || values.isEmpty()) {
if (checkIfMapLoaded.decrementAndGet() == 0) {
loaded.set(true);
}
return;
}
MapEntrySet entrySet = new MapEntrySet();
for (Data dataKey : keys.keySet()) {
Object key = keys.get(dataKey);
Object value = values.get(key);
if (value != null) {
Data dataValue = mapService.toData(value);
entrySet.add(dataKey, dataValue);
}
}
PutAllOperation operation = new PutAllOperation(name, entrySet, true);
operation.setNodeEngine(nodeEngine);
operation.setResponseHandler(new ResponseHandler() {
@Override
public void sendResponse(Object obj) {
if (checkIfMapLoaded.decrementAndGet() == 0) {
loaded.set(true);
}
}
public boolean isLocal() {
return true;
}
});
operation.setPartitionId(partitionId);
OperationAccessor.setCallerAddress(operation, nodeEngine.getThisAddress());
operation.setCallerUuid(nodeEngine.getLocalMember().getUuid());
operation.setServiceName(MapService.SERVICE_NAME);
nodeEngine.getOperationService().executeOperation(operation);
} catch (Exception e) {
logger.warning("Exception while load all task:" + e.toString());
}
}
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_DefaultRecordStore.java
|
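A minimal standalone sketch of the eviction bookkeeping used in the record store above (the Record below is a stand-in, not the Hazelcast class): LRU stamps each access with a growing sequence number, LFU counts accesses, and an eviction sweep that removes the smallest criteria number yields least-recently-used or least-frequently-used behavior.

import java.util.HashMap;
import java.util.Map;

public class EvictionCriteriaSketch {
    enum Policy { LRU, LFU }

    static class Record {
        long criteria; // access sequence number (LRU) or access count (LFU)
    }

    private long lruAccessSequenceNumber;

    void onAccess(Record record, Policy policy) {
        switch (policy) {
            case LRU:
                record.criteria = ++lruAccessSequenceNumber; // newer access -> larger stamp
                break;
            case LFU:
                record.criteria += 1L; // more accesses -> larger count
                break;
        }
    }

    public static void main(String[] args) {
        EvictionCriteriaSketch sketch = new EvictionCriteriaSketch();
        Map<String, Record> records = new HashMap<String, Record>();
        records.put("a", new Record());
        records.put("b", new Record());
        sketch.onAccess(records.get("a"), Policy.LRU);
        sketch.onAccess(records.get("b"), Policy.LRU);
        // "a" now carries the smaller stamp, so an LRU sweep would evict it first.
        System.out.println("a=" + records.get("a").criteria + " b=" + records.get("b").criteria);
    }
}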
933 |
public class OfferDeliveryType implements Serializable, BroadleafEnumerationType, Comparable<OfferDeliveryType> {
private static final long serialVersionUID = 1L;
private static final Map<String, OfferDeliveryType> TYPES = new LinkedHashMap<String, OfferDeliveryType>();
public static final OfferDeliveryType AUTOMATIC = new OfferDeliveryType("AUTOMATIC", "Automatically", 1000);
public static final OfferDeliveryType CODE = new OfferDeliveryType("CODE", "Using Shared Code", 2000);
public static final OfferDeliveryType MANUAL = new OfferDeliveryType("MANUAL", "Via Application or Shared Code", 3000);
public static OfferDeliveryType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
private int order;
public OfferDeliveryType() {
//do nothing
}
public OfferDeliveryType(final String type, final String friendlyType, int order) {
this.friendlyType = friendlyType;
setType(type);
setOrder(order);
}
public void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
public int getOrder() {
return order;
}
public void setOrder(int order) {
this.order = order;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
OfferDeliveryType other = (OfferDeliveryType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
@Override
public int compareTo(OfferDeliveryType arg0) {
return this.order - arg0.order;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_service_type_OfferDeliveryType.java
|
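A brief usage sketch of the extensible-enumeration pattern above (assumes OfferDeliveryType is imported from the Broadleaf package shown): instances self-register in the static TYPES map via setType, so string lookups work for built-in constants and for values added by extension code alike.

public class OfferDeliveryTypeDemo {
    public static void main(String[] args) {
        // Referencing the class initializes and registers the built-in constants.
        OfferDeliveryType auto = OfferDeliveryType.getInstance("AUTOMATIC");
        System.out.println(auto.getFriendlyType()); // prints "Automatically"

        // Extension code can add a new value without modifying the class itself.
        OfferDeliveryType custom = new OfferDeliveryType("EMAIL", "Via Email", 4000);
        System.out.println(OfferDeliveryType.getInstance("EMAIL") == custom); // prints true
    }
}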
84 |
public interface ClientEngine {
int getClientEndpointCount();
InternalPartitionService getPartitionService();
ClusterService getClusterService();
SerializationService getSerializationService();
EventService getEventService();
TransactionManagerService getTransactionManagerService();
ProxyService getProxyService();
Config getConfig();
ILogger getLogger(Class clazz);
ILogger getLogger(String className);
Object toObject(Data data);
Data toData(Object obj);
Address getMasterAddress();
Address getThisAddress();
MemberImpl getLocalMember();
SecurityContext getSecurityContext();
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_client_ClientEngine.java
|
21 |
public class DeleteCommandParser implements CommandParser {
public TextCommand parser(SocketTextReader socketTextReader, String cmd, int space) {
StringTokenizer st = new StringTokenizer(cmd);
st.nextToken();
String key = null;
int expiration = 0;
boolean noReply = false;
if (st.hasMoreTokens()) {
key = st.nextToken();
}
if (st.hasMoreTokens()) {
expiration = Integer.parseInt(st.nextToken());
}
if (st.hasMoreTokens()) {
noReply = "noreply".equals(st.nextToken());
}
return new DeleteCommand(key, expiration, noReply);
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_memcache_DeleteCommandParser.java
|
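A self-contained sketch of the tokenizing the parser above performs (DeleteCommand itself is omitted; the parsed fields are printed instead). The memcache line shape assumed here is `delete <key> [<time>] [noreply]`.

import java.util.StringTokenizer;

public class DeleteLineSketch {
    public static void main(String[] args) {
        String cmd = "delete mykey 0 noreply";
        StringTokenizer st = new StringTokenizer(cmd);
        st.nextToken(); // skip the "delete" verb
        String key = st.hasMoreTokens() ? st.nextToken() : null;
        int expiration = st.hasMoreTokens() ? Integer.parseInt(st.nextToken()) : 0;
        boolean noReply = st.hasMoreTokens() && "noreply".equals(st.nextToken());
        System.out.println(key + " " + expiration + " " + noReply); // mykey 0 true
    }
}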
203 |
public class OServerAdmin {
private OStorageRemote storage;
private int sessionId = -1;
/**
* Creates the object, given a remote URL to connect to.
*
* @param iURL
* URL to connect. It supports only the "remote" storage type.
* @throws IOException
*/
public OServerAdmin(String iURL) throws IOException {
if (iURL.startsWith(OEngineRemote.NAME))
iURL = iURL.substring(OEngineRemote.NAME.length() + 1);
if (!iURL.contains("/"))
iURL += "/";
storage = new OStorageRemote(null, iURL, "");
}
/**
* Creates the object starting from an existent remote storage.
*
* @param iStorage
*/
public OServerAdmin(final OStorageRemote iStorage) {
storage = iStorage;
}
/**
* Connects to a remote server.
*
* @param iUserName
* Server's user name
* @param iUserPassword
* Server's password for the user name used
* @return The instance itself. Useful to execute method in chain
* @throws IOException
*/
public synchronized OServerAdmin connect(final String iUserName, final String iUserPassword) throws IOException {
storage.createConnectionPool();
storage.setSessionId(null, -1);
try {
final OChannelBinaryAsynchClient network = storage.beginRequest(OChannelBinaryProtocol.REQUEST_CONNECT);
storage.sendClientInfo(network);
try {
network.writeString(iUserName);
network.writeString(iUserPassword);
} finally {
storage.endRequest(network);
}
try {
storage.beginResponse(network);
sessionId = network.readInt();
storage.setSessionId(network.getServerURL(), sessionId);
} finally {
storage.endResponse(network);
}
} catch (Exception e) {
OLogManager.instance().error(this, "Cannot connect to the remote server: " + storage.getName(), e, OStorageException.class);
storage.close(true);
}
return this;
}
/**
 * Lists the databases on a remote server.
 *
 * @return Map of database names to their URLs
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public synchronized Map<String, String> listDatabases() throws IOException {
storage.checkConnection();
final ODocument result = new ODocument();
try {
final OChannelBinaryAsynchClient network = storage.beginRequest(OChannelBinaryProtocol.REQUEST_DB_LIST);
storage.endRequest(network);
try {
storage.beginResponse(network);
result.fromStream(network.readBytes());
} finally {
storage.endResponse(network);
}
} catch (Exception e) {
OLogManager.instance().exception("Cannot retrieve the configuration list", e, OStorageException.class);
storage.close(true);
}
return (Map<String, String>) result.field("databases");
}
public int getSessionId() {
return sessionId;
}
/**
* Deprecated. Use {@link #createDatabase(String, String)} instead.
*/
@Deprecated
public synchronized OServerAdmin createDatabase(final String iStorageMode) throws IOException {
return createDatabase("document", iStorageMode);
}
/**
* Creates a database in a remote server.
*
* @param iDatabaseType
* 'document' or 'graph'
* @param iStorageMode
* local or memory
* @return The instance itself. Useful to execute method in chain
* @throws IOException
*/
public synchronized OServerAdmin createDatabase(final String iDatabaseType, String iStorageMode) throws IOException {
return createDatabase(storage.getName(), iDatabaseType, iStorageMode);
}
/**
* Creates a database in a remote server.
*
* @param iDatabaseName
* The database name
* @param iDatabaseType
* 'document' or 'graph'
* @param iStorageMode
* local or memory
* @return The instance itself. Useful to execute method in chain
* @throws IOException
*/
public synchronized OServerAdmin createDatabase(final String iDatabaseName, final String iDatabaseType, String iStorageMode)
throws IOException {
storage.checkConnection();
try {
if (iDatabaseName == null || iDatabaseName.length() <= 0) {
OLogManager.instance().error(this, "Cannot create unnamed remote storage. Check your syntax", OStorageException.class);
} else {
if (iStorageMode == null)
iStorageMode = "csv";
final OChannelBinaryAsynchClient network = storage.beginRequest(OChannelBinaryProtocol.REQUEST_DB_CREATE);
try {
network.writeString(iDatabaseName);
if (network.getSrvProtocolVersion() >= 8)
network.writeString(iDatabaseType);
network.writeString(iStorageMode);
} finally {
storage.endRequest(network);
}
storage.getResponse(network);
}
} catch (Exception e) {
OLogManager.instance().error(this, "Cannot create the remote storage: " + storage.getName(), e, OStorageException.class);
storage.close(true);
}
return this;
}
/**
* Checks if a database exists in the remote server.
*
* @return true if exists, otherwise false
* @throws IOException
* @param storageType
*/
public synchronized boolean existsDatabase(String storageType) throws IOException {
storage.checkConnection();
try {
final OChannelBinaryAsynchClient network = storage.beginRequest(OChannelBinaryProtocol.REQUEST_DB_EXIST);
try {
network.writeString(storage.getName());
network.writeString(storageType);
} finally {
storage.endRequest(network);
}
try {
storage.beginResponse(network);
return network.readByte() == 1;
} finally {
storage.endResponse(network);
}
} catch (Exception e) {
OLogManager.instance().exception("Error on checking existence of the remote storage: " + storage.getName(), e,
OStorageException.class);
storage.close(true);
}
return false;
}
/**
* Deprecated. Use dropDatabase() instead.
*
* @return The instance itself. Useful to execute method in chain
* @see #dropDatabase(String)
* @throws IOException
* @param storageType
* Type of storage of server database.
*/
@Deprecated
public OServerAdmin deleteDatabase(String storageType) throws IOException {
return dropDatabase(storageType);
}
/**
* Drops a database from a remote server instance.
*
* @return The instance itself. Useful to execute method in chain
* @throws IOException
* @param storageType
*/
public synchronized OServerAdmin dropDatabase(String storageType) throws IOException {
storage.checkConnection();
boolean retry = true;
while (retry)
try {
final OChannelBinaryAsynchClient network = storage.beginRequest(OChannelBinaryProtocol.REQUEST_DB_DROP);
try {
network.writeString(storage.getName());
network.writeString(storageType);
} finally {
storage.endRequest(network);
}
storage.getResponse(network);
retry = false;
} catch (OModificationOperationProhibitedException oope) {
retry = handleDBFreeze();
} catch (Exception e) {
OLogManager.instance().exception("Cannot delete the remote storage: " + storage.getName(), e, OStorageException.class);
}
for (OStorage s : Orient.instance().getStorages()) {
if (s.getURL().startsWith(getURL())) {
s.removeResource(OSchema.class.getSimpleName());
s.removeResource(OIndexManager.class.getSimpleName());
s.removeResource(OSecurity.class.getSimpleName());
}
}
ODatabaseRecordThreadLocal.INSTANCE.set(null);
return this;
}
private boolean handleDBFreeze() {
boolean retry;
OLogManager.instance().warn(this,
"DB is frozen will wait for " + OGlobalConfiguration.CLIENT_DB_RELEASE_WAIT_TIMEOUT.getValue() + " ms. and then retry.");
retry = true;
try {
Thread.sleep(OGlobalConfiguration.CLIENT_DB_RELEASE_WAIT_TIMEOUT.getValueAsInteger());
} catch (InterruptedException ie) {
retry = false;
Thread.currentThread().interrupt();
}
return retry;
}
public synchronized OServerAdmin freezeDatabase(String storageType) throws IOException {
storage.checkConnection();
try {
final OChannelBinaryAsynchClient network = storage.beginRequest(OChannelBinaryProtocol.REQUEST_DB_FREEZE);
try {
network.writeString(storage.getName());
network.writeString(storageType);
} finally {
storage.endRequest(network);
}
storage.getResponse(network);
} catch (Exception e) {
OLogManager.instance().exception("Cannot freeze the remote storage: " + storage.getName(), e, OStorageException.class);
}
return this;
}
public synchronized OServerAdmin releaseDatabase(String storageType) throws IOException {
storage.checkConnection();
try {
final OChannelBinaryAsynchClient network = storage.beginRequest(OChannelBinaryProtocol.REQUEST_DB_RELEASE);
try {
network.writeString(storage.getName());
network.writeString(storageType);
} finally {
storage.endRequest(network);
}
storage.getResponse(network);
} catch (Exception e) {
OLogManager.instance().exception("Cannot release the remote storage: " + storage.getName(), e, OStorageException.class);
}
return this;
}
public synchronized OServerAdmin freezeCluster(int clusterId, String storageType) throws IOException {
storage.checkConnection();
try {
final OChannelBinaryAsynchClient network = storage.beginRequest(OChannelBinaryProtocol.REQUEST_DATACLUSTER_FREEZE);
try {
network.writeString(storage.getName());
network.writeShort((short) clusterId);
network.writeString(storageType);
} finally {
storage.endRequest(network);
}
storage.getResponse(network);
} catch (IllegalArgumentException e) {
throw e;
} catch (Exception e) {
OLogManager.instance().exception("Cannot freeze the remote cluster " + clusterId + " on storage: " + storage.getName(), e,
OStorageException.class);
}
return this;
}
public synchronized OServerAdmin releaseCluster(int clusterId, String storageType) throws IOException {
storage.checkConnection();
try {
final OChannelBinaryAsynchClient network = storage.beginRequest(OChannelBinaryProtocol.REQUEST_DATACLUSTER_RELEASE);
try {
network.writeString(storage.getName());
network.writeShort((short) clusterId);
network.writeString(storageType);
} finally {
storage.endRequest(network);
}
storage.getResponse(network);
} catch (IllegalArgumentException e) {
throw e;
} catch (Exception e) {
OLogManager.instance().exception("Cannot release the remote cluster " + clusterId + " on storage: " + storage.getName(), e,
OStorageException.class);
}
return this;
}
/**
* Gets the cluster status.
*
* @return the JSON containing the current cluster structure
*/
public ODocument clusterStatus() {
final ODocument response = sendRequest(OChannelBinaryProtocol.REQUEST_CLUSTER, new ODocument().field("operation", "status"),
"Cluster status");
OLogManager.instance().debug(this, "Cluster status %s", response.toJSON("prettyPrint"));
return response;
}
/**
* Copies a database to a remote server instance.
*
* @param iDatabaseName
* @param iDatabaseUserName
* @param iDatabaseUserPassword
* @param iRemoteName
* @param iRemoteEngine
* @return The instance itself. Useful to execute method in chain
* @throws IOException
*/
public synchronized OServerAdmin copyDatabase(final String iDatabaseName, final String iDatabaseUserName,
final String iDatabaseUserPassword, final String iRemoteName, final String iRemoteEngine) throws IOException {
storage.checkConnection();
try {
final OChannelBinaryAsynchClient network = storage.beginRequest(OChannelBinaryProtocol.REQUEST_DB_COPY);
try {
network.writeString(iDatabaseName);
network.writeString(iDatabaseUserName);
network.writeString(iDatabaseUserPassword);
network.writeString(iRemoteName);
network.writeString(iRemoteEngine);
} finally {
storage.endRequest(network);
}
storage.getResponse(network);
OLogManager.instance().debug(this, "Database '%s' has been copied to the server '%s'", iDatabaseName, iRemoteName);
} catch (Exception e) {
OLogManager.instance().exception("Cannot copy the database: " + iDatabaseName, e, OStorageException.class);
}
return this;
}
public synchronized Map<String, String> getGlobalConfigurations() throws IOException {
storage.checkConnection();
final Map<String, String> config = new HashMap<String, String>();
try {
final OChannelBinaryAsynchClient network = storage.beginRequest(OChannelBinaryProtocol.REQUEST_CONFIG_LIST);
storage.endRequest(network);
try {
storage.beginResponse(network);
final int num = network.readShort();
for (int i = 0; i < num; ++i)
config.put(network.readString(), network.readString());
} finally {
storage.endResponse(network);
}
} catch (Exception e) {
OLogManager.instance().exception("Cannot retrieve the configuration list", e, OStorageException.class);
storage.close(true);
}
return config;
}
public synchronized String getGlobalConfiguration(final OGlobalConfiguration iConfig) throws IOException {
storage.checkConnection();
try {
final OChannelBinaryAsynchClient network = storage.beginRequest(OChannelBinaryProtocol.REQUEST_CONFIG_GET);
network.writeString(iConfig.getKey());
try {
storage.beginResponse(network);
return network.readString();
} finally {
storage.endResponse(network);
}
} catch (Exception e) {
OLogManager.instance().exception("Cannot retrieve the configuration value: " + iConfig.getKey(), e, OStorageException.class);
storage.close(true);
}
return null;
}
public synchronized OServerAdmin setGlobalConfiguration(final OGlobalConfiguration iConfig, final Object iValue)
throws IOException {
storage.checkConnection();
try {
final OChannelBinaryAsynchClient network = storage.beginRequest(OChannelBinaryProtocol.REQUEST_CONFIG_SET);
network.writeString(iConfig.getKey());
network.writeString(iValue != null ? iValue.toString() : "");
storage.getResponse(network);
} catch (Exception e) {
OLogManager.instance().exception("Cannot set the configuration value: " + iConfig.getKey(), e, OStorageException.class);
storage.close(true);
}
return this;
}
/**
* Close the connection if open.
*/
public synchronized void close() {
storage.close();
}
public synchronized void close(boolean iForce) {
storage.close(iForce);
}
public synchronized String getURL() {
return storage != null ? storage.getURL() : null;
}
protected ODocument sendRequest(final byte iRequest, final ODocument iPayLoad, final String iActivity) {
boolean retry = true;
while (retry)
try {
final OChannelBinaryAsynchClient network = storage.beginRequest(iRequest);
try {
network.writeBytes(iPayLoad.toStream());
} finally {
storage.endRequest(network);
}
retry = false;
try {
storage.beginResponse(network);
return new ODocument(network.readBytes());
} finally {
storage.endResponse(network);
}
} catch (OModificationOperationProhibitedException ompe) {
retry = handleDBFreeze();
} catch (Exception e) {
OLogManager.instance().exception("Error on executing '%s'", e, OStorageException.class, iActivity);
}
return null;
}
public boolean isConnected() {
return storage != null && !storage.isClosed();
}
}
| 0true
|
client_src_main_java_com_orientechnologies_orient_client_remote_OServerAdmin.java
|
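A hedged usage sketch of the admin API above; the URL, credentials, storage type and a reachable server are all assumptions. Most methods return the instance, so calls chain as the javadoc advertises.

import java.io.IOException;
import com.orientechnologies.orient.client.remote.OServerAdmin;

public class ServerAdminSketch {
    public static void main(String[] args) throws IOException {
        // "remote:localhost/demo", "root" and "password" are placeholders.
        OServerAdmin admin = new OServerAdmin("remote:localhost/demo").connect("root", "password");
        try {
            if (!admin.existsDatabase("local")) {
                admin.createDatabase("document", "local");
            }
            System.out.println(admin.listDatabases());
        } finally {
            admin.close();
        }
    }
}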
444 |
trackedSet.addChangeListener(new OMultiValueChangeListener<String, String>() {
public void onAfterRecordChanged(final OMultiValueChangeEvent<String, String> event) {
changed.value = true;
}
});
| 0true
|
core_src_test_java_com_orientechnologies_orient_core_db_record_TrackedSetTest.java
|
161 |
@Service("blStructuredContentService")
public class StructuredContentServiceImpl extends AbstractContentService implements StructuredContentService {
protected static final Log LOG = LogFactory.getLog(StructuredContentServiceImpl.class);
protected static final String AND = " && ";
@Resource(name="blStructuredContentDao")
protected StructuredContentDao structuredContentDao;
@Resource(name="blSandBoxItemDao")
protected SandBoxItemDao sandBoxItemDao;
@Resource(name="blSandBoxDao")
protected SandBoxDao sandBoxDao;
@Resource(name="blStaticAssetService")
protected StaticAssetService staticAssetService;
@Resource(name="blLocaleService")
protected LocaleService localeService;
@Resource(name="blContentRuleProcessors")
protected List<StructuredContentRuleProcessor> contentRuleProcessors;
@Value("${automatically.approve.structured.content}")
protected boolean automaticallyApproveAndPromoteStructuredContent=true;
protected Cache structuredContentCache;
protected List<ArchivedStructuredContentPublisher> archivedStructuredContentListeners;
@Override
public StructuredContent findStructuredContentById(Long contentId) {
return structuredContentDao.findStructuredContentById(contentId);
}
@Override
public StructuredContentType findStructuredContentTypeById(Long id) {
return structuredContentDao.findStructuredContentTypeById(id);
}
@Override
public StructuredContentType findStructuredContentTypeByName(String name) {
return structuredContentDao.findStructuredContentTypeByName(name);
}
@Override
public List<StructuredContentType> retrieveAllStructuredContentTypes() {
return structuredContentDao.retrieveAllStructuredContentTypes();
}
@Override
public Map<String, StructuredContentField> findFieldsByContentId(Long contentId) {
StructuredContent sc = findStructuredContentById(contentId);
return structuredContentDao.readFieldsForStructuredContentItem(sc);
}
@Override
public List<StructuredContent> findContentItems(SandBox sandbox, Criteria c) {
return findItems(sandbox, c, StructuredContent.class, StructuredContentImpl.class, "originalItemId");
}
@Override
public List<StructuredContent> findAllContentItems() {
return structuredContentDao.findAllContentItems();
}
@Override
public Long countContentItems(SandBox sandbox, Criteria c) {
return countItems(sandbox, c, StructuredContentImpl.class, "originalItemId");
}
@Override
public StructuredContent addStructuredContent(StructuredContent content, SandBox destinationSandbox) {
if (automaticallyApproveAndPromoteStructuredContent) {
if (destinationSandbox != null && destinationSandbox.getSite() != null) {
destinationSandbox = destinationSandbox.getSite().getProductionSandbox();
} else {
// Null means production for single-site installations.
destinationSandbox = null;
}
}
content.setSandbox(destinationSandbox);
content.setArchivedFlag(false);
content.setDeletedFlag(false);
StructuredContent sc = structuredContentDao.addOrUpdateContentItem(content);
if (! isProductionSandBox(destinationSandbox)) {
sandBoxItemDao.addSandBoxItem(destinationSandbox.getId(), SandBoxOperationType.ADD, SandBoxItemType.STRUCTURED_CONTENT, sc.getContentName(), sc.getId(), null);
}
return sc;
}
@Override
public StructuredContent updateStructuredContent(StructuredContent content, SandBox destSandbox) {
if (content.getLockedFlag()) {
throw new IllegalArgumentException("Unable to update a locked record");
}
if (automaticallyApproveAndPromoteStructuredContent) {
if (destSandbox != null && destSandbox.getSite() != null) {
destSandbox = destSandbox.getSite().getProductionSandbox();
} else {
// Null means production for single-site installations.
destSandbox = null;
}
}
if (checkForSandboxMatch(content.getSandbox(), destSandbox)) {
if (content.getDeletedFlag()) {
SandBoxItem item = sandBoxItemDao.retrieveBySandboxAndTemporaryItemId(content.getSandbox()==null?null:content.getSandbox().getId(), SandBoxItemType.STRUCTURED_CONTENT, content.getId());
if (content.getOriginalItemId() == null && item != null) {
// This page was added in this sandbox and now needs to be deleted.
content.setArchivedFlag(true);
item.setArchivedFlag(true);
} else if (item != null) {
// This page was being updated but now is being deleted - so change the
// sandbox operation type to deleted
item.setSandBoxOperationType(SandBoxOperationType.DELETE);
sandBoxItemDao.updateSandBoxItem(item);
} else if (automaticallyApproveAndPromoteStructuredContent) {
content.setArchivedFlag(true);
}
}
return structuredContentDao.addOrUpdateContentItem(content);
} else if (checkForProductionSandbox(content.getSandbox())) {
// The passed in content is an existing content item whose values were updated
// Instead, we want to create a clone of this item for the destSandbox
// Create the clone
StructuredContent clonedContent = content.cloneEntity();
clonedContent.setOriginalItemId(content.getId());
clonedContent.setSandbox(destSandbox);
// Detach the old item so it doesn't get updated
structuredContentDao.detach(content);
// Update the new item
StructuredContent returnContent = structuredContentDao.addOrUpdateContentItem(clonedContent);
// Lookup the previous item so that we can update its locked status
StructuredContent prod = findStructuredContentById(content.getId());
prod.setLockedFlag(true);
prod = structuredContentDao.addOrUpdateContentItem(prod);
SandBoxOperationType type = SandBoxOperationType.UPDATE;
if (clonedContent.getDeletedFlag()) {
type = SandBoxOperationType.DELETE;
}
sandBoxItemDao.addSandBoxItem(destSandbox.getId(), type, SandBoxItemType.STRUCTURED_CONTENT, returnContent.getContentName(), returnContent.getId(), returnContent.getOriginalItemId());
return returnContent;
} else {
// This should happen via a promote, revert, or reject in the sandbox service
throw new IllegalArgumentException("Update called when promote or reject was expected.");
}
}
/**
* Saves the given <b>type</b> and returns the merged instance
*/
@Override
public StructuredContentType saveStructuredContentType(StructuredContentType type) {
return structuredContentDao.saveStructuredContentType(type);
}
protected boolean checkForSandboxMatch(SandBox src, SandBox dest) {
if (src != null) {
if (dest != null) {
return src.getId().equals(dest.getId());
}
}
return (src == null && dest == null);
}
protected boolean checkForProductionSandbox(SandBox dest) {
boolean productionSandbox = false;
if (dest == null) {
productionSandbox = true;
} else {
if (dest.getSite() != null && dest.getSite().getProductionSandbox() != null && dest.getSite().getProductionSandbox().getId() != null) {
productionSandbox = dest.getSite().getProductionSandbox().getId().equals(dest.getId());
}
}
return productionSandbox;
}
@Override
public void deleteStructuredContent(StructuredContent content, SandBox destinationSandbox) {
content.setDeletedFlag(true);
updateStructuredContent(content, destinationSandbox);
}
protected String buildRuleExpression(StructuredContent sc) {
StringBuffer ruleExpression = null;
Map<String, StructuredContentRule> ruleMap = sc.getStructuredContentMatchRules();
if (ruleMap != null) {
for (String ruleKey : ruleMap.keySet()) {
if (ruleMap.get(ruleKey).getMatchRule() == null) continue;
if (ruleExpression == null) {
ruleExpression = new StringBuffer(ruleMap.get(ruleKey).getMatchRule());
} else {
ruleExpression.append(AND);
ruleExpression.append(ruleMap.get(ruleKey).getMatchRule());
}
}
}
if (ruleExpression != null) {
return ruleExpression.toString();
} else {
return null;
}
}
protected List<ItemCriteriaDTO> buildItemCriteriaDTOList(StructuredContent sc) {
List<ItemCriteriaDTO> itemCriteriaDTOList = new ArrayList<ItemCriteriaDTO>();
for(StructuredContentItemCriteria criteria : sc.getQualifyingItemCriteria()) {
ItemCriteriaDTO criteriaDTO = new ItemCriteriaDTO();
criteriaDTO.setMatchRule(criteria.getMatchRule());
criteriaDTO.setQty(criteria.getQuantity());
itemCriteriaDTOList.add(criteriaDTO);
}
return itemCriteriaDTOList;
}
protected void buildFieldValues(StructuredContent sc, StructuredContentDTO scDTO, boolean secure) {
String envPrefix = staticAssetService.getStaticAssetEnvironmentUrlPrefix();
if (envPrefix != null && secure) {
envPrefix = staticAssetService.getStaticAssetEnvironmentSecureUrlPrefix();
}
String cmsPrefix = staticAssetService.getStaticAssetUrlPrefix();
for (String fieldKey : sc.getStructuredContentFields().keySet()) {
StructuredContentField scf = sc.getStructuredContentFields().get(fieldKey);
String originalValue = scf.getValue();
if (StringUtils.isNotBlank(envPrefix) && StringUtils.isNotBlank(originalValue) && StringUtils.isNotBlank(cmsPrefix) && originalValue.contains(cmsPrefix)) {
if (originalValue.startsWith("/")) {
originalValue = originalValue.substring(1);
}
String fldValue = originalValue.replaceAll(cmsPrefix, envPrefix+cmsPrefix);
scDTO.getValues().put(fieldKey, fldValue);
} else {
scDTO.getValues().put(fieldKey, originalValue);
}
}
}
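// Worked example (illustrative values): with cmsPrefix "cms/static/" and secure
// envPrefix "https://cdn.example.com/", a stored field value "/cms/static/img/banner.jpg"
// first loses its leading slash and is then rewritten to
// "https://cdn.example.com/cms/static/img/banner.jpg"; values without the cms prefix
// pass through unchanged.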
/**
* Converts a list of structured content items to a list of structured content DTOs.<br>
* Internally calls buildStructuredContentDTO(...).
*
* @param structuredContentList
* @param secure
* @return
*/
protected List<StructuredContentDTO> buildStructuredContentDTOList(List<StructuredContent> structuredContentList, boolean secure) {
List<StructuredContentDTO> dtoList = new ArrayList<StructuredContentDTO>();
if (structuredContentList != null) {
for(StructuredContent sc : structuredContentList) {
dtoList.add(buildStructuredContentDTO(sc, secure));
}
}
return dtoList;
}
/**
* Converts a StructuredContent into a StructuredContentDTO. If the item contains fields with
* Broadleaf CMS URLs, those URLs are rewritten to use the configured static asset environment prefix.
* @param sc
* @param secure
* @return
*/
protected StructuredContentDTO buildStructuredContentDTO(StructuredContent sc, boolean secure) {
StructuredContentDTO scDTO = new StructuredContentDTO();
scDTO.setContentName(sc.getContentName());
scDTO.setContentType(sc.getStructuredContentType().getName());
scDTO.setId(sc.getId());
scDTO.setPriority(sc.getPriority());
if (sc.getLocale() != null) {
scDTO.setLocaleCode(sc.getLocale().getLocaleCode());
}
if (sc.getSandbox() != null) {
scDTO.setSandboxId(sc.getSandbox().getId());
}
scDTO.setRuleExpression(buildRuleExpression(sc));
buildFieldValues(sc, scDTO, secure);
if (sc.getQualifyingItemCriteria() != null && sc.getQualifyingItemCriteria().size() > 0) {
scDTO.setItemCriteriaDTOList(buildItemCriteriaDTOList(sc));
}
return scDTO;
}
protected List<StructuredContentDTO> mergeContent(List<StructuredContentDTO> productionList, List<StructuredContent> sandboxList, boolean secure) {
if (sandboxList == null || sandboxList.size() == 0) {
return productionList;
}
Map<Long,StructuredContentDTO> scMap = new LinkedHashMap<Long,StructuredContentDTO>();
if (productionList != null) {
for(StructuredContentDTO sc : productionList) {
scMap.put(sc.getId(), sc);
}
}
for(StructuredContent sc : sandboxList) {
if (sc.getOriginalItemId() != null) {
scMap.remove(sc.getOriginalItemId());
}
if (! sc.getDeletedFlag() && ! sc.getOfflineFlag()) {
StructuredContentDTO convertedItem = buildStructuredContentDTO(sc, secure);
scMap.put(sc.getId(), convertedItem);
}
}
ArrayList<StructuredContentDTO> returnList = new ArrayList<StructuredContentDTO>(scMap.values());
if (returnList.size() > 1) {
Collections.sort(returnList, new BeanComparator("priority"));
}
return returnList;
}
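// Worked example (illustrative): production holds items {10, 11}; the sandbox holds an
// edit of 10 (originalItemId = 10, new id 20) and a deleted item 11. The map first keys
// production by id, the sandbox pass removes 10 and 11 (11 via its deletedFlag), adds 20,
// and the result is just the sandbox edit, re-sorted by priority.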
protected List<StructuredContentDTO> evaluateAndPriortizeContent(List<StructuredContentDTO> structuredContentList, int count, Map<String, Object> ruleDTOs) {
// some optimization for single item lists which don't require prioritization
if (structuredContentList.size() == 1) {
if (processContentRules(structuredContentList.get(0), ruleDTOs)) {
return structuredContentList;
} else {
return new ArrayList<StructuredContentDTO>();
}
}
Iterator<StructuredContentDTO> structuredContentIterator = structuredContentList.iterator();
List<StructuredContentDTO> returnList = new ArrayList<StructuredContentDTO>();
List<StructuredContentDTO> tmpList = new ArrayList<StructuredContentDTO>();
Integer lastPriority = Integer.MIN_VALUE;
while (structuredContentIterator.hasNext()) {
StructuredContentDTO sc = structuredContentIterator.next();
if (! lastPriority.equals(sc.getPriority())) {
// If we've moved to another priority, then shuffle all of the items
// with the previous priority and add them to the return list.
if (tmpList.size() > 1) {
Collections.shuffle(tmpList);
}
returnList.addAll(tmpList);
tmpList.clear();
// If we've added enough items to satisfy the count, then return the
// list.
if (returnList.size() == count) {
return returnList;
} else if (returnList.size() > count) {
return returnList.subList(0, count);
} else {
if (processContentRules(sc, ruleDTOs)) {
tmpList.add(sc);
}
}
} else {
if (processContentRules(sc, ruleDTOs)) {
tmpList.add(sc);
}
}
lastPriority = sc.getPriority();
}
if (tmpList.size() > 1) {
Collections.shuffle(tmpList);
}
returnList.addAll(tmpList);
if (returnList.size() > count) {
return returnList.subList(0, count);
}
return returnList;
}
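// Worked example (illustrative): for items sorted with priorities [1, 1, 2, 2, 2] and
// count = 3, the two priority-1 items are shuffled among themselves and returned first,
// then the priority-2 group is shuffled to fill the remaining slot; ordering is therefore
// deterministic across priority groups and random only within a group.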
protected boolean processContentRules(StructuredContentDTO sc, Map<String, Object> ruleDTOs) {
if (contentRuleProcessors != null) {
for (StructuredContentRuleProcessor processor : contentRuleProcessors) {
boolean matchFound = processor.checkForMatch(sc, ruleDTOs);
if (! matchFound) {
return false;
}
}
}
return true;
}
@Override
public List<StructuredContentDTO> lookupStructuredContentItemsByType(SandBox sandBox, StructuredContentType contentType, Locale locale, Integer count, Map<String, Object> ruleDTOs, boolean secure) {
List<StructuredContent> sandBoxContentList = null;
Locale languageOnlyLocale = findLanguageOnlyLocale(locale);
String cacheKey = buildTypeKey(getProductionSandBox(sandBox), locale, contentType.getName());
cacheKey = cacheKey+"-"+secure;
List<StructuredContentDTO> productionContentDTOList = getStructuredContentListFromCache(cacheKey);
if (productionContentDTOList == null) {
List<StructuredContent> productionContentList = structuredContentDao.findActiveStructuredContentByType(getProductionSandBox(sandBox), contentType, locale, languageOnlyLocale);
productionContentDTOList = buildStructuredContentDTOList(productionContentList, secure);
if (productionContentDTOList != null) {
addStructuredContentListToCache(cacheKey, productionContentDTOList);
}
}
final List<StructuredContentDTO> contentList;
if (! isProductionSandBox(sandBox)) {
sandBoxContentList = structuredContentDao.findActiveStructuredContentByType(sandBox, contentType, locale, languageOnlyLocale);
contentList = mergeContent(productionContentDTOList, sandBoxContentList, secure);
} else {
contentList = productionContentDTOList;
}
return evaluateAndPriortizeContent(contentList, count, ruleDTOs);
}
@Override
public List<StructuredContentDTO> lookupStructuredContentItemsByName(SandBox sandBox, StructuredContentType contentType, String contentName, org.broadleafcommerce.common.locale.domain.Locale locale, Integer count, Map<String, Object> ruleDTOs, boolean secure) {
List<StructuredContent> sandBoxContentList = null;
Locale languageOnlyLocale = findLanguageOnlyLocale(locale);
String cacheKey = buildNameKey(getProductionSandBox(sandBox), locale, contentType.getName(), contentName);
cacheKey = cacheKey+"-"+secure;
List<StructuredContentDTO> productionContentDTOList = getStructuredContentListFromCache(cacheKey);
if (productionContentDTOList == null) {
List<StructuredContent> productionContentList = structuredContentDao.findActiveStructuredContentByNameAndType(getProductionSandBox(sandBox), contentType, contentName, locale, languageOnlyLocale);
productionContentDTOList = buildStructuredContentDTOList(productionContentList, secure);
if (productionContentDTOList != null) {
addStructuredContentListToCache(cacheKey, productionContentDTOList);
}
}
final List<StructuredContentDTO> contentList;
if (! isProductionSandBox(sandBox)) {
sandBoxContentList = structuredContentDao.findActiveStructuredContentByNameAndType(sandBox, contentType, contentName, locale, languageOnlyLocale);
contentList = mergeContent(productionContentDTOList, sandBoxContentList, secure);
} else {
contentList = productionContentDTOList;
}
return evaluateAndPriortizeContent(contentList, count, ruleDTOs);
}
@Override
public List<StructuredContentDTO> lookupStructuredContentItemsByName(SandBox sandBox, String contentName, org.broadleafcommerce.common.locale.domain.Locale locale, Integer count, Map<String, Object> ruleDTOs, boolean secure) {
List<StructuredContent> sandBoxContentList = null;
Locale languageOnlyLocale = findLanguageOnlyLocale(locale);
String cacheKey = buildNameKey(getProductionSandBox(sandBox), locale, "any", contentName);
cacheKey = cacheKey+"-"+secure;
List<StructuredContentDTO> productionContentDTOList = getStructuredContentListFromCache(cacheKey);
if (productionContentDTOList == null) {
List<StructuredContent> productionContentList = structuredContentDao.findActiveStructuredContentByName(getProductionSandBox(sandBox), contentName, locale, languageOnlyLocale);
productionContentDTOList = buildStructuredContentDTOList(productionContentList, secure);
if (productionContentDTOList != null) {
addStructuredContentListToCache(cacheKey, productionContentDTOList);
}
}
final List<StructuredContentDTO> contentList;
if (! isProductionSandBox(sandBox)) {
sandBoxContentList = structuredContentDao.findActiveStructuredContentByName(sandBox, contentName, locale, languageOnlyLocale);
contentList = mergeContent(productionContentDTOList, sandBoxContentList, secure);
} else {
contentList = productionContentDTOList;
}
return evaluateAndPriortizeContent(contentList, count, ruleDTOs);
}
protected SandBox getProductionSandBox(SandBox currentSandBox) {
SandBox productionSandBox = null;
if (currentSandBox == null || SandBoxType.PRODUCTION.equals(currentSandBox.getSandBoxType())) {
productionSandBox = currentSandBox;
} else if (currentSandBox.getSite() != null) {
productionSandBox = currentSandBox.getSite().getProductionSandbox();
}
return productionSandBox;
}
protected boolean isProductionSandBox(SandBox dest) {
if (dest == null) {
return true;
} else {
return SandBoxType.PRODUCTION.equals(dest.getSandBoxType());
}
}
protected void productionItemArchived(StructuredContent sc) {
// Immediately remove the content from this VM.
removeStructuredContentFromCache(sc);
if (archivedStructuredContentListeners != null) {
for (ArchivedStructuredContentPublisher listener : archivedStructuredContentListeners) {
listener.processStructuredContentArchive(sc, buildTypeKey(sc), buildNameKey(sc));
}
}
}
@Override
public void itemPromoted(SandBoxItem sandBoxItem, SandBox destinationSandBox) {
if (! SandBoxItemType.STRUCTURED_CONTENT.equals(sandBoxItem.getSandBoxItemType())) {
return;
}
StructuredContent sc = structuredContentDao.findStructuredContentById(sandBoxItem.getTemporaryItemId());
if (sc == null) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Structured Content Item not found " + sandBoxItem.getTemporaryItemId());
    }
    // Nothing to promote; returning here avoids the NullPointerException the code below would otherwise throw.
    return;
}
boolean productionSandBox = isProductionSandBox(destinationSandBox);
if (productionSandBox) {
sc.setLockedFlag(false);
} else {
sc.setLockedFlag(true);
}
if (productionSandBox && sc.getOriginalItemId() != null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Structured content promoted to production. " + sc.getId() + ". Archiving original item " + sc.getOriginalItemId());
}
StructuredContent originalSC = structuredContentDao.findStructuredContentById(sc.getOriginalItemId());
originalSC.setArchivedFlag(Boolean.TRUE);
structuredContentDao.addOrUpdateContentItem(originalSC);
productionItemArchived(originalSC);
if (sc.getDeletedFlag()) {
// if this deleted content is being pushed to production, set it as archived.
sc.setArchivedFlag(true);
}
// We are archiving the old content and making this the new "production page content", so
// null out the original page id before saving.
sc.setOriginalItemId(null);
}
if (sc.getOriginalSandBox() == null) {
sc.setOriginalSandBox(sc.getSandbox());
}
sc.setSandbox(destinationSandBox);
structuredContentDao.addOrUpdateContentItem(sc);
}
}
@Override
public void itemRejected(SandBoxItem sandBoxItem, SandBox destinationSandBox) {
if (! SandBoxItemType.STRUCTURED_CONTENT.equals(sandBoxItem.getSandBoxItemType())) {
return;
}
StructuredContent sc = structuredContentDao.findStructuredContentById(sandBoxItem.getTemporaryItemId());
if (sc != null) {
sc.setSandbox(destinationSandBox);
sc.setOriginalSandBox(null);
sc.setLockedFlag(false);
structuredContentDao.addOrUpdateContentItem(sc);
}
}
@Override
public void itemReverted(SandBoxItem sandBoxItem) {
if (! SandBoxItemType.STRUCTURED_CONTENT.equals(sandBoxItem.getSandBoxItemType())) {
return;
}
StructuredContent sc = structuredContentDao.findStructuredContentById(sandBoxItem.getTemporaryItemId());
if (sc != null) {
if (sandBoxItem.getOriginalItemId() != null) {
sc.setArchivedFlag(Boolean.TRUE);
sc.setLockedFlag(Boolean.FALSE);
structuredContentDao.addOrUpdateContentItem(sc);
StructuredContent originalSc = structuredContentDao.findStructuredContentById(sandBoxItem.getOriginalItemId());
originalSc.setLockedFlag(false);
structuredContentDao.addOrUpdateContentItem(originalSc);
}
}
}
public List<StructuredContentRuleProcessor> getContentRuleProcessors() {
return contentRuleProcessors;
}
public void setContentRuleProcessors(List<StructuredContentRuleProcessor> contentRuleProcessors) {
this.contentRuleProcessors = contentRuleProcessors;
}
protected Cache getStructuredContentCache() {
if (structuredContentCache == null) {
structuredContentCache = CacheManager.getInstance().getCache("cmsStructuredContentCache");
}
return structuredContentCache;
}
protected String buildNameKey(StructuredContent sc) {
return buildNameKey(sc.getSandbox(), sc.getLocale(), sc.getStructuredContentType().getName(), sc.getContentName());
}
protected String buildTypeKey(StructuredContent sc) {
return buildTypeKey(sc.getSandbox(), sc.getLocale(), sc.getStructuredContentType().getName());
}
protected String buildNameKey(SandBox currentSandbox, Locale locale, String contentType, String contentName) {
StringBuffer key = new StringBuffer(contentType).append("-").append(contentName);
if (locale != null) {
key.append("-").append(locale.getLocaleCode());
}
if (currentSandbox != null) {
key.append("-").append(currentSandbox.getId());
}
return key.toString();
}
protected String buildTypeKey(SandBox currentSandbox, Locale locale, String contentType) {
StringBuffer key = new StringBuffer(contentType);
if (locale != null) {
key.append("-").append(locale.getLocaleCode());
}
if (currentSandbox != null) {
key.append("-").append(currentSandbox.getId());
}
return key.toString();
}
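// A minimal sketch of the keys these builders produce (hypothetical values):
//   buildTypeKey(sandbox, locale, "HOME_BANNER")          -> "HOME_BANNER-en_US-42"
//   buildNameKey(sandbox, locale, "HOME_BANNER", "hero")  -> "HOME_BANNER-hero-en_US-42"
// Callers append a "-true"/"-false" secure suffix before caching, which is why
// removeItemFromCache() below evicts both variants of each key.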
protected void addStructuredContentListToCache(String key, List<StructuredContentDTO> scDTOList) {
getStructuredContentCache().put(new Element(key, scDTOList));
}
protected List<StructuredContentDTO> getStructuredContentListFromCache(String key) {
Element scElement = getStructuredContentCache().get(key);
if (scElement != null) {
return (List<StructuredContentDTO>) scElement.getValue();
}
return null;
}
/**
* Call to evict an item from the cache.
* @param sc
*/
public void removeStructuredContentFromCache(StructuredContent sc) {
// Remove secure and non-secure instances of the structured content item.
// Typically the item will be in one or the other if at all.
removeItemFromCache(buildNameKey(sc), buildTypeKey(sc));
}
/**
* Call to evict both secure and non-secure SC items matching
* the passed in key.
*
* @param nameKey
*/
@Override
public void removeItemFromCache(String nameKey, String typeKey) {
// Remove secure and non-secure instances of the structured content.
// Typically the structured content will be in one or the other if at all.
getStructuredContentCache().remove(nameKey+"-"+true);
getStructuredContentCache().remove(nameKey+"-"+false);
getStructuredContentCache().remove(typeKey+"-"+true);
getStructuredContentCache().remove(typeKey+"-"+false);
}
public List<ArchivedStructuredContentPublisher> getArchivedStructuredContentListeners() {
return archivedStructuredContentListeners;
}
public void setArchivedStructuredContentListeners(List<ArchivedStructuredContentPublisher> archivedStructuredContentListeners) {
this.archivedStructuredContentListeners = archivedStructuredContentListeners;
}
@Override
public boolean isAutomaticallyApproveAndPromoteStructuredContent() {
return automaticallyApproveAndPromoteStructuredContent;
}
@Override
public void setAutomaticallyApproveAndPromoteStructuredContent(boolean automaticallyApproveAndPromoteStructuredContent) {
this.automaticallyApproveAndPromoteStructuredContent = automaticallyApproveAndPromoteStructuredContent;
}
protected Locale findLanguageOnlyLocale(Locale locale) {
if (locale != null ) {
Locale languageOnlyLocale = localeService.findLocaleByCode(LocaleUtil.findLanguageCode(locale));
if (languageOnlyLocale != null) {
return languageOnlyLocale;
}
}
return locale;
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_service_StructuredContentServiceImpl.java
|
4,458 |
public class IndexingMemoryController extends AbstractLifecycleComponent<IndexingMemoryController> {
private final ThreadPool threadPool;
private final IndicesService indicesService;
private final ByteSizeValue indexingBuffer;
private final ByteSizeValue minShardIndexBufferSize;
private final ByteSizeValue maxShardIndexBufferSize;
private final ByteSizeValue translogBuffer;
private final ByteSizeValue minShardTranslogBufferSize;
private final ByteSizeValue maxShardTranslogBufferSize;
private final TimeValue inactiveTime;
private final TimeValue interval;
private final AtomicBoolean shardsCreatedOrDeleted = new AtomicBoolean();
private final Listener listener = new Listener();
private final Map<ShardId, ShardIndexingStatus> shardsIndicesStatus = Maps.newHashMap();
private volatile ScheduledFuture scheduler;
private final Object mutex = new Object();
@Inject
public IndexingMemoryController(Settings settings, ThreadPool threadPool, IndicesService indicesService) {
super(settings);
this.threadPool = threadPool;
this.indicesService = indicesService;
ByteSizeValue indexingBuffer;
String indexingBufferSetting = componentSettings.get("index_buffer_size", "10%");
if (indexingBufferSetting.endsWith("%")) {
double percent = Double.parseDouble(indexingBufferSetting.substring(0, indexingBufferSetting.length() - 1));
indexingBuffer = new ByteSizeValue((long) (((double) JvmInfo.jvmInfo().mem().heapMax().bytes()) * (percent / 100)));
ByteSizeValue minIndexingBuffer = componentSettings.getAsBytesSize("min_index_buffer_size", new ByteSizeValue(48, ByteSizeUnit.MB));
ByteSizeValue maxIndexingBuffer = componentSettings.getAsBytesSize("max_index_buffer_size", null);
if (indexingBuffer.bytes() < minIndexingBuffer.bytes()) {
indexingBuffer = minIndexingBuffer;
}
if (maxIndexingBuffer != null && indexingBuffer.bytes() > maxIndexingBuffer.bytes()) {
indexingBuffer = maxIndexingBuffer;
}
} else {
indexingBuffer = ByteSizeValue.parseBytesSizeValue(indexingBufferSetting, null);
}
this.indexingBuffer = indexingBuffer;
this.minShardIndexBufferSize = componentSettings.getAsBytesSize("min_shard_index_buffer_size", new ByteSizeValue(4, ByteSizeUnit.MB));
// LUCENE MONITOR: Based on this thread, currently (based on Mike), having a large buffer does not make a lot of sense: https://issues.apache.org/jira/browse/LUCENE-2324?focusedCommentId=13005155&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-13005155
this.maxShardIndexBufferSize = componentSettings.getAsBytesSize("max_shard_index_buffer_size", new ByteSizeValue(512, ByteSizeUnit.MB));
ByteSizeValue translogBuffer;
String translogBufferSetting = componentSettings.get("translog_buffer_size", "1%");
if (translogBufferSetting.endsWith("%")) {
double percent = Double.parseDouble(translogBufferSetting.substring(0, translogBufferSetting.length() - 1));
translogBuffer = new ByteSizeValue((long) (((double) JvmInfo.jvmInfo().mem().heapMax().bytes()) * (percent / 100)));
ByteSizeValue minTranslogBuffer = componentSettings.getAsBytesSize("min_translog_buffer_size", new ByteSizeValue(256, ByteSizeUnit.KB));
ByteSizeValue maxTranslogBuffer = componentSettings.getAsBytesSize("max_translog_buffer_size", null);
if (translogBuffer.bytes() < minTranslogBuffer.bytes()) {
translogBuffer = minTranslogBuffer;
}
if (maxTranslogBuffer != null && translogBuffer.bytes() > maxTranslogBuffer.bytes()) {
translogBuffer = maxTranslogBuffer;
}
} else {
translogBuffer = ByteSizeValue.parseBytesSizeValue(translogBufferSetting, null);
}
this.translogBuffer = translogBuffer;
this.minShardTranslogBufferSize = componentSettings.getAsBytesSize("min_shard_translog_buffer_size", new ByteSizeValue(2, ByteSizeUnit.KB));
this.maxShardTranslogBufferSize = componentSettings.getAsBytesSize("max_shard_translog_buffer_size", new ByteSizeValue(64, ByteSizeUnit.KB));
this.inactiveTime = componentSettings.getAsTime("shard_inactive_time", TimeValue.timeValueMinutes(30));
// we need to have this relatively small to move a shard from inactive to active fast (enough)
this.interval = componentSettings.getAsTime("interval", TimeValue.timeValueSeconds(30));
logger.debug("using index_buffer_size [{}], with min_shard_index_buffer_size [{}], max_shard_index_buffer_size [{}], shard_inactive_time [{}]", this.indexingBuffer, this.minShardIndexBufferSize, this.maxShardIndexBufferSize, this.inactiveTime);
}
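// Worked example (assuming a 1gb heap and the defaults above): "10%" yields an
// index buffer of ~102mb, already above the 48mb min_index_buffer_size floor, so
// it is kept as-is; "1%" yields a ~10mb translog buffer, well above the 256kb floor.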
@Override
protected void doStart() throws ElasticsearchException {
indicesService.indicesLifecycle().addListener(listener);
// it's fine to run it on the scheduler thread, no busy work
this.scheduler = threadPool.scheduleWithFixedDelay(new ShardsIndicesStatusChecker(), interval);
}
@Override
protected void doStop() throws ElasticsearchException {
indicesService.indicesLifecycle().removeListener(listener);
if (scheduler != null) {
scheduler.cancel(false);
scheduler = null;
}
}
@Override
protected void doClose() throws ElasticsearchException {
}
class ShardsIndicesStatusChecker implements Runnable {
@Override
public void run() {
synchronized (mutex) {
boolean activeInactiveStatusChanges = false;
List<IndexShard> activeToInactiveIndexingShards = Lists.newArrayList();
List<IndexShard> inactiveToActiveIndexingShards = Lists.newArrayList();
for (IndexService indexService : indicesService) {
for (IndexShard indexShard : indexService) {
long time = threadPool.estimatedTimeInMillis();
Translog translog = ((InternalIndexShard) indexShard).translog();
ShardIndexingStatus status = shardsIndicesStatus.get(indexShard.shardId());
if (status == null) { // not added yet
continue;
}
// check if it is deemed to be inactive (same translogId and numberOfOperations over a long period of time)
if (status.translogId == translog.currentId() && translog.estimatedNumberOfOperations() == 0) {
if (status.time == -1) { // first time
status.time = time;
}
// inactive?
if (!status.inactiveIndexing) {
// mark it as inactive only if enough time has passed and there are no ongoing merges going on...
if ((time - status.time) > inactiveTime.millis() && indexShard.mergeStats().getCurrent() == 0) {
// inactive for this amount of time, mark it
activeToInactiveIndexingShards.add(indexShard);
status.inactiveIndexing = true;
activeInactiveStatusChanges = true;
logger.debug("marking shard [{}][{}] as inactive (inactive_time[{}]) indexing wise, setting size to [{}]", indexShard.shardId().index().name(), indexShard.shardId().id(), inactiveTime, Engine.INACTIVE_SHARD_INDEXING_BUFFER);
}
}
} else {
if (status.inactiveIndexing) {
inactiveToActiveIndexingShards.add(indexShard);
status.inactiveIndexing = false;
activeInactiveStatusChanges = true;
logger.debug("marking shard [{}][{}] as active indexing wise", indexShard.shardId().index().name(), indexShard.shardId().id());
}
status.time = -1;
}
status.translogId = translog.currentId();
status.translogNumberOfOperations = translog.estimatedNumberOfOperations();
}
}
for (IndexShard indexShard : activeToInactiveIndexingShards) {
// update inactive indexing buffer size
try {
((InternalIndexShard) indexShard).engine().updateIndexingBufferSize(Engine.INACTIVE_SHARD_INDEXING_BUFFER);
((InternalIndexShard) indexShard).translog().updateBuffer(Translog.INACTIVE_SHARD_TRANSLOG_BUFFER);
} catch (EngineClosedException e) {
// ignore
} catch (FlushNotAllowedEngineException e) {
// ignore
}
}
boolean shardsCreatedOrDeleted = IndexingMemoryController.this.shardsCreatedOrDeleted.compareAndSet(true, false);
if (shardsCreatedOrDeleted || activeInactiveStatusChanges) {
calcAndSetShardBuffers("active/inactive[" + activeInactiveStatusChanges + "] created/deleted[" + shardsCreatedOrDeleted + "]");
}
}
}
}
class Listener extends IndicesLifecycle.Listener {
@Override
public void afterIndexShardCreated(IndexShard indexShard) {
synchronized (mutex) {
shardsIndicesStatus.put(indexShard.shardId(), new ShardIndexingStatus());
shardsCreatedOrDeleted.set(true);
}
}
@Override
public void afterIndexShardClosed(ShardId shardId) {
synchronized (mutex) {
shardsIndicesStatus.remove(shardId);
shardsCreatedOrDeleted.set(true);
}
}
}
private void calcAndSetShardBuffers(String reason) {
int shardsCount = countShards();
if (shardsCount == 0) {
return;
}
ByteSizeValue shardIndexingBufferSize = new ByteSizeValue(indexingBuffer.bytes() / shardsCount);
if (shardIndexingBufferSize.bytes() < minShardIndexBufferSize.bytes()) {
shardIndexingBufferSize = minShardIndexBufferSize;
}
if (shardIndexingBufferSize.bytes() > maxShardIndexBufferSize.bytes()) {
shardIndexingBufferSize = maxShardIndexBufferSize;
}
ByteSizeValue shardTranslogBufferSize = new ByteSizeValue(translogBuffer.bytes() / shardsCount);
if (shardTranslogBufferSize.bytes() < minShardTranslogBufferSize.bytes()) {
shardTranslogBufferSize = minShardTranslogBufferSize;
}
if (shardTranslogBufferSize.bytes() > maxShardTranslogBufferSize.bytes()) {
shardTranslogBufferSize = maxShardTranslogBufferSize;
}
logger.debug("recalculating shard indexing buffer (reason={}), total is [{}] with [{}] active shards, each shard set to indexing=[{}], translog=[{}]", reason, indexingBuffer, shardsCount, shardIndexingBufferSize, shardTranslogBufferSize);
for (IndexService indexService : indicesService) {
for (IndexShard indexShard : indexService) {
ShardIndexingStatus status = shardsIndicesStatus.get(indexShard.shardId());
if (status == null || !status.inactiveIndexing) {
try {
((InternalIndexShard) indexShard).engine().updateIndexingBufferSize(shardIndexingBufferSize);
((InternalIndexShard) indexShard).translog().updateBuffer(shardTranslogBufferSize);
} catch (EngineClosedException e) {
// ignore
continue;
} catch (FlushNotAllowedEngineException e) {
// ignore
continue;
} catch (Exception e) {
logger.warn("failed to set shard [{}][{}] index buffer to [{}]", indexShard.shardId().index().name(), indexShard.shardId().id(), shardIndexingBufferSize);
}
}
}
}
}
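// Worked example: with a ~102mb total index buffer and 20 active shards, each shard
// gets ~5mb, which falls inside the [4mb, 512mb] clamp; with 50 active shards the
// raw ~2mb share is bumped up to the 4mb min_shard_index_buffer_size floor.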
private int countShards() {
int shardsCount = 0;
for (IndexService indexService : indicesService) {
for (IndexShard indexShard : indexService) {
ShardIndexingStatus status = shardsIndicesStatus.get(indexShard.shardId());
if (status == null || !status.inactiveIndexing) {
shardsCount++;
}
}
}
return shardsCount;
}
static class ShardIndexingStatus {
long translogId = -1;
int translogNumberOfOperations = -1;
boolean inactiveIndexing = false;
long time = -1; // contains the first time we saw this shard with no operations done on it
}
}
| 1no label
|
src_main_java_org_elasticsearch_indices_memory_IndexingMemoryController.java
|
1,698 |
public static class Helper {
public static boolean bytesEqual(BytesReference a, BytesReference b) {
if (a == b) {
return true;
}
if (a.length() != b.length()) {
return false;
}
if (!a.hasArray()) {
a = a.toBytesArray();
}
if (!b.hasArray()) {
b = b.toBytesArray();
}
int bUpTo = b.arrayOffset();
final byte[] aArray = a.array();
final byte[] bArray = b.array();
final int end = a.arrayOffset() + a.length();
for (int aUpTo = a.arrayOffset(); aUpTo < end; aUpTo++, bUpTo++) {
if (aArray[aUpTo] != bArray[bUpTo]) {
return false;
}
}
return true;
}
public static int bytesHashCode(BytesReference a) {
if (!a.hasArray()) {
a = a.toBytesArray();
}
int result = 0;
final int end = a.arrayOffset() + a.length();
for (int i = a.arrayOffset(); i < end; i++) {
result = 31 * result + a.array()[i];
}
return result;
}
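// Note: this is a 31-based polynomial hash over the logical byte range, like
// String.hashCode(); unlike java.util.Arrays.hashCode(byte[]), the accumulator
// starts at 0 rather than 1, so the two are not interchangeable.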
}
| 1no label
|
src_main_java_org_elasticsearch_common_bytes_BytesReference.java
|
1,322 |
new SingleSourceUnitPackage(pkg, sourceUnitFullPath), moduleManager, CeylonBuilder.getProjectTypeChecker(project), tokens, originalProject) {
@Override
protected boolean reuseExistingDescriptorModels() {
return true;
}
};
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_core_model_JDTModule.java
|
1,083 |
public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<UpdateRequest, UpdateResponse, UpdateRequestBuilder> {
public UpdateRequestBuilder(Client client) {
super((InternalClient) client, new UpdateRequest());
}
public UpdateRequestBuilder(Client client, String index, String type, String id) {
super((InternalClient) client, new UpdateRequest(index, type, id));
}
/**
* Sets the type of the indexed document.
*/
public UpdateRequestBuilder setType(String type) {
request.type(type);
return this;
}
/**
* Sets the id of the indexed document.
*/
public UpdateRequestBuilder setId(String id) {
request.id(id);
return this;
}
/**
* Controls the shard routing of the request. This value is used to hash the
* shard instead of the id.
*/
public UpdateRequestBuilder setRouting(String routing) {
request.routing(routing);
return this;
}
public UpdateRequestBuilder setParent(String parent) {
request.parent(parent);
return this;
}
/**
* The script to execute. Note, make sure not to send different script each times and instead
* use script params if possible with the same (automatically compiled) script.
*/
public UpdateRequestBuilder setScript(String script) {
request.script(script);
return this;
}
/**
* The language of the script to execute.
*/
public UpdateRequestBuilder setScriptLang(String scriptLang) {
request.scriptLang(scriptLang);
return this;
}
/**
* Sets the script parameters to use with the script.
*/
public UpdateRequestBuilder setScriptParams(Map<String, Object> scriptParams) {
request.scriptParams(scriptParams);
return this;
}
/**
* Add a script parameter.
*/
public UpdateRequestBuilder addScriptParam(String name, Object value) {
request.addScriptParam(name, value);
return this;
}
/**
* Explicitly specify the fields that will be returned. By default, nothing is returned.
*/
public UpdateRequestBuilder setFields(String... fields) {
request.fields(fields);
return this;
}
/**
* Sets the number of retries to attempt if a version conflict occurs because the document was updated between
* getting it and updating it. Defaults to 0.
*/
public UpdateRequestBuilder setRetryOnConflict(int retryOnConflict) {
request.retryOnConflict(retryOnConflict);
return this;
}
/**
* Sets the version, which will cause the index operation to only be performed if a matching
* version exists and no changes happened on the doc since then.
*/
public UpdateRequestBuilder setVersion(long version) {
request.version(version);
return this;
}
/**
* Sets the versioning type. Defaults to {@link org.elasticsearch.index.VersionType#INTERNAL}.
*/
public UpdateRequestBuilder setVersionType(VersionType versionType) {
request.versionType(versionType);
return this;
}
/**
* Should a refresh be executed post this update operation causing the operation to
* be searchable. Note, heavy indexing should not set this to <tt>true</tt>. Defaults
* to <tt>false</tt>.
*/
public UpdateRequestBuilder setRefresh(boolean refresh) {
request.refresh(refresh);
return this;
}
/**
* Sets the replication type.
*/
public UpdateRequestBuilder setReplicationType(ReplicationType replicationType) {
request.replicationType(replicationType);
return this;
}
/**
* Sets the consistency level of write. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT}
*/
public UpdateRequestBuilder setConsistencyLevel(WriteConsistencyLevel consistencyLevel) {
request.consistencyLevel(consistencyLevel);
return this;
}
/**
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(IndexRequest indexRequest) {
request.doc(indexRequest);
return this;
}
/**
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(XContentBuilder source) {
request.doc(source);
return this;
}
/**
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(Map source) {
request.doc(source);
return this;
}
/**
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(Map source, XContentType contentType) {
request.doc(source, contentType);
return this;
}
/**
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(String source) {
request.doc(source);
return this;
}
/**
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(byte[] source) {
request.doc(source);
return this;
}
/**
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(byte[] source, int offset, int length) {
request.doc(source, offset, length);
return this;
}
/**
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(String field, Object value) {
request.doc(field, value);
return this;
}
/**
* Sets the doc to use for updates when a script is not specified; the doc is
* provided as field/value pairs.
*/
public UpdateRequestBuilder setDoc(Object... source) {
request.doc(source);
return this;
}
/**
* Sets the index request to be used if the document does not exist. Otherwise, a {@link org.elasticsearch.index.engine.DocumentMissingException}
* is thrown.
*/
public UpdateRequestBuilder setUpsert(IndexRequest indexRequest) {
request.upsert(indexRequest);
return this;
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
*/
public UpdateRequestBuilder setUpsert(XContentBuilder source) {
request.upsert(source);
return this;
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
*/
public UpdateRequestBuilder setUpsert(Map source) {
request.upsert(source);
return this;
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
*/
public UpdateRequestBuilder setUpsert(Map source, XContentType contentType) {
request.upsert(source, contentType);
return this;
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
*/
public UpdateRequestBuilder setUpsert(String source) {
request.upsert(source);
return this;
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
*/
public UpdateRequestBuilder setUpsert(byte[] source) {
request.upsert(source);
return this;
}
/**
* Sets the doc source of the update request to be used when the document does not exist.
*/
public UpdateRequestBuilder setUpsert(byte[] source, int offset, int length) {
request.upsert(source, offset, length);
return this;
}
/**
* Sets the doc source of the update request to be used when the document does not exist. The doc
* includes field and value pairs.
*/
public UpdateRequestBuilder setUpsert(Object... source) {
request.upsert(source);
return this;
}
public UpdateRequestBuilder setSource(XContentBuilder source) throws Exception {
request.source(source);
return this;
}
public UpdateRequestBuilder setSource(byte[] source) throws Exception {
request.source(source);
return this;
}
public UpdateRequestBuilder setSource(byte[] source, int offset, int length) throws Exception {
request.source(source, offset, length);
return this;
}
public UpdateRequestBuilder setSource(BytesReference source) throws Exception {
request.source(source);
return this;
}
/**
* Sets whether the specified doc parameter should be used as upsert document.
*/
public UpdateRequestBuilder setDocAsUpsert(boolean shouldUpsertDoc) {
request.docAsUpsert(shouldUpsertDoc);
return this;
}
@Override
protected void doExecute(ActionListener<UpdateResponse> listener) {
((Client) client).update(request, listener);
}
}
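// A minimal usage sketch (hypothetical index/type/id and script; the builder is
// normally obtained via client.prepareUpdate(...) rather than constructed directly):
//
//   UpdateResponse resp = client.prepareUpdate("orders", "order", "1")
//           .setScript("ctx._source.counter += 1")
//           .setRetryOnConflict(3)
//           .setUpsert("counter", 1)   // used if the document does not exist
//           .execute().actionGet();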
| 0true
|
src_main_java_org_elasticsearch_action_update_UpdateRequestBuilder.java
|
12 |
public class TestBed {
static class A {
private int c = 0;
private final Object o;
A(final Object o) {
this.o = o;
}
public void inc() {
c++;
}
}
private static final void doSomethingExpensive(int milliseconds) {
double d=0.0;
Random r = new Random();
for (int i=0;i<10000*milliseconds;i++) d+=Math.pow(1.1,r.nextDouble());
}
/**
* @param args
* @throws java.io.IOException
*/
public static void main(String[] args) throws Exception {
IDHandler.EdgeTypeParse ep = IDHandler.readEdgeType(StaticArrayBuffer.of(new byte[]{36}).asReadBuffer());
System.out.println(ep.typeId + " "+ BaseLabel.VertexLabelEdge.getLongId());
WriteBuffer out = new WriteByteBuffer(20);
IDHandler.writeEdgeType(out, BaseKey.VertexExists.getLongId(),IDHandler.DirectionID.PROPERTY_DIR, BaseKey.VertexExists.isHiddenType());
StaticBuffer b = out.getStaticBuffer();
System.exit(0);
final ScheduledExecutorService exe = new ScheduledThreadPoolExecutor(1,new RejectedExecutionHandler() {
@Override
public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
r.run();
}
});
ScheduledFuture future = exe.scheduleWithFixedDelay(new Runnable() {
AtomicInteger atomicInt = new AtomicInteger(0);
@Override
public void run() {
try {
for (int i=0;i<10;i++) {
exe.submit(new Runnable() {
private final int number = atomicInt.incrementAndGet();
@Override
public void run() {
try {
Thread.sleep(150);
} catch (InterruptedException e) {
e.printStackTrace();
}
System.out.println(number);
}
});
System.out.println("Submitted: "+i);
// doSomethingExpensive(20);
}
} catch (Exception e) {
e.printStackTrace();
}
}
},0,1, TimeUnit.SECONDS);
Thread.sleep(10000);
// future.get(1,TimeUnit.SECONDS);
System.out.println("Cancel: " + future.cancel(false));
System.out.println("Done: " + future.isDone());
exe.shutdown();
// Thread.sleep(2000);
System.out.println("Terminate: " + exe.awaitTermination(5,TimeUnit.SECONDS));
System.out.println("DONE");
NonBlockingHashMapLong<String> id1 = new NonBlockingHashMapLong<String>(128);
ConcurrentHashMap<Long,String> id2 = new ConcurrentHashMap<Long, String>(128,0.75f,2);
}
public static String toBinary(int b) {
String res = Integer.toBinaryString(b);
while (res.length() < 32) res = "0" + res;
return res;
}
private static void codeSnippets() throws Exception {
TitanGraph g = TitanFactory.open("/tmp/titan");
g.createKeyIndex("name", Vertex.class);
Vertex juno = g.addVertex(null);
juno.setProperty("name", "juno");
juno = g.getVertices("name", "juno").iterator().next();
TransactionalGraph tx = g.newTransaction();
Thread[] threads = new Thread[10];
for (int i = 0; i < threads.length; i++) {
//threads[i]=new Thread(new DoSomething(tx));
threads[i].start();
}
for (int i = 0; i < threads.length; i++) threads[i].join();
tx.commit();
}
}
| 0true
|
titan-test_src_main_java_com_thinkaurelius_titan_TestBed.java
|
755 |
public class ListAddOperation extends CollectionAddOperation {
private int index = -1;
public ListAddOperation() {
}
public ListAddOperation(String name, int index, Data value) {
super(name, value);
this.index = index;
}
@Override
public void run() throws Exception {
final ListContainer container = getOrCreateListContainer();
response = false;
if (!hasEnoughCapacity(1)) {
return;
}
final CollectionItem item = container.add(index, value);
if (item != null) {
itemId = item.getItemId();
response = true;
}
}
@Override
public Operation getBackupOperation() {
return new CollectionAddBackupOperation(name, itemId, value);
}
@Override
public int getId() {
return CollectionDataSerializerHook.LIST_ADD;
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeInt(index);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
index = in.readInt();
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_collection_list_ListAddOperation.java
|
4,459 |
class Listener extends IndicesLifecycle.Listener {
@Override
public void afterIndexShardCreated(IndexShard indexShard) {
synchronized (mutex) {
shardsIndicesStatus.put(indexShard.shardId(), new ShardIndexingStatus());
shardsCreatedOrDeleted.set(true);
}
}
@Override
public void afterIndexShardClosed(ShardId shardId) {
synchronized (mutex) {
shardsIndicesStatus.remove(shardId);
shardsCreatedOrDeleted.set(true);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_indices_memory_IndexingMemoryController.java
|
3,134 |
public class QueueIterator<E> implements Iterator<E> {
private final Iterator<Data> iterator;
private final SerializationService serializationService;
private final boolean binary;
public QueueIterator(Iterator<Data> iterator, SerializationService serializationService, boolean binary) {
this.iterator = iterator;
this.serializationService = serializationService;
this.binary = binary;
}
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public E next() {
Data data = iterator.next();
if (binary) {
return (E) data;
}
return (E) serializationService.toObject(data);
}
@Override
public void remove() {
iterator.remove();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_queue_proxy_QueueIterator.java
|
323 |
public class OStorageMemoryClusterConfiguration extends OAbstractStorageClusterConfiguration {
public OStorageMemoryClusterConfiguration(final String name, final int id, final int iDataSegmentId) {
super(name, id, iDataSegmentId);
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_config_OStorageMemoryClusterConfiguration.java
|
295 |
public enum ThreadingModel {
NONE((byte) 0),
OPERATION((byte) 1),
LISTENER((byte) 2),
OPERATION_LISTENER((byte) 3);
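// The ids form a two-bit mask: bit 0 marks a threaded operation and bit 1 a
// threaded listener, so OPERATION_LISTENER (3) is simply OPERATION (1) | LISTENER (2).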
private byte id;
ThreadingModel(byte id) {
this.id = id;
}
public byte id() {
return this.id;
}
/**
* <tt>true</tt> if the actual operation the action represents will be executed
* on a different thread than the calling thread (assuming it will be executed
* on the same node).
*/
public boolean threadedOperation() {
return this == OPERATION || this == OPERATION_LISTENER;
}
/**
* <tt>true</tt> if the invocation of the action result listener will be executed
* on a different thread (than the calling thread or an "expensive" thread, like the
* IO thread).
*/
public boolean threadedListener() {
return this == LISTENER || this == OPERATION_LISTENER;
}
public ThreadingModel addListener() {
if (this == NONE) {
return LISTENER;
}
if (this == OPERATION) {
return OPERATION_LISTENER;
}
return this;
}
public ThreadingModel removeListener() {
if (this == LISTENER) {
return NONE;
}
if (this == OPERATION_LISTENER) {
return OPERATION;
}
return this;
}
public ThreadingModel addOperation() {
if (this == NONE) {
return OPERATION;
}
if (this == LISTENER) {
return OPERATION_LISTENER;
}
return this;
}
public ThreadingModel removeOperation() {
if (this == OPERATION) {
return NONE;
}
if (this == OPERATION_LISTENER) {
return LISTENER;
}
return this;
}
public static ThreadingModel fromId(byte id) {
if (id == 0) {
return NONE;
} else if (id == 1) {
return OPERATION;
} else if (id == 2) {
return LISTENER;
} else if (id == 3) {
return OPERATION_LISTENER;
} else {
throw new ElasticsearchIllegalArgumentException("No threading model for [" + id + "]");
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_ThreadingModel.java
|
1,393 |
public interface Custom {
interface Factory<T extends Custom> {
String type();
T readFrom(StreamInput in) throws IOException;
void writeTo(T customIndexMetaData, StreamOutput out) throws IOException;
T fromXContent(XContentParser parser) throws IOException;
void toXContent(T customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException;
/**
* Returns true if this custom metadata should be persisted as part of global cluster state
*/
boolean isPersistent();
}
}
| 0true
|
src_main_java_org_elasticsearch_cluster_metadata_MetaData.java
|
92 |
@SuppressWarnings("serial")
static final class ReduceEntriesTask<K,V>
extends BulkTask<K,V,Map.Entry<K,V>> {
final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
Map.Entry<K,V> result;
ReduceEntriesTask<K,V> rights, nextRight;
ReduceEntriesTask
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
ReduceEntriesTask<K,V> nextRight,
BiFun<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
super(p, b, i, f, t); this.nextRight = nextRight;
this.reducer = reducer;
}
public final Map.Entry<K,V> getRawResult() { return result; }
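// compute() halves the remaining table range, forking a right-hand subtask for
// each split, reduces its own leaf range sequentially with the reducer, then
// merges sibling results while walking up the CountedCompleter chain.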
public final void compute() {
final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
if ((reducer = this.reducer) != null) {
for (int i = baseIndex, f, h; batch > 0 &&
(h = ((f = baseLimit) + i) >>> 1) > i;) {
addToPendingCount(1);
(rights = new ReduceEntriesTask<K,V>
(this, batch >>>= 1, baseLimit = h, f, tab,
rights, reducer)).fork();
}
Map.Entry<K,V> r = null;
for (Node<K,V> p; (p = advance()) != null; )
r = (r == null) ? p : reducer.apply(r, p);
result = r;
CountedCompleter<?> c;
for (c = firstComplete(); c != null; c = c.nextComplete()) {
@SuppressWarnings("unchecked") ReduceEntriesTask<K,V>
t = (ReduceEntriesTask<K,V>)c,
s = t.rights;
while (s != null) {
Map.Entry<K,V> tr, sr;
if ((sr = s.result) != null)
t.result = (((tr = t.result) == null) ? sr :
reducer.apply(tr, sr));
s = t.rights = s.nextRight;
}
}
}
}
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
36 |
private static final class NullProposal
implements ICompletionProposal, ICompletionProposalExtension2 {
private List<ICompletionProposal> proposals;
private NullProposal(List<ICompletionProposal> proposals) {
this.proposals = proposals;
}
@Override
public void apply(IDocument document) {}
@Override
public Point getSelection(IDocument document) {
return null;
}
@Override
public String getAdditionalProposalInfo() {
return null;
}
@Override
public String getDisplayString() {
return "";
}
@Override
public Image getImage() {
return null;
}
@Override
public IContextInformation getContextInformation() {
return null;
}
@Override
public void apply(ITextViewer viewer, char trigger, int stateMask,
int offset) {}
@Override
public void selected(ITextViewer viewer, boolean smartToggle) {}
@Override
public void unselected(ITextViewer viewer) {}
@Override
public boolean validate(IDocument document, int offset,
DocumentEvent event) {
for (ICompletionProposal p: proposals) {
if (p instanceof ICompletionProposalExtension2) {
ICompletionProposalExtension2 ext =
(ICompletionProposalExtension2) p;
if (ext.validate(document, offset, event)) {
return true;
}
}
else {
return true;
}
}
return false;
}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_LinkedModeCompletionProposal.java
|
176 |
public strictfp class MersenneTwister extends java.util.Random implements Serializable, Cloneable
{
// Serialization
private static final long serialVersionUID = -4035832775130174188L; // locked as of Version 15
// Period parameters
private static final int N = 624;
private static final int M = 397;
private static final int MATRIX_A = 0x9908b0df; // private static final * constant vector a
private static final int UPPER_MASK = 0x80000000; // most significant w-r bits
private static final int LOWER_MASK = 0x7fffffff; // least significant r bits
// Tempering parameters
private static final int TEMPERING_MASK_B = 0x9d2c5680;
private static final int TEMPERING_MASK_C = 0xefc60000;
private int mt[]; // the array for the state vector
private int mti; // mti==N+1 means mt[N] is not initialized
private int mag01[];
// a good initial seed (of int size, though stored in a long)
//private static final long GOOD_SEED = 4357;
/* implemented here because there's a bug in Random's implementation
of the Gaussian code (divide by zero, and log(0), ugh!), yet its
gaussian variables are private so we can't access them here. :-( */
private double __nextNextGaussian;
private boolean __haveNextNextGaussian;
/* We're overriding all internal data, to my knowledge, so this should be okay */
public Object clone()
{
try
{
MersenneTwister f = (MersenneTwister)(super.clone());
f.mt = (int[])(mt.clone());
f.mag01 = (int[])(mag01.clone());
return f;
}
catch (CloneNotSupportedException e) { throw new InternalError(); } // should never happen
}
public boolean stateEquals(Object o)
{
if (o==this) return true;
if (o == null || !(o instanceof MersenneTwister))
return false;
MersenneTwister other = (MersenneTwister) o;
if (mti != other.mti) return false;
for(int x=0;x<mag01.length;x++)
if (mag01[x] != other.mag01[x]) return false;
for(int x=0;x<mt.length;x++)
if (mt[x] != other.mt[x]) return false;
return true;
}
/** Reads the entire state of the MersenneTwister RNG from the stream */
public void readState(DataInputStream stream) throws IOException
{
int len = mt.length;
for(int x=0;x<len;x++) mt[x] = stream.readInt();
len = mag01.length;
for(int x=0;x<len;x++) mag01[x] = stream.readInt();
mti = stream.readInt();
__nextNextGaussian = stream.readDouble();
__haveNextNextGaussian = stream.readBoolean();
}
/** Writes the entire state of the MersenneTwister RNG to the stream */
public void writeState(DataOutputStream stream) throws IOException
{
int len = mt.length;
for(int x=0;x<len;x++) stream.writeInt(mt[x]);
len = mag01.length;
for(int x=0;x<len;x++) stream.writeInt(mag01[x]);
stream.writeInt(mti);
stream.writeDouble(__nextNextGaussian);
stream.writeBoolean(__haveNextNextGaussian);
}
/**
* Constructor using the default seed.
*/
public MersenneTwister()
{
this(System.currentTimeMillis());
}
/**
* Constructor using a given seed. Though you pass this seed in
* as a long, it's best to make sure it's actually an integer.
*/
public MersenneTwister(final long seed)
{
super(seed); /* just in case */
setSeed(seed);
}
/**
* Constructor using an array of integers as seed.
* Your array must have a non-zero length. Only the first 624 integers
* in the array are used; if the array is shorter than this then
* integers are repeatedly used in a wrap-around fashion.
*/
public MersenneTwister(final int[] array)
{
super(System.currentTimeMillis()); /* pick something at random just in case */
setSeed(array);
}
/**
* Initialize the pseudo random number generator. Don't
* pass in a long that's bigger than an int (Mersenne Twister
* only uses the first 32 bits for its seed).
*/
synchronized public void setSeed(final long seed)
{
// it's always good style to call super
super.setSeed(seed);
// Due to a bug in java.util.Random clear up to 1.2, we're
// doing our own Gaussian variable.
__haveNextNextGaussian = false;
mt = new int[N];
mag01 = new int[2];
mag01[0] = 0x0;
mag01[1] = MATRIX_A;
mt[0]= (int)(seed & 0xffffffff);
for (mti=1; mti<N; mti++)
{
mt[mti] =
(1812433253 * (mt[mti-1] ^ (mt[mti-1] >>> 30)) + mti);
/* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
/* In the previous versions, MSBs of the seed affect */
/* only MSBs of the array mt[]. */
/* 2002/01/09 modified by Makoto Matsumoto */
mt[mti] &= 0xffffffff;
/* for >32 bit machines */
}
}
/**
* Sets the seed of the MersenneTwister using an array of integers.
* Your array must have a non-zero length. Only the first 624 integers
* in the array are used; if the array is shorter than this then
* integers are repeatedly used in a wrap-around fashion.
*/
synchronized public void setSeed(final int[] array)
{
if (array.length == 0)
throw new IllegalArgumentException("Array length must be greater than zero");
int i, j, k;
setSeed(19650218);
i=1; j=0;
k = (N>array.length ? N : array.length);
for (; k!=0; k--)
{
mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >>> 30)) * 1664525)) + array[j] + j; /* non linear */
mt[i] &= 0xffffffff; /* for WORDSIZE > 32 machines */
i++;
j++;
if (i>=N) { mt[0] = mt[N-1]; i=1; }
if (j>=array.length) j=0;
}
for (k=N-1; k!=0; k--)
{
mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >>> 30)) * 1566083941)) - i; /* non linear */
mt[i] &= 0xffffffff; /* for WORDSIZE > 32 machines */
i++;
if (i>=N)
{
mt[0] = mt[N-1]; i=1;
}
}
mt[0] = 0x80000000; /* MSB is 1; assuring non-zero initial array */
}
/**
* Returns an integer with <i>bits</i> bits filled with a random number.
*/
synchronized protected int next(final int bits)
{
int y;
if (mti >= N) // generate N words at one time
{
int kk;
final int[] mt = this.mt; // locals are slightly faster
final int[] mag01 = this.mag01; // locals are slightly faster
for (kk = 0; kk < N - M; kk++)
{
y = (mt[kk] & UPPER_MASK) | (mt[kk+1] & LOWER_MASK);
mt[kk] = mt[kk+M] ^ (y >>> 1) ^ mag01[y & 0x1];
}
for (; kk < N-1; kk++)
{
y = (mt[kk] & UPPER_MASK) | (mt[kk+1] & LOWER_MASK);
mt[kk] = mt[kk+(M-N)] ^ (y >>> 1) ^ mag01[y & 0x1];
}
y = (mt[N-1] & UPPER_MASK) | (mt[0] & LOWER_MASK);
mt[N-1] = mt[M-1] ^ (y >>> 1) ^ mag01[y & 0x1];
mti = 0;
}
y = mt[mti++];
y ^= y >>> 11; // TEMPERING_SHIFT_U(y)
y ^= (y << 7) & TEMPERING_MASK_B; // TEMPERING_SHIFT_S(y)
y ^= (y << 15) & TEMPERING_MASK_C; // TEMPERING_SHIFT_T(y)
y ^= (y >>> 18); // TEMPERING_SHIFT_L(y)
return y >>> (32 - bits); // hope that's right!
}
/* If you've got a truly old version of Java, you can omit these
two next methods. */
private synchronized void writeObject(final ObjectOutputStream out)
throws IOException
{
// just so we're synchronized.
out.defaultWriteObject();
}
private synchronized void readObject (final ObjectInputStream in)
throws IOException, ClassNotFoundException
{
// just so we're synchronized.
in.defaultReadObject();
}
/** This method is missing from jdk 1.0.x and below. JDK 1.1
includes this for us, but what the heck.*/
public boolean nextBoolean() {return next(1) != 0;}
/** This generates a coin flip with a probability <tt>probability</tt>
of returning true, else returning false. <tt>probability</tt> must
be between 0.0 and 1.0, inclusive. Not as precise a random real
event as nextBoolean(double), but twice as fast. To explicitly
use this, remember you may need to cast to float first. */
public boolean nextBoolean (final float probability)
{
if (probability < 0.0f || probability > 1.0f)
throw new IllegalArgumentException ("probability must be between 0.0 and 1.0 inclusive.");
if (probability==0.0f) return false; // fix half-open issues
else if (probability==1.0f) return true; // fix half-open issues
return nextFloat() < probability;
}
/** This generates a coin flip with a probability <tt>probability</tt>
of returning true, else returning false. <tt>probability</tt> must
be between 0.0 and 1.0, inclusive. */
public boolean nextBoolean (final double probability)
{
if (probability < 0.0 || probability > 1.0)
throw new IllegalArgumentException ("probability must be between 0.0 and 1.0 inclusive.");
if (probability==0.0) return false; // fix half-open issues
else if (probability==1.0) return true; // fix half-open issues
return nextDouble() < probability;
}
/** This method is missing from JDK 1.1 and below. JDK 1.2
includes this for us, but what the heck. */
public int nextInt(final int n)
{
if (n<=0)
throw new IllegalArgumentException("n must be positive, got: " + n);
if ((n & -n) == n)
return (int)((n * (long)next(31)) >> 31);
int bits, val;
do
{
bits = next(31);
val = bits % n;
}
while(bits - val + (n-1) < 0);
return val;
}
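/* When n is a power of two ((n & -n) == n) the result is taken from the high
   bits directly; otherwise the rejection loop discards draws from the incomplete
   top block of the 31-bit range so every remainder stays equally likely. */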
/** This method is for completeness' sake.
Returns a long drawn uniformly from 0 to n-1. Suffice it to say,
n must be > 0, or an IllegalArgumentException is raised. */
public long nextLong(final long n)
{
if (n<=0)
throw new IllegalArgumentException("n must be positive, got: " + n);
long bits, val;
do
{
bits = (nextLong() >>> 1);
val = bits % n;
}
while(bits - val + (n-1) < 0);
return val;
}
/** A bug fix for versions of JDK 1.1 and below. JDK 1.2 fixes
this for us, but what the heck. */
public double nextDouble()
{
return (((long)next(26) << 27) + next(27))
/ (double)(1L << 53);
}
/** Returns a double in the range from 0.0 to 1.0, possibly inclusive of 0.0 and 1.0 themselves. Thus:
<p><table border=0>
<th><td>Expression<td>Interval
<tr><td>nextDouble(false, false)<td>(0.0, 1.0)
<tr><td>nextDouble(true, false)<td>[0.0, 1.0)
<tr><td>nextDouble(false, true)<td>(0.0, 1.0]
<tr><td>nextDouble(true, true)<td>[0.0, 1.0]
</table>
<p>This version preserves all possible random values in the double range.
*/
public double nextDouble(boolean includeZero, boolean includeOne)
{
double d = 0.0;
do
{
d = nextDouble(); // grab a value, initially from half-open [0.0, 1.0)
if (includeOne && nextBoolean()) d += 1.0; // if includeOne, with 1/2 probability, push to [1.0, 2.0)
}
while ( (d > 1.0) || // everything above 1.0 is always invalid
(!includeZero && d == 0.0)); // if we're not including zero, 0.0 is invalid
return d;
}
/** A bug fix for versions of JDK 1.1 and below. JDK 1.2 fixes
this for us, but what the heck. */
public float nextFloat()
{
return next(24) / ((float)(1 << 24));
}
/** Returns a float in the range from 0.0f to 1.0f, possibly inclusive of 0.0f and 1.0f themselves. Thus:
<p><table border=0>
<th><td>Expression<td>Interval
<tr><td>nextFloat(false, false)<td>(0.0f, 1.0f)
<tr><td>nextFloat(true, false)<td>[0.0f, 1.0f)
<tr><td>nextFloat(false, true)<td>(0.0f, 1.0f]
<tr><td>nextFloat(true, true)<td>[0.0f, 1.0f]
</table>
<p>This version preserves all possible random values in the float range.
*/
public float nextFloat(boolean includeZero, boolean includeOne)
{
float d = 0.0f;
do
{
d = nextFloat(); // grab a value, initially from half-open [0.0f, 1.0f)
if (includeOne && nextBoolean()) d += 1.0f; // if includeOne, with 1/2 probability, push to [1.0f, 2.0f)
}
while ( (d > 1.0f) || // everything above 1.0f is always invalid
(!includeZero && d == 0.0f)); // if we're not including zero, 0.0f is invalid
return d;
}
/** A bug fix for all versions of the JDK. The JDK appears to
use all four bytes in an integer as independent byte values!
Totally wrong. I've submitted a bug report. */
public void nextBytes(final byte[] bytes)
{
for (int x=0;x<bytes.length;x++) bytes[x] = (byte)next(8);
}
/** For completeness' sake, though it's not in java.util.Random. */
public char nextChar()
{
// chars are 16-bit Unicode values
return (char)(next(16));
}
/** For completeness' sake, though it's not in java.util.Random. */
public short nextShort()
{
return (short)(next(16));
}
/** For completeness' sake, though it's not in java.util.Random. */
public byte nextByte()
{
return (byte)(next(8));
}
/** A bug fix for all JDK code including 1.2. nextGaussian can theoretically
ask for the log of 0 and divide it by 0! See Java bug
<a href="http://developer.java.sun.com/developer/bugParade/bugs/4254501.html">
http://developer.java.sun.com/developer/bugParade/bugs/4254501.html</a>
*/
synchronized public double nextGaussian()
{
if (__haveNextNextGaussian)
{
__haveNextNextGaussian = false;
return __nextNextGaussian;
}
else
{
double v1, v2, s;
do
{
v1 = 2 * nextDouble() - 1; // between -1.0 and 1.0
v2 = 2 * nextDouble() - 1; // between -1.0 and 1.0
s = v1 * v1 + v2 * v2;
} while (s >= 1 || s==0 );
double multiplier = StrictMath.sqrt(-2 * StrictMath.log(s)/s);
__nextNextGaussian = v2 * multiplier;
__haveNextNextGaussian = true;
return v1 * multiplier;
}
}
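/* The loop above is the Marsaglia polar method: sample (v1, v2) uniformly in the
   unit square, reject points outside the unit circle, and transform the survivor
   into two independent standard normals, caching the second for the next call. */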
/**
* Tests the code.
*/
public static void main(String args[])
{
int j;
MersenneTwister r;
// CORRECTNESS TEST
// COMPARE WITH http://www.math.keio.ac.jp/matumoto/CODES/MT2002/mt19937ar.out
r = new MersenneTwister(new int[]{0x123, 0x234, 0x345, 0x456});
System.out.println("Output of MersenneTwister with new (2002/1/26) seeding mechanism");
for (j=0;j<1000;j++)
{
// first, convert the int from signed to "unsigned"
long l = (long)r.nextInt();
if (l < 0 ) l += 4294967296L; // max int value
String s = String.valueOf(l);
while(s.length() < 10) s = " " + s; // buffer
System.out.print(s + " ");
if (j%5==4) System.out.println();
}
// SPEED TEST
final long SEED = 4357;
int xx; long ms;
System.out.println("\nTime to test grabbing 100000000 ints");
r = new MersenneTwister(SEED);
ms = System.currentTimeMillis();
xx=0;
for (j = 0; j < 100000000; j++)
xx += r.nextInt();
System.out.println("Mersenne Twister: " + (System.currentTimeMillis()-ms) + " Ignore this: " + xx);
System.out.println("To compare this with java.util.Random, run this same test on MersenneTwisterFast.");
System.out.println("The comparison with Random is removed from MersenneTwister because it is a proper");
System.out.println("subclass of Random and this unfairly makes some of Random's methods un-inlinable,");
System.out.println("so it would make Random look worse than it is.");
// TEST TO COMPARE TYPE CONVERSION BETWEEN
// MersenneTwisterFast.java AND MersenneTwister.java
System.out.println("\nGrab the first 1000 booleans");
r = new MersenneTwister(SEED);
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextBoolean() + " ");
if (j%8==7) System.out.println();
}
if (!(j%8==7)) System.out.println();
System.out.println("\nGrab 1000 booleans of increasing probability using nextBoolean(double)");
r = new MersenneTwister(SEED);
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextBoolean((double)(j/999.0)) + " ");
if (j%8==7) System.out.println();
}
if (!(j%8==7)) System.out.println();
System.out.println("\nGrab 1000 booleans of increasing probability using nextBoolean(float)");
r = new MersenneTwister(SEED);
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextBoolean((float)(j/999.0f)) + " ");
if (j%8==7) System.out.println();
}
if (!(j%8==7)) System.out.println();
byte[] bytes = new byte[1000];
System.out.println("\nGrab the first 1000 bytes using nextBytes");
r = new MersenneTwister(SEED);
r.nextBytes(bytes);
for (j = 0; j < 1000; j++)
{
System.out.print(bytes[j] + " ");
if (j%16==15) System.out.println();
}
if (!(j%16==15)) System.out.println();
byte b;
System.out.println("\nGrab the first 1000 bytes -- must be same as nextBytes");
r = new MersenneTwister(SEED);
for (j = 0; j < 1000; j++)
{
System.out.print((b = r.nextByte()) + " ");
if (b!=bytes[j]) System.out.print("BAD ");
if (j%16==15) System.out.println();
}
if (!(j%16==15)) System.out.println();
System.out.println("\nGrab the first 1000 shorts");
r = new MersenneTwister(SEED);
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextShort() + " ");
if (j%8==7) System.out.println();
}
if (!(j%8==7)) System.out.println();
System.out.println("\nGrab the first 1000 ints");
r = new MersenneTwister(SEED);
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextInt() + " ");
if (j%4==3) System.out.println();
}
if (!(j%4==3)) System.out.println();
System.out.println("\nGrab the first 1000 ints of different sizes");
r = new MersenneTwister(SEED);
int max = 1;
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextInt(max) + " ");
max *= 2;
if (max <= 0) max = 1;
if (j%4==3) System.out.println();
}
if (!(j%4==3)) System.out.println();
System.out.println("\nGrab the first 1000 longs");
r = new MersenneTwister(SEED);
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextLong() + " ");
if (j%3==2) System.out.println();
}
if (!(j%3==2)) System.out.println();
System.out.println("\nGrab the first 1000 longs of different sizes");
r = new MersenneTwister(SEED);
long max2 = 1;
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextLong(max2) + " ");
max2 *= 2;
if (max2 <= 0) max2 = 1;
if (j%4==3) System.out.println();
}
if (!(j%4==3)) System.out.println();
System.out.println("\nGrab the first 1000 floats");
r = new MersenneTwister(SEED);
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextFloat() + " ");
if (j%4==3) System.out.println();
}
if (!(j%4==3)) System.out.println();
System.out.println("\nGrab the first 1000 doubles");
r = new MersenneTwister(SEED);
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextDouble() + " ");
if (j%3==2) System.out.println();
}
if (!(j%3==2)) System.out.println();
System.out.println("\nGrab the first 1000 gaussian doubles");
r = new MersenneTwister(SEED);
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextGaussian() + " ");
if (j%3==2) System.out.println();
}
if (!(j%3==2)) System.out.println();
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_util_MersenneTwister.java
|
646 |
public class PrecompressedArtifactFilter extends GenericFilterBean {
private boolean useWhileInDefaultEnvironment = true;
@Resource(name="blConfiguration")
RuntimeEnvironmentPropertiesConfigurer configurer;
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain chain) throws IOException, ServletException {
HttpServletRequest request = (HttpServletRequest) servletRequest;
HttpServletResponse response = (HttpServletResponse) servletResponse;
checkOutput: {
if (!configurer.determineEnvironment().equals(configurer.getDefaultEnvironment()) || useWhileInDefaultEnvironment) {
String path = getResourcePath(request);
String gzipPath = path + ".gz";
if (useGzipCompression(request, response, path, gzipPath)) {
File output = new File(getServletContext().getRealPath(gzipPath));
if (output.exists()) {
response.addHeader("Content-Encoding", "gzip");
ServletOutputStream sos = servletResponse.getOutputStream();
BufferedInputStream bis = null;
try {
bis = new BufferedInputStream(new FileInputStream(output));
boolean eof = false;
while (!eof) {
int temp = bis.read();
if (temp < 0) {
eof = true;
} else {
sos.write(temp);
}
}
} finally {
sos.flush();
try {
if (bis != null) {
bis.close();
}
} catch (Exception e) {
//do nothing
}
}
break checkOutput;
}
}
}
chain.doFilter(request, response);
}
}
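// In short: for a request to /js/app.js this filter looks for /js/app.js.gz on
// disk and, when the client advertises gzip support, streams the precompressed
// file with a "Content-Encoding: gzip" header instead of the original resource.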
/**
* Copied from Tomcat
*
* Return the page resource path from the request. For example:
* <pre class="codeHtml">
* <span class="blue">http://www.mycorp.com/banking/secure/login.htm</span> -> <span class="red">/secure/login.htm</span> </pre>
*
* @param request the page servlet request
* @return the page resource path from the request
*/
public String getResourcePath(HttpServletRequest request) {
// Adapted from VelocityViewServlet.handleRequest() method:
// If we get here from RequestDispatcher.include(), getServletPath()
// will return the original (wrong) URI requested. The following
// special attribute holds the correct path. See section 8.3 of the
// Servlet 2.3 specification.
String path = (String)
request.getAttribute("javax.servlet.include.servlet_path");
// Also take into account the PathInfo stated on
// SRV.4.4 Request Path Elements.
String info = (String)
request.getAttribute("javax.servlet.include.path_info");
if (path == null) {
path = request.getServletPath();
info = request.getPathInfo();
}
if (info != null) {
path += info;
}
return path;
}
/**
* Copied from Tomcat
*
* Return true if the response should be GZIP compressed.
*
* @param request the request to test
* @param response the response to test
* @param path the request path to test
* @return true if the response should be GZIP compressed
*/
protected boolean useGzipCompression(HttpServletRequest request, HttpServletResponse response, String path, String gzipPath) throws MalformedURLException {
if (gzipPath == null) {
return false;
}
String temp = path.toLowerCase();
if (temp.endsWith(".gif") || temp.endsWith(".png") || temp.endsWith(".jpg")) {
return false;
}
if (getServletContext().getResource(gzipPath) == null) {
return false;
}
// If Content-Encoding header is already set on response, skip compression
if (response.containsHeader("Content-Encoding")) {
return false;
}
// Are we allowed to compress ?
String s = request.getParameter("gzip");
if ("false".equals(s)) {
return false;
}
Enumeration<?> e = request.getHeaders("Accept-Encoding");
while (e.hasMoreElements()) {
String name = (String) e.nextElement();
if (name.contains("gzip")) {
return true;
}
}
// the client did not advertise gzip support in Accept-Encoding
return false;
}
public boolean isUseWhileInDefaultEnvironment() {
return useWhileInDefaultEnvironment;
}
public void setUseWhileInDefaultEnvironment(boolean useWhileInDefaultEnvironment) {
this.useWhileInDefaultEnvironment = useWhileInDefaultEnvironment;
}
public RuntimeEnvironmentPropertiesConfigurer getConfigurer() {
return configurer;
}
public void setConfigurer(RuntimeEnvironmentPropertiesConfigurer configurer) {
this.configurer = configurer;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_web_util_PrecompressedArtifactFilter.java
|
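The filter above never compresses on the fly; it only serves a sibling ".gz" file that already exists at the resolved path. A minimal build-time sketch for producing those artifacts with java.util.zip.GZIPOutputStream might look like the following (the file path in main is hypothetical):
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.zip.GZIPOutputStream;

public class GzipArtifacts {
    // Writes <file>.gz next to the original so the filter can resolve it at <path>.gz
    public static void compress(File source) throws IOException {
        File target = new File(source.getPath() + ".gz");
        InputStream in = new BufferedInputStream(new FileInputStream(source));
        OutputStream out = new GZIPOutputStream(new FileOutputStream(target));
        try {
            byte[] buffer = new byte[8192];
            int read;
            while ((read = in.read(buffer)) != -1) {
                out.write(buffer, 0, read);
            }
        } finally {
            in.close();
            out.close(); // GZIPOutputStream.close() also finishes the gzip stream
        }
    }

    public static void main(String[] args) throws IOException {
        compress(new File("src/main/webapp/js/app.js")); // hypothetical artifact
    }
}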
43 |
public interface ONavigableSet<E> extends SortedSet<E> {
/**
* Returns the greatest element in this set strictly less than the given element, or {@code null} if there is no such element.
*
* @param e
* the value to match
* @return the greatest element less than {@code e}, or {@code null} if there is no such element
* @throws ClassCastException
* if the specified element cannot be compared with the elements currently in the set
* @throws NullPointerException
* if the specified element is null and this set does not permit null elements
*/
E lower(E e);
/**
* Returns the greatest element in this set less than or equal to the given element, or {@code null} if there is no such element.
*
* @param e
* the value to match
* @return the greatest element less than or equal to {@code e}, or {@code null} if there is no such element
* @throws ClassCastException
* if the specified element cannot be compared with the elements currently in the set
* @throws NullPointerException
* if the specified element is null and this set does not permit null elements
*/
E floor(E e);
/**
* Returns the least element in this set greater than or equal to the given element, or {@code null} if there is no such element.
*
* @param e
* the value to match
* @return the least element greater than or equal to {@code e}, or {@code null} if there is no such element
* @throws ClassCastException
* if the specified element cannot be compared with the elements currently in the set
* @throws NullPointerException
* if the specified element is null and this set does not permit null elements
*/
E ceiling(E e);
/**
* Returns the least element in this set strictly greater than the given element, or {@code null} if there is no such element.
*
* @param e
* the value to match
* @return the least element greater than {@code e}, or {@code null} if there is no such element
* @throws ClassCastException
* if the specified element cannot be compared with the elements currently in the set
* @throws NullPointerException
* if the specified element is null and this set does not permit null elements
*/
E higher(E e);
/**
* Retrieves and removes the first (lowest) element, or returns {@code null} if this set is empty.
*
* @return the first element, or {@code null} if this set is empty
*/
E pollFirst();
/**
* Retrieves and removes the last (highest) element, or returns {@code null} if this set is empty.
*
* @return the last element, or {@code null} if this set is empty
*/
E pollLast();
/**
* Returns an iterator over the elements in this set, in ascending order.
*
* @return an iterator over the elements in this set, in ascending order
*/
OLazyIterator<E> iterator();
/**
* Returns a reverse order view of the elements contained in this set. The descending set is backed by this set, so changes to the
* set are reflected in the descending set, and vice-versa. If either set is modified while an iteration over either set is in
* progress (except through the iterator's own {@code remove} operation), the results of the iteration are undefined.
*
* <p>
* The returned set has an ordering equivalent to
* <tt>{@link Collections#reverseOrder(Comparator) Collections.reverseOrder}(comparator())</tt>. The expression
* {@code s.descendingSet().descendingSet()} returns a view of {@code s} essentially equivalent to {@code s}.
*
* @return a reverse order view of this set
*/
ONavigableSet<E> descendingSet();
/**
* Returns an iterator over the elements in this set, in descending order. Equivalent in effect to
* {@code descendingSet().iterator()}.
*
* @return an iterator over the elements in this set, in descending order
*/
Iterator<E> descendingIterator();
/**
* Returns a view of the portion of this set whose elements range from {@code fromElement} to {@code toElement}. If
* {@code fromElement} and {@code toElement} are equal, the returned set is empty unless {@code fromExclusive} and
* {@code toExclusive} are both true. The returned set is backed by this set, so changes in the returned set are reflected in this
* set, and vice-versa. The returned set supports all optional set operations that this set supports.
*
* <p>
* The returned set will throw an {@code IllegalArgumentException} on an attempt to insert an element outside its range.
*
* @param fromElement
* low endpoint of the returned set
* @param fromInclusive
* {@code true} if the low endpoint is to be included in the returned view
* @param toElement
* high endpoint of the returned set
* @param toInclusive
* {@code true} if the high endpoint is to be included in the returned view
* @return a view of the portion of this set whose elements range from {@code fromElement}, inclusive, to {@code toElement},
* exclusive
* @throws ClassCastException
* if {@code fromElement} and {@code toElement} cannot be compared to one another using this set's comparator (or, if
* the set has no comparator, using natural ordering). Implementations may, but are not required to, throw this
* exception if {@code fromElement} or {@code toElement} cannot be compared to elements currently in the set.
* @throws NullPointerException
* if {@code fromElement} or {@code toElement} is null and this set does not permit null elements
* @throws IllegalArgumentException
* if {@code fromElement} is greater than {@code toElement}; or if this set itself has a restricted range, and
* {@code fromElement} or {@code toElement} lies outside the bounds of the range.
*/
ONavigableSet<E> subSet(E fromElement, boolean fromInclusive, E toElement, boolean toInclusive);
/**
* Returns a view of the portion of this set whose elements are less than (or equal to, if {@code inclusive} is true)
* {@code toElement}. The returned set is backed by this set, so changes in the returned set are reflected in this set, and
* vice-versa. The returned set supports all optional set operations that this set supports.
*
* <p>
* The returned set will throw an {@code IllegalArgumentException} on an attempt to insert an element outside its range.
*
* @param toElement
* high endpoint of the returned set
* @param inclusive
* {@code true} if the high endpoint is to be included in the returned view
* @return a view of the portion of this set whose elements are less than (or equal to, if {@code inclusive} is true)
* {@code toElement}
* @throws ClassCastException
* if {@code toElement} is not compatible with this set's comparator (or, if the set has no comparator, if
* {@code toElement} does not implement {@link Comparable}). Implementations may, but are not required to, throw this
* exception if {@code toElement} cannot be compared to elements currently in the set.
* @throws NullPointerException
* if {@code toElement} is null and this set does not permit null elements
* @throws IllegalArgumentException
* if this set itself has a restricted range, and {@code toElement} lies outside the bounds of the range
*/
ONavigableSet<E> headSet(E toElement, boolean inclusive);
/**
* Returns a view of the portion of this set whose elements are greater than (or equal to, if {@code inclusive} is true)
* {@code fromElement}. The returned set is backed by this set, so changes in the returned set are reflected in this set, and
* vice-versa. The returned set supports all optional set operations that this set supports.
*
* <p>
* The returned set will throw an {@code IllegalArgumentException} on an attempt to insert an element outside its range.
*
* @param fromElement
* low endpoint of the returned set
* @param inclusive
* {@code true} if the low endpoint is to be included in the returned view
* @return a view of the portion of this set whose elements are greater than or equal to {@code fromElement}
* @throws ClassCastException
* if {@code fromElement} is not compatible with this set's comparator (or, if the set has no comparator, if
* {@code fromElement} does not implement {@link Comparable}). Implementations may, but are not required to, throw this
* exception if {@code fromElement} cannot be compared to elements currently in the set.
* @throws NullPointerException
* if {@code fromElement} is null and this set does not permit null elements
* @throws IllegalArgumentException
* if this set itself has a restricted range, and {@code fromElement} lies outside the bounds of the range
*/
ONavigableSet<E> tailSet(E fromElement, boolean inclusive);
/**
* {@inheritDoc}
*
* <p>
* Equivalent to {@code subSet(fromElement, true, toElement, false)}.
*
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* {@inheritDoc}
* @throws IllegalArgumentException
* {@inheritDoc}
*/
SortedSet<E> subSet(E fromElement, E toElement);
/**
* {@inheritDoc}
*
* <p>
* Equivalent to {@code headSet(toElement, false)}.
*
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* {@inheritDoc}
* @throws IllegalArgumentException
* {@inheritDoc}
*/
SortedSet<E> headSet(E toElement);
/**
* {@inheritDoc}
*
* <p>
* Equivalent to {@code tailSet(fromElement, true)}.
*
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* {@inheritDoc}
* @throws IllegalArgumentException
* {@inheritDoc}
*/
SortedSet<E> tailSet(E fromElement);
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_collection_ONavigableSet.java
|
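The navigation contract above mirrors java.util.NavigableSet, so its semantics can be sanity-checked against a TreeSet; a sketch (any ONavigableSet implementation is expected to follow the same rules):
import java.util.NavigableSet;
import java.util.TreeSet;

public class NavigationDemo {
    public static void main(String[] args) {
        NavigableSet<Integer> set = new TreeSet<Integer>();
        set.add(10);
        set.add(20);
        set.add(30);
        System.out.println(set.lower(20));       // 10   (strictly less)
        System.out.println(set.floor(20));       // 20   (less than or equal)
        System.out.println(set.ceiling(25));     // 30   (greater than or equal)
        System.out.println(set.higher(30));      // null (no strictly greater element)
        System.out.println(set.pollFirst());     // 10, which is also removed
        System.out.println(set.descendingSet()); // [30, 20]
    }
}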
99 |
LifecycleListener listener = new LifecycleListener() {
public void stateChanged(LifecycleEvent event) {
final LifecycleState state = list.poll();
if (state != null && state.equals(event.getState())) {
latch.countDown();
}
}
};
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_ClientIssueTest.java
|
2,157 |
static class IteratorBasedIterator extends DocIdSetIterator {
int lastReturn = -1;
private DocIdSetIterator[] iterators = null;
private final long cost;
IteratorBasedIterator(DocIdSet[] sets) throws IOException {
iterators = new DocIdSetIterator[sets.length];
int j = 0;
long cost = Integer.MAX_VALUE;
for (DocIdSet set : sets) {
if (set == null) {
lastReturn = DocIdSetIterator.NO_MORE_DOCS; // non matching
break;
} else {
DocIdSetIterator dcit = set.iterator();
if (dcit == null) {
lastReturn = DocIdSetIterator.NO_MORE_DOCS; // non matching
break;
}
iterators[j++] = dcit;
cost = Math.min(cost, dcit.cost());
}
}
this.cost = cost;
if (lastReturn != DocIdSetIterator.NO_MORE_DOCS) {
lastReturn = (iterators.length > 0 ? -1 : DocIdSetIterator.NO_MORE_DOCS);
}
}
@Override
public final int docID() {
return lastReturn;
}
@Override
public final int nextDoc() throws IOException {
if (lastReturn == DocIdSetIterator.NO_MORE_DOCS) return DocIdSetIterator.NO_MORE_DOCS;
DocIdSetIterator dcit = iterators[0];
int target = dcit.nextDoc();
int size = iterators.length;
int skip = 0;
int i = 1;
while (i < size) {
if (i != skip) {
dcit = iterators[i];
int docid = dcit.advance(target);
if (docid > target) {
target = docid;
if (i != 0) {
skip = i;
i = 0;
continue;
} else
skip = 0;
}
}
i++;
}
return (lastReturn = target);
}
@Override
public final int advance(int target) throws IOException {
if (lastReturn == DocIdSetIterator.NO_MORE_DOCS) return DocIdSetIterator.NO_MORE_DOCS;
DocIdSetIterator dcit = iterators[0];
target = dcit.advance(target);
int size = iterators.length;
int skip = 0;
int i = 1;
while (i < size) {
if (i != skip) {
dcit = iterators[i];
int docid = dcit.advance(target);
if (docid > target) {
target = docid;
if (i != 0) {
skip = i;
i = 0;
continue;
} else {
skip = 0;
}
}
}
i++;
}
return (lastReturn = target);
}
@Override
public long cost() {
return cost;
}
}
| 1no label
|
src_main_java_org_elasticsearch_common_lucene_docset_AndDocIdSet.java
|
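The two loops above implement a leapfrog conjunction: every sub-iterator is advanced to the current candidate doc id, and whenever one lands past it, the candidate is raised and the scan restarts while skipping the iterator that produced it. The same idea reduced to two sorted int arrays (a self-contained sketch, not Lucene API):
import java.util.ArrayList;
import java.util.List;

public class LeapfrogIntersect {
    // Intersects two sorted, duplicate-free arrays by mutually advancing candidates
    static List<Integer> intersect(int[] a, int[] b) {
        List<Integer> out = new ArrayList<Integer>();
        int i = 0, j = 0;
        while (i < a.length && j < b.length) {
            if (a[i] == b[j]) {          // both iterators agree on the candidate
                out.add(a[i]); i++; j++;
            } else if (a[i] < b[j]) {    // a is behind, advance it toward the candidate
                i++;
            } else {                     // b is behind, advance it toward the candidate
                j++;
            }
        }
        return out;
    }

    public static void main(String[] args) {
        System.out.println(intersect(new int[]{1, 3, 5, 9}, new int[]{3, 4, 5, 10})); // [3, 5]
    }
}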
1,116 |
public class PercolatorStressBenchmark {
public static void main(String[] args) throws Exception {
Settings settings = settingsBuilder()
.put("cluster.routing.schedule", 200, TimeUnit.MILLISECONDS)
.put("gateway.type", "none")
.put(SETTING_NUMBER_OF_SHARDS, 4)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.build();
Node[] nodes = new Node[1];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node" + i)).node();
}
Node clientNode = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
Client client = clientNode.client();
client.admin().indices().create(createIndexRequest("test")).actionGet();
ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth("test")
.setWaitForGreenStatus()
.execute().actionGet();
if (healthResponse.isTimedOut()) {
System.err.println("Quiting, because cluster health requested timed out...");
return;
} else if (healthResponse.getStatus() != ClusterHealthStatus.GREEN) {
System.err.println("Quiting, because cluster state isn't green...");
return;
}
int COUNT = 200000;
int QUERIES = 100;
int TERM_QUERIES = QUERIES / 2;
int RANGE_QUERIES = QUERIES - TERM_QUERIES;
client.prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("numeric1", 1).endObject()).execute().actionGet();
// register queries
int i = 0;
for (; i < TERM_QUERIES; i++) {
client.prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
.setSource(jsonBuilder().startObject()
.field("query", termQuery("name", "value"))
.endObject())
.execute().actionGet();
}
int[] numbers = new int[RANGE_QUERIES];
for (; i < QUERIES; i++) {
client.prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
.setSource(jsonBuilder().startObject()
.field("query", rangeQuery("numeric1").from(i).to(i))
.endObject())
.execute().actionGet();
numbers[i - TERM_QUERIES] = i;
}
StopWatch stopWatch = new StopWatch().start();
System.out.println("Percolating [" + COUNT + "] ...");
for (i = 1; i <= COUNT; i++) {
XContentBuilder source;
int expectedMatches;
if (i % 2 == 0) {
source = source(Integer.toString(i), "value");
expectedMatches = TERM_QUERIES;
} else {
int number = numbers[i % RANGE_QUERIES];
source = source(Integer.toString(i), number);
expectedMatches = 1;
}
PercolateResponse percolate = client.preparePercolate()
.setIndices("test").setDocumentType("type1")
.setSource(source)
.execute().actionGet();
if (percolate.getMatches().length != expectedMatches) {
System.err.println("No matching number of queries");
}
if ((i % 10000) == 0) {
System.out.println("Percolated " + i + " took " + stopWatch.stop().lastTaskTime());
stopWatch.start();
}
}
System.out.println("Percolation took " + stopWatch.totalTime() + ", TPS " + (((double) COUNT) / stopWatch.totalTime().secondsFrac()));
clientNode.close();
for (Node node : nodes) {
node.close();
}
}
private static XContentBuilder source(String id, String nameValue) throws IOException {
return jsonBuilder().startObject().startObject("doc")
.field("id", id)
.field("name", nameValue)
.endObject().endObject();
}
private static XContentBuilder source(String id, int number) throws IOException {
return jsonBuilder().startObject().startObject("doc")
.field("id", id)
.field("numeric1", number)
.field("numeric2", number)
.field("numeric3", number)
.field("numeric4", number)
.field("numeric5", number)
.field("numeric6", number)
.field("numeric7", number)
.field("numeric8", number)
.field("numeric9", number)
.field("numeric10", number)
.endObject().endObject();
}
}
| 0true
|
src_test_java_org_elasticsearch_benchmark_percolator_PercolatorStressBenchmark.java
|
496 |
public class CloseIndexRequest extends AcknowledgedRequest<CloseIndexRequest> {
private String[] indices;
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false);
CloseIndexRequest() {
}
/**
* Constructs a new close index request for the specified index.
*/
public CloseIndexRequest(String... indices) {
this.indices = indices;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (indices == null || indices.length == 0) {
validationException = addValidationError("index is missing", validationException);
}
return validationException;
}
/**
* The indices to be closed
* @return the indices to be closed
*/
String[] indices() {
return indices;
}
/**
* Sets the indices to be closed
* @param indices the indices to be closed
* @return the request itself
*/
public CloseIndexRequest indices(String... indices) {
this.indices = indices;
return this;
}
/**
* Specifies what type of requested indices to ignore and how to deal with wildcard expressions.
* For example indices that don't exist.
*
* @return the desired behaviour regarding indices to ignore and wildcard indices expressions
*/
public IndicesOptions indicesOptions() {
return indicesOptions;
}
/**
* Specifies what type of requested indices to ignore and how to deal with wildcard expressions.
* For example indices that don't exist.
*
* @param indicesOptions the desired behaviour regarding indices to ignore and wildcard indices expressions
* @return the request itself
*/
public CloseIndexRequest indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
indices = in.readStringArray();
readTimeout(in);
indicesOptions = IndicesOptions.readIndicesOptions(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArray(indices);
writeTimeout(out);
indicesOptions.writeIndicesOptions(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_close_CloseIndexRequest.java
|
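A typical construction of the request above, reusing the same IndicesOptions.fromOptions factory the class uses for its default (the parameter ordering — ignoreUnavailable, allowNoIndices, expandToOpenIndices, expandToClosedIndices — is assumed from that default; index names are illustrative):
import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
import org.elasticsearch.action.support.IndicesOptions;

public class CloseIndexSketch {
    static CloseIndexRequest build() {
        // close matching indices, tolerating missing ones and expanding wildcards to open indices
        return new CloseIndexRequest("logs-2014-*", "staging")
                .indicesOptions(IndicesOptions.fromOptions(true, true, true, false));
    }
}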
2,683 |
public class GatewayService extends AbstractLifecycleComponent<GatewayService> implements ClusterStateListener {
public static final ClusterBlock STATE_NOT_RECOVERED_BLOCK = new ClusterBlock(1, "state not recovered / initialized", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);
private final Gateway gateway;
private final ThreadPool threadPool;
private final AllocationService allocationService;
private final ClusterService clusterService;
private final DiscoveryService discoveryService;
private final TimeValue recoverAfterTime;
private final int recoverAfterNodes;
private final int expectedNodes;
private final int recoverAfterDataNodes;
private final int expectedDataNodes;
private final int recoverAfterMasterNodes;
private final int expectedMasterNodes;
private final AtomicBoolean recovered = new AtomicBoolean();
private final AtomicBoolean scheduledRecovery = new AtomicBoolean();
@Inject
public GatewayService(Settings settings, Gateway gateway, AllocationService allocationService, ClusterService clusterService, DiscoveryService discoveryService, ThreadPool threadPool) {
super(settings);
this.gateway = gateway;
this.allocationService = allocationService;
this.clusterService = clusterService;
this.discoveryService = discoveryService;
this.threadPool = threadPool;
// allows configuring a delay before indices will get created/recovered
this.recoverAfterTime = componentSettings.getAsTime("recover_after_time", null);
this.recoverAfterNodes = componentSettings.getAsInt("recover_after_nodes", -1);
this.expectedNodes = componentSettings.getAsInt("expected_nodes", -1);
this.recoverAfterDataNodes = componentSettings.getAsInt("recover_after_data_nodes", -1);
this.expectedDataNodes = componentSettings.getAsInt("expected_data_nodes", -1);
// default the recover after master nodes to the minimum master nodes in the discovery
this.recoverAfterMasterNodes = componentSettings.getAsInt("recover_after_master_nodes", settings.getAsInt("discovery.zen.minimum_master_nodes", -1));
this.expectedMasterNodes = componentSettings.getAsInt("expected_master_nodes", -1);
// Add the not-recovered block as an initial state block; we don't allow anything until the state is recovered
this.clusterService.addInitialStateBlock(STATE_NOT_RECOVERED_BLOCK);
}
@Override
protected void doStart() throws ElasticsearchException {
gateway.start();
// if we received the initial state, see if we can recover within the start phase, so we keep the
// node from starting until we have recovered properly
if (discoveryService.initialStateReceived()) {
ClusterState clusterState = clusterService.state();
DiscoveryNodes nodes = clusterState.nodes();
if (clusterState.nodes().localNodeMaster() && clusterState.blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) {
if (clusterState.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK)) {
logger.debug("not recovering from gateway, no master elected yet");
} else if (recoverAfterNodes != -1 && (nodes.masterAndDataNodes().size()) < recoverAfterNodes) {
logger.debug("not recovering from gateway, nodes_size (data+master) [" + nodes.masterAndDataNodes().size() + "] < recover_after_nodes [" + recoverAfterNodes + "]");
} else if (recoverAfterDataNodes != -1 && nodes.dataNodes().size() < recoverAfterDataNodes) {
logger.debug("not recovering from gateway, nodes_size (data) [" + nodes.dataNodes().size() + "] < recover_after_data_nodes [" + recoverAfterDataNodes + "]");
} else if (recoverAfterMasterNodes != -1 && nodes.masterNodes().size() < recoverAfterMasterNodes) {
logger.debug("not recovering from gateway, nodes_size (master) [" + nodes.masterNodes().size() + "] < recover_after_master_nodes [" + recoverAfterMasterNodes + "]");
} else {
boolean ignoreRecoverAfterTime;
if (expectedNodes == -1 && expectedMasterNodes == -1 && expectedDataNodes == -1) {
// no expected is set, don't ignore the timeout
ignoreRecoverAfterTime = false;
} else {
// one of the expected is set, see if all of them meet the need, and ignore the timeout in this case
ignoreRecoverAfterTime = true;
if (expectedNodes != -1 && (nodes.masterAndDataNodes().size() < expectedNodes)) { // does not meet the expected...
ignoreRecoverAfterTime = false;
}
if (expectedMasterNodes != -1 && (nodes.masterNodes().size() < expectedMasterNodes)) { // does not meet the expected...
ignoreRecoverAfterTime = false;
}
if (expectedDataNodes != -1 && (nodes.dataNodes().size() < expectedDataNodes)) { // does not meet the expected...
ignoreRecoverAfterTime = false;
}
}
performStateRecovery(ignoreRecoverAfterTime);
}
}
} else {
logger.debug("can't wait on start for (possibly) reading state from gateway, will do it asynchronously");
}
clusterService.addLast(this);
}
@Override
protected void doStop() throws ElasticsearchException {
clusterService.remove(this);
gateway.stop();
}
@Override
protected void doClose() throws ElasticsearchException {
gateway.close();
}
@Override
public void clusterChanged(final ClusterChangedEvent event) {
if (lifecycle.stoppedOrClosed()) {
return;
}
if (event.state().blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK)) {
// we need to clear those flags, since we might need to recover again in case we disconnect
// from the cluster and then reconnect
recovered.set(false);
scheduledRecovery.set(false);
}
if (event.localNodeMaster() && event.state().blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) {
ClusterState clusterState = event.state();
DiscoveryNodes nodes = clusterState.nodes();
if (event.state().blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK)) {
logger.debug("not recovering from gateway, no master elected yet");
} else if (recoverAfterNodes != -1 && (nodes.masterAndDataNodes().size()) < recoverAfterNodes) {
logger.debug("not recovering from gateway, nodes_size (data+master) [" + nodes.masterAndDataNodes().size() + "] < recover_after_nodes [" + recoverAfterNodes + "]");
} else if (recoverAfterDataNodes != -1 && nodes.dataNodes().size() < recoverAfterDataNodes) {
logger.debug("not recovering from gateway, nodes_size (data) [" + nodes.dataNodes().size() + "] < recover_after_data_nodes [" + recoverAfterDataNodes + "]");
} else if (recoverAfterMasterNodes != -1 && nodes.masterNodes().size() < recoverAfterMasterNodes) {
logger.debug("not recovering from gateway, nodes_size (master) [" + nodes.masterNodes().size() + "] < recover_after_master_nodes [" + recoverAfterMasterNodes + "]");
} else {
boolean ignoreRecoverAfterTime;
if (expectedNodes == -1 && expectedMasterNodes == -1 && expectedDataNodes == -1) {
// no expected is set, don't ignore the timeout
ignoreRecoverAfterTime = false;
} else {
// one of the expected is set, see if all of them meet the need, and ignore the timeout in this case
ignoreRecoverAfterTime = true;
if (expectedNodes != -1 && (nodes.masterAndDataNodes().size() < expectedNodes)) { // does not meet the expected...
ignoreRecoverAfterTime = false;
}
if (expectedMasterNodes != -1 && (nodes.masterNodes().size() < expectedMasterNodes)) { // does not meet the expected...
ignoreRecoverAfterTime = false;
}
if (expectedDataNodes != -1 && (nodes.dataNodes().size() < expectedDataNodes)) { // does not meet the expected...
ignoreRecoverAfterTime = false;
}
}
final boolean fIgnoreRecoverAfterTime = ignoreRecoverAfterTime;
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
performStateRecovery(fIgnoreRecoverAfterTime);
}
});
}
}
}
private void performStateRecovery(boolean ignoreRecoverAfterTime) {
final Gateway.GatewayStateRecoveredListener recoveryListener = new GatewayRecoveryListener(new CountDownLatch(1));
if (!ignoreRecoverAfterTime && recoverAfterTime != null) {
if (scheduledRecovery.compareAndSet(false, true)) {
logger.debug("delaying initial state recovery for [{}]", recoverAfterTime);
threadPool.schedule(recoverAfterTime, ThreadPool.Names.GENERIC, new Runnable() {
@Override
public void run() {
if (recovered.compareAndSet(false, true)) {
logger.trace("performing state recovery...");
gateway.performStateRecovery(recoveryListener);
}
}
});
}
} else {
if (recovered.compareAndSet(false, true)) {
logger.trace("performing state recovery...");
gateway.performStateRecovery(recoveryListener);
}
}
}
class GatewayRecoveryListener implements Gateway.GatewayStateRecoveredListener {
private final CountDownLatch latch;
GatewayRecoveryListener(CountDownLatch latch) {
this.latch = latch;
}
@Override
public void onSuccess(final ClusterState recoveredState) {
logger.trace("successful state recovery, importing cluster state...");
clusterService.submitStateUpdateTask("local-gateway-elected-state", new ProcessedClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
assert currentState.metaData().indices().isEmpty();
// remove the block, since we recovered from gateway
ClusterBlocks.Builder blocks = ClusterBlocks.builder()
.blocks(currentState.blocks())
.blocks(recoveredState.blocks())
.removeGlobalBlock(STATE_NOT_RECOVERED_BLOCK);
MetaData.Builder metaDataBuilder = MetaData.builder(recoveredState.metaData());
// automatically generate a UUID for the metadata if we need to
metaDataBuilder.generateUuidIfNeeded();
if (recoveredState.metaData().settings().getAsBoolean(MetaData.SETTING_READ_ONLY, false) || currentState.metaData().settings().getAsBoolean(MetaData.SETTING_READ_ONLY, false)) {
blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
}
for (IndexMetaData indexMetaData : recoveredState.metaData()) {
metaDataBuilder.put(indexMetaData, false);
blocks.addBlocks(indexMetaData);
}
// update the state to reflect the new metadata and routing
ClusterState updatedState = ClusterState.builder(currentState)
.blocks(blocks)
.metaData(metaDataBuilder)
.build();
// initialize all index routing tables as empty
RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable());
for (ObjectCursor<IndexMetaData> cursor : updatedState.metaData().indices().values()) {
routingTableBuilder.addAsRecovery(cursor.value);
}
// start with 0 based versions for routing table
routingTableBuilder.version(0);
// now, reroute
RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(routingTableBuilder).build());
return ClusterState.builder(updatedState).routingResult(routingResult).build();
}
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
logger.info("recovered [{}] indices into cluster_state", newState.metaData().indices().size());
latch.countDown();
}
});
}
@Override
public void onFailure(String message) {
recovered.set(false);
scheduledRecovery.set(false);
// don't remove the block here, we don't want to allow anything in such a case
logger.info("metadata state not restored, reason: {}", message);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_gateway_GatewayService.java
|
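The recovery gates above are all driven by node settings; since the class reads them through componentSettings, the effective keys are assumed here to carry the gateway. prefix. A sketch of a matching configuration (values are illustrative):
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;

public class GatewaySettingsSketch {
    static Settings recoverySettings() {
        return ImmutableSettings.settingsBuilder()
                .put("gateway.recover_after_nodes", 3)   // gate recovery on 3 data+master nodes
                .put("gateway.expected_nodes", 5)        // once 5 are present, the time gate is skipped
                .put("gateway.recover_after_time", "5m") // otherwise delay recovery by five minutes
                .build();
    }
}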
1,393 |
public abstract class IdePhasedUnit extends PhasedUnit {
protected WeakReference<TypeChecker> typeCheckerRef = null;
public IdePhasedUnit(VirtualFile unitFile, VirtualFile srcDir,
CompilationUnit cu, Package p, ModuleManager moduleManager,
TypeChecker typeChecker, List<CommonToken> tokenStream) {
super(unitFile, srcDir, cu, p, moduleManager, typeChecker.getContext(), tokenStream);
typeCheckerRef = new WeakReference<TypeChecker>(typeChecker);
}
public IdePhasedUnit(PhasedUnit other) {
super(other);
if (other instanceof IdePhasedUnit) {
typeCheckerRef = new WeakReference<TypeChecker>(((IdePhasedUnit) other).getTypeChecker());
}
}
public TypeChecker getTypeChecker() {
return typeCheckerRef.get();
}
protected Unit createUnit() {
Unit oldUnit = getUnit();
Unit newUnit = newUnit();
if (oldUnit != null) {
newUnit.setFilename(oldUnit.getFilename());
newUnit.setFullPath(oldUnit.getFullPath());
newUnit.setRelativePath(oldUnit.getRelativePath());
newUnit.setPackage(oldUnit.getPackage());
newUnit.getDependentsOf().addAll(oldUnit.getDependentsOf());
}
return newUnit;
}
protected abstract Unit newUnit();
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_core_typechecker_IdePhasedUnit.java
|
611 |
public class UpdateSettingsResponse extends AcknowledgedResponse {
UpdateSettingsResponse() {
}
UpdateSettingsResponse(boolean acknowledged) {
super(acknowledged);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_settings_put_UpdateSettingsResponse.java
|
71 |
@SuppressWarnings("unchecked")
public class OSharedContainerImpl implements OSharedContainer {
protected Map<String, Object> sharedResources = new HashMap<String, Object>();
public synchronized boolean existsResource(final String iName) {
return sharedResources.containsKey(iName);
}
public synchronized <T> T removeResource(final String iName) {
T resource = (T) sharedResources.remove(iName);
if (resource instanceof OSharedResource)
((OSharedResource) resource).releaseExclusiveLock();
return resource;
}
public synchronized <T> T getResource(final String iName, final Callable<T> iCallback) {
T value = (T) sharedResources.get(iName);
if (value == null) {
// CREATE IT
try {
value = iCallback.call();
} catch (Exception e) {
throw new OException("Error on creation of shared resource", e);
}
if (value instanceof OSharedResource)
((OSharedResource) value).acquireExclusiveLock();
sharedResources.put(iName, value);
}
return value;
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_concur_resource_OSharedContainerImpl.java
|
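The container above creates lazily: the Callable runs only the first time a name is requested, and every later call returns the cached instance without invoking the callback. A small usage sketch:
import java.util.concurrent.Callable;

public class SharedContainerDemo {
    public static void main(String[] args) {
        OSharedContainerImpl container = new OSharedContainerImpl();
        // Created on the first request for this name...
        StringBuilder cache = container.getResource("queryCache", new Callable<StringBuilder>() {
            public StringBuilder call() {
                return new StringBuilder("fresh"); // stands in for expensive setup
            }
        });
        // ...while later requests return the cached instance and never invoke the callback
        StringBuilder again = container.getResource("queryCache", new Callable<StringBuilder>() {
            public StringBuilder call() {
                throw new IllegalStateException("not reached");
            }
        });
        System.out.println(cache == again); // true
    }
}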
283 |
public abstract class ActionRequest<T extends ActionRequest> extends TransportRequest {
private boolean listenerThreaded = false;
protected ActionRequest() {
super();
}
protected ActionRequest(ActionRequest request) {
super(request);
// this does not set the listenerThreaded API, if needed, its up to the caller to set it
// since most times, we actually want it to not be threaded...
//this.listenerThreaded = request.listenerThreaded();
}
/**
* Should the response listener be executed on a thread or not.
* <p/>
* <p>When not executing on a thread, it will either be executed on the calling thread, or
* on an expensive, IO based, thread.
*/
public final boolean listenerThreaded() {
return this.listenerThreaded;
}
/**
* Sets if the response listener be executed on a thread or not.
*/
@SuppressWarnings("unchecked")
public final T listenerThreaded(boolean listenerThreaded) {
this.listenerThreaded = listenerThreaded;
return (T) this;
}
public abstract ActionRequestValidationException validate();
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_ActionRequest.java
|
108 |
static class StaticMemberListener implements MembershipListener, InitialMembershipListener {
final CountDownLatch latch;
StaticMemberListener(CountDownLatch latch) {
this.latch = latch;
}
public void init(InitialMembershipEvent event) {
latch.countDown();
}
public void memberAdded(MembershipEvent membershipEvent) {
}
public void memberRemoved(MembershipEvent membershipEvent) {
}
public void memberAttributeChanged(MemberAttributeEvent memberAttributeEvent) {
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_ClientIssueTest.java
|
170 |
public class URLRedirectType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, URLRedirectType> TYPES = new LinkedHashMap<String, URLRedirectType>();
public static final URLRedirectType FORWARD = new URLRedirectType("FORWARD", "Forward URI");
public static final URLRedirectType REDIRECT_PERM = new URLRedirectType("REDIRECT_PERM", "Redirect URI Permanently (301)");
public static final URLRedirectType REDIRECT_TEMP = new URLRedirectType("REDIRECT_TEMP", "Redirect URI Temporarily (302)");
public static URLRedirectType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public URLRedirectType() {
//do nothing
}
public URLRedirectType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
URLRedirectType other = (URLRedirectType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1no label
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_url_type_URLRedirectType.java
|
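This is Broadleaf's extensible-enum pattern: every constructed instance registers itself in the static TYPES map, so lookups go through getInstance and extension modules can add types without modifying this class. For example:
public class RedirectTypeDemo {
    public static void main(String[] args) {
        // Look up a predefined type by its code
        URLRedirectType type = URLRedirectType.getInstance("REDIRECT_PERM");
        System.out.println(type.getFriendlyType()); // Redirect URI Permanently (301)

        // A module registers an additional type simply by constructing it (hypothetical extension)
        URLRedirectType gone = new URLRedirectType("GONE", "Resource Gone (410)");
        System.out.println(URLRedirectType.getInstance("GONE") == gone); // true
    }
}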
765 |
public class OSysBucket extends OBonsaiBucketAbstract {
private static final int SYS_MAGIC_OFFSET = WAL_POSITION_OFFSET + OLongSerializer.LONG_SIZE;
private static final int FREE_SPACE_OFFSET = SYS_MAGIC_OFFSET + OByteSerializer.BYTE_SIZE;
private static final int FREE_LIST_HEAD_OFFSET = FREE_SPACE_OFFSET + OBonsaiBucketPointer.SIZE;
private static final int FREE_LIST_LENGTH_OFFSET = FREE_LIST_HEAD_OFFSET + OBonsaiBucketPointer.SIZE;
private static final byte SYS_MAGIC = (byte) 41;
public OSysBucket(ODirectMemoryPointer pagePointer, TrackMode trackMode) {
super(pagePointer, trackMode);
}
public void init() throws IOException {
setByteValue(SYS_MAGIC_OFFSET, SYS_MAGIC);
setBucketPointer(FREE_SPACE_OFFSET, new OBonsaiBucketPointer(0, OSBTreeBonsaiBucket.MAX_BUCKET_SIZE_BYTES));
setBucketPointer(FREE_LIST_HEAD_OFFSET, OBonsaiBucketPointer.NULL);
setLongValue(FREE_LIST_LENGTH_OFFSET, 0L);
}
public boolean isInitialized() {
// note: despite the name, this returns true when the magic byte is absent, i.e. when the bucket has NOT been initialized yet
return getByteValue(SYS_MAGIC_OFFSET) != SYS_MAGIC;
}
public long freeListLength() {
return getLongValue(FREE_LIST_LENGTH_OFFSET);
}
public void setFreeListLength(long length) throws IOException {
setLongValue(FREE_LIST_LENGTH_OFFSET, length);
}
public OBonsaiBucketPointer getFreeSpacePointer() {
return getBucketPointer(FREE_SPACE_OFFSET);
}
public void setFreeSpacePointer(OBonsaiBucketPointer pointer) throws IOException {
setBucketPointer(FREE_SPACE_OFFSET, pointer);
}
public OBonsaiBucketPointer getFreeListHead() {
return getBucketPointer(FREE_LIST_HEAD_OFFSET);
}
public void setFreeListHead(OBonsaiBucketPointer pointer) throws IOException {
setBucketPointer(FREE_LIST_HEAD_OFFSET, pointer);
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_index_sbtreebonsai_local_OSysBucket.java
|
418 |
public class RestoreSnapshotAction extends ClusterAction<RestoreSnapshotRequest, RestoreSnapshotResponse, RestoreSnapshotRequestBuilder> {
public static final RestoreSnapshotAction INSTANCE = new RestoreSnapshotAction();
public static final String NAME = "cluster/snapshot/restore";
private RestoreSnapshotAction() {
super(NAME);
}
@Override
public RestoreSnapshotResponse newResponse() {
return new RestoreSnapshotResponse();
}
@Override
public RestoreSnapshotRequestBuilder newRequestBuilder(ClusterAdminClient client) {
return new RestoreSnapshotRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_snapshots_restore_RestoreSnapshotAction.java
|
3,810 |
public class CommonTermsQueryParser implements QueryParser {
public static final String NAME = "common";
static final float DEFAULT_MAX_TERM_DOC_FREQ = 0.01f;
static final Occur DEFAULT_HIGH_FREQ_OCCUR = Occur.SHOULD;
static final Occur DEFAULT_LOW_FREQ_OCCUR = Occur.SHOULD;
static final boolean DEFAULT_DISABLE_COORDS = true;
@Inject
public CommonTermsQueryParser() {
}
@Override
public String[] names() {
return new String[] { NAME };
}
@Override
public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
XContentParser.Token token = parser.nextToken();
if (token != XContentParser.Token.FIELD_NAME) {
throw new QueryParsingException(parseContext.index(), "[common] query malformed, no field");
}
String fieldName = parser.currentName();
Object value = null;
float boost = 1.0f;
String queryAnalyzer = null;
String lowFreqMinimumShouldMatch = null;
String highFreqMinimumShouldMatch = null;
boolean disableCoords = DEFAULT_DISABLE_COORDS;
Occur highFreqOccur = DEFAULT_HIGH_FREQ_OCCUR;
Occur lowFreqOccur = DEFAULT_LOW_FREQ_OCCUR;
float maxTermFrequency = DEFAULT_MAX_TERM_DOC_FREQ;
String queryName = null;
token = parser.nextToken();
if (token == XContentParser.Token.START_OBJECT) {
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("minimum_should_match".equals(currentFieldName) || "minimumShouldMatch".equals(currentFieldName)) {
String innerFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
innerFieldName = parser.currentName();
} else if (token.isValue()) {
if ("low_freq".equals(innerFieldName) || "lowFreq".equals(innerFieldName)) {
lowFreqMinimumShouldMatch = parser.text();
} else if ("high_freq".equals(innerFieldName) || "highFreq".equals(innerFieldName)) {
highFreqMinimumShouldMatch = parser.text();
} else {
throw new QueryParsingException(parseContext.index(), "[common] query does not support [" + innerFieldName + "] for [" + currentFieldName + "]");
}
}
}
} else {
throw new QueryParsingException(parseContext.index(), "[common] query does not support [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if ("query".equals(currentFieldName)) {
value = parser.objectText();
} else if ("analyzer".equals(currentFieldName)) {
String analyzer = parser.text();
if (parseContext.analysisService().analyzer(analyzer) == null) {
throw new QueryParsingException(parseContext.index(), "[common] analyzer [" + parser.text() + "] not found");
}
queryAnalyzer = analyzer;
} else if ("disable_coord".equals(currentFieldName) || "disableCoord".equals(currentFieldName)) {
disableCoords = parser.booleanValue();
} else if ("boost".equals(currentFieldName)) {
boost = parser.floatValue();
} else if ("high_freq_operator".equals(currentFieldName) || "highFreqOperator".equals(currentFieldName)) {
String op = parser.text();
if ("or".equalsIgnoreCase(op)) {
highFreqOccur = BooleanClause.Occur.SHOULD;
} else if ("and".equalsIgnoreCase(op)) {
highFreqOccur = BooleanClause.Occur.MUST;
} else {
throw new QueryParsingException(parseContext.index(),
"[common] query requires operator to be either 'and' or 'or', not [" + op + "]");
}
} else if ("low_freq_operator".equals(currentFieldName) || "lowFreqOperator".equals(currentFieldName)) {
String op = parser.text();
if ("or".equalsIgnoreCase(op)) {
lowFreqOccur = BooleanClause.Occur.SHOULD;
} else if ("and".equalsIgnoreCase(op)) {
lowFreqOccur = BooleanClause.Occur.MUST;
} else {
throw new QueryParsingException(parseContext.index(),
"[common] query requires operator to be either 'and' or 'or', not [" + op + "]");
}
} else if ("minimum_should_match".equals(currentFieldName) || "minimumShouldMatch".equals(currentFieldName)) {
lowFreqMinimumShouldMatch = parser.text();
} else if ("cutoff_frequency".equals(currentFieldName)) {
maxTermFrequency = parser.floatValue();
} else if ("_name".equals(currentFieldName)) {
queryName = parser.text();
} else {
throw new QueryParsingException(parseContext.index(), "[common] query does not support [" + currentFieldName + "]");
}
}
}
parser.nextToken();
} else {
value = parser.objectText();
// move to the next token
token = parser.nextToken();
if (token != XContentParser.Token.END_OBJECT) {
throw new QueryParsingException(
parseContext.index(),
"[common] query parsed in simplified form, with direct field name, but included more options than just the field name, possibly use its 'options' form, with 'query' element?");
}
}
if (value == null) {
throw new QueryParsingException(parseContext.index(), "No text specified for text query");
}
ExtendedCommonTermsQuery commonsQuery = new ExtendedCommonTermsQuery(highFreqOccur, lowFreqOccur, maxTermFrequency, disableCoords);
commonsQuery.setBoost(boost);
Query query = parseQueryString(commonsQuery, value.toString(), fieldName, parseContext, queryAnalyzer, lowFreqMinimumShouldMatch, highFreqMinimumShouldMatch);
if (queryName != null) {
parseContext.addNamedQuery(queryName, query);
}
return query;
}
private final Query parseQueryString(ExtendedCommonTermsQuery query, String queryString, String fieldName, QueryParseContext parseContext,
String queryAnalyzer, String lowFreqMinimumShouldMatch, String highFreqMinimumShouldMatch) throws IOException {
FieldMapper<?> mapper = null;
String field;
MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
if (smartNameFieldMappers != null && smartNameFieldMappers.hasMapper()) {
mapper = smartNameFieldMappers.mapper();
field = mapper.names().indexName();
} else {
field = fieldName;
}
Analyzer analyzer = null;
if (queryAnalyzer == null) {
if (mapper != null) {
analyzer = mapper.searchAnalyzer();
}
if (analyzer == null && smartNameFieldMappers != null) {
analyzer = smartNameFieldMappers.searchAnalyzer();
}
if (analyzer == null) {
analyzer = parseContext.mapperService().searchAnalyzer();
}
} else {
analyzer = parseContext.mapperService().analysisService().analyzer(queryAnalyzer);
if (analyzer == null) {
throw new ElasticsearchIllegalArgumentException("No analyzer found for [" + queryAnalyzer + "]");
}
}
// Logic similar to QueryParser#getFieldQuery
TokenStream source = analyzer.tokenStream(field, queryString);
int count = 0;
try {
source.reset();
CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
while (source.incrementToken()) {
BytesRef ref = new BytesRef(termAtt.length() * 4); // oversize for UTF-8
UnicodeUtil.UTF16toUTF8(termAtt.buffer(), 0, termAtt.length(), ref);
query.add(new Term(field, ref));
count++;
}
} finally {
source.close();
}
if (count == 0) {
return null;
}
query.setLowFreqMinimumNumberShouldMatch(lowFreqMinimumShouldMatch);
query.setHighFreqMinimumNumberShouldMatch(highFreqMinimumShouldMatch);
return wrapSmartNameQuery(query, smartNameFieldMappers, parseContext);
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_query_CommonTermsQueryParser.java
|
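Read back from the parser above, an accepted request nests a field object under "common" carrying query, cutoff_frequency, the per-frequency operators, and optionally a split minimum_should_match. A sketch of such a body as a Java constant (the field name "body" and all values are illustrative):
public class CommonQueryExample {
    static final String COMMON_QUERY =
        "{ \"common\": { \"body\": {" +
        "    \"query\": \"nelly the elephant as a cartoon\"," +
        "    \"cutoff_frequency\": 0.001," +
        "    \"low_freq_operator\": \"and\"," +
        "    \"minimum_should_match\": { \"low_freq\": 2, \"high_freq\": 3 }" +
        "} } }";
}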
509 |
@RunWith(HazelcastSerialClassRunner.class)
@Category(NightlyTest.class)
public class MapStableReadStressTest extends StressTestSupport {
public static final int CLIENT_THREAD_COUNT = 5;
public static final int MAP_SIZE = 100 * 1000;
private HazelcastInstance client;
private IMap<Integer, Integer> map;
private StressThread[] stressThreads;
@Before
public void setUp() {
super.setUp();
ClientConfig clientConfig = new ClientConfig();
clientConfig.setRedoOperation(true);
client = HazelcastClient.newHazelcastClient(clientConfig);
map = client.getMap("map");
stressThreads = new StressThread[CLIENT_THREAD_COUNT];
for (int k = 0; k < stressThreads.length; k++) {
stressThreads[k] = new StressThread();
stressThreads[k].start();
}
}
@After
public void tearDown() {
super.tearDown();
if (client != null) {
client.shutdown();
}
}
//@Test
public void testChangingCluster() {
test(true);
}
@Test
public void testFixedCluster() {
test(false);
}
public void test(boolean clusterChangeEnabled) {
setClusterChangeEnabled(clusterChangeEnabled);
fillMap();
startAndWaitForTestCompletion();
joinAll(stressThreads);
}
private void fillMap() {
System.out.println("==================================================================");
System.out.println("Inserting data in map");
System.out.println("==================================================================");
for (int k = 0; k < MAP_SIZE; k++) {
map.put(k, k);
if (k % 10000 == 0) {
System.out.println("Inserted data: "+k);
}
}
System.out.println("==================================================================");
System.out.println("Completed with inserting data in map");
System.out.println("==================================================================");
}
public class StressThread extends TestThread {
@Override
public void doRun() throws Exception {
while (!isStopped()) {
int key = random.nextInt(MAP_SIZE);
int value = map.get(key);
assertEquals("The value for the key was not consistent", key, value);
}
}
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_stress_MapStableReadStressTest.java
|
3,453 |
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new Runnable() {
@Override
public void run() {
try {
indexShard.translog().sync();
} catch (Exception e) {
if (indexShard.state() == IndexShardState.STARTED) {
logger.warn("failed to sync translog", e);
}
}
if (indexShard.state() != IndexShardState.CLOSED) {
flushScheduler = threadPool.schedule(syncInterval, ThreadPool.Names.SAME, Sync.this);
}
}
});
| 1no label
|
src_main_java_org_elasticsearch_index_gateway_local_LocalIndexShardGateway.java
|
413 |
trackedList.addChangeListener(new OMultiValueChangeListener<Integer, String>() {
public void onAfterRecordChanged(final OMultiValueChangeEvent<Integer, String> event) {
changed.value = true;
}
});
| 0true
|
core_src_test_java_com_orientechnologies_orient_core_db_record_TrackedListTest.java
|
90 |
public class ClientEntryListenerDisconnectTest {
private static int adds = 0;
private static int evictionsNull = 0;
private ClientEntryListenerDisconnectTest() {
}
public static void main(String[] args) throws InterruptedException {
Config config = new Config();
config.setGroupConfig(new GroupConfig("test", "test"));
config.getNetworkConfig().setPort(6701);
HazelcastInstance hazelcastInstance = Hazelcast.newHazelcastInstance(config);
IMap<Integer, GenericEvent> map = hazelcastInstance.getMap("test");
map.addIndex("userId", false);
Hazelcast.newHazelcastInstance(config);
ClientConfig clientConfig = new ClientConfig();
clientConfig.getNetworkConfig().addAddress("localhost:6701", "localhost:6702");
clientConfig.setGroupConfig(new GroupConfig("test", "test"));
clientConfig.getNetworkConfig().setConnectionAttemptLimit(100);
clientConfig.getNetworkConfig().setSmartRouting(false);
HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);
IMap<Integer, GenericEvent> mapClient = client.getMap("test");
mapClient.addEntryListener(new EntryAdapter<Integer, GenericEvent>() {
public void entryAdded(EntryEvent<Integer, GenericEvent> event) {
adds++;
}
public void entryEvicted(EntryEvent<Integer, GenericEvent> event) {
if (event.getValue() == null) evictionsNull++;
}
}, true);
HazelcastInstance client2 = HazelcastClient.newHazelcastClient(clientConfig);
IMap<Integer, GenericEvent> mapClient2 = client2.getMap("test");
map.put(1, new GenericEvent(1), 5, TimeUnit.SECONDS);
Thread.sleep(20);
mapClient.remove(1);
hazelcastInstance.getLifecycleService().terminate();
Thread.sleep(15000);
mapClient2.put(2, new GenericEvent(2), 1, TimeUnit.SECONDS);
Thread.sleep(20);
mapClient2.remove(2);
mapClient2.put(3, new GenericEvent(3), 1, TimeUnit.SECONDS);
Thread.sleep(15000);
hazelcastInstance = Hazelcast.newHazelcastInstance(config);
map = hazelcastInstance.getMap("test");
map.put(4, new GenericEvent(4), 1, TimeUnit.SECONDS);
map.put(5, new GenericEvent(5), 5, TimeUnit.SECONDS);
map.put(6, new GenericEvent(6), 1, TimeUnit.SECONDS);
map.put(7, new GenericEvent(7), 1, TimeUnit.SECONDS);
Thread.sleep(10000);
if (evictionsNull != 0) {
System.out.println("ERROR: got " + evictionsNull + " evictions with null values");
} else {
System.out.println("OK");
}
mapClient.put(8, new GenericEvent(8), 1, TimeUnit.SECONDS);
Thread.sleep(5000);
if (adds != 8) {
System.out.println("ERROR: got " + adds + " instead of 8");
} else {
System.out.println("OK");
}
System.exit(0);
}
private static class GenericEvent implements Serializable {
private static final long serialVersionUID = -933111044641052844L;
private int userId;
public GenericEvent(int userId) {
this.setUserId(userId);
}
public int getUserId() {
return userId;
}
public void setUserId(int userId) {
this.userId = userId;
}
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_ClientEntryListenerDisconnectTest.java
|
122 |
public interface OProfilerHookValue {
public Object getValue();
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_profiler_OAbstractProfiler.java
|
379 |
public interface Locale extends Serializable {
String getLocaleCode();
void setLocaleCode(String localeCode);
String getFriendlyName();
void setFriendlyName(String friendlyName);
void setDefaultFlag(Boolean defaultFlag);
Boolean getDefaultFlag();
BroadleafCurrency getDefaultCurrency();
void setDefaultCurrency(BroadleafCurrency currency);
Boolean getUseInSearchIndex();
void setUseInSearchIndex(Boolean useInSearchIndex);
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_locale_domain_Locale.java
|
55 |
@SuppressWarnings("serial")
static final class ForEachMappingTask<K,V>
extends BulkTask<K,V,Void> {
final BiAction<? super K, ? super V> action;
ForEachMappingTask
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
BiAction<? super K,? super V> action) {
super(p, b, i, f, t);
this.action = action;
}
public final void compute() {
final BiAction<? super K, ? super V> action;
if ((action = this.action) != null) {
for (int i = baseIndex, f, h; batch > 0 &&
(h = ((f = baseLimit) + i) >>> 1) > i;) {
addToPendingCount(1);
new ForEachMappingTask<K,V>
(this, batch >>>= 1, baseLimit = h, f, tab,
action).fork();
}
for (Node<K,V> p; (p = advance()) != null; )
action.apply(p.key, p.val);
propagateCompletion();
}
}
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
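ForEachMappingTask is the internal fork/join task behind the map's bulk forEach: each instance halves its range, forks a sibling for the upper half, and applies the action to the nodes it retained. A hedged usage sketch, assuming jsr166e's public forEach(long, BiAction) entry point with BiAction nested in ConcurrentHashMapV8 (placement varies across jsr166e snapshots):
import jsr166e.ConcurrentHashMapV8;

public class ParallelForEachDemo {
    public static void main(String[] args) {
        ConcurrentHashMapV8<String, Integer> map = new ConcurrentHashMapV8<String, Integer>();
        map.put("a", 1);
        map.put("b", 2);
        // parallelismThreshold = 1 forces splitting into ForEachMappingTask subtasks
        map.forEach(1L, new ConcurrentHashMapV8.BiAction<String, Integer>() {
            public void apply(String key, Integer value) {
                System.out.println(key + " -> " + value);
            }
        });
    }
}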
26 |
}), new Function<Edge, Vertex>() {
@Override
public Vertex apply(@Nullable Edge edge) {
return edge.getEnd();
}
});
| 0true
|
titan-test_src_main_java_com_thinkaurelius_titan_TestByteBuffer.java
|
71 |
public interface TitanVertex extends TitanElement, Vertex {
/* ---------------------------------------------------------------
* Creation and modification methods
* ---------------------------------------------------------------
*/
/**
* Creates a new edge incident on this vertex.
* <p/>
* Creates and returns a new {@link TitanEdge} of the specified label with this vertex being the outgoing vertex
* and the given vertex being the incoming vertex.
*
* @param label label of the edge to be created
* @param vertex incoming vertex of the edge to be created
* @return new edge
*/
public TitanEdge addEdge(EdgeLabel label, TitanVertex vertex);
/**
* Creates a new edge incident on this vertex.
* <p/>
* Creates and returns a new {@link TitanEdge} of the specified label with this vertex being the outgoing vertex
* and the given vertex being the incoming vertex.
* <br />
* Automatically creates the edge label if it does not exist and automatic creation of types is enabled. Otherwise,
* this method will throw an {@link IllegalArgumentException}.
*
* @param label label of the edge to be created
* @param vertex incoming vertex of the edge to be created
* @return new edge
*/
public TitanEdge addEdge(String label, TitanVertex vertex);
/**
* Creates a new property for this vertex and given key with the specified value.
* <p/>
* Creates and returns a new {@link TitanProperty} for the given key on this vertex with the specified
* object being the value.
*
* @param key key of the property to be created
* @param value value of the property to be created
* @return New property
* @throws IllegalArgumentException if the value does not match the data type of the property key.
*/
public TitanProperty addProperty(PropertyKey key, Object value);
/**
* Creates a new property for this vertex and given key with the specified value.
* <p/>
* Creates and returns a new {@link TitanProperty} for the given key on this vertex with the specified
* object being the value.
* <br />
* Automatically creates the property key if it does not exist and automatic creation of types is enabled. Otherwise,
* this method will throw an {@link IllegalArgumentException}.
*
* @param key key of the property to be created
* @param value value of the property to be created
* @return New property
* @throws IllegalArgumentException if the value does not match the data type of the property key.
*/
public TitanProperty addProperty(String key, Object value);
/* ---------------------------------------------------------------
* Vertex Label
* ---------------------------------------------------------------
*/
/**
* Returns the name of the vertex label for this vertex.
*
* @return the name of this vertex's label
*/
public String getLabel();
/**
* Returns the vertex label of this vertex.
*
* @return the vertex label of this vertex
*/
public VertexLabel getVertexLabel();
/* ---------------------------------------------------------------
* Incident TitanRelation Access methods
* ---------------------------------------------------------------
*/
/**
* Starts a new {@link TitanVertexQuery} for this vertex.
* <p/>
* Initializes and returns a new {@link TitanVertexQuery} based on this vertex.
*
* @return New TitanQuery for this vertex
* @see TitanVertexQuery
*/
public TitanVertexQuery<? extends TitanVertexQuery> query();
/**
* Returns an iterable over all properties incident on this vertex.
* <p/>
* There is no guarantee concerning the order in which the properties are returned. All properties incident
* on this vertex are returned irrespective of their key.
*
* @return {@link Iterable} over all properties incident on this vertex
*/
public Iterable<TitanProperty> getProperties();
/**
* Returns an iterable over all properties of the specified property key incident on this vertex.
* <p/>
* There is no guarantee concerning the order in which the properties are returned. All returned properties are
* of the specified key.
*
* @param key {@link PropertyKey} of the returned properties
* @return {@link Iterable} over all properties of the specified key incident on this vertex
*/
public Iterable<TitanProperty> getProperties(PropertyKey key);
/**
* Returns an iterable over all properties of the specified property key incident on this vertex.
* <p/>
* There is no guarantee concerning the order in which the properties are returned. All returned properties are
* of the specified key.
*
* @param key key of the returned properties
* @return {@link Iterable} over all properties of the specified key incident on this vertex
*/
public Iterable<TitanProperty> getProperties(String key);
/**
* Returns an iterable over all edges of the specified edge label in the given direction incident on this vertex.
* <p/>
* There is no guarantee concerning the order in which the edges are returned. All returned edges have the given
* label and the direction of the edge from the perspective of this vertex matches the specified direction.
*
* @param d Direction of the returned edges with respect to this vertex
* @param labels labels of the returned edges
* @return {@link Iterable} over all edges with the given label and direction incident on this vertex
*/
public Iterable<TitanEdge> getTitanEdges(Direction d, EdgeLabel... labels);
/**
* Returns an iterable over all edges of the specified edge label in the given direction incident on this vertex.
* <p/>
* There is no guarantee concerning the order in which the edges are returned. All returned edges have the given
* label and the direction of the edge from the perspective of this vertex matches the specified direction.
*
* @param d Direction of the returned edges with respect to this vertex
* @param labels labels of the returned edges
* @return {@link Iterable} over all edges with the given label and direction incident on this vertex
*/
public Iterable<Edge> getEdges(Direction d, String... labels);
/**
* Returns an iterable over all edges incident on this vertex.
* <p/>
* There is no guarantee concerning the order in which the edges are returned.
*
* @return {@link Iterable} over all edges incident on this vertex
*/
public Iterable<TitanEdge> getEdges();
/**
* Returns an iterable over all relations incident on this vertex.
* <p/>
* There is no guarantee concerning the order in which the relations are returned. Note that this
* method potentially returns both {@link TitanEdge} and {@link TitanProperty}.
*
* @return {@link Iterable} over all properties and edges incident on this vertex.
*/
public Iterable<TitanRelation> getRelations();
/**
* Returns the number of edges incident on this vertex.
* <p/>
* Returns the total number of edges irrespective of label and direction.
* Note that loop edges, i.e. edges whose in- and out-vertex are identical, are counted twice.
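* <p/>
* A short illustration ({@code v} is a vertex in the current transaction):
* <pre>{@code
* v.addEdge("self", v);
* v.getEdgeCount(); // == 2 if the loop is the only incident edge, counted once per direction
* }</pre>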
*
* @return The number of edges incident on this vertex.
*/
public long getEdgeCount();
/**
* Returns the number of properties incident on this vertex.
* <p/>
* Returns the total number of properties irrespective of key.
*
* @return The number of properties incident on this vertex.
*/
public long getPropertyCount();
/**
* Checks whether this vertex has at least one incident edge.
* In other words, it returns getEdgeCount()>0, but might be implemented more efficiently.
*
* @return true, if this vertex has at least one incident edge, else false
*/
public boolean isConnected();
/**
* Checks whether this entity has been loaded into the current transaction and modified.
*
* @return true if this entity has been loaded and modified in the current transaction, else false.
*/
public boolean isModified();
}
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_TitanVertex.java
|
70 |
public abstract class AllPartitionsClientRequest extends ClientRequest {
@Override
final void process() throws Exception {
ClientEndpoint endpoint = getEndpoint();
OperationFactory operationFactory = new OperationFactoryWrapper(createOperationFactory(), endpoint.getUuid());
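// Fan the operation out to every partition; results come back keyed by partition id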
Map<Integer, Object> map = clientEngine.invokeOnAllPartitions(getServiceName(), operationFactory);
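// Let the concrete request aggregate the per-partition results into a single response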
Object result = reduce(map);
endpoint.sendResponse(result, getCallId());
}
protected abstract OperationFactory createOperationFactory();
protected abstract Object reduce(Map<Integer, Object> map);
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_client_AllPartitionsClientRequest.java
|
660 |
constructors[COLLECTION_COMPARE_AND_REMOVE] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
public IdentifiedDataSerializable createNew(Integer arg) {
return new CollectionCompareAndRemoveOperation();
}
};
| 0true
|
hazelcast_src_main_java_com_hazelcast_collection_CollectionDataSerializerHook.java
|
823 |
@Entity
@Table(name = "BLC_OFFER_ITEM_CRITERIA")
@Inheritance(strategy=InheritanceType.JOINED)
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@AdminPresentationClass(friendlyName = "OfferItemCriteriaImpl_baseOfferItemCriteria")
public class OfferItemCriteriaImpl implements OfferItemCriteria {
public static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator= "OfferItemCriteriaId")
@GenericGenerator(
name="OfferItemCriteriaId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="OfferItemCriteriaImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.core.offer.domain.OfferItemCriteriaImpl")
}
)
@Column(name = "OFFER_ITEM_CRITERIA_ID")
@AdminPresentation(friendlyName = "OfferItemCriteriaImpl_Item_Criteria_Id", group = "OfferItemCriteriaImpl_Description", visibility = VisibilityEnum.HIDDEN_ALL)
protected Long id;
@Column(name = "QUANTITY", nullable=false)
@AdminPresentation(friendlyName = "OfferItemCriteriaImpl_Quantity", group = "OfferItemCriteriaImpl_Description", visibility =VisibilityEnum.HIDDEN_ALL)
protected Integer quantity;
@Lob
@Type(type = "org.hibernate.type.StringClobType")
@Column(name = "ORDER_ITEM_MATCH_RULE", length = Integer.MAX_VALUE - 1)
@AdminPresentation(friendlyName = "OfferItemCriteriaImpl_Order_Item_Match_Rule", group = "OfferItemCriteriaImpl_Description", visibility = VisibilityEnum.HIDDEN_ALL)
protected String orderItemMatchRule;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public Integer getQuantity() {
return quantity;
}
@Override
public void setQuantity(Integer receiveQuantity) {
this.quantity = receiveQuantity;
}
@Override
public String getMatchRule() {
return orderItemMatchRule;
}
@Override
public void setMatchRule(String matchRule) {
this.orderItemMatchRule = matchRule;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((id == null) ? 0 : id.hashCode());
result = prime * result + ((orderItemMatchRule == null) ? 0 : orderItemMatchRule.hashCode());
result = prime * result + ((quantity == null) ? 0 : quantity.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
OfferItemCriteriaImpl other = (OfferItemCriteriaImpl) obj;
if (id != null && other.id != null) {
return id.equals(other.id);
}
if (orderItemMatchRule == null) {
if (other.orderItemMatchRule != null)
return false;
} else if (!orderItemMatchRule.equals(other.orderItemMatchRule))
return false;
if (quantity == null) {
if (other.quantity != null)
return false;
} else if (!quantity.equals(other.quantity))
return false;
return true;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_domain_OfferItemCriteriaImpl.java
|
1,268 |
public class OSingleFileSegment {
protected OStorageLocalAbstract storage;
protected OFile file;
protected OStorageFileConfiguration config;
private boolean wasSoftlyClosedAtPreviousTime = true;
public OSingleFileSegment(final String iPath, final String iType) throws IOException {
file = OFileFactory.instance().create(iType, OSystemVariableResolver.resolveSystemVariables(iPath), "rw");
}
public OSingleFileSegment(final OStorageLocalAbstract iStorage, final OStorageFileConfiguration iConfig) throws IOException {
this(iStorage, iConfig, iConfig.type);
}
public OSingleFileSegment(final OStorageLocalAbstract iStorage, final OStorageFileConfiguration iConfig, final String iType)
throws IOException {
config = iConfig;
storage = iStorage;
file = OFileFactory.instance().create(iType, iStorage.getVariableParser().resolveVariables(iConfig.path), iStorage.getMode());
file.setMaxSize((int) OFileUtils.getSizeAsNumber(iConfig.maxSize));
file.setIncrementSize((int) OFileUtils.getSizeAsNumber(iConfig.incrementSize));
}
public boolean open() throws IOException {
boolean softClosed = file.open();
if (!softClosed) {
// THE FILE WAS NOT CLOSED SOFTLY THE LAST TIME IT WAS USED
OLogManager.instance().warn(this, "segment file '%s' was not closed correctly last time", OFileUtils.getPath(file.getName()));
wasSoftlyClosedAtPreviousTime = false;
}
return softClosed;
}
public void create(final int iStartSize) throws IOException {
file.create(iStartSize);
}
public void close() throws IOException {
if (file != null)
file.close();
}
public void delete() throws IOException {
if (file != null)
file.delete();
}
public void truncate() throws IOException {
// SHRINK TO 0
file.shrink(0);
}
public boolean exists() {
return file.exists();
}
public long getSize() {
return file.getFileSize();
}
public long getFilledUpTo() {
return file.getFilledUpTo();
}
public OStorageFileConfiguration getConfig() {
return config;
}
public OFile getFile() {
return file;
}
public void synch() throws IOException {
file.synch();
}
public void setSoftlyClosed(boolean softlyClosed) throws IOException {
file.setSoftlyClosed(softlyClosed);
}
public boolean wasSoftlyClosedAtPreviousTime() {
return wasSoftlyClosedAtPreviousTime;
}
public void rename(String iOldName, String iNewName) {
final String osFileName = file.getName();
if (osFileName.startsWith(iOldName)) {
final File newFile = new File(storage.getStoragePath() + "/" + iNewName
+ osFileName.substring(osFileName.lastIndexOf(iOldName) + iOldName.length()));
boolean renamed = file.renameTo(newFile);
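// renameTo() can fail while file resources are still held (e.g. by memory mapping); free them and retry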
while (!renamed) {
OMemoryWatchDog.freeMemoryForResourceCleanup(100);
renamed = file.renameTo(newFile);
}
}
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_OSingleFileSegment.java
|
46 |
@Component("blRequestDTOCustomPersistenceHandler")
public class RequestDTOCustomPersistenceHandler extends TimeDTOCustomPersistenceHandler {
private static final Log LOG = LogFactory.getLog(RequestDTOCustomPersistenceHandler.class);
@Override
public Boolean canHandleInspect(PersistencePackage persistencePackage) {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
return RequestDTOImpl.class.getName().equals(ceilingEntityFullyQualifiedClassname);
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_admin_server_handler_RequestDTOCustomPersistenceHandler.java
|
805 |
public abstract class AtomicLongRequest extends PartitionClientRequest implements Portable, SecureRequest {
protected String name;
protected long delta;
protected AtomicLongRequest() {
}
protected AtomicLongRequest(String name, long delta) {
this.name = name;
this.delta = delta;
}
@Override
protected int getPartition() {
ClientEngine clientEngine = getClientEngine();
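// Route the request to the partition that owns this AtomicLong's name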
Data key = clientEngine.getSerializationService().toData(name);
return clientEngine.getPartitionService().getPartitionId(key);
}
@Override
public String getServiceName() {
return AtomicLongService.SERVICE_NAME;
}
@Override
public int getFactoryId() {
return AtomicLongPortableHook.F_ID;
}
@Override
public void write(PortableWriter writer) throws IOException {
writer.writeUTF("n", name);
writer.writeLong("d", delta);
}
@Override
public void read(PortableReader reader) throws IOException {
name = reader.readUTF("n");
delta = reader.readLong("d");
}
@Override
public Permission getRequiredPermission() {
return new AtomicLongPermission(name, ActionConstants.ACTION_MODIFY);
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_concurrent_atomiclong_client_AtomicLongRequest.java
|
957 |
public abstract class ClusterInfoRequest<T extends ClusterInfoRequest> extends MasterNodeReadOperationRequest<T> {
private String[] indices = Strings.EMPTY_ARRAY;
private String[] types = Strings.EMPTY_ARRAY;
private IndicesOptions indicesOptions = IndicesOptions.strict();
@SuppressWarnings("unchecked")
public T indices(String... indices) {
this.indices = indices;
return (T) this;
}
@SuppressWarnings("unchecked")
public T types(String... types) {
this.types = types;
return (T) this;
}
@SuppressWarnings("unchecked")
public T indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return (T) this;
}
public String[] indices() {
return indices;
}
public String[] types() {
return types;
}
public IndicesOptions indicesOptions() {
return indicesOptions;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
indices = in.readStringArray();
types = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
readLocal(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArray(indices);
out.writeStringArray(types);
indicesOptions.writeIndicesOptions(out);
writeLocal(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_support_master_info_ClusterInfoRequest.java
|
1,175 |
public class ItemEvent<E> extends EventObject {
private final E item;
private final ItemEventType eventType;
private final Member member;
public ItemEvent(String name, int eventType, E item, Member member) {
this(name, ItemEventType.getByType(eventType), item, member);
}
public ItemEvent(String name, ItemEventType itemEventType, E item, Member member) {
super(name);
this.item = item;
this.eventType = itemEventType;
this.member = member;
}
/**
* Returns the event type.
*
* @return the event type.
*/
public ItemEventType getEventType() {
return eventType;
}
/**
* Returns the item related to the event.
*
* @return the item.
*/
public E getItem() {
return item;
}
/**
* Returns the member that fired this event.
*
* @return the member that fired this event.
*/
public Member getMember() {
return member;
}
@Override
public String toString() {
return "ItemEvent{" +
"event=" + eventType +
", item=" + getItem() +
", member=" + getMember() +
"} ";
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_core_ItemEvent.java
|
12 |
enum TextCommandType {
GET((byte) 0),
PARTIAL_GET((byte) 1),
GETS((byte) 2),
SET((byte) 3),
APPEND((byte) 4),
PREPEND((byte) 5),
ADD((byte) 6),
REPLACE((byte) 7),
DELETE((byte) 8),
QUIT((byte) 9),
STATS((byte) 10),
GET_END((byte) 11),
ERROR_CLIENT((byte) 12),
ERROR_SERVER((byte) 13),
UNKNOWN((byte) 14),
VERSION((byte) 15),
TOUCH((byte) 16),
INCREMENT((byte) 17),
DECREMENT((byte) 18),
HTTP_GET((byte) 30),
HTTP_POST((byte) 31),
HTTP_PUT((byte) 32),
HTTP_DELETE((byte) 33),
NO_OP((byte) 98),
STOP((byte) 99);
final byte value;
TextCommandType(byte type) {
value = type;
}
public byte getValue() {
return value;
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_TextCommandConstants.java
|
447 |
static final class Fields {
static final XContentBuilderString CPU = new XContentBuilderString("cpu");
static final XContentBuilderString PERCENT = new XContentBuilderString("percent");
static final XContentBuilderString OPEN_FILE_DESCRIPTORS = new XContentBuilderString("open_file_descriptors");
static final XContentBuilderString MIN = new XContentBuilderString("min");
static final XContentBuilderString MAX = new XContentBuilderString("max");
static final XContentBuilderString AVG = new XContentBuilderString("avg");
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_stats_ClusterStatsNodes.java
|
355 |
@Deprecated
public class AnnotationsCopyClassTransformer implements BroadleafClassTransformer {
protected SupportLogger logger;
protected String moduleName;
protected Map<String, String> xformTemplates = new HashMap<String, String>();
protected static List<String> transformedMethods = new ArrayList<String>();
public AnnotationsCopyClassTransformer(String moduleName) {
this.moduleName = moduleName;
logger = SupportLogManager.getLogger(moduleName, this.getClass());
}
@Override
public void compileJPAProperties(Properties props, Object key) throws Exception {
// When simply copying properties over for Java class files, JPA properties do not need modification
}
@Override
public byte[] transform(ClassLoader loader, String className, Class<?> classBeingRedefined,
ProtectionDomain protectionDomain, byte[] classfileBuffer) throws IllegalClassFormatException {
String convertedClassName = className.replace('/', '.');
if (xformTemplates.containsKey(convertedClassName)) {
String xformKey = convertedClassName;
String[] xformVals = xformTemplates.get(xformKey).split(",");
logger.lifecycle(LifeCycleEvent.START, String.format("Transform - Copying annotations into [%s] from [%s]", xformKey,
StringUtils.join(xformVals, ",")));
try {
// Load the destination class and defrost it so it is eligible for modifications
ClassPool classPool = ClassPool.getDefault();
CtClass clazz = classPool.makeClass(new ByteArrayInputStream(classfileBuffer), false);
clazz.defrost();
for (String xformVal : xformVals) {
// Load the source class
String trimmed = xformVal.trim();
classPool.appendClassPath(new LoaderClassPath(Class.forName(trimmed).getClassLoader()));
CtClass template = classPool.get(trimmed);
// Copy over all declared annotations from fields from the template class
// Note that we do not copy annotations from fields marked with the @NonCopied annotation
CtField[] fieldsToCopy = template.getDeclaredFields();
for (CtField field : fieldsToCopy) {
if (field.hasAnnotation(NonCopied.class)) {
logger.debug(String.format("Not copying annotation from field [%s]", field.getName()));
} else {
logger.debug(String.format("Copying annotation from field [%s]", field.getName()));
ConstPool constPool = clazz.getClassFile().getConstPool();
CtField fieldFromMainClass = clazz.getField(field.getName());
for (Object o : field.getFieldInfo().getAttributes()) {
if (o instanceof AnnotationsAttribute) {
AnnotationsAttribute templateAnnotations = (AnnotationsAttribute) o;
//have to make a copy of the annotations from the target
AnnotationsAttribute copied = (AnnotationsAttribute) templateAnnotations.copy(constPool, null);
//add all the copied annotations into the target class's field.
for (Object attribute : fieldFromMainClass.getFieldInfo().getAttributes()) {
if (attribute instanceof AnnotationsAttribute) {
for (Annotation annotation : copied.getAnnotations()) {
((AnnotationsAttribute) attribute).addAnnotation(annotation);
}
}
}
}
}
}
}
}
logger.lifecycle(LifeCycleEvent.END, String.format("Transform - Copying annotations into [%s] from [%s]", xformKey,
StringUtils.join(xformVals, ",")));
return clazz.toBytecode();
} catch (Exception e) {
throw new RuntimeException("Unable to transform class", e);
}
}
return null;
}
/**
* This method will do its best to return an implementation type for a given class name. This allows woven
* template classes to have initialized values.
*
* We provide default implementations for List, Map, and Set, and will attempt to utilize a default constructor for
* other classes.
*
* If the className contains an '[', we will return null.
*/
protected String getImplementationType(String className) {
if (className.equals("java.util.List")) {
return "java.util.ArrayList";
} else if (className.equals("java.util.Map")) {
return "java.util.HashMap";
} else if (className.equals("java.util.Set")) {
return "java.util.HashSet";
} else if (className.contains("[")) {
return null;
}
return className;
}
protected String methodDescription(CtMethod method) {
return method.getDeclaringClass().getName() + "|" + method.getName() + "|" + method.getSignature();
}
public Map<String, String> getXformTemplates() {
return xformTemplates;
}
public void setXformTemplates(Map<String, String> xformTemplates) {
this.xformTemplates = xformTemplates;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_extensibility_jpa_copy_AnnotationsCopyClassTransformer.java
|
404 |
snapshotsService.createSnapshot(snapshotRequest, new SnapshotsService.CreateSnapshotListener() {
@Override
public void onResponse() {
if (request.waitForCompletion()) {
snapshotsService.addListener(new SnapshotsService.SnapshotCompletionListener() {
SnapshotId snapshotId = new SnapshotId(request.repository(), request.snapshot());
@Override
public void onSnapshotCompletion(SnapshotId snapshotId, SnapshotInfo snapshot) {
if (this.snapshotId.equals(snapshotId)) {
listener.onResponse(new CreateSnapshotResponse(snapshot));
snapshotsService.removeListener(this);
}
}
@Override
public void onSnapshotFailure(SnapshotId snapshotId, Throwable t) {
if (this.snapshotId.equals(snapshotId)) {
listener.onFailure(t);
snapshotsService.removeListener(this);
}
}
});
} else {
listener.onResponse(new CreateSnapshotResponse());
}
}
@Override
public void onFailure(Throwable t) {
listener.onFailure(t);
}
});
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_snapshots_create_TransportCreateSnapshotAction.java
|
618 |
public class MulticastService implements Runnable {
private static final int DATAGRAM_BUFFER_SIZE = 64 * 1024;
private final ILogger logger;
private final MulticastSocket multicastSocket;
private final DatagramPacket datagramPacketSend;
private final DatagramPacket datagramPacketReceive;
private final Object sendLock = new Object();
private final CountDownLatch stopLatch = new CountDownLatch(1);
private final List<MulticastListener> listeners = new CopyOnWriteArrayList<MulticastListener>();
private final Node node;
private final BufferObjectDataOutput sendOutput;
private volatile boolean running = true;
public MulticastService(Node node, MulticastSocket multicastSocket) throws Exception {
this.node = node;
logger = node.getLogger(MulticastService.class.getName());
Config config = node.getConfig();
this.multicastSocket = multicastSocket;
sendOutput = node.getSerializationService().createObjectDataOutput(1024);
datagramPacketReceive = new DatagramPacket(new byte[DATAGRAM_BUFFER_SIZE], DATAGRAM_BUFFER_SIZE);
final MulticastConfig multicastConfig = config.getNetworkConfig().getJoin().getMulticastConfig();
datagramPacketSend = new DatagramPacket(new byte[0], 0, InetAddress
.getByName(multicastConfig.getMulticastGroup()), multicastConfig.getMulticastPort());
running = true;
}
public void addMulticastListener(MulticastListener multicastListener) {
listeners.add(multicastListener);
}
public void removeMulticastListener(MulticastListener multicastListener) {
listeners.remove(multicastListener);
}
public void stop() {
try {
if (!running && multicastSocket.isClosed()) {
return;
}
try {
multicastSocket.close();
} catch (Throwable ignored) {
}
running = false;
if (!stopLatch.await(5, TimeUnit.SECONDS)) {
logger.warning("Failed to shutdown MulticastService in 5 seconds!");
}
} catch (Throwable e) {
logger.warning(e);
}
}
private void cleanup() {
running = false;
try {
sendOutput.close();
datagramPacketReceive.setData(new byte[0]);
datagramPacketSend.setData(new byte[0]);
} catch (Throwable ignored) {
}
stopLatch.countDown();
}
@SuppressWarnings("WhileLoopSpinsOnField")
@Override
public void run() {
try {
while (running) {
try {
final JoinMessage joinMessage = receive();
if (joinMessage != null) {
for (MulticastListener multicastListener : listeners) {
try {
multicastListener.onMessage(joinMessage);
} catch (Exception e) {
logger.warning(e);
}
}
}
} catch (OutOfMemoryError e) {
OutOfMemoryErrorDispatcher.onOutOfMemory(e);
} catch (Exception e) {
logger.warning(e);
}
}
} finally {
cleanup();
}
}
private JoinMessage receive() {
try {
try {
multicastSocket.receive(datagramPacketReceive);
} catch (IOException ignore) {
return null;
}
try {
final byte[] data = datagramPacketReceive.getData();
final int offset = datagramPacketReceive.getOffset();
final BufferObjectDataInput input = node.getSerializationService().createObjectDataInput(data);
input.position(offset);
final byte packetVersion = input.readByte();
if (packetVersion != Packet.VERSION) {
logger.warning("Received a JoinRequest with a different packet version! This -> "
+ Packet.VERSION + ", Incoming -> " + packetVersion
+ ", Sender -> " + datagramPacketReceive.getAddress());
return null;
}
try {
return input.readObject();
} finally {
input.close();
}
} catch (Exception e) {
if (e instanceof EOFException || e instanceof HazelcastSerializationException) {
logger.warning("Received data format is invalid." +
" (An old version of Hazelcast may be running here.)", e);
} else {
throw e;
}
}
} catch (Exception e) {
logger.warning(e);
}
return null;
}
public void send(JoinMessage joinMessage) {
if (!running) return;
final BufferObjectDataOutput out = sendOutput;
synchronized (sendLock) {
try {
out.writeByte(Packet.VERSION);
out.writeObject(joinMessage);
datagramPacketSend.setData(out.toByteArray());
multicastSocket.send(datagramPacketSend);
out.clear();
} catch (IOException e) {
logger.warning("You probably have too long Hazelcast configuration!", e);
}
}
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_cluster_MulticastService.java
|
55 |
public class PaxosClusterMemberEvents implements ClusterMemberEvents, Lifecycle
{
private Cluster cluster;
private AtomicBroadcast atomicBroadcast;
private StringLogger logger;
protected AtomicBroadcastSerializer serializer;
protected Iterable<ClusterMemberListener> listeners = Listeners.newListeners();
private ClusterMembersSnapshot clusterMembersSnapshot;
private ClusterListener.Adapter clusterListener;
private Snapshot snapshot;
private AtomicBroadcastListener atomicBroadcastListener;
private ExecutorService executor;
private final Predicate<ClusterMembersSnapshot> snapshotValidator;
private final Heartbeat heartbeat;
private HeartbeatListenerImpl heartbeatListener;
private ObjectInputStreamFactory lenientObjectInputStream;
private ObjectOutputStreamFactory lenientObjectOutputStream;
public PaxosClusterMemberEvents( final Snapshot snapshot, Cluster cluster, Heartbeat heartbeat,
AtomicBroadcast atomicBroadcast, Logging logging,
Predicate<ClusterMembersSnapshot> validator,
Function2<Iterable<MemberIsAvailable>, MemberIsAvailable,
Iterable<MemberIsAvailable>> snapshotFilter,
ObjectInputStreamFactory lenientObjectInputStream,
ObjectOutputStreamFactory lenientObjectOutputStream)
{
this.snapshot = snapshot;
this.cluster = cluster;
this.heartbeat = heartbeat;
this.atomicBroadcast = atomicBroadcast;
this.lenientObjectInputStream = lenientObjectInputStream;
this.lenientObjectOutputStream = lenientObjectOutputStream;
this.logger = logging.getMessagesLog( getClass() );
clusterListener = new ClusterListenerImpl();
atomicBroadcastListener = new AtomicBroadcastListenerImpl();
this.snapshotValidator = validator;
clusterMembersSnapshot = new ClusterMembersSnapshot( snapshotFilter );
}
@Override
public void addClusterMemberListener( ClusterMemberListener listener )
{
listeners = Listeners.addListener( listener, listeners );
}
@Override
public void removeClusterMemberListener( ClusterMemberListener listener )
{
listeners = Listeners.removeListener( listener, listeners );
}
@Override
public void init()
throws Throwable
{
serializer = new AtomicBroadcastSerializer( lenientObjectInputStream, lenientObjectOutputStream );
cluster.addClusterListener( clusterListener );
atomicBroadcast.addAtomicBroadcastListener( atomicBroadcastListener );
snapshot.setSnapshotProvider( new HighAvailabilitySnapshotProvider() );
heartbeat.addHeartbeatListener( heartbeatListener = new HeartbeatListenerImpl() );
executor = Executors.newSingleThreadExecutor();
}
@Override
public void start()
throws Throwable
{
}
@Override
public void stop()
throws Throwable
{
}
@Override
public void shutdown()
throws Throwable
{
snapshot.setSnapshotProvider( null );
if ( executor != null )
{
executor.shutdown();
executor = null;
}
cluster.removeClusterListener( clusterListener );
atomicBroadcast.removeAtomicBroadcastListener( atomicBroadcastListener );
heartbeat.removeHeartbeatListener( heartbeatListener );
}
private class HighAvailabilitySnapshotProvider implements SnapshotProvider
{
@Override
public void getState( ObjectOutputStream output ) throws IOException
{
output.writeObject( clusterMembersSnapshot );
}
@Override
public void setState( ObjectInputStream input ) throws IOException, ClassNotFoundException
{
clusterMembersSnapshot = ClusterMembersSnapshot.class.cast(input.readObject());
if ( !snapshotValidator.accept( clusterMembersSnapshot ) )
{
executor.submit( new Runnable()
{
@Override
public void run()
{
cluster.leave();
}
} );
}
else
{
// Send current availability events to listeners
Listeners.notifyListeners( listeners, executor, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
for ( MemberIsAvailable memberIsAvailable : clusterMembersSnapshot.getCurrentAvailableMembers() )
{
listener.memberIsAvailable( memberIsAvailable.getRole(),
memberIsAvailable.getInstanceId(), memberIsAvailable.getRoleUri() );
}
}
} );
}
}
}
public static class UniqueRoleFilter
implements Function2<Iterable<MemberIsAvailable>, MemberIsAvailable, Iterable<MemberIsAvailable>>
{
private final String role;
private final Set<String> roles = new HashSet<String>();
public UniqueRoleFilter( String role )
{
this.role = role;
}
@Override
public Iterable<MemberIsAvailable> apply( Iterable<MemberIsAvailable> previousSnapshot, final MemberIsAvailable newMessage )
{
return Iterables.append( newMessage, Iterables.filter( new Predicate<MemberIsAvailable>()
{
@Override
public boolean accept( MemberIsAvailable item )
{
return not( in( newMessage.getInstanceId() ) ).accept( item.getInstanceId() );
}
}, previousSnapshot));
}
}
private static class UniqueInstanceFilter implements Predicate<MemberIsAvailable>
{
private final Set<InstanceId> roles = new HashSet<InstanceId>();
@Override
public boolean accept( MemberIsAvailable item )
{
return roles.add( item.getInstanceId() );
}
}
public static class ClusterMembersSnapshot
implements Serializable
{
private final
Function2<Iterable<MemberIsAvailable>, MemberIsAvailable, Iterable<MemberIsAvailable>> nextSnapshotFunction;
private Iterable<MemberIsAvailable> availableMembers = new ArrayList<MemberIsAvailable>();
public ClusterMembersSnapshot( Function2<Iterable<MemberIsAvailable>, MemberIsAvailable, Iterable<MemberIsAvailable>> nextSnapshotFunction )
{
this.nextSnapshotFunction = nextSnapshotFunction;
}
public void availableMember( MemberIsAvailable memberIsAvailable )
{
availableMembers = toList( nextSnapshotFunction.apply( availableMembers, memberIsAvailable ) );
}
public void unavailableMember( final InstanceId member )
{
availableMembers = toList( filter( new Predicate<MemberIsAvailable>()
{
@Override
public boolean accept( MemberIsAvailable item )
{
return !item.getInstanceId().equals( member );
}
}, availableMembers ) );
}
public void unavailableMember( final URI member, final String role )
{
availableMembers = toList( filter(new Predicate<MemberIsAvailable>()
{
@Override
public boolean accept( MemberIsAvailable item )
{
return !(item.getClusterUri().equals( member ) && item.getRole().equals( role ));
}
}, availableMembers));
}
public Iterable<MemberIsAvailable> getCurrentAvailableMembers()
{
return availableMembers;
}
public Iterable<MemberIsAvailable> getCurrentAvailable( final InstanceId memberId )
{
return toList( Iterables.filter( new Predicate<MemberIsAvailable>()
{
@Override
public boolean accept( MemberIsAvailable item )
{
return item.getInstanceId().equals( memberId );
}
}, availableMembers) );
}
}
private class ClusterListenerImpl extends ClusterListener.Adapter
{
@Override
public void enteredCluster( ClusterConfiguration clusterConfiguration )
{
// Catch up with elections
for ( Map.Entry<String, InstanceId> memberRoles : clusterConfiguration.getRoles().entrySet() )
{
elected( memberRoles.getKey(), memberRoles.getValue(),
clusterConfiguration.getUriForId( memberRoles.getValue() ) );
}
}
@Override
public void elected( String role, final InstanceId instanceId, final URI electedMember )
{
if ( role.equals( ClusterConfiguration.COORDINATOR ) )
{
// Use the cluster coordinator as master for HA
Listeners.notifyListeners( listeners, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
listener.coordinatorIsElected( instanceId );
}
} );
}
}
@Override
public void leftCluster( final InstanceId member )
{
// Notify unavailability of members
Listeners.notifyListeners( listeners, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
for ( MemberIsAvailable memberIsAvailable : clusterMembersSnapshot.getCurrentAvailable( member ) )
{
listener.memberIsUnavailable( memberIsAvailable.getRole(), member );
}
}
} );
clusterMembersSnapshot.unavailableMember( member );
}
}
private class AtomicBroadcastListenerImpl implements AtomicBroadcastListener
{
@Override
public void receive( Payload payload )
{
try
{
final Object value = serializer.receive( payload );
if ( value instanceof MemberIsAvailable )
{
final MemberIsAvailable memberIsAvailable = (MemberIsAvailable) value;
// Update snapshot
clusterMembersSnapshot.availableMember( memberIsAvailable );
logger.info("Snapshot:"+clusterMembersSnapshot.getCurrentAvailableMembers());
Listeners.notifyListeners( listeners, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
listener.memberIsAvailable( memberIsAvailable.getRole(),
memberIsAvailable.getInstanceId(), memberIsAvailable.getRoleUri() );
}
} );
}
else if ( value instanceof MemberIsUnavailable )
{
final MemberIsUnavailable memberIsUnavailable = (MemberIsUnavailable) value;
// Update snapshot
clusterMembersSnapshot.unavailableMember( memberIsUnavailable.getClusterUri(),
memberIsUnavailable.getRole() );
Listeners.notifyListeners( listeners, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
listener.memberIsUnavailable( memberIsUnavailable.getRole(),
memberIsUnavailable.getInstanceId() );
}
} );
}
}
catch ( Throwable t )
{
logger.error( "Could not handle cluster member available message", t );
}
}
}
private class HeartbeatListenerImpl implements HeartbeatListener
{
@Override
public void failed( final InstanceId server )
{
Listeners.notifyListeners( listeners, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
listener.memberIsFailed( server );
}
} );
}
@Override
public void alive( final InstanceId server )
{
Listeners.notifyListeners( listeners, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
listener.memberIsAlive( server );
}
} );
}
}
}
| 1no label
|
enterprise_cluster_src_main_java_org_neo4j_cluster_member_paxos_PaxosClusterMemberEvents.java
|
779 |
public class CollectionTxnAddOperation extends CollectionBackupAwareOperation {
private long itemId;
private Data value;
public CollectionTxnAddOperation() {
}
public CollectionTxnAddOperation(String name, long itemId, Data value) {
super(name);
this.itemId = itemId;
this.value = value;
}
@Override
public boolean shouldBackup() {
return false;
}
@Override
public Operation getBackupOperation() {
return new CollectionTxnAddBackupOperation(name, itemId, value);
}
@Override
public int getId() {
return CollectionDataSerializerHook.COLLECTION_TXN_ADD;
}
@Override
public void beforeRun() throws Exception {
}
@Override
public void run() throws Exception {
getOrCreateContainer().commitAdd(itemId, value);
response = true;
}
@Override
public void afterRun() throws Exception {
publishEvent(ItemEventType.ADDED, value);
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeLong(itemId);
value.writeData(out);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
itemId = in.readLong();
value = new Data();
value.readData(in);
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_collection_txn_CollectionTxnAddOperation.java
|
1,045 |
public class OCommandExecutorSQLInsert extends OCommandExecutorSQLSetAware implements OCommandDistributedReplicateRequest {
public static final String KEYWORD_INSERT = "INSERT";
private static final String KEYWORD_VALUES = "VALUES";
private String className = null;
private String clusterName = null;
private String indexName = null;
private List<Map<String, Object>> newRecords;
@SuppressWarnings("unchecked")
public OCommandExecutorSQLInsert parse(final OCommandRequest iRequest) {
final ODatabaseRecord database = getDatabase();
init((OCommandRequestText) iRequest);
className = null;
newRecords = null;
content = null;
parserRequiredKeyword("INSERT");
parserRequiredKeyword("INTO");
String subjectName = parserRequiredWord(true, "Invalid subject name. Expected cluster, class or index");
if (subjectName.startsWith(OCommandExecutorSQLAbstract.CLUSTER_PREFIX))
// CLUSTER
clusterName = subjectName.substring(OCommandExecutorSQLAbstract.CLUSTER_PREFIX.length());
else if (subjectName.startsWith(OCommandExecutorSQLAbstract.INDEX_PREFIX))
// INDEX
indexName = subjectName.substring(OCommandExecutorSQLAbstract.INDEX_PREFIX.length());
else {
// CLASS
if (subjectName.startsWith(OCommandExecutorSQLAbstract.CLASS_PREFIX))
subjectName = subjectName.substring(OCommandExecutorSQLAbstract.CLASS_PREFIX.length());
final OClass cls = database.getMetadata().getSchema().getClass(subjectName);
if (cls == null)
throwParsingException("Class " + subjectName + " not found in database");
className = cls.getName();
}
parserSkipWhiteSpaces();
if (parserIsEnded())
throwSyntaxErrorException("Set of fields is missed. Example: (name, surname) or SET name = 'Bill'");
final String temp = parseOptionalWord(true);
if (temp.equals("CLUSTER")) {
clusterName = parserRequiredWord(false);
parserSkipWhiteSpaces();
if (parserIsEnded())
throwSyntaxErrorException("Set of fields is missed. Example: (name, surname) or SET name = 'Bill'");
} else
parserGoBack();
newRecords = new ArrayList<Map<String, Object>>();
if (parserGetCurrentChar() == '(') {
parseValues();
} else {
parserNextWord(true, " ,\r\n");
if (parserGetLastWord().equals(KEYWORD_CONTENT)) {
newRecords = null;
parseContent();
} else if (parserGetLastWord().equals(KEYWORD_SET)) {
final LinkedHashMap<String, Object> fields = new LinkedHashMap<String, Object>();
newRecords.add(fields);
parseSetFields(fields);
}
}
return this;
}
protected void parseValues() {
final int beginFields = parserGetCurrentPosition();
final int endFields = parserText.indexOf(')', beginFields + 1);
if (endFields == -1)
throwSyntaxErrorException("Missed closed brace");
final ArrayList<String> fieldNames = new ArrayList<String>();
parserSetCurrentPosition(OStringSerializerHelper.getParameters(parserText, beginFields, endFields, fieldNames));
if (fieldNames.size() == 0)
throwSyntaxErrorException("Set of fields is empty. Example: (name, surname)");
// REMOVE QUOTATION MARKS IF ANY
for (int i = 0; i < fieldNames.size(); ++i)
fieldNames.set(i, OStringSerializerHelper.removeQuotationMarks(fieldNames.get(i)));
parserRequiredKeyword(KEYWORD_VALUES);
parserSkipWhiteSpaces();
if (parserIsEnded() || parserText.charAt(parserGetCurrentPosition()) != '(') {
throwParsingException("Set of values is missed. Example: ('Bill', 'Stuart', 300)");
}
int blockStart = parserGetCurrentPosition();
int blockEnd = parserGetCurrentPosition();
final List<String> records = OStringSerializerHelper.smartSplit(parserText, new char[] { ',' }, blockStart, -1, true, true,
false);
for (String record : records) {
final List<String> values = new ArrayList<String>();
blockEnd += OStringSerializerHelper.getParameters(record, 0, -1, values);
if (blockEnd == -1)
throw new OCommandSQLParsingException("Missed closed brace. Use " + getSyntax(), parserText, blockStart);
if (values.isEmpty())
throw new OCommandSQLParsingException("Set of values is empty. Example: ('Bill', 'Stuart', 300). Use " + getSyntax(),
parserText, blockStart);
if (values.size() != fieldNames.size())
throw new OCommandSQLParsingException("Fields not match with values", parserText, blockStart);
// TRANSFORM FIELD VALUES
final Map<String, Object> fields = new LinkedHashMap<String, Object>();
for (int i = 0; i < values.size(); ++i)
fields.put(fieldNames.get(i), OSQLHelper.parseValue(this, OStringSerializerHelper.decode(values.get(i).trim()), context));
newRecords.add(fields);
blockStart = blockEnd;
}
}
/**
* Execute the INSERT and return the ODocument object created.
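* <p/>
* Example statements this executor handles (class and field names are illustrative):
* <pre>
* INSERT INTO Account (name, amount) VALUES ('Bill', 300)
* INSERT INTO Account SET name = 'Bill', amount = 300
* </pre>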
*/
public Object execute(final Map<Object, Object> iArgs) {
if (newRecords == null && content == null)
throw new OCommandExecutionException("Cannot execute the command because it has not been parsed yet");
final OCommandParameters commandParameters = new OCommandParameters(iArgs);
if (indexName != null) {
if (newRecords == null)
throw new OCommandExecutionException("No key/value found");
final OIndex<?> index = getDatabase().getMetadata().getIndexManager().getIndex(indexName);
if (index == null)
throw new OCommandExecutionException("Target index '" + indexName + "' not found");
// BIND VALUES
Map<String, Object> result = null;
for (Map<String, Object> candidate : newRecords) {
index.put(getIndexKeyValue(commandParameters, candidate), getIndexValue(commandParameters, candidate));
result = candidate;
}
// RETURN LAST ENTRY
return new ODocument(result);
} else {
// CREATE NEW DOCUMENTS
final List<ODocument> docs = new ArrayList<ODocument>();
if (newRecords != null) {
for (Map<String, Object> candidate : newRecords) {
final ODocument doc = className != null ? new ODocument(className) : new ODocument();
OSQLHelper.bindParameters(doc, candidate, commandParameters, context);
if (clusterName != null) {
doc.save(clusterName);
} else {
doc.save();
}
docs.add(doc);
}
if (docs.size() == 1)
return docs.get(0);
else
return docs;
} else if (content != null) {
final ODocument doc = className != null ? new ODocument(className) : new ODocument();
doc.merge(content, true, false);
doc.save();
return doc;
}
}
return null;
}
private Object getIndexKeyValue(OCommandParameters commandParameters, Map<String, Object> candidate) {
final Object parsedKey = candidate.get(KEYWORD_KEY);
if (parsedKey instanceof OSQLFilterItemField) {
final OSQLFilterItemField f = (OSQLFilterItemField) parsedKey;
if (f.getRoot().equals("?"))
// POSITIONAL PARAMETER
return commandParameters.getNext();
else if (f.getRoot().startsWith(":"))
// NAMED PARAMETER
return commandParameters.getByName(f.getRoot().substring(1));
}
return parsedKey;
}
private OIdentifiable getIndexValue(OCommandParameters commandParameters, Map<String, Object> candidate) {
final Object parsedRid = candidate.get(KEYWORD_RID);
if (parsedRid instanceof OSQLFilterItemField) {
final OSQLFilterItemField f = (OSQLFilterItemField) parsedRid;
if (f.getRoot().equals("?"))
// POSITIONAL PARAMETER
return (OIdentifiable) commandParameters.getNext();
else if (f.getRoot().startsWith(":"))
// NAMED PARAMETER
return (OIdentifiable) commandParameters.getByName(f.getRoot().substring(1));
}
return (OIdentifiable) parsedRid;
}
public boolean isReplicated() {
return indexName != null;
}
@Override
public String getSyntax() {
return "INSERT INTO [class:]<class>|cluster:<cluster>|index:<index> [(<field>[,]*) VALUES (<expression>[,]*)[,]*]|[SET <field> = <expression>|<sub-command>[,]*]|CONTENT {<JSON>}";
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_OCommandExecutorSQLInsert.java
|
1,141 |
static class StatsResult {
final String name;
final long took;
StatsResult(String name, long took) {
this.name = name;
this.took = took;
}
}
| 0true
|
src_test_java_org_elasticsearch_benchmark_search_aggregations_TermsAggregationSearchBenchmark.java
|
1,395 |
public class ProjectPhasedUnit extends IdePhasedUnit {
private IFolder sourceFolderResource;
private WeakHashMap<EditedPhasedUnit, String> workingCopies = new WeakHashMap<EditedPhasedUnit, String>();
public ProjectPhasedUnit(ResourceVirtualFile unitFile, ResourceVirtualFile srcDir,
CompilationUnit cu, Package p, ModuleManager moduleManager,
TypeChecker typeChecker, List<CommonToken> tokenStream) {
super(unitFile, srcDir, cu, p, moduleManager, typeChecker, tokenStream);
sourceFolderResource = (IFolder) srcDir.getResource();
srcDir.getResource().getProject();
}
public ProjectPhasedUnit(PhasedUnit other) {
super(other);
}
public IFile getSourceFileResource() {
return (IFile) ((ResourceVirtualFile) getUnitFile()).getResource();
}
public IFolder getSourceFolderResource() {
return sourceFolderResource;
}
public IProject getProjectResource() {
return sourceFolderResource.getProject();
}
@Override
protected Unit newUnit() {
return new ProjectSourceFile(this);
}
public void addWorkingCopy(EditedPhasedUnit workingCopy) {
synchronized (workingCopies) {
String fullPath = workingCopy.getUnit() != null ? workingCopy.getUnit().getFullPath() : null;
Iterator<String> itr = workingCopies.values().iterator();
while (itr.hasNext()) {
String workingCopyPath = itr.next();
if (workingCopyPath.equals(fullPath)) {
itr.remove();
}
}
workingCopies.put(workingCopy, fullPath);
}
}
public Iterator<EditedPhasedUnit> getWorkingCopies() {
return workingCopies.keySet().iterator();
}
public void install() {
TypeChecker typechecker = getTypeChecker();
if (typechecker == null) {
return;
}
PhasedUnits phasedUnits = typechecker.getPhasedUnits();
ProjectPhasedUnit oldPhasedUnit = (ProjectPhasedUnit) phasedUnits.getPhasedUnitFromRelativePath(getPathRelativeToSrcDir());
if (oldPhasedUnit == this) {
return; // Nothing to do: the PhasedUnit is already installed in the typechecker
}
if (oldPhasedUnit != null) {
getUnit().getDependentsOf().addAll(oldPhasedUnit.getUnit().getDependentsOf());
Iterator<EditedPhasedUnit> workingCopies = oldPhasedUnit.getWorkingCopies();
while (workingCopies.hasNext()) {
addWorkingCopy(workingCopies.next());
}
oldPhasedUnit.remove();
// For ICrossProjectReferences, the goal is to remove whatever was there before (binaries or source).
// Then, for the new elements: in the binary case they will normally be found once the classpath
// has been properly updated, and for source elements we walk through all the projects.
//
}
phasedUnits.addPhasedUnit(getUnitFile(), this);
JDTModule module = (JDTModule) getPackage().getModule();
for (JDTModule moduleInReferencingProject : module.getModuleInReferencingProjects()) {
moduleInReferencingProject.addedOriginalUnit(getPathRelativeToSrcDir());
}
// For every dependent project, call addPhasedUnit() on the corresponding module, which must be an external source module.
// Warning: remember to add a re-typecheck step for dependent modules to the incremental build. In any case, units that are already done will not be redone.
}
public void remove() {
TypeChecker typechecker = getTypeChecker();
if (typechecker == null) {
return;
}
PhasedUnits phasedUnits = typechecker.getPhasedUnits();
phasedUnits.removePhasedUnitForRelativePath(getPathRelativeToSrcDir()); // remove also the ProjectSourceFile (unit) from the Package
JDTModule module = (JDTModule) getPackage().getModule();
for (JDTModule moduleInReferencingProject : module.getModuleInReferencingProjects()) {
moduleInReferencingProject.removedOriginalUnit(getPathRelativeToSrcDir());
}
}
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_core_typechecker_ProjectPhasedUnit.java
|
167 |
return executeRead(new Callable<Iterable<RawQuery.Result<String>>>() {
@Override
public Iterable<RawQuery.Result<String>> call() throws Exception {
return indexTx.query(query);
}
@Override
public String toString() {
return "RawQuery";
}
});
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_diskstorage_BackendTransaction.java
|
4,133 |
public class IndexDynamicSettingsModule extends AbstractModule {
private final DynamicSettings indexDynamicSettings;
public IndexDynamicSettingsModule() {
indexDynamicSettings = new DynamicSettings();
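// Register every index-level setting that may be updated at runtime, optionally with a validator for its value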
indexDynamicSettings.addDynamicSetting(AbstractIndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
indexDynamicSettings.addDynamicSetting(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE);
indexDynamicSettings.addDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_REQUIRE_GROUP + "*");
indexDynamicSettings.addDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "*");
indexDynamicSettings.addDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_EXCLUDE_GROUP + "*");
indexDynamicSettings.addDynamicSetting(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE);
indexDynamicSettings.addDynamicSetting(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_ALLOCATION);
indexDynamicSettings.addDynamicSetting(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION);
indexDynamicSettings.addDynamicSetting(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_REPLICA_ALLOCATION);
indexDynamicSettings.addDynamicSetting(FsTranslog.INDEX_TRANSLOG_FS_TYPE);
indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, Validator.NON_NEGATIVE_INTEGER);
indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS);
indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_READ_ONLY);
indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_BLOCKS_READ);
indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_BLOCKS_WRITE);
indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_BLOCKS_METADATA);
indexDynamicSettings.addDynamicSetting(IndexShardGatewayService.INDEX_GATEWAY_SNAPSHOT_INTERVAL, Validator.TIME);
indexDynamicSettings.addDynamicSetting(IndicesTTLService.INDEX_TTL_DISABLE_PURGE);
indexDynamicSettings.addDynamicSetting(InternalIndexShard.INDEX_REFRESH_INTERVAL, Validator.TIME);
indexDynamicSettings.addDynamicSetting(LocalGatewayAllocator.INDEX_RECOVERY_INITIAL_SHARDS);
indexDynamicSettings.addDynamicSetting(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MIN_MERGE_SIZE, Validator.BYTES_SIZE);
indexDynamicSettings.addDynamicSetting(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_SIZE, Validator.BYTES_SIZE);
indexDynamicSettings.addDynamicSetting(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_DOCS, Validator.POSITIVE_INTEGER);
indexDynamicSettings.addDynamicSetting(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MERGE_FACTOR, Validator.INTEGER_GTE_2);
indexDynamicSettings.addDynamicSetting(LogByteSizeMergePolicyProvider.INDEX_COMPOUND_FORMAT);
indexDynamicSettings.addDynamicSetting(LogDocMergePolicyProvider.INDEX_MERGE_POLICY_MIN_MERGE_DOCS, Validator.POSITIVE_INTEGER);
indexDynamicSettings.addDynamicSetting(LogDocMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_DOCS, Validator.POSITIVE_INTEGER);
indexDynamicSettings.addDynamicSetting(LogDocMergePolicyProvider.INDEX_MERGE_POLICY_MERGE_FACTOR, Validator.INTEGER_GTE_2);
indexDynamicSettings.addDynamicSetting(LogDocMergePolicyProvider.INDEX_COMPOUND_FORMAT);
indexDynamicSettings.addDynamicSetting(InternalEngine.INDEX_INDEX_CONCURRENCY, Validator.NON_NEGATIVE_INTEGER);
indexDynamicSettings.addDynamicSetting(InternalEngine.INDEX_COMPOUND_ON_FLUSH, Validator.BOOLEAN);
indexDynamicSettings.addDynamicSetting(CodecService.INDEX_CODEC_BLOOM_LOAD, Validator.BOOLEAN);
indexDynamicSettings.addDynamicSetting(InternalEngine.INDEX_GC_DELETES, Validator.TIME);
indexDynamicSettings.addDynamicSetting(InternalEngine.INDEX_CODEC);
indexDynamicSettings.addDynamicSetting(InternalEngine.INDEX_FAIL_ON_MERGE_FAILURE);
indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_REFORMAT);
indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_LEVEL);
indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_REFORMAT);
indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_LEVEL);
indexDynamicSettings.addDynamicSetting(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE, Validator.INTEGER);
indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED, Validator.DOUBLE);
indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT, Validator.BYTES_SIZE);
indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, Validator.INTEGER_GTE_2);
indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT, Validator.INTEGER_GTE_2);
indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT, Validator.BYTES_SIZE);
indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, Validator.DOUBLE_GTE_2);
indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT, Validator.NON_NEGATIVE_DOUBLE);
indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT);
indexDynamicSettings.addDynamicSetting(TranslogService.INDEX_TRANSLOG_FLUSH_INTERVAL, Validator.TIME);
indexDynamicSettings.addDynamicSetting(TranslogService.INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, Validator.INTEGER);
indexDynamicSettings.addDynamicSetting(TranslogService.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, Validator.BYTES_SIZE);
indexDynamicSettings.addDynamicSetting(TranslogService.INDEX_TRANSLOG_FLUSH_THRESHOLD_PERIOD, Validator.TIME);
indexDynamicSettings.addDynamicSetting(TranslogService.INDEX_TRANSLOG_DISABLE_FLUSH);
indexDynamicSettings.addDynamicSetting(InternalIndicesWarmer.INDEX_WARMER_ENABLED);
}
public void addDynamicSettings(String... settings) {
indexDynamicSettings.addDynamicSettings(settings);
}
public void addDynamicSetting(String setting, Validator validator) {
indexDynamicSettings.addDynamicSetting(setting, validator);
}
@Override
protected void configure() {
bind(DynamicSettings.class).annotatedWith(IndexDynamicSettings.class).toInstance(indexDynamicSettings);
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_settings_IndexDynamicSettingsModule.java
|
1,213 |
public class PaymentLogEventType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, PaymentLogEventType> TYPES = new LinkedHashMap<String, PaymentLogEventType>();
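// Extensible-enum pattern: each instance registers itself in TYPES via setType(), so subclasses can add new types at runtime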
public static final PaymentLogEventType START = new PaymentLogEventType("START", "Start");
public static final PaymentLogEventType FINISHED = new PaymentLogEventType("FINISHED", "Finished");
public static PaymentLogEventType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public PaymentLogEventType() {
//do nothing
}
public PaymentLogEventType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
private void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
PaymentLogEventType other = (PaymentLogEventType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_payment_service_type_PaymentLogEventType.java
|
164 |
{
int doneRecordCount = 0;
@Override
public boolean accept( LogEntry item )
{
//System.out.println(item);
if( item instanceof LogEntry.Done)
{
doneRecordCount++;
// Accept everything except the second done record we find
if( doneRecordCount == 2)
{
brokenTxIdentifier.set( item.getIdentifier() );
return false;
}
}
// Not a done record, not our concern
return true;
}
};
| 0true
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_xaframework_TestPartialTransactionCopier.java
|
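The filter above simulates a partially copied transaction log by dropping exactly the second Done record it sees and remembering which transaction it belonged to. The same idea generalizes to a small reusable predicate; a sketch under the assumption that Java 8's java.util.function is acceptable (the original test predates it and uses Neo4j's own filter interface):

import java.util.function.Predicate;

// Rejects the nth item (1-based) that matches the given condition and accepts
// everything else — a generic form of the "drop the second Done record" filter.
public final class SkipNthMatch<T> implements Predicate<T> {
    private final Predicate<T> condition;
    private final int n;
    private int matches = 0;

    public SkipNthMatch(Predicate<T> condition, int n) {
        this.condition = condition;
        this.n = n;
    }

    @Override
    public boolean test(T item) {
        if (condition.test(item) && ++matches == n) {
            return false; // drop exactly the nth match
        }
        return true;
    }
}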
496 |
@Repository("blSiteDao")
public class SiteDaoImpl implements SiteDao {
@PersistenceContext(unitName = "blPU")
protected EntityManager em;
@Resource(name = "blEntityConfiguration")
protected EntityConfiguration entityConfiguration;
@Override
public Site retrieve(Long id) {
return em.find(SiteImpl.class, id);
}
@Override
public List<Site> readAllActiveSites() {
CriteriaBuilder builder = em.getCriteriaBuilder();
CriteriaQuery<Site> criteria = builder.createQuery(Site.class);
Root<SiteImpl> site = criteria.from(SiteImpl.class);
criteria.select(site);
criteria.where(
builder.and(
builder.or(builder.isNull(site.get("archiveStatus").get("archived").as(String.class)),
builder.notEqual(site.get("archiveStatus").get("archived").as(Character.class), 'Y')),
builder.or(builder.isNull(site.get("deactivated").as(Boolean.class)),
builder.notEqual(site.get("deactivated").as(Boolean.class), true))
)
);
TypedQuery<Site> query = em.createQuery(criteria);
query.setHint(QueryHints.HINT_CACHEABLE, true);
return query.getResultList();
}
@Override
public Site retrieveSiteByDomainOrDomainPrefix(String domain, String domainPrefix) {
if (domain == null) {
return null;
}
List<String> siteIdentifiers = new ArrayList<String>();
siteIdentifiers.add(domain);
siteIdentifiers.add(domainPrefix);
CriteriaBuilder builder = em.getCriteriaBuilder();
CriteriaQuery<Site> criteria = builder.createQuery(Site.class);
Root<SiteImpl> site = criteria.from(SiteImpl.class);
criteria.select(site);
criteria.where(site.get("siteIdentifierValue").as(String.class).in(siteIdentifiers));
TypedQuery<Site> query = em.createQuery(criteria);
query.setHint(QueryHints.HINT_CACHEABLE, true);
List<Site> results = query.getResultList();
for (Site currentSite : results) {
if (SiteResolutionType.DOMAIN.equals(currentSite.getSiteResolutionType())) {
if (domain.equals(currentSite.getSiteIdentifierValue())) {
return currentSite;
}
}
if (SiteResolutionType.DOMAIN_PREFIX.equals(currentSite.getSiteResolutionType())) {
if (domainPrefix.equals(currentSite.getSiteIdentifierValue())) {
return currentSite;
}
}
// Force initialization of the lazily loaded catalogs collection while the persistence context is still open.
currentSite.getCatalogs().size();
}
return null;
}
@Override
public Site save(Site site) {
return em.merge(site);
}
@Override
public Site retrieveDefaultSite() {
return null;
}
}
| 1no label
|
common_src_main_java_org_broadleafcommerce_common_site_dao_SiteDaoImpl.java
|
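retrieveSiteByDomainOrDomainPrefix expects the caller to supply both the full domain and its prefix so that a single query can satisfy either resolution type. A sketch of how a caller might derive the two values from a request host name — the helper below and its splitting rule are assumptions for illustration, not part of the DAO:

// Hypothetical caller; the prefix-extraction rule is an assumption.
public Site resolveSite(SiteDao siteDao, String serverName) {
    // e.g. "store.example.com" -> domain "store.example.com", prefix "store"
    String domain = serverName;
    int firstDot = serverName.indexOf('.');
    String domainPrefix = firstDot > 0 ? serverName.substring(0, firstDot) : serverName;
    return siteDao.retrieveSiteByDomainOrDomainPrefix(domain, domainPrefix);
}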
5,840 |
public class DefaultSearchContext extends SearchContext {
private final long id;
private final ShardSearchRequest request;
private final SearchShardTarget shardTarget;
private SearchType searchType;
private final Engine.Searcher engineSearcher;
private final ScriptService scriptService;
private final CacheRecycler cacheRecycler;
private final PageCacheRecycler pageCacheRecycler;
private final IndexShard indexShard;
private final IndexService indexService;
private final ContextIndexSearcher searcher;
private final DfsSearchResult dfsResult;
private final QuerySearchResult queryResult;
private final FetchSearchResult fetchResult;
// lazy initialized only if needed
private ScanContext scanContext;
private float queryBoost = 1.0f;
// timeout in millis
private long timeoutInMillis = -1;
private List<String> groupStats;
private Scroll scroll;
private boolean explain;
private boolean version = false; // by default, we don't return versions
private List<String> fieldNames;
private FieldDataFieldsContext fieldDataFields;
private ScriptFieldsContext scriptFields;
private PartialFieldsContext partialFields;
private FetchSourceContext fetchSourceContext;
private int from = -1;
private int size = -1;
private Sort sort;
private Float minimumScore;
private boolean trackScores = false; // when sorting, track scores as well...
private ParsedQuery originalQuery;
private Query query;
private ParsedFilter postFilter;
private Filter aliasFilter;
private int[] docIdsToLoad;
private int docsIdsToLoadFrom;
private int docsIdsToLoadSize;
private SearchContextAggregations aggregations;
private SearchContextFacets facets;
private SearchContextHighlight highlight;
private SuggestionSearchContext suggest;
private List<RescoreSearchContext> rescore;
private SearchLookup searchLookup;
private boolean queryRewritten;
private volatile long keepAlive;
private volatile long lastAccessTime;
private List<Releasable> clearables = null;
public DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget,
Engine.Searcher engineSearcher, IndexService indexService, IndexShard indexShard,
ScriptService scriptService, CacheRecycler cacheRecycler, PageCacheRecycler pageCacheRecycler) {
this.id = id;
this.request = request;
this.searchType = request.searchType();
this.shardTarget = shardTarget;
this.engineSearcher = engineSearcher;
this.scriptService = scriptService;
this.cacheRecycler = cacheRecycler;
this.pageCacheRecycler = pageCacheRecycler;
this.dfsResult = new DfsSearchResult(id, shardTarget);
this.queryResult = new QuerySearchResult(id, shardTarget);
this.fetchResult = new FetchSearchResult(id, shardTarget);
this.indexShard = indexShard;
this.indexService = indexService;
this.searcher = new ContextIndexSearcher(this, engineSearcher);
// initialize the filtering alias based on the provided filters
aliasFilter = indexService.aliasesService().aliasFilter(request.filteringAliases());
}
@Override
public boolean release() throws ElasticsearchException {
if (scanContext != null) {
scanContext.clear();
}
// release the searchers held by this context
searcher.release();
engineSearcher.release();
return true;
}
public boolean clearAndRelease() {
clearReleasables();
return release();
}
/**
* Should be called before executing the main query and after all other parameters have been set.
*/
public void preProcess() {
if (query() == null) {
parsedQuery(ParsedQuery.parsedMatchAllQuery());
}
if (queryBoost() != 1.0f) {
parsedQuery(new ParsedQuery(new FunctionScoreQuery(query(), new BoostScoreFunction(queryBoost)), parsedQuery()));
}
Filter searchFilter = searchFilter(types());
if (searchFilter != null) {
if (Queries.isConstantMatchAllQuery(query())) {
Query q = new XConstantScoreQuery(searchFilter);
q.setBoost(query().getBoost());
parsedQuery(new ParsedQuery(q, parsedQuery()));
} else {
parsedQuery(new ParsedQuery(new XFilteredQuery(query(), searchFilter), parsedQuery()));
}
}
}
public Filter searchFilter(String[] types) {
Filter filter = mapperService().searchFilter(types);
if (filter == null) {
return aliasFilter;
} else {
filter = filterCache().cache(filter);
if (aliasFilter != null) {
return new AndFilter(ImmutableList.of(filter, aliasFilter));
}
return filter;
}
}
public long id() {
return this.id;
}
public String source() {
return engineSearcher.source();
}
public ShardSearchRequest request() {
return this.request;
}
public SearchType searchType() {
return this.searchType;
}
public SearchContext searchType(SearchType searchType) {
this.searchType = searchType;
return this;
}
public SearchShardTarget shardTarget() {
return this.shardTarget;
}
public int numberOfShards() {
return request.numberOfShards();
}
public boolean hasTypes() {
return request.types() != null && request.types().length > 0;
}
public String[] types() {
return request.types();
}
public float queryBoost() {
return queryBoost;
}
public SearchContext queryBoost(float queryBoost) {
this.queryBoost = queryBoost;
return this;
}
public long nowInMillis() {
return request.nowInMillis();
}
public Scroll scroll() {
return this.scroll;
}
public SearchContext scroll(Scroll scroll) {
this.scroll = scroll;
return this;
}
@Override
public SearchContextAggregations aggregations() {
return aggregations;
}
@Override
public SearchContext aggregations(SearchContextAggregations aggregations) {
this.aggregations = aggregations;
return this;
}
public SearchContextFacets facets() {
return facets;
}
public SearchContext facets(SearchContextFacets facets) {
this.facets = facets;
return this;
}
public SearchContextHighlight highlight() {
return highlight;
}
public void highlight(SearchContextHighlight highlight) {
this.highlight = highlight;
}
public SuggestionSearchContext suggest() {
return suggest;
}
public void suggest(SuggestionSearchContext suggest) {
this.suggest = suggest;
}
public List<RescoreSearchContext> rescore() {
if (rescore == null) {
return Collections.emptyList();
}
return rescore;
}
public void addRescore(RescoreSearchContext rescore) {
if (this.rescore == null) {
this.rescore = new ArrayList<RescoreSearchContext>();
}
this.rescore.add(rescore);
}
public boolean hasFieldDataFields() {
return fieldDataFields != null;
}
public FieldDataFieldsContext fieldDataFields() {
if (fieldDataFields == null) {
fieldDataFields = new FieldDataFieldsContext();
}
return this.fieldDataFields;
}
public boolean hasScriptFields() {
return scriptFields != null;
}
public ScriptFieldsContext scriptFields() {
if (scriptFields == null) {
scriptFields = new ScriptFieldsContext();
}
return this.scriptFields;
}
public boolean hasPartialFields() {
return partialFields != null;
}
public PartialFieldsContext partialFields() {
if (partialFields == null) {
partialFields = new PartialFieldsContext();
}
return this.partialFields;
}
/**
* A shortcut to check whether a fetch source context exists and indicates that the source is requested.
*
* @return {@code true} if the source should be fetched, {@code false} otherwise
*/
public boolean sourceRequested() {
return fetchSourceContext != null && fetchSourceContext.fetchSource();
}
public boolean hasFetchSourceContext() {
return fetchSourceContext != null;
}
public FetchSourceContext fetchSourceContext() {
return this.fetchSourceContext;
}
public SearchContext fetchSourceContext(FetchSourceContext fetchSourceContext) {
this.fetchSourceContext = fetchSourceContext;
return this;
}
public ContextIndexSearcher searcher() {
return this.searcher;
}
public IndexShard indexShard() {
return this.indexShard;
}
public MapperService mapperService() {
return indexService.mapperService();
}
public AnalysisService analysisService() {
return indexService.analysisService();
}
public IndexQueryParserService queryParserService() {
return indexService.queryParserService();
}
public SimilarityService similarityService() {
return indexService.similarityService();
}
public ScriptService scriptService() {
return scriptService;
}
public CacheRecycler cacheRecycler() {
return cacheRecycler;
}
public PageCacheRecycler pageCacheRecycler() {
return pageCacheRecycler;
}
public FilterCache filterCache() {
return indexService.cache().filter();
}
public DocSetCache docSetCache() {
return indexService.cache().docSet();
}
public IndexFieldDataService fieldData() {
return indexService.fieldData();
}
public IdCache idCache() {
return indexService.cache().idCache();
}
public long timeoutInMillis() {
return timeoutInMillis;
}
public void timeoutInMillis(long timeoutInMillis) {
this.timeoutInMillis = timeoutInMillis;
}
public SearchContext minimumScore(float minimumScore) {
this.minimumScore = minimumScore;
return this;
}
public Float minimumScore() {
return this.minimumScore;
}
public SearchContext sort(Sort sort) {
this.sort = sort;
return this;
}
public Sort sort() {
return this.sort;
}
public SearchContext trackScores(boolean trackScores) {
this.trackScores = trackScores;
return this;
}
public boolean trackScores() {
return this.trackScores;
}
public SearchContext parsedPostFilter(ParsedFilter postFilter) {
this.postFilter = postFilter;
return this;
}
public ParsedFilter parsedPostFilter() {
return this.postFilter;
}
public Filter aliasFilter() {
return aliasFilter;
}
public SearchContext parsedQuery(ParsedQuery query) {
queryRewritten = false;
this.originalQuery = query;
this.query = query.query();
return this;
}
public ParsedQuery parsedQuery() {
return this.originalQuery;
}
/**
* The query to execute, might be rewritten.
*/
public Query query() {
return this.query;
}
/**
* Has the query been rewritten already?
*/
public boolean queryRewritten() {
return queryRewritten;
}
/**
* Rewrites the query and updates it. Only happens once.
*/
public SearchContext updateRewriteQuery(Query rewriteQuery) {
query = rewriteQuery;
queryRewritten = true;
return this;
}
public int from() {
return from;
}
public SearchContext from(int from) {
this.from = from;
return this;
}
public int size() {
return size;
}
public SearchContext size(int size) {
this.size = size;
return this;
}
public boolean hasFieldNames() {
return fieldNames != null;
}
public List<String> fieldNames() {
if (fieldNames == null) {
fieldNames = Lists.newArrayList();
}
return fieldNames;
}
public void emptyFieldNames() {
this.fieldNames = ImmutableList.of();
}
public boolean explain() {
return explain;
}
public void explain(boolean explain) {
this.explain = explain;
}
@Nullable
public List<String> groupStats() {
return this.groupStats;
}
public void groupStats(List<String> groupStats) {
this.groupStats = groupStats;
}
public boolean version() {
return version;
}
public void version(boolean version) {
this.version = version;
}
public int[] docIdsToLoad() {
return docIdsToLoad;
}
public int docIdsToLoadFrom() {
return docsIdsToLoadFrom;
}
public int docIdsToLoadSize() {
return docsIdsToLoadSize;
}
public SearchContext docIdsToLoad(int[] docIdsToLoad, int docsIdsToLoadFrom, int docsIdsToLoadSize) {
this.docIdsToLoad = docIdsToLoad;
this.docsIdsToLoadFrom = docsIdsToLoadFrom;
this.docsIdsToLoadSize = docsIdsToLoadSize;
return this;
}
public void accessed(long accessTime) {
this.lastAccessTime = accessTime;
}
public long lastAccessTime() {
return this.lastAccessTime;
}
public long keepAlive() {
return this.keepAlive;
}
public void keepAlive(long keepAlive) {
this.keepAlive = keepAlive;
}
public SearchLookup lookup() {
// TODO: The types should take into account the parsing context in QueryParserContext...
if (searchLookup == null) {
searchLookup = new SearchLookup(mapperService(), fieldData(), request.types());
}
return searchLookup;
}
public DfsSearchResult dfsResult() {
return dfsResult;
}
public QuerySearchResult queryResult() {
return queryResult;
}
public FetchSearchResult fetchResult() {
return fetchResult;
}
@Override
public void addReleasable(Releasable releasable) {
if (clearables == null) {
clearables = new ArrayList<Releasable>();
}
clearables.add(releasable);
}
@Override
public void clearReleasables() {
if (clearables != null) {
Throwable th = null;
for (Releasable releasable : clearables) {
try {
releasable.release();
} catch (Throwable t) {
if (th == null) {
th = t;
}
}
}
clearables.clear();
if (th != null) {
throw new RuntimeException(th);
}
}
}
public ScanContext scanContext() {
if (scanContext == null) {
scanContext = new ScanContext();
}
return scanContext;
}
public MapperService.SmartNameFieldMappers smartFieldMappers(String name) {
return mapperService().smartName(name, request.types());
}
public FieldMappers smartNameFieldMappers(String name) {
return mapperService().smartNameFieldMappers(name, request.types());
}
public FieldMapper smartNameFieldMapper(String name) {
return mapperService().smartNameFieldMapper(name, request.types());
}
public MapperService.SmartNameObjectMapper smartNameObjectMapper(String name) {
return mapperService().smartNameObjectMapper(name, request.types());
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_internal_DefaultSearchContext.java
|
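clearReleasables above shows a common resource-cleanup idiom: release every registered resource even when one of them fails, remember only the first Throwable, and rethrow it once the loop completes. A standalone sketch of the same pattern — Releasable here is a minimal stand-in for the Elasticsearch interface, not the real type:

import java.util.ArrayList;
import java.util.List;

// Minimal stand-in for org.elasticsearch.common.lease.Releasable.
interface Releasable {
    boolean release();
}

final class ReleasableList {
    private final List<Releasable> resources = new ArrayList<Releasable>();

    void add(Releasable r) {
        resources.add(r);
    }

    // Release everything; keep only the first failure so that later
    // resources are still released, then surface that failure.
    void releaseAll() {
        Throwable first = null;
        for (Releasable r : resources) {
            try {
                r.release();
            } catch (Throwable t) {
                if (first == null) {
                    first = t;
                }
            }
        }
        resources.clear();
        if (first != null) {
            throw new RuntimeException(first);
        }
    }
}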
309 |
Integer.class, 2, new OConfigurationChangeCallback() {
public void change(final Object iCurrentValue, final Object iNewValue) {
OMMapManagerOld.setOverlapStrategy((Integer) iNewValue);
}
}),
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_config_OGlobalConfiguration.java
|