Unnamed: 0
int64 0
6.45k
| func
stringlengths 37
143k
| target
class label 2
classes | project
stringlengths 33
157
|
---|---|---|---|
139 |
public class DistributedObjectListenerRequest extends CallableClientRequest implements RetryableRequest {
public DistributedObjectListenerRequest() {
}
@Override
public Object call() throws Exception {
ProxyService proxyService = clientEngine.getProxyService();
String registrationId = proxyService.addProxyListener(new MyDistributedObjectListener());
endpoint.setDistributedObjectListener(registrationId);
return registrationId;
}
@Override
public String getServiceName() {
return null;
}
@Override
public int getFactoryId() {
return ClientPortableHook.ID;
}
@Override
public int getClassId() {
return ClientPortableHook.LISTENER;
}
private class MyDistributedObjectListener implements DistributedObjectListener {
@Override
public void distributedObjectCreated(DistributedObjectEvent event) {
send(event);
}
@Override
public void distributedObjectDestroyed(DistributedObjectEvent event) {
}
private void send(DistributedObjectEvent event) {
if (endpoint.live()) {
PortableDistributedObjectEvent portableEvent = new PortableDistributedObjectEvent(
event.getEventType(), event.getDistributedObject().getName(), event.getServiceName());
endpoint.sendEvent(portableEvent, getCallId());
}
}
}
@Override
public Permission getRequiredPermission() {
return null;
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_client_DistributedObjectListenerRequest.java
|
1,061 |
public class OCommandExecutorSQLUpdate extends OCommandExecutorSQLSetAware implements OCommandResultListener {
public static final String KEYWORD_UPDATE = "UPDATE";
private static final String KEYWORD_ADD = "ADD";
private static final String KEYWORD_PUT = "PUT";
private static final String KEYWORD_REMOVE = "REMOVE";
private static final String KEYWORD_INCREMENT = "INCREMENT";
private static final String KEYWORD_MERGE = "MERGE";
private Map<String, Object> setEntries = new LinkedHashMap<String, Object>();
private List<OPair<String, Object>> addEntries = new ArrayList<OPair<String, Object>>();
private Map<String, OPair<String, Object>> putEntries = new LinkedHashMap<String, OPair<String, Object>>();
private List<OPair<String, Object>> removeEntries = new ArrayList<OPair<String, Object>>();
private Map<String, Number> incrementEntries = new LinkedHashMap<String, Number>();
private ODocument merge = null;
private OQuery<?> query;
private OSQLFilter compiledFilter;
private int recordCount = 0;
private String subjectName;
private static final Object EMPTY_VALUE = new Object();
private OCommandParameters parameters;
@SuppressWarnings("unchecked")
public OCommandExecutorSQLUpdate parse(final OCommandRequest iRequest) {
final ODatabaseRecord database = getDatabase();
init((OCommandRequestText) iRequest);
setEntries.clear();
addEntries.clear();
putEntries.clear();
removeEntries.clear();
incrementEntries.clear();
content = null;
merge = null;
query = null;
recordCount = 0;
parserRequiredKeyword(KEYWORD_UPDATE);
subjectName = parserRequiredWord(false, "Invalid target", " =><,\r\n");
if (subjectName == null)
throwSyntaxErrorException("Invalid subject name. Expected cluster, class, index or sub-query");
parserNextWord(true);
String word = parserGetLastWord();
if (parserIsEnded()
|| (!word.equals(KEYWORD_SET) && !word.equals(KEYWORD_ADD) && !word.equals(KEYWORD_PUT) && !word.equals(KEYWORD_REMOVE)
&& !word.equals(KEYWORD_INCREMENT) && !word.equals(KEYWORD_CONTENT) && !word.equals(KEYWORD_MERGE)))
throwSyntaxErrorException("Expected keyword " + KEYWORD_SET + "," + KEYWORD_ADD + "," + KEYWORD_CONTENT + "," + KEYWORD_MERGE
+ "," + KEYWORD_PUT + "," + KEYWORD_REMOVE + " or " + KEYWORD_INCREMENT);
while (!parserIsEnded() && !parserGetLastWord().equals(OCommandExecutorSQLAbstract.KEYWORD_WHERE)) {
word = parserGetLastWord();
if (word.equals(KEYWORD_CONTENT))
parseContent();
else if (word.equals(KEYWORD_MERGE))
parseMerge();
else if (word.equals(KEYWORD_SET))
parseSetFields(setEntries);
else if (word.equals(KEYWORD_ADD))
parseAddFields();
else if (word.equals(KEYWORD_PUT))
parsePutFields();
else if (word.equals(KEYWORD_REMOVE))
parseRemoveFields();
else if (word.equals(KEYWORD_INCREMENT))
parseIncrementFields();
else
break;
parserNextWord(true);
}
final String additionalStatement = parserGetLastWord();
if (subjectName.startsWith("(")) {
subjectName = subjectName.trim();
query = database.command(new OSQLAsynchQuery<ODocument>(subjectName.substring(1, subjectName.length() - 1), this)
.setContext(context));
if (additionalStatement.equals(OCommandExecutorSQLAbstract.KEYWORD_WHERE)
|| additionalStatement.equals(OCommandExecutorSQLAbstract.KEYWORD_LIMIT))
compiledFilter = OSQLEngine.getInstance().parseCondition(parserText.substring(parserGetCurrentPosition()), getContext(),
KEYWORD_WHERE);
} else if (additionalStatement.equals(OCommandExecutorSQLAbstract.KEYWORD_WHERE)
|| additionalStatement.equals(OCommandExecutorSQLAbstract.KEYWORD_LIMIT))
query = new OSQLAsynchQuery<ODocument>("select from " + subjectName + " " + additionalStatement + " "
+ parserText.substring(parserGetCurrentPosition()), this);
else if (additionalStatement != null && !additionalStatement.isEmpty())
throwSyntaxErrorException("Invalid keyword " + additionalStatement);
else
query = new OSQLAsynchQuery<ODocument>("select from " + subjectName, this);
return this;
}
public Object execute(final Map<Object, Object> iArgs) {
if (subjectName == null)
throw new OCommandExecutionException("Cannot execute the command because it has not been parsed yet");
parameters = new OCommandParameters(iArgs);
Map<Object, Object> queryArgs;
if(parameters.size() > 0 && parameters.getByName(0) != null ){
queryArgs = new HashMap<Object, Object>();
for (int i = parameterCounter; i < parameters.size(); i++) {
if (parameters.getByName(i) != null)
queryArgs.put(i - parameterCounter, parameters.getByName(i));
}
} else {
queryArgs = iArgs;
}
query.setUseCache(false);
query.setContext(context);
getDatabase().query(query, queryArgs);
return recordCount;
}
/**
* Update current record.
*/
@SuppressWarnings("unchecked")
public boolean result(final Object iRecord) {
final ODocument record = (ODocument) ((OIdentifiable) iRecord).getRecord();
if (compiledFilter != null) {
// ADDITIONAL FILTERING
if (!(Boolean) compiledFilter.evaluate(record, null, context))
return false;
}
final Set<ODocument> updatedRecords = new HashSet<ODocument>();
parameters.reset();
if (content != null) {
// REPLACE ALL THE CONTENT
record.clear();
record.merge(content, false, false);
updatedRecords.add(record);
}
if (merge != null) {
// REPLACE ALL THE CONTENT
record.merge(merge, true, false);
updatedRecords.add(record);
}
// BIND VALUES TO UPDATE
if (!setEntries.isEmpty()) {
Set<ODocument> changedDocuments = OSQLHelper.bindParameters(record, setEntries, parameters, context);
if (changedDocuments != null)
updatedRecords.addAll(changedDocuments);
}
// BIND VALUES TO INCREMENT
for (Map.Entry<String, Number> entry : incrementEntries.entrySet()) {
final Number prevValue = record.field(entry.getKey());
if (prevValue == null)
// NO PREVIOUS VALUE: CONSIDER AS 0
record.field(entry.getKey(), entry.getValue());
else
// COMPUTING INCREMENT
record.field(entry.getKey(), OType.increment(prevValue, entry.getValue()));
updatedRecords.add(record);
}
Object v;
// BIND VALUES TO ADD
Collection<Object> coll;
Object fieldValue;
for (OPair<String, Object> entry : addEntries) {
coll = null;
if (!record.containsField(entry.getKey())) {
// GET THE TYPE IF ANY
if (record.getSchemaClass() != null) {
OProperty prop = record.getSchemaClass().getProperty(entry.getKey());
if (prop != null && prop.getType() == OType.LINKSET)
// SET TYPE
coll = new HashSet<Object>();
}
if (coll == null)
// IN ALL OTHER CASES USE A LIST
coll = new ArrayList<Object>();
record.field(entry.getKey(), coll);
} else {
fieldValue = record.field(entry.getKey());
if (fieldValue instanceof Collection<?>)
coll = (Collection<Object>) fieldValue;
else
continue;
}
v = entry.getValue();
if (v instanceof OSQLFilterItem)
v = ((OSQLFilterItem) v).getValue(record, context);
else if (v instanceof OSQLFunctionRuntime)
v = ((OSQLFunctionRuntime) v).execute(record, null, context);
coll.add(v);
updatedRecords.add(record);
}
// BIND VALUES TO PUT (AS MAP)
Map<String, Object> map;
OPair<String, Object> pair;
for (Entry<String, OPair<String, Object>> entry : putEntries.entrySet()) {
fieldValue = record.field(entry.getKey());
if (fieldValue == null) {
if (record.getSchemaClass() != null) {
final OProperty property = record.getSchemaClass().getProperty(entry.getKey());
if (property != null
&& (property.getType() != null && (!property.getType().equals(OType.EMBEDDEDMAP) && !property.getType().equals(
OType.LINKMAP)))) {
throw new OCommandExecutionException("field " + entry.getKey() + " is not defined as a map");
}
}
fieldValue = new HashMap<String, Object>();
record.field(entry.getKey(), fieldValue);
}
if (fieldValue instanceof Map<?, ?>) {
map = (Map<String, Object>) fieldValue;
pair = entry.getValue();
v = pair.getValue();
if (v instanceof OSQLFilterItem)
v = ((OSQLFilterItem) v).getValue(record, context);
else if (pair.getValue() instanceof OSQLFunctionRuntime)
v = ((OSQLFunctionRuntime) v).execute(record, null, context);
map.put(pair.getKey(), v);
updatedRecords.add(record);
}
}
// REMOVE FIELD IF ANY
for (OPair<String, Object> entry : removeEntries) {
v = entry.getValue();
if (v == EMPTY_VALUE) {
record.removeField(entry.getKey());
updatedRecords.add(record);
} else {
fieldValue = record.field(entry.getKey());
if (fieldValue instanceof Collection<?>) {
coll = (Collection<Object>) fieldValue;
if (coll.remove(v))
updatedRecords.add(record);
} else if (fieldValue instanceof Map<?, ?>) {
map = (Map<String, Object>) fieldValue;
if (map.remove(v) != null)
updatedRecords.add(record);
}
}
}
for (ODocument d : updatedRecords) {
d.setDirty();
d.save();
recordCount++;
}
return true;
}
protected void parseMerge() {
if (!parserIsEnded() && !parserGetLastWord().equals(KEYWORD_WHERE)) {
final String contentAsString = parserRequiredWord(false, "document to merge expected").trim();
merge = new ODocument().fromJSON(contentAsString);
parserSkipWhiteSpaces();
}
if (merge == null)
throwSyntaxErrorException("Document to merge not provided. Example: MERGE { \"name\": \"Jay\" }");
}
private void parseAddFields() {
String fieldName;
String fieldValue;
while (!parserIsEnded() && (addEntries.size() == 0 || parserGetLastSeparator() == ',' || parserGetCurrentChar() == ',')
&& !parserGetLastWord().equals(KEYWORD_WHERE)) {
fieldName = parserRequiredWord(false, "Field name expected");
parserRequiredKeyword("=");
fieldValue = parserRequiredWord(false, "Value expected", " =><,\r\n");
// INSERT TRANSFORMED FIELD VALUE
addEntries.add(new OPair<String, Object>(fieldName, getFieldValueCountingParameters(fieldValue)));
parserSkipWhiteSpaces();
}
if (addEntries.size() == 0)
throwSyntaxErrorException("Entries to add <field> = <value> are missed. Example: name = 'Bill', salary = 300.2.");
}
private void parsePutFields() {
String fieldName;
String fieldKey;
String fieldValue;
while (!parserIsEnded() && (putEntries.size() == 0 || parserGetLastSeparator() == ',' || parserGetCurrentChar() == ',')
&& !parserGetLastWord().equals(KEYWORD_WHERE)) {
fieldName = parserRequiredWord(false, "Field name expected");
parserRequiredKeyword("=");
fieldKey = parserRequiredWord(false, "Key expected");
fieldValue = getBlock(parserRequiredWord(false, "Value expected", " =><,\r\n"));
// INSERT TRANSFORMED FIELD VALUE
putEntries.put(fieldName, new OPair<String, Object>((String) getFieldValueCountingParameters(fieldKey),
getFieldValueCountingParameters(fieldValue)));
parserSkipWhiteSpaces();
}
if (putEntries.size() == 0)
throwSyntaxErrorException("Entries to put <field> = <key>, <value> are missed. Example: name = 'Bill', 30");
}
private void parseRemoveFields() {
String fieldName;
String fieldValue;
Object value;
while (!parserIsEnded() && (removeEntries.size() == 0 || parserGetLastSeparator() == ',' || parserGetCurrentChar() == ',')
&& !parserGetLastWord().equals(KEYWORD_WHERE)) {
fieldName = parserRequiredWord(false, "Field name expected");
final boolean found = parserOptionalKeyword("=", "WHERE");
if (found)
if (parserGetLastWord().equals("WHERE")) {
parserGoBack();
value = EMPTY_VALUE;
} else {
fieldValue = getBlock(parserRequiredWord(false, "Value expected"));
value = getFieldValueCountingParameters(fieldValue);
}
else
value = EMPTY_VALUE;
// INSERT FIELD NAME TO BE REMOVED
removeEntries.add(new OPair<String, Object>(fieldName, value));
parserSkipWhiteSpaces();
}
if (removeEntries.size() == 0)
throwSyntaxErrorException("Field(s) to remove are missed. Example: name, salary");
}
private void parseIncrementFields() {
String fieldName;
String fieldValue;
while (!parserIsEnded() && (incrementEntries.size() == 0 || parserGetLastSeparator() == ',')
&& !parserGetLastWord().equals(KEYWORD_WHERE)) {
fieldName = parserRequiredWord(false, "Field name expected");
parserRequiredKeyword("=");
fieldValue = getBlock(parserRequiredWord(false, "Value expected"));
// INSERT TRANSFORMED FIELD VALUE
incrementEntries.put(fieldName, (Number) getFieldValueCountingParameters(fieldValue));
parserSkipWhiteSpaces();
}
if (incrementEntries.size() == 0)
throwSyntaxErrorException("Entries to increment <field> = <value> are missed. Example: salary = -100");
}
@Override
public String getSyntax() {
return "UPDATE <class>|cluster:<cluster>> [SET|ADD|PUT|REMOVE|INCREMENT|CONTENT {<JSON>}|MERGE {<JSON>}] [[,] <field-name> = <expression>|<sub-command>]* [WHERE <conditions>]";
}
@Override
public void end() {
}
protected String getBlock(String fieldValue) {
if (fieldValue.startsWith("{") || fieldValue.startsWith("[") || fieldValue.startsWith("[")) {
parserSkipWhiteSpaces();
final StringBuilder buffer = new StringBuilder();
parserSetCurrentPosition(OStringSerializerHelper.parse(parserText, buffer, parserGetCurrentPosition(), -1,
OStringSerializerHelper.DEFAULT_FIELD_SEPARATOR, true, true, false, OStringSerializerHelper.DEFAULT_IGNORE_CHARS));
fieldValue = buffer.toString();
}
return fieldValue;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_OCommandExecutorSQLUpdate.java
|
481 |
public class AnalyzeRequest extends SingleCustomOperationRequest<AnalyzeRequest> {
private String index;
private String text;
private String analyzer;
private String tokenizer;
private String[] tokenFilters;
private String field;
AnalyzeRequest() {
}
/**
* Constructs a new analyzer request for the provided text.
*
* @param text The text to analyze
*/
public AnalyzeRequest(String text) {
this.text = text;
}
/**
* Constructs a new analyzer request for the provided index and text.
*
* @param index The index name
* @param text The text to analyze
*/
public AnalyzeRequest(@Nullable String index, String text) {
this.index = index;
this.text = text;
}
public String text() {
return this.text;
}
public AnalyzeRequest index(String index) {
this.index = index;
return this;
}
public String index() {
return this.index;
}
public AnalyzeRequest analyzer(String analyzer) {
this.analyzer = analyzer;
return this;
}
public String analyzer() {
return this.analyzer;
}
public AnalyzeRequest tokenizer(String tokenizer) {
this.tokenizer = tokenizer;
return this;
}
public String tokenizer() {
return this.tokenizer;
}
public AnalyzeRequest tokenFilters(String... tokenFilters) {
this.tokenFilters = tokenFilters;
return this;
}
public String[] tokenFilters() {
return this.tokenFilters;
}
public AnalyzeRequest field(String field) {
this.field = field;
return this;
}
public String field() {
return this.field;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = super.validate();
if (text == null) {
validationException = addValidationError("text is missing", validationException);
}
return validationException;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
index = in.readOptionalString();
text = in.readString();
analyzer = in.readOptionalString();
tokenizer = in.readOptionalString();
int size = in.readVInt();
if (size > 0) {
tokenFilters = new String[size];
for (int i = 0; i < size; i++) {
tokenFilters[i] = in.readString();
}
}
field = in.readOptionalString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeOptionalString(index);
out.writeString(text);
out.writeOptionalString(analyzer);
out.writeOptionalString(tokenizer);
if (tokenFilters == null) {
out.writeVInt(0);
} else {
out.writeVInt(tokenFilters.length);
for (String tokenFilter : tokenFilters) {
out.writeString(tokenFilter);
}
}
out.writeOptionalString(field);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_analyze_AnalyzeRequest.java
|
3,400 |
public static class SendEventOperation extends AbstractOperation {
private EventPacket eventPacket;
private int orderKey;
public SendEventOperation() {
}
public SendEventOperation(EventPacket eventPacket, int orderKey) {
this.eventPacket = eventPacket;
this.orderKey = orderKey;
}
@Override
public void run() throws Exception {
EventServiceImpl eventService = (EventServiceImpl) getNodeEngine().getEventService();
eventService.executeEvent(eventService.new EventPacketProcessor(eventPacket, orderKey));
}
@Override
public boolean returnsResponse() {
return true;
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
eventPacket.writeData(out);
out.writeInt(orderKey);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
eventPacket = new EventPacket();
eventPacket.readData(in);
orderKey = in.readInt();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_spi_impl_EventServiceImpl.java
|
44 |
{
@Override
public void notify( ClusterMemberListener listener )
{
for ( MemberIsAvailable memberIsAvailable : clusterMembersSnapshot.getCurrentAvailable( member ) )
{
listener.memberIsUnavailable( memberIsAvailable.getRole(), member );
}
}
} );
| 1no label
|
enterprise_cluster_src_main_java_org_neo4j_cluster_member_paxos_PaxosClusterMemberEvents.java
|
268 |
public class NullEmailServiceImpl implements EmailService {
@Override
public boolean sendTemplateEmail(String emailAddress, EmailInfo emailInfo, HashMap<String, Object> props) {
return true;
}
@Override
public boolean sendTemplateEmail(EmailTarget emailTarget, EmailInfo emailInfo, HashMap<String, Object> props) {
return true;
}
@Override
public boolean sendBasicEmail(EmailInfo emailInfo, EmailTarget emailTarget, HashMap<String, Object> props) {
return true;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_email_service_NullEmailServiceImpl.java
|
319 |
public interface MergeBeanStatusProvider {
/**
* Typically used by the {@link AbstractMergeBeanPostProcessor} class to determine whether or not certain
* lists should be processed or if they can be safely ignored.
*
* @param bean
* @param beanName
* @param appCtx
* @return whether or not processing should be triggered
*/
public boolean isProcessingEnabled(Object bean, String beanName, ApplicationContext appCtx);
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_extensibility_context_merge_MergeBeanStatusProvider.java
|
44 |
@Component("blPageTemplateCustomPersistenceHandler")
public class PageTemplateCustomPersistenceHandler extends CustomPersistenceHandlerAdapter {
private final Log LOG = LogFactory.getLog(PageTemplateCustomPersistenceHandler.class);
@Resource(name="blPageService")
protected PageService pageService;
@Resource(name="blSandBoxService")
protected SandBoxService sandBoxService;
@Resource(name = "blDynamicFieldPersistenceHandlerHelper")
protected DynamicFieldPersistenceHandlerHelper dynamicFieldUtil;
@Override
public Boolean canHandleFetch(PersistencePackage persistencePackage) {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
return
PageTemplate.class.getName().equals(ceilingEntityFullyQualifiedClassname) &&
persistencePackage.getCustomCriteria() != null &&
persistencePackage.getCustomCriteria().length > 0 &&
persistencePackage.getCustomCriteria()[0].equals("constructForm");
}
@Override
public Boolean canHandleAdd(PersistencePackage persistencePackage) {
return canHandleFetch(persistencePackage);
}
@Override
public Boolean canHandleInspect(PersistencePackage persistencePackage) {
return canHandleFetch(persistencePackage);
}
@Override
public Boolean canHandleRemove(PersistencePackage persistencePackage) {
return false;
}
@Override
public Boolean canHandleUpdate(PersistencePackage persistencePackage) {
return canHandleFetch(persistencePackage);
}
protected SandBox getSandBox() {
return sandBoxService.retrieveSandboxById(SandBoxContext.getSandBoxContext().getSandBoxId());
}
@Override
public DynamicResultSet inspect(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, InspectHelper helper) throws ServiceException {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
try {
String pageTemplateId = persistencePackage.getCustomCriteria()[3];
PageTemplate template = pageService.findPageTemplateById(Long.valueOf(pageTemplateId));
ClassMetadata metadata = new ClassMetadata();
metadata.setCeilingType(PageTemplate.class.getName());
ClassTree entities = new ClassTree(PageTemplateImpl.class.getName());
metadata.setPolymorphicEntities(entities);
Property[] properties = dynamicFieldUtil.buildDynamicPropertyList(template.getFieldGroups(), PageTemplate.class);
metadata.setProperties(properties);
DynamicResultSet results = new DynamicResultSet(metadata);
return results;
} catch (Exception e) {
throw new ServiceException("Unable to perform inspect for entity: "+ceilingEntityFullyQualifiedClassname, e);
}
}
@Override
public DynamicResultSet fetch(PersistencePackage persistencePackage, CriteriaTransferObject cto, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
try {
String pageId = persistencePackage.getCustomCriteria()[1];
Entity entity = fetchEntityBasedOnId(pageId);
DynamicResultSet results = new DynamicResultSet(new Entity[]{entity}, 1);
return results;
} catch (Exception e) {
throw new ServiceException("Unable to perform fetch for entity: "+ceilingEntityFullyQualifiedClassname, e);
}
}
protected Entity fetchEntityBasedOnId(String pageId) throws Exception {
Page page = pageService.findPageById(Long.valueOf(pageId));
Map<String, PageField> pageFieldMap = page.getPageFields();
Entity entity = new Entity();
entity.setType(new String[]{PageTemplateImpl.class.getName()});
List<Property> propertiesList = new ArrayList<Property>();
for (FieldGroup fieldGroup : page.getPageTemplate().getFieldGroups()) {
for (FieldDefinition definition : fieldGroup.getFieldDefinitions()) {
Property property = new Property();
propertiesList.add(property);
property.setName(definition.getName());
String value = null;
if (!MapUtils.isEmpty(pageFieldMap)) {
PageField pageField = pageFieldMap.get(definition.getName());
if (pageField == null) {
value = "";
} else {
value = pageField.getValue();
}
}
property.setValue(value);
}
}
Property property = new Property();
propertiesList.add(property);
property.setName("id");
property.setValue(pageId);
entity.setProperties(propertiesList.toArray(new Property[]{}));
return entity;
}
@Override
public Entity update(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException {
return addOrUpdate(persistencePackage, dynamicEntityDao, helper);
}
@Override
public Entity add(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException {
return addOrUpdate(persistencePackage, dynamicEntityDao, helper);
}
protected Entity addOrUpdate(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
try {
String pageId = persistencePackage.getCustomCriteria()[1];
Page page = pageService.findPageById(Long.valueOf(pageId));
Property[] properties = dynamicFieldUtil.buildDynamicPropertyList(page.getPageTemplate().getFieldGroups(), PageTemplate.class);
Map<String, FieldMetadata> md = new HashMap<String, FieldMetadata>();
for (Property property : properties) {
md.put(property.getName(), property.getMetadata());
}
boolean validated = helper.validate(persistencePackage.getEntity(), null, md);
if (!validated) {
throw new ValidationException(persistencePackage.getEntity(), "Page dynamic fields failed validation");
}
List<String> templateFieldNames = new ArrayList<String>(20);
for (FieldGroup group : page.getPageTemplate().getFieldGroups()) {
for (FieldDefinition definition: group.getFieldDefinitions()) {
templateFieldNames.add(definition.getName());
}
}
Map<String, PageField> pageFieldMap = page.getPageFields();
for (Property property : persistencePackage.getEntity().getProperties()) {
if (templateFieldNames.contains(property.getName())) {
PageField pageField = pageFieldMap.get(property.getName());
if (pageField != null) {
pageField.setValue(property.getValue());
} else {
pageField = new PageFieldImpl();
pageFieldMap.put(property.getName(), pageField);
pageField.setFieldKey(property.getName());
pageField.setPage(page);
pageField.setValue(property.getValue());
}
}
}
List<String> removeItems = new ArrayList<String>();
for (String key : pageFieldMap.keySet()) {
if (persistencePackage.getEntity().findProperty(key)==null) {
removeItems.add(key);
}
}
if (removeItems.size() > 0) {
for (String removeKey : removeItems) {
PageField pageField = pageFieldMap.remove(removeKey);
pageField.setPage(null);
}
}
pageService.updatePage(page, getSandBox());
return fetchEntityBasedOnId(pageId);
} catch (ValidationException e) {
throw e;
} catch (Exception e) {
throw new ServiceException("Unable to perform update for entity: "+ceilingEntityFullyQualifiedClassname, e);
}
}
}
| 1no label
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_admin_server_handler_PageTemplateCustomPersistenceHandler.java
|
3,290 |
abstract class AbstractGeoPointIndexFieldData extends AbstractIndexFieldData<AtomicGeoPointFieldData<ScriptDocValues>> implements IndexGeoPointFieldData<AtomicGeoPointFieldData<ScriptDocValues>> {
protected static class Empty extends AtomicGeoPointFieldData<ScriptDocValues> {
private final int numDocs;
Empty(int numDocs) {
this.numDocs = numDocs;
}
@Override
public boolean isMultiValued() {
return false;
}
@Override
public boolean isValuesOrdered() {
return false;
}
@Override
public long getNumberUniqueValues() {
return 0;
}
@Override
public long getMemorySizeInBytes() {
return 0;
}
@Override
public BytesValues getBytesValues(boolean needsHashes) {
return BytesValues.EMPTY;
}
@Override
public GeoPointValues getGeoPointValues() {
return GeoPointValues.EMPTY;
}
@Override
public ScriptDocValues getScriptValues() {
return ScriptDocValues.EMPTY;
}
@Override
public int getNumDocs() {
return numDocs;
}
@Override
public void close() {
// no-op
}
}
protected static class GeoPointEnum {
private final BytesRefIterator termsEnum;
private final GeoPoint next;
private final CharsRef spare;
protected GeoPointEnum(BytesRefIterator termsEnum) {
this.termsEnum = termsEnum;
next = new GeoPoint();
spare = new CharsRef();
}
public GeoPoint next() throws IOException {
final BytesRef term = termsEnum.next();
if (term == null) {
return null;
}
UnicodeUtil.UTF8toUTF16(term, spare);
int commaIndex = -1;
for (int i = 0; i < spare.length; i++) {
if (spare.chars[spare.offset + i] == ',') { // safes a string creation
commaIndex = i;
break;
}
}
if (commaIndex == -1) {
assert false;
return next.reset(0, 0);
}
final double lat = Double.parseDouble(new String(spare.chars, spare.offset, (commaIndex - spare.offset)));
final double lon = Double.parseDouble(new String(spare.chars, (spare.offset + (commaIndex + 1)), spare.length - ((commaIndex + 1) - spare.offset)));
return next.reset(lat, lon);
}
}
public AbstractGeoPointIndexFieldData(Index index, Settings indexSettings, Names fieldNames, FieldDataType fieldDataType, IndexFieldDataCache cache) {
super(index, indexSettings, fieldNames, fieldDataType, cache);
}
@Override
public boolean valuesOrdered() {
// because we might have single values? we can dynamically update a flag to reflect that
// based on the atomic field data loaded
return false;
}
@Override
public final XFieldComparatorSource comparatorSource(@Nullable Object missingValue, SortMode sortMode) {
throw new ElasticsearchIllegalArgumentException("can't sort on geo_point field without using specific sorting feature, like geo_distance");
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_fielddata_plain_AbstractGeoPointIndexFieldData.java
|
1,452 |
public abstract class TitanHadoopSetupCommon implements TitanHadoopSetup {
private static final StaticBuffer DEFAULT_COLUMN = StaticArrayBuffer.of(new byte[0]);
private static final SliceQuery DEFAULT_SLICE_QUERY = new SliceQuery(DEFAULT_COLUMN, DEFAULT_COLUMN);
@Override
public SliceQuery inputSlice(final FaunusVertexQueryFilter inputFilter) {
//For now, only return the full range because the current input format needs to read the hidden
//vertex-state property to determine if the vertex is a ghost. If we filter, that relation would fall out as well.
return DEFAULT_SLICE_QUERY;
}
@Override
public void close() {
//Do nothing
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_formats_util_input_TitanHadoopSetupCommon.java
|
470 |
public class AliasesExistAction extends IndicesAction<GetAliasesRequest, AliasesExistResponse, AliasesExistRequestBuilder> {
public static final AliasesExistAction INSTANCE = new AliasesExistAction();
public static final String NAME = "indices/exists/aliases";
private AliasesExistAction() {
super(NAME);
}
@Override
public AliasesExistRequestBuilder newRequestBuilder(IndicesAdminClient client) {
return new AliasesExistRequestBuilder(client);
}
@Override
public AliasesExistResponse newResponse() {
return new AliasesExistResponse();
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_alias_exists_AliasesExistAction.java
|
5,803 |
public class CustomHighlighter implements Highlighter {
@Override
public String[] names() {
return new String[] { "test-custom" };
}
@Override
public HighlightField highlight(HighlighterContext highlighterContext) {
SearchContextHighlight.Field field = highlighterContext.field;
List<Text> responses = Lists.newArrayList();
responses.add(new StringText("standard response"));
if (field.options() != null) {
for (Map.Entry<String, Object> entry : field.options().entrySet()) {
responses.add(new StringText("field:" + entry.getKey() + ":" + entry.getValue()));
}
}
return new HighlightField(highlighterContext.fieldName, responses.toArray(new Text[]{}));
}
}
| 1no label
|
src_test_java_org_elasticsearch_search_highlight_CustomHighlighter.java
|
774 |
public class ORecordIteratorClass<REC extends ORecordInternal<?>> extends ORecordIteratorClusters<REC> {
protected final OClass targetClass;
protected boolean polymorphic;
protected boolean useCache;
/**
* This method is only to maintain the retro compatibility with TinkerPop BP 2.2
*/
public ORecordIteratorClass(final ODatabaseRecord iDatabase, final ODatabaseRecordAbstract iLowLevelDatabase,
final String iClassName, final boolean iPolymorphic) {
this(iDatabase, iLowLevelDatabase, iClassName, iPolymorphic, true, false);
}
public ORecordIteratorClass(final ODatabaseRecord iDatabase, final ODatabaseRecord iLowLevelDatabase, final String iClassName,
final boolean iPolymorphic) {
this(iDatabase, iLowLevelDatabase, iClassName, iPolymorphic, true, false);
}
public ORecordIteratorClass(final ODatabaseRecord iDatabase, final ODatabaseRecord iLowLevelDatabase, final String iClassName,
final boolean iPolymorphic, final boolean iUseCache, final boolean iterateThroughTombstones) {
super(iDatabase, iLowLevelDatabase, iUseCache, iterateThroughTombstones);
targetClass = database.getMetadata().getSchema().getClass(iClassName);
if (targetClass == null)
throw new IllegalArgumentException("Class '" + iClassName + "' was not found in database schema");
polymorphic = iPolymorphic;
clusterIds = polymorphic ? targetClass.getPolymorphicClusterIds() : targetClass.getClusterIds();
clusterIds = OClassImpl.readableClusters(iDatabase, clusterIds);
config();
}
@SuppressWarnings("unchecked")
@Override
public REC next() {
final OIdentifiable rec = super.next();
if (rec == null)
return null;
return (REC) rec.getRecord();
}
@SuppressWarnings("unchecked")
@Override
public REC previous() {
final OIdentifiable rec = super.previous();
if (rec == null)
return null;
return (REC) rec.getRecord();
}
@Override
protected boolean include(final ORecord<?> record) {
return record instanceof ODocument && targetClass.isSuperClassOf(((ODocument) record).getSchemaClass());
}
public boolean isPolymorphic() {
return polymorphic;
}
@Override
public String toString() {
return String.format("ORecordIteratorClass.targetClass(%s).polymorphic(%s)", targetClass, polymorphic);
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_iterator_ORecordIteratorClass.java
|
4,445 |
class IndexFieldCache implements IndexFieldDataCache, SegmentReader.CoreClosedListener {
@Nullable
private final IndexService indexService;
final Index index;
final FieldMapper.Names fieldNames;
final FieldDataType fieldDataType;
IndexFieldCache(@Nullable IndexService indexService, Index index, FieldMapper.Names fieldNames, FieldDataType fieldDataType) {
this.indexService = indexService;
this.index = index;
this.fieldNames = fieldNames;
this.fieldDataType = fieldDataType;
}
@Override
public <FD extends AtomicFieldData, IFD extends IndexFieldData<FD>> FD load(final AtomicReaderContext context, final IFD indexFieldData) throws Exception {
final Key key = new Key(this, context.reader().getCoreCacheKey());
//noinspection unchecked
return (FD) cache.get(key, new Callable<AtomicFieldData>() {
@Override
public AtomicFieldData call() throws Exception {
SegmentReaderUtils.registerCoreListener(context.reader(), IndexFieldCache.this);
AtomicFieldData fieldData = indexFieldData.loadDirect(context);
if (indexService != null) {
ShardId shardId = ShardUtils.extractShardId(context.reader());
if (shardId != null) {
IndexShard shard = indexService.shard(shardId.id());
if (shard != null) {
key.listener = shard.fieldData();
}
}
}
if (key.listener != null) {
key.listener.onLoad(fieldNames, fieldDataType, fieldData);
}
return fieldData;
}
});
}
@Override
public void onClose(Object coreKey) {
cache.invalidate(new Key(this, coreKey));
}
@Override
public void clear() {
for (Key key : cache.asMap().keySet()) {
if (key.indexCache.index.equals(index)) {
cache.invalidate(key);
}
}
}
@Override
public void clear(String fieldName) {
for (Key key : cache.asMap().keySet()) {
if (key.indexCache.index.equals(index)) {
if (key.indexCache.fieldNames.fullName().equals(fieldName)) {
cache.invalidate(key);
}
}
}
}
@Override
public void clear(Object coreCacheKey) {
cache.invalidate(new Key(this, coreCacheKey));
}
}
| 1no label
|
src_main_java_org_elasticsearch_indices_fielddata_cache_IndicesFieldDataCache.java
|
458 |
public class PendingClusterTasksResponse extends ActionResponse implements Iterable<PendingClusterTask>, ToXContent {
private List<PendingClusterTask> pendingTasks;
PendingClusterTasksResponse() {
}
PendingClusterTasksResponse(List<PendingClusterTask> pendingTasks) {
this.pendingTasks = pendingTasks;
}
public List<PendingClusterTask> pendingTasks() {
return pendingTasks;
}
/**
* The pending cluster tasks
*/
public List<PendingClusterTask> getPendingTasks() {
return pendingTasks();
}
@Override
public Iterator<PendingClusterTask> iterator() {
return pendingTasks.iterator();
}
public String prettyPrint() {
StringBuilder sb = new StringBuilder();
sb.append("tasks: (").append(pendingTasks.size()).append("):\n");
for (PendingClusterTask pendingClusterTask : this) {
sb.append(pendingClusterTask.getInsertOrder()).append("/").append(pendingClusterTask.getPriority()).append("/").append(pendingClusterTask.getSource()).append("/").append(pendingClusterTask.getTimeInQueue()).append("\n");
}
return sb.toString();
}
@Override
public String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
builder.startObject();
toXContent(builder, EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (IOException e) {
return "{ \"error\" : \"" + e.getMessage() + "\"}";
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startArray(Fields.TASKS);
for (PendingClusterTask pendingClusterTask : this) {
builder.startObject();
builder.field(Fields.INSERT_ORDER, pendingClusterTask.insertOrder());
builder.field(Fields.PRIORITY, pendingClusterTask.priority());
builder.field(Fields.SOURCE, pendingClusterTask.source());
builder.field(Fields.TIME_IN_QUEUE_MILLIS, pendingClusterTask.timeInQueueInMillis());
builder.field(Fields.TIME_IN_QUEUE, pendingClusterTask.getTimeInQueue());
builder.endObject();
}
builder.endArray();
return builder;
}
static final class Fields {
static final XContentBuilderString TASKS = new XContentBuilderString("tasks");
static final XContentBuilderString INSERT_ORDER = new XContentBuilderString("insert_order");
static final XContentBuilderString PRIORITY = new XContentBuilderString("priority");
static final XContentBuilderString SOURCE = new XContentBuilderString("source");
static final XContentBuilderString TIME_IN_QUEUE_MILLIS = new XContentBuilderString("time_in_queue_millis");
static final XContentBuilderString TIME_IN_QUEUE = new XContentBuilderString("time_in_queue");
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
pendingTasks = new ArrayList<PendingClusterTask>(size);
for (int i = 0; i < size; i++) {
PendingClusterTask task = new PendingClusterTask();
task.readFrom(in);
pendingTasks.add(task);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(pendingTasks.size());
for (PendingClusterTask task : pendingTasks) {
task.writeTo(out);
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_tasks_PendingClusterTasksResponse.java
|
1,504 |
public class GroupCountMapReduce {
public static final String KEY_CLOSURE = Tokens.makeNamespace(GroupCountMapReduce.class) + ".keyClosure";
public static final String VALUE_CLOSURE = Tokens.makeNamespace(GroupCountMapReduce.class) + ".valueClosure";
public static final String CLASS = Tokens.makeNamespace(GroupCountMapReduce.class) + ".class";
private static final ScriptEngine engine = new GremlinGroovyScriptEngine();
public enum Counters {
VERTICES_PROCESSED,
OUT_EDGES_PROCESSED
}
public static Configuration createConfiguration(final Class<? extends Element> klass, final String keyClosure, final String valueClosure) {
final Configuration configuration = new EmptyConfiguration();
configuration.setClass(CLASS, klass, Element.class);
if (null != keyClosure)
configuration.set(KEY_CLOSURE, keyClosure);
if (null != valueClosure)
configuration.set(VALUE_CLOSURE, valueClosure);
return configuration;
}
public static class Map extends Mapper<NullWritable, FaunusVertex, Text, LongWritable> {
private Closure keyClosure;
private Closure valueClosure;
private boolean isVertex;
private CounterMap<Object> map;
private int mapSpillOver;
private SafeMapperOutputs outputs;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
Configuration hc = DEFAULT_COMPAT.getContextConfiguration(context);
ModifiableHadoopConfiguration titanConf = ModifiableHadoopConfiguration.of(hc);
try {
this.mapSpillOver = titanConf.get(PIPELINE_MAP_SPILL_OVER);
final String keyClosureString = context.getConfiguration().get(KEY_CLOSURE, null);
if (null == keyClosureString)
this.keyClosure = null;
else
this.keyClosure = (Closure) engine.eval(keyClosureString);
final String valueClosureString = context.getConfiguration().get(VALUE_CLOSURE, null);
if (null == valueClosureString)
this.valueClosure = null;
else
this.valueClosure = (Closure) engine.eval(valueClosureString);
} catch (final ScriptException e) {
throw new IOException(e.getMessage(), e);
}
this.isVertex = context.getConfiguration().getClass(CLASS, Element.class, Element.class).equals(Vertex.class);
this.map = new CounterMap<Object>();
this.outputs = new SafeMapperOutputs(context);
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, Text, LongWritable>.Context context) throws IOException, InterruptedException {
if (this.isVertex) {
if (value.hasPaths()) {
final Object object = (null == this.keyClosure) ? new FaunusVertex.MicroVertex(value.getLongId()) : this.keyClosure.call(value);
final Number number = (null == this.valueClosure) ? 1 : (Number) this.valueClosure.call(value);
this.map.incr(object, number.longValue() * value.pathCount());
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_PROCESSED, 1L);
}
} else {
long edgesProcessed = 0;
for (final Edge e : value.getEdges(Direction.OUT)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
final Object object = (null == this.keyClosure) ? new StandardFaunusEdge.MicroEdge(edge.getLongId()) : this.keyClosure.call(edge);
final Number number = (null == this.valueClosure) ? 1 : (Number) this.valueClosure.call(edge);
this.map.incr(object, number.longValue() * edge.pathCount());
edgesProcessed++;
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.OUT_EDGES_PROCESSED, edgesProcessed);
}
// protected against memory explosion
if (this.map.size() > this.mapSpillOver) {
this.dischargeMap(context);
}
this.outputs.write(Tokens.GRAPH, NullWritable.get(), value);
}
private final Text textWritable = new Text();
private final LongWritable longWritable = new LongWritable();
public void dischargeMap(final Mapper<NullWritable, FaunusVertex, Text, LongWritable>.Context context) throws IOException, InterruptedException {
for (final java.util.Map.Entry<Object, Long> entry : this.map.entrySet()) {
this.textWritable.set(null == entry.getKey() ? Tokens.NULL : entry.getKey().toString());
this.longWritable.set(entry.getValue());
context.write(this.textWritable, this.longWritable);
}
this.map.clear();
}
@Override
public void cleanup(final Mapper<NullWritable, FaunusVertex, Text, LongWritable>.Context context) throws IOException, InterruptedException {
this.dischargeMap(context);
this.outputs.close();
}
}
public static class Combiner extends Reducer<Text, LongWritable, Text, LongWritable> {
private final LongWritable longWritable = new LongWritable();
@Override
public void reduce(final Text key, final Iterable<LongWritable> values, final Reducer<Text, LongWritable, Text, LongWritable>.Context context) throws IOException, InterruptedException {
long totalCount = 0;
for (final LongWritable token : values) {
totalCount = totalCount + token.get();
}
this.longWritable.set(totalCount);
context.write(key, this.longWritable);
}
}
public static class Reduce extends Reducer<Text, LongWritable, Text, LongWritable> {
private SafeReducerOutputs outputs;
@Override
public void setup(final Reducer.Context context) throws IOException, InterruptedException {
this.outputs = new SafeReducerOutputs(context);
}
private final LongWritable longWritable = new LongWritable();
@Override
public void reduce(final Text key, final Iterable<LongWritable> values, final Reducer<Text, LongWritable, Text, LongWritable>.Context context) throws IOException, InterruptedException {
long totalCount = 0;
for (final LongWritable token : values) {
totalCount = totalCount + token.get();
}
this.longWritable.set(totalCount);
this.outputs.write(Tokens.SIDEEFFECT, key, this.longWritable);
}
@Override
public void cleanup(final Reducer<Text, LongWritable, Text, LongWritable>.Context context) throws IOException, InterruptedException {
this.outputs.close();
}
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_sideeffect_GroupCountMapReduce.java
|
203 |
public class XSimpleQueryParser extends QueryBuilder {
static {
assert Version.LUCENE_46.onOrAfter(Lucene.VERSION) : "Lucene 4.7 adds SimpleQueryParser, remove me!";
}
/** Map of fields to query against with their weights */
protected final Map<String,Float> weights;
/** flags to the parser (to turn features on/off) */
protected final int flags;
/** Enables {@code AND} operator (+) */
public static final int AND_OPERATOR = 1<<0;
/** Enables {@code NOT} operator (-) */
public static final int NOT_OPERATOR = 1<<1;
/** Enables {@code OR} operator (|) */
public static final int OR_OPERATOR = 1<<2;
/** Enables {@code PREFIX} operator (*) */
public static final int PREFIX_OPERATOR = 1<<3;
/** Enables {@code PHRASE} operator (") */
public static final int PHRASE_OPERATOR = 1<<4;
/** Enables {@code PRECEDENCE} operators: {@code (} and {@code )} */
public static final int PRECEDENCE_OPERATORS = 1<<5;
/** Enables {@code ESCAPE} operator (\) */
public static final int ESCAPE_OPERATOR = 1<<6;
/** Enables {@code WHITESPACE} operators: ' ' '\n' '\r' '\t' */
public static final int WHITESPACE_OPERATOR = 1<<7;
private BooleanClause.Occur defaultOperator = BooleanClause.Occur.SHOULD;
/** Creates a new parser searching over a single field. */
public XSimpleQueryParser(Analyzer analyzer, String field) {
this(analyzer, Collections.singletonMap(field, 1.0F));
}
/** Creates a new parser searching over multiple fields with different weights. */
public XSimpleQueryParser(Analyzer analyzer, Map<String, Float> weights) {
this(analyzer, weights, -1);
}
/** Creates a new parser with custom flags used to enable/disable certain features. */
public XSimpleQueryParser(Analyzer analyzer, Map<String, Float> weights, int flags) {
super(analyzer);
this.weights = weights;
this.flags = flags;
}
/** Parses the query text and returns parsed query (or null if empty) */
public Query parse(String queryText) {
char data[] = queryText.toCharArray();
char buffer[] = new char[data.length];
State state = new State(data, buffer, 0, data.length);
parseSubQuery(state);
return state.top;
}
private void parseSubQuery(State state) {
while (state.index < state.length) {
if (state.data[state.index] == '(' && (flags & PRECEDENCE_OPERATORS) != 0) {
// the beginning of a subquery has been found
consumeSubQuery(state);
} else if (state.data[state.index] == ')' && (flags & PRECEDENCE_OPERATORS) != 0) {
// this is an extraneous character so it is ignored
++state.index;
} else if (state.data[state.index] == '"' && (flags & PHRASE_OPERATOR) != 0) {
// the beginning of a phrase has been found
consumePhrase(state);
} else if (state.data[state.index] == '+' && (flags & AND_OPERATOR) != 0) {
// an and operation has been explicitly set
// if an operation has already been set this one is ignored
// if a term (or phrase or subquery) has not been found yet the
// operation is also ignored since there is no previous
// term (or phrase or subquery) to and with
if (state.currentOperation == null && state.top != null) {
state.currentOperation = BooleanClause.Occur.MUST;
}
++state.index;
} else if (state.data[state.index] == '|' && (flags & OR_OPERATOR) != 0) {
// an or operation has been explicitly set
// if an operation has already been set this one is ignored
// if a term (or phrase or subquery) has not been found yet the
// operation is also ignored since there is no previous
// term (or phrase or subquery) to or with
if (state.currentOperation == null && state.top != null) {
state.currentOperation = BooleanClause.Occur.SHOULD;
}
++state.index;
} else if (state.data[state.index] == '-' && (flags & NOT_OPERATOR) != 0) {
// a not operator has been found, so increase the not count
// two not operators in a row negate each other
++state.not;
++state.index;
// continue so the not operator is not reset
// before the next character is determined
continue;
} else if ((state.data[state.index] == ' '
|| state.data[state.index] == '\t'
|| state.data[state.index] == '\n'
|| state.data[state.index] == '\r') && (flags & WHITESPACE_OPERATOR) != 0) {
// ignore any whitespace found as it may have already been
// used a delimiter across a term (or phrase or subquery)
// or is simply extraneous
++state.index;
} else {
// the beginning of a token has been found
consumeToken(state);
}
// reset the not operator as even whitespace is not allowed when
// specifying the not operation for a term (or phrase or subquery)
state.not = 0;
}
}
private void consumeSubQuery(State state) {
assert (flags & PRECEDENCE_OPERATORS) != 0;
int start = ++state.index;
int precedence = 1;
boolean escaped = false;
while (state.index < state.length) {
if (!escaped) {
if (state.data[state.index] == '\\' && (flags & ESCAPE_OPERATOR) != 0) {
// an escape character has been found so
// whatever character is next will become
// part of the subquery unless the escape
// character is the last one in the data
escaped = true;
++state.index;
continue;
} else if (state.data[state.index] == '(') {
// increase the precedence as there is a
// subquery in the current subquery
++precedence;
} else if (state.data[state.index] == ')') {
--precedence;
if (precedence == 0) {
// this should be the end of the subquery
// all characters found will used for
// creating the subquery
break;
}
}
}
escaped = false;
++state.index;
}
if (state.index == state.length) {
// a closing parenthesis was never found so the opening
// parenthesis is considered extraneous and will be ignored
state.index = start;
} else if (state.index == start) {
// a closing parenthesis was found immediately after the opening
// parenthesis so the current operation is reset since it would
// have been applied to this subquery
state.currentOperation = null;
++state.index;
} else {
// a complete subquery has been found and is recursively parsed by
// starting over with a new state object
State subState = new State(state.data, state.buffer, start, state.index);
parseSubQuery(subState);
buildQueryTree(state, subState.top);
++state.index;
}
}
private void consumePhrase(State state) {
assert (flags & PHRASE_OPERATOR) != 0;
int start = ++state.index;
int copied = 0;
boolean escaped = false;
while (state.index < state.length) {
if (!escaped) {
if (state.data[state.index] == '\\' && (flags & ESCAPE_OPERATOR) != 0) {
// an escape character has been found so
// whatever character is next will become
// part of the phrase unless the escape
// character is the last one in the data
escaped = true;
++state.index;
continue;
} else if (state.data[state.index] == '"') {
// this should be the end of the phrase
// all characters found will used for
// creating the phrase query
break;
}
}
escaped = false;
state.buffer[copied++] = state.data[state.index++];
}
if (state.index == state.length) {
// a closing double quote was never found so the opening
// double quote is considered extraneous and will be ignored
state.index = start;
} else if (state.index == start) {
// a closing double quote was found immediately after the opening
// double quote so the current operation is reset since it would
// have been applied to this phrase
state.currentOperation = null;
++state.index;
} else {
// a complete phrase has been found and is parsed through
// through the analyzer from the given field
String phrase = new String(state.buffer, 0, copied);
Query branch = newPhraseQuery(phrase);
buildQueryTree(state, branch);
++state.index;
}
}
private void consumeToken(State state) {
int copied = 0;
boolean escaped = false;
boolean prefix = false;
while (state.index < state.length) {
if (!escaped) {
if (state.data[state.index] == '\\' && (flags & ESCAPE_OPERATOR) != 0) {
// an escape character has been found so
// whatever character is next will become
// part of the term unless the escape
// character is the last one in the data
escaped = true;
prefix = false;
++state.index;
continue;
} else if ((state.data[state.index] == '"' && (flags & PHRASE_OPERATOR) != 0)
|| (state.data[state.index] == '|' && (flags & OR_OPERATOR) != 0)
|| (state.data[state.index] == '+' && (flags & AND_OPERATOR) != 0)
|| (state.data[state.index] == '(' && (flags & PRECEDENCE_OPERATORS) != 0)
|| (state.data[state.index] == ')' && (flags & PRECEDENCE_OPERATORS) != 0)
|| ((state.data[state.index] == ' '
|| state.data[state.index] == '\t'
|| state.data[state.index] == '\n'
|| state.data[state.index] == '\r') && (flags & WHITESPACE_OPERATOR) != 0)) {
// this should be the end of the term
// all characters found will used for
// creating the term query
break;
}
// wildcard tracks whether or not the last character
// was a '*' operator that hasn't been escaped
// there must be at least one valid character before
// searching for a prefixed set of terms
prefix = copied > 0 && state.data[state.index] == '*' && (flags & PREFIX_OPERATOR) != 0;
}
escaped = false;
state.buffer[copied++] = state.data[state.index++];
}
if (copied > 0) {
final Query branch;
if (prefix) {
// if a term is found with a closing '*' it is considered to be a prefix query
// and will have prefix added as an option
String token = new String(state.buffer, 0, copied - 1);
branch = newPrefixQuery(token);
} else {
// a standard term has been found so it will be run through
// the entire analysis chain from the specified schema field
String token = new String(state.buffer, 0, copied);
branch = newDefaultQuery(token);
}
buildQueryTree(state, branch);
}
}
// buildQueryTree should be called after a term, phrase, or subquery
// is consumed to be added to our existing query tree
// this method will only add to the existing tree if the branch contained in state is not null
private void buildQueryTree(State state, Query branch) {
if (branch != null) {
// modify our branch to a BooleanQuery wrapper for not
// this is necessary any time a term, phrase, or subquery is negated
if (state.not % 2 == 1) {
BooleanQuery nq = new BooleanQuery();
nq.add(branch, BooleanClause.Occur.MUST_NOT);
nq.add(new MatchAllDocsQuery(), BooleanClause.Occur.SHOULD);
branch = nq;
}
// first term (or phrase or subquery) found and will begin our query tree
if (state.top == null) {
state.top = branch;
} else {
// more than one term (or phrase or subquery) found
// set currentOperation to the default if no other operation is explicitly set
if (state.currentOperation == null) {
state.currentOperation = defaultOperator;
}
// operational change requiring a new parent node
// this occurs if the previous operation is not the same as current operation
// because the previous operation must be evaluated separately to preserve
// the proper precedence and the current operation will take over as the top of the tree
if (state.previousOperation != state.currentOperation) {
BooleanQuery bq = new BooleanQuery();
bq.add(state.top, state.currentOperation);
state.top = bq;
}
// reset all of the state for reuse
((BooleanQuery)state.top).add(branch, state.currentOperation);
state.previousOperation = state.currentOperation;
}
// reset the current operation as it was intended to be applied to
// the incoming term (or phrase or subquery) even if branch was null
// due to other possible errors
state.currentOperation = null;
}
}
/**
* Factory method to generate a standard query (no phrase or prefix operators).
*/
protected Query newDefaultQuery(String text) {
BooleanQuery bq = new BooleanQuery(true);
for (Map.Entry<String,Float> entry : weights.entrySet()) {
Query q = createBooleanQuery(entry.getKey(), text, defaultOperator);
if (q != null) {
q.setBoost(entry.getValue());
bq.add(q, BooleanClause.Occur.SHOULD);
}
}
return simplify(bq);
}
/**
* Factory method to generate a phrase query.
*/
protected Query newPhraseQuery(String text) {
BooleanQuery bq = new BooleanQuery(true);
for (Map.Entry<String,Float> entry : weights.entrySet()) {
Query q = createPhraseQuery(entry.getKey(), text);
if (q != null) {
q.setBoost(entry.getValue());
bq.add(q, BooleanClause.Occur.SHOULD);
}
}
return simplify(bq);
}
/**
* Factory method to generate a prefix query.
*/
protected Query newPrefixQuery(String text) {
BooleanQuery bq = new BooleanQuery(true);
for (Map.Entry<String,Float> entry : weights.entrySet()) {
PrefixQuery prefix = new PrefixQuery(new Term(entry.getKey(), text));
prefix.setBoost(entry.getValue());
bq.add(prefix, BooleanClause.Occur.SHOULD);
}
return simplify(bq);
}
/**
* Helper to simplify boolean queries with 0 or 1 clause
*/
protected Query simplify(BooleanQuery bq) {
if (bq.clauses().isEmpty()) {
return null;
} else if (bq.clauses().size() == 1) {
return bq.clauses().get(0).getQuery();
} else {
return bq;
}
}
/**
* Returns the implicit operator setting, which will be
* either {@code SHOULD} or {@code MUST}.
*/
public BooleanClause.Occur getDefaultOperator() {
return defaultOperator;
}
/**
* Sets the implicit operator setting, which must be
* either {@code SHOULD} or {@code MUST}.
*/
public void setDefaultOperator(BooleanClause.Occur operator) {
if (operator != BooleanClause.Occur.SHOULD && operator != BooleanClause.Occur.MUST) {
throw new IllegalArgumentException("invalid operator: only SHOULD or MUST are allowed");
}
this.defaultOperator = operator;
}
static class State {
final char[] data; // the characters in the query string
final char[] buffer; // a temporary buffer used to reduce necessary allocations
int index;
int length;
BooleanClause.Occur currentOperation;
BooleanClause.Occur previousOperation;
int not;
Query top;
State(char[] data, char[] buffer, int index, int length) {
this.data = data;
this.buffer = buffer;
this.index = index;
this.length = length;
}
}
}
| 0true
|
src_main_java_org_apache_lucene_queryparser_XSimpleQueryParser.java
|
26 |
static final class RunAfterBoth extends Completion {
final CompletableFuture<?> src;
final CompletableFuture<?> snd;
final Runnable fn;
final CompletableFuture<Void> dst;
final Executor executor;
RunAfterBoth(CompletableFuture<?> src,
CompletableFuture<?> snd,
Runnable fn,
CompletableFuture<Void> dst,
Executor executor) {
this.src = src; this.snd = snd;
this.fn = fn; this.dst = dst;
this.executor = executor;
}
public final void run() {
final CompletableFuture<?> a;
final CompletableFuture<?> b;
final Runnable fn;
final CompletableFuture<Void> dst;
Object r, s; Throwable ex;
if ((dst = this.dst) != null &&
(fn = this.fn) != null &&
(a = this.src) != null &&
(r = a.result) != null &&
(b = this.snd) != null &&
(s = b.result) != null &&
compareAndSet(0, 1)) {
if (r instanceof AltResult)
ex = ((AltResult)r).ex;
else
ex = null;
if (ex == null && (s instanceof AltResult))
ex = ((AltResult)s).ex;
Executor e = executor;
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncRun(fn, dst));
else
fn.run();
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(null, ex);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
| 0true
|
src_main_java_jsr166e_CompletableFuture.java
|
76 |
public class ThreadAssociatedWithOtherTransactionException extends IllegalStateException
{
public ThreadAssociatedWithOtherTransactionException( Thread thread, Transaction alreadyAssociatedTx,
Transaction tx )
{
super( "Thread '" + thread.getName() + "' tried to resume " + tx + ", but had already " +
alreadyAssociatedTx + " associated" );
}
}
| 0true
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_ThreadAssociatedWithOtherTransactionException.java
|
7 |
class ClusterInstance
{
private final Executor stateMachineExecutor;
private final Logging logging;
private final MultiPaxosServerFactory factory;
private final ProtocolServer server;
private final MultiPaxosContext ctx;
private final InMemoryAcceptorInstanceStore acceptorInstanceStore;
private final ProverTimeouts timeouts;
private final ClusterInstanceInput input;
private final ClusterInstanceOutput output;
private final URI uri;
public static final Executor DIRECT_EXECUTOR = new Executor()
{
@Override
public void execute( Runnable command )
{
command.run();
}
};
private boolean online = true;
public static ClusterInstance newClusterInstance( InstanceId id, URI uri, ClusterConfiguration configuration, Logging logging )
{
MultiPaxosServerFactory factory = new MultiPaxosServerFactory( configuration, logging);
ClusterInstanceInput input = new ClusterInstanceInput();
ClusterInstanceOutput output = new ClusterInstanceOutput(uri);
ObjectStreamFactory objStreamFactory = new ObjectStreamFactory();
ProverTimeouts timeouts = new ProverTimeouts(uri);
InMemoryAcceptorInstanceStore acceptorInstances = new InMemoryAcceptorInstanceStore();
DelayedDirectExecutor executor = new DelayedDirectExecutor();
final MultiPaxosContext context = new MultiPaxosContext( id,
Iterables.<ElectionRole,ElectionRole>iterable( new ElectionRole( ClusterConfiguration.COORDINATOR ) ),
new ClusterConfiguration( configuration.getName(), logging.getMessagesLog( ClusterConfiguration.class ),
configuration.getMemberURIs() ),
executor, logging, objStreamFactory, objStreamFactory, acceptorInstances, timeouts,
new DefaultElectionCredentialsProvider( id.toIntegerIndex(), new StateVerifierLastTxIdGetter(),
new MemberInfoProvider() ));
context.getClusterContext().setBoundAt( uri );
SnapshotContext snapshotContext = new SnapshotContext( context.getClusterContext(),context.getLearnerContext());
ProtocolServer ps = factory.newProtocolServer( id,
input, output, DIRECT_EXECUTOR, new DelayedDirectExecutor(), timeouts, context, snapshotContext);
return new ClusterInstance( DIRECT_EXECUTOR, logging, factory, ps, context, acceptorInstances, timeouts, input, output, uri );
}
public ClusterInstance( Executor stateMachineExecutor, Logging logging, MultiPaxosServerFactory factory,
ProtocolServer server,
MultiPaxosContext ctx, InMemoryAcceptorInstanceStore acceptorInstanceStore,
ProverTimeouts timeouts, ClusterInstanceInput input, ClusterInstanceOutput output,
URI uri )
{
this.stateMachineExecutor = stateMachineExecutor;
this.logging = logging;
this.factory = factory;
this.server = server;
this.ctx = ctx;
this.acceptorInstanceStore = acceptorInstanceStore;
this.timeouts = timeouts;
this.input = input;
this.output = output;
this.uri = uri;
}
public InstanceId id()
{
return server.getServerId();
}
/** Process a message, returns all messages generated as output. */
public Iterable<Message<? extends MessageType>> process( Message<? extends MessageType> message )
{
if(online)
{
input.process( message );
return output.messages();
}
else
{
return Iterables.empty();
}
}
@Override
public String toString()
{
return "[" + id() + ":" + Iterables.toString( stateMachineStates(), "," ) + "]";
}
private Iterable<String> stateMachineStates()
{
return Iterables.map( new Function<StateMachine, String>()
{
@Override
public String apply( StateMachine stateMachine )
{
return stateMachine.getState().toString();
}
}, server.getStateMachines().getStateMachines() );
}
@Override
public boolean equals( Object o )
{
if ( this == o )
{
return true;
}
if ( o == null || getClass() != o.getClass() )
{
return false;
}
ClusterInstance that = (ClusterInstance) o;
if ( !toString().equals( that.toString() ) )
{
return false;
}
if ( !uri.equals( that.uri ) )
{
return false;
}
// TODO: For now, we only look at the states of the underlying state machines,
// and ignore, at our peril, the MultiPaxosContext as part of this equality checks.
// This means the prover ignores lots of possible paths it could generate, as it considers two
// machines with different multi paxos state potentially equal and will ignore exploring both.
// This should be undone as soon as possible. It's here because we need a better mechanism than
// .equals() to compare that two contexts are the same, which is not yet implemented.
return true;
}
@Override
public int hashCode()
{
return toString().hashCode();
}
private StateMachine snapshotStateMachine( Logging logging, MultiPaxosContext snapshotCtx, StateMachine
stateMachine )
{
// This is done this way because all the state machines are sharing one piece of global state
// (MultiPaxosContext), which is snapshotted as one coherent component. This means the state machines
// cannot snapshot themselves, an external service needs to snapshot the full shared state and then create
// new state machines sharing that state.
Object ctx;
Class<? extends MessageType> msgType = stateMachine.getMessageType();
if(msgType == AtomicBroadcastMessage.class)
{
ctx = snapshotCtx.getAtomicBroadcastContext();
}
else if(msgType == AcceptorMessage.class)
{
ctx = snapshotCtx.getAcceptorContext();
}
else if(msgType == ProposerMessage.class)
{
ctx = snapshotCtx.getProposerContext();
}
else if(msgType == LearnerMessage.class)
{
ctx = snapshotCtx.getLearnerContext();
}
else if(msgType == HeartbeatMessage.class)
{
ctx = snapshotCtx.getHeartbeatContext();
}
else if(msgType == ElectionMessage.class)
{
ctx = snapshotCtx.getElectionContext();
}
else if(msgType == SnapshotMessage.class)
{
ctx = new SnapshotContext( snapshotCtx.getClusterContext(), snapshotCtx.getLearnerContext() );
}
else if(msgType == ClusterMessage.class)
{
ctx = snapshotCtx.getClusterContext();
}
else
{
throw new IllegalArgumentException( "I don't know how to snapshot this state machine: " + stateMachine );
}
return new StateMachine( ctx, stateMachine.getMessageType(), stateMachine.getState(), logging );
}
public ClusterInstance newCopy()
{
// A very invasive method of cloning a protocol server. Nonetheless, since this is mostly an experiment at this
// point, it seems we can refactor later on to have a cleaner clone mechanism.
// Because state machines share state, and are simultaneously conceptually unaware of each other, implementing
// a clean snapshot mechanism is very hard. I've opted for having a dirty one here in the test code rather
// than introducing a hack into the runtime code.
ProverTimeouts timeoutsSnapshot = timeouts.snapshot();
InMemoryAcceptorInstanceStore snapshotAcceptorInstances = acceptorInstanceStore.snapshot();
ClusterInstanceOutput output = new ClusterInstanceOutput(uri);
ClusterInstanceInput input = new ClusterInstanceInput();
DelayedDirectExecutor executor = new DelayedDirectExecutor();
ObjectStreamFactory objectStreamFactory = new ObjectStreamFactory();
MultiPaxosContext snapshotCtx = ctx.snapshot( logging, timeoutsSnapshot, executor, snapshotAcceptorInstances,
objectStreamFactory, objectStreamFactory,
new DefaultElectionCredentialsProvider( server.getServerId().toIntegerIndex(),
new StateVerifierLastTxIdGetter(),
new MemberInfoProvider() ) );
List<StateMachine> snapshotMachines = new ArrayList<>();
for ( StateMachine stateMachine : server.getStateMachines().getStateMachines() )
{
snapshotMachines.add( snapshotStateMachine( logging, snapshotCtx, stateMachine ) );
}
ProtocolServer snapshotProtocolServer = factory.constructSupportingInfrastructureFor( server.getServerId(),
input, output, executor, timeoutsSnapshot, stateMachineExecutor,
snapshotCtx, snapshotMachines.toArray( new StateMachine[snapshotMachines.size()] ) );
return new ClusterInstance( stateMachineExecutor, logging, factory, snapshotProtocolServer, snapshotCtx,
snapshotAcceptorInstances, timeoutsSnapshot, input, output, uri );
}
public URI uri()
{
return uri;
}
public boolean hasPendingTimeouts()
{
return timeouts.hasTimeouts();
}
public ClusterAction popTimeout()
{
return timeouts.pop();
}
/** Make this instance stop responding to calls, and cancel all pending timeouts. */
public void crash()
{
timeouts.cancelAllTimeouts();
this.online = false;
}
private static class ClusterInstanceInput implements MessageSource, MessageProcessor
{
private final List<MessageProcessor> processors = new ArrayList<>();
@Override
public boolean process( Message<? extends MessageType> message )
{
for ( MessageProcessor processor : processors )
{
if(!processor.process( message ))
{
return false;
}
}
return true;
}
@Override
public void addMessageProcessor( MessageProcessor messageProcessor )
{
processors.add( messageProcessor );
}
}
private static class ClusterInstanceOutput implements MessageSender
{
private final List<Message<? extends MessageType>> messages = new ArrayList<>();
private final URI uri;
public ClusterInstanceOutput( URI uri )
{
this.uri = uri;
}
@Override
public boolean process( Message<? extends MessageType> message )
{
messages.add( message.setHeader( Message.FROM, uri.toASCIIString() ) );
return true;
}
@Override
public void process( List<Message<? extends MessageType>> msgList )
{
for ( Message<? extends MessageType> msg : msgList )
{
process( msg );
}
}
public Iterable<Message<? extends MessageType>> messages()
{
return messages;
}
}
static class MemberInfoProvider implements HighAvailabilityMemberInfoProvider
{
@Override
public HighAvailabilityMemberState getHighAvailabilityMemberState()
{
throw new UnsupportedOperationException( "TODO" );
}
}
// TODO: Make this emulate commits happening
static class StateVerifierLastTxIdGetter implements LastTxIdGetter
{
@Override
public long getLastTxId()
{
return 0;
}
}
}
| 1no label
|
enterprise_ha_src_test_java_org_neo4j_ha_correctness_ClusterInstance.java
|
619 |
public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder<IndicesStatsRequest, IndicesStatsResponse, IndicesStatsRequestBuilder> {
public IndicesStatsRequestBuilder(IndicesAdminClient indicesClient) {
super((InternalIndicesAdminClient) indicesClient, new IndicesStatsRequest());
}
/**
* Sets all flags to return all stats.
*/
public IndicesStatsRequestBuilder all() {
request.all();
return this;
}
/**
* Clears all stats.
*/
public IndicesStatsRequestBuilder clear() {
request.clear();
return this;
}
/**
* Document types to return stats for. Mainly affects {@link #setIndexing(boolean)} when
* enabled, returning specific indexing stats for those types.
*/
public IndicesStatsRequestBuilder setTypes(String... types) {
request.types(types);
return this;
}
public IndicesStatsRequestBuilder setGroups(String... groups) {
request.groups(groups);
return this;
}
public IndicesStatsRequestBuilder setDocs(boolean docs) {
request.docs(docs);
return this;
}
public IndicesStatsRequestBuilder setStore(boolean store) {
request.store(store);
return this;
}
public IndicesStatsRequestBuilder setIndexing(boolean indexing) {
request.indexing(indexing);
return this;
}
public IndicesStatsRequestBuilder setGet(boolean get) {
request.get(get);
return this;
}
public IndicesStatsRequestBuilder setSearch(boolean search) {
request.search(search);
return this;
}
public IndicesStatsRequestBuilder setMerge(boolean merge) {
request.merge(merge);
return this;
}
public IndicesStatsRequestBuilder setRefresh(boolean refresh) {
request.refresh(refresh);
return this;
}
public IndicesStatsRequestBuilder setFlush(boolean flush) {
request.flush(flush);
return this;
}
public IndicesStatsRequestBuilder setWarmer(boolean warmer) {
request.warmer(warmer);
return this;
}
public IndicesStatsRequestBuilder setFilterCache(boolean filterCache) {
request.filterCache(filterCache);
return this;
}
public IndicesStatsRequestBuilder setIdCache(boolean idCache) {
request.idCache(idCache);
return this;
}
public IndicesStatsRequestBuilder setFieldData(boolean fieldData) {
request.fieldData(fieldData);
return this;
}
public IndicesStatsRequestBuilder setFieldDataFields(String... fields) {
request.fieldDataFields(fields);
return this;
}
public IndicesStatsRequestBuilder setPercolate(boolean percolate) {
request.percolate(percolate);
return this;
}
public IndicesStatsRequestBuilder setSegments(boolean segments) {
request.segments(segments);
return this;
}
public IndicesStatsRequestBuilder setCompletion(boolean completion) {
request.completion(completion);
return this;
}
public IndicesStatsRequestBuilder setCompletionFields(String... fields) {
request.completionFields(fields);
return this;
}
public IndicesStatsRequestBuilder setTranslog(boolean translog) {
request.translog(translog);
return this;
}
@Override
protected void doExecute(ActionListener<IndicesStatsResponse> listener) {
((IndicesAdminClient) client).stats(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_stats_IndicesStatsRequestBuilder.java
|
548 |
public class GetFieldMappingsAction extends IndicesAction<GetFieldMappingsRequest, GetFieldMappingsResponse, GetFieldMappingsRequestBuilder> {
public static final GetFieldMappingsAction INSTANCE = new GetFieldMappingsAction();
public static final String NAME = "mappings/fields/get";
private GetFieldMappingsAction() {
super(NAME);
}
@Override
public GetFieldMappingsRequestBuilder newRequestBuilder(IndicesAdminClient client) {
return new GetFieldMappingsRequestBuilder((InternalGenericClient) client);
}
@Override
public GetFieldMappingsResponse newResponse() {
return new GetFieldMappingsResponse();
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_mapping_get_GetFieldMappingsAction.java
|
1,508 |
@Component("blCartStateFilter")
/**
* <p>
* This filter should be configured after the BroadleafCommerce CustomerStateFilter listener from Spring Security.
* Retrieves the cart for the current BroadleafCommerce Customer based using the authenticated user OR creates an empty non-modifiable cart and
* stores it in the request.
* </p>
*
* @author bpolster
*/
public class CartStateFilter extends GenericFilterBean implements Ordered {
/** Logger for this class and subclasses */
protected final Log LOG = LogFactory.getLog(getClass());
@Resource(name = "blCartStateRequestProcessor")
protected CartStateRequestProcessor cartStateProcessor;
@Override
@SuppressWarnings("unchecked")
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
cartStateProcessor.process(new ServletWebRequest((HttpServletRequest) request, (HttpServletResponse)response));
chain.doFilter(request, response);
}
@Override
public int getOrder() {
//FilterChainOrder has been dropped from Spring Security 3
//return FilterChainOrder.REMEMBER_ME_FILTER+1;
return 1502;
}
}
| 1no label
|
core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_order_security_CartStateFilter.java
|
33 |
public class ThriftBlueprintsTest extends AbstractCassandraBlueprintsTest {
@Override
public void beforeSuite() {
CassandraStorageSetup.startCleanEmbedded();
}
@Override
protected WriteConfiguration getGraphConfig() {
return CassandraStorageSetup.getCassandraGraphConfiguration(getClass().getSimpleName());
}
@Override
public void extraCleanUp(String uid) throws BackendException {
ModifiableConfiguration mc =
new ModifiableConfiguration(GraphDatabaseConfiguration.ROOT_NS, getGraphConfig(), Restriction.NONE);
StoreManager m = new CassandraThriftStoreManager(mc);
m.clearStorage();
m.close();
}
}
| 0true
|
titan-cassandra_src_test_java_com_thinkaurelius_titan_blueprints_ThriftBlueprintsTest.java
|
180 |
@RunWith(HazelcastSerialClassRunner.class)
@Category(QuickTest.class)
public class ClientAtomicReferenceTest {
static final String name = "test1";
static HazelcastInstance client;
static HazelcastInstance server;
static IAtomicReference<String> clientReference;
static IAtomicReference<String> serverReference;
@BeforeClass
public static void init() {
server = Hazelcast.newHazelcastInstance();
client = HazelcastClient.newHazelcastClient();
clientReference = client.getAtomicReference(name);
serverReference = server.getAtomicReference(name);
}
@AfterClass
public static void destroy() {
client.shutdown();
Hazelcast.shutdownAll();
}
@Before
@After
public void after() {
serverReference.set(null);
}
@Test
public void get() throws Exception {
assertNull(clientReference.get());
serverReference.set("foo");
assertEquals("foo", clientReference.get());
}
@Test
public void isNull() throws Exception {
assertTrue(clientReference.isNull());
serverReference.set("foo");
assertFalse(clientReference.isNull());
}
@Test
public void contains() {
assertTrue(clientReference.contains(null));
assertFalse(clientReference.contains("foo"));
serverReference.set("foo");
assertFalse(clientReference.contains(null));
assertTrue(clientReference.contains("foo"));
assertFalse(clientReference.contains("bar"));
}
@Test
public void set() throws Exception {
clientReference.set(null);
assertTrue(serverReference.isNull());
clientReference.set("foo");
assertEquals("foo", serverReference.get());
clientReference.set("foo");
assertEquals("foo", serverReference.get());
clientReference.set("bar");
assertEquals("bar", serverReference.get());
clientReference.set(null);
assertTrue(serverReference.isNull());
}
@Test
public void clear() throws Exception {
clientReference.clear();
assertTrue(serverReference.isNull());
serverReference.set("foo");
clientReference.clear();
assertTrue(serverReference.isNull());
}
@Test
public void getAndSet() throws Exception {
assertNull(clientReference.getAndSet(null));
assertTrue(serverReference.isNull());
assertNull(clientReference.getAndSet("foo"));
assertEquals("foo", serverReference.get());
assertEquals("foo", clientReference.getAndSet("foo"));
assertEquals("foo", serverReference.get());
assertEquals("foo", clientReference.getAndSet("bar"));
assertEquals("bar", serverReference.get());
assertEquals("bar", clientReference.getAndSet(null));
assertTrue(serverReference.isNull());
}
@Test
public void setAndGet() throws Exception {
assertNull(clientReference.setAndGet(null));
assertTrue(serverReference.isNull());
assertEquals("foo", clientReference.setAndGet("foo"));
assertEquals("foo", serverReference.get());
assertEquals("foo", clientReference.setAndGet("foo"));
assertEquals("foo", serverReference.get());
assertEquals("bar", clientReference.setAndGet("bar"));
assertEquals("bar", serverReference.get());
assertNull(clientReference.setAndGet(null));
assertTrue(serverReference.isNull());
}
@Test
public void compareAndSet() throws Exception {
assertTrue(clientReference.compareAndSet(null, null));
assertTrue(serverReference.isNull());
assertFalse(clientReference.compareAndSet("foo", null));
assertTrue(serverReference.isNull());
assertTrue(clientReference.compareAndSet(null, "foo"));
assertEquals("foo", serverReference.get());
assertTrue(clientReference.compareAndSet("foo", "foo"));
assertEquals("foo", serverReference.get());
assertFalse(clientReference.compareAndSet("bar", "foo"));
assertEquals("foo", serverReference.get());
assertTrue(clientReference.compareAndSet("foo", "bar"));
assertEquals("bar", serverReference.get());
assertTrue(clientReference.compareAndSet("bar", null));
assertNull(serverReference.get());
}
@Test(expected = IllegalArgumentException.class)
public void apply_whenCalledWithNullFunction() {
clientReference.apply(null);
}
@Test
public void apply() {
assertEquals("null",clientReference.apply(new AppendFunction("")));
assertEquals(null,clientReference.get());
clientReference.set("foo");
assertEquals("foobar", clientReference.apply(new AppendFunction("bar")));
assertEquals("foo",clientReference.get());
assertEquals(null, clientReference.apply(new NullFunction()));
assertEquals("foo",clientReference.get());
}
@Test(expected = IllegalArgumentException.class)
public void alter_whenCalledWithNullFunction() {
clientReference.alter(null);
}
@Test
public void alter() {
clientReference.alter(new NullFunction());
assertEquals(null,clientReference.get());
clientReference.set("foo");
clientReference.alter(new AppendFunction("bar"));
assertEquals("foobar",clientReference.get());
clientReference.alter(new NullFunction());
assertEquals(null,clientReference.get());
}
@Test(expected = IllegalArgumentException.class)
public void alterAndGet_whenCalledWithNullFunction() {
clientReference.alterAndGet(null);
}
@Test
public void alterAndGet() {
assertNull(clientReference.alterAndGet(new NullFunction()));
assertEquals(null,clientReference.get());
clientReference.set("foo");
assertEquals("foobar",clientReference.alterAndGet(new AppendFunction("bar")));
assertEquals("foobar",clientReference.get());
assertEquals(null,clientReference.alterAndGet(new NullFunction()));
assertEquals(null,clientReference.get());
}
@Test(expected = IllegalArgumentException.class)
public void getAndAlter_whenCalledWithNullFunction() {
clientReference.alterAndGet(null);
}
@Test
public void getAndAlter() {
assertNull(clientReference.getAndAlter(new NullFunction()));
assertEquals(null,clientReference.get());
clientReference.set("foo");
assertEquals("foo",clientReference.getAndAlter(new AppendFunction("bar")));
assertEquals("foobar",clientReference.get());
assertEquals("foobar",clientReference.getAndAlter(new NullFunction()));
assertEquals(null,clientReference.get());
}
private static class AppendFunction implements IFunction<String,String> {
private String add;
private AppendFunction(String add) {
this.add = add;
}
@Override
public String apply(String input) {
return input+add;
}
}
private static class NullFunction implements IFunction<String,String> {
@Override
public String apply(String input) {
return null;
}
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_atomicreference_ClientAtomicReferenceTest.java
|
234 |
highlighter = new XPostingsHighlighter() {
Iterator<String> valuesIterator = Arrays.asList(firstValue, secondValue, thirdValue).iterator();
Iterator<Integer> offsetsIterator = Arrays.asList(0, firstValue.length() + 1, firstValue.length() + secondValue.length() + 2).iterator();
@Override
protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
return new String[][]{new String[]{valuesIterator.next()}};
}
@Override
protected int getOffsetForCurrentValue(String field, int docId) {
return offsetsIterator.next();
}
@Override
protected BreakIterator getBreakIterator(String field) {
return new WholeBreakIterator();
}
};
| 0true
|
src_test_java_org_apache_lucene_search_postingshighlight_XPostingsHighlighterTests.java
|
3,563 |
public class CompletionFieldMapper extends AbstractFieldMapper<String> {
public static final String CONTENT_TYPE = "completion";
public static class Defaults extends AbstractFieldMapper.Defaults {
public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
static {
FIELD_TYPE.setOmitNorms(true);
FIELD_TYPE.freeze();
}
public static final boolean DEFAULT_PRESERVE_SEPARATORS = true;
public static final boolean DEFAULT_POSITION_INCREMENTS = true;
public static final boolean DEFAULT_HAS_PAYLOADS = false;
public static final int DEFAULT_MAX_INPUT_LENGTH = 50;
}
public static class Fields {
// Mapping field names
public static final String ANALYZER = "analyzer";
public static final ParseField INDEX_ANALYZER = new ParseField("index_analyzer");
public static final ParseField SEARCH_ANALYZER = new ParseField("search_analyzer");
public static final ParseField PRESERVE_SEPARATORS = new ParseField("preserve_separators");
public static final ParseField PRESERVE_POSITION_INCREMENTS = new ParseField("preserve_position_increments");
public static final String PAYLOADS = "payloads";
public static final String TYPE = "type";
public static final ParseField MAX_INPUT_LENGTH = new ParseField("max_input_length", "max_input_len");
// Content field names
public static final String CONTENT_FIELD_NAME_INPUT = "input";
public static final String CONTENT_FIELD_NAME_OUTPUT = "output";
public static final String CONTENT_FIELD_NAME_PAYLOAD = "payload";
public static final String CONTENT_FIELD_NAME_WEIGHT = "weight";
}
public static Set<String> ALLOWED_CONTENT_FIELD_NAMES = Sets.newHashSet(Fields.CONTENT_FIELD_NAME_INPUT,
Fields.CONTENT_FIELD_NAME_OUTPUT, Fields.CONTENT_FIELD_NAME_PAYLOAD, Fields.CONTENT_FIELD_NAME_WEIGHT);
public static class Builder extends AbstractFieldMapper.Builder<Builder, CompletionFieldMapper> {
private boolean preserveSeparators = Defaults.DEFAULT_PRESERVE_SEPARATORS;
private boolean payloads = Defaults.DEFAULT_HAS_PAYLOADS;
private boolean preservePositionIncrements = Defaults.DEFAULT_POSITION_INCREMENTS;
private int maxInputLength = Defaults.DEFAULT_MAX_INPUT_LENGTH;
public Builder(String name) {
super(name, new FieldType(Defaults.FIELD_TYPE));
builder = this;
}
public Builder payloads(boolean payloads) {
this.payloads = payloads;
return this;
}
public Builder preserveSeparators(boolean preserveSeparators) {
this.preserveSeparators = preserveSeparators;
return this;
}
public Builder preservePositionIncrements(boolean preservePositionIncrements) {
this.preservePositionIncrements = preservePositionIncrements;
return this;
}
public Builder maxInputLength(int maxInputLength) {
if (maxInputLength <= 0) {
throw new ElasticsearchIllegalArgumentException(Fields.MAX_INPUT_LENGTH.getPreferredName() + " must be > 0 but was [" + maxInputLength + "]");
}
this.maxInputLength = maxInputLength;
return this;
}
@Override
public CompletionFieldMapper build(Mapper.BuilderContext context) {
return new CompletionFieldMapper(buildNames(context), indexAnalyzer, searchAnalyzer, postingsProvider, similarity, payloads,
preserveSeparators, preservePositionIncrements, maxInputLength, multiFieldsBuilder.build(this, context), copyTo);
}
}
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
CompletionFieldMapper.Builder builder = completionField(name);
for (Map.Entry<String, Object> entry : node.entrySet()) {
String fieldName = entry.getKey();
Object fieldNode = entry.getValue();
if (fieldName.equals("type")) {
continue;
}
if (fieldName.equals("analyzer")) {
NamedAnalyzer analyzer = getNamedAnalyzer(parserContext, fieldNode.toString());
builder.indexAnalyzer(analyzer);
builder.searchAnalyzer(analyzer);
} else if (Fields.INDEX_ANALYZER.match(fieldName)) {
builder.indexAnalyzer(getNamedAnalyzer(parserContext, fieldNode.toString()));
} else if (Fields.SEARCH_ANALYZER.match(fieldName)) {
builder.searchAnalyzer(getNamedAnalyzer(parserContext, fieldNode.toString()));
} else if (fieldName.equals(Fields.PAYLOADS)) {
builder.payloads(Boolean.parseBoolean(fieldNode.toString()));
} else if (Fields.PRESERVE_SEPARATORS.match(fieldName)) {
builder.preserveSeparators(Boolean.parseBoolean(fieldNode.toString()));
} else if (Fields.PRESERVE_POSITION_INCREMENTS.match(fieldName)) {
builder.preservePositionIncrements(Boolean.parseBoolean(fieldNode.toString()));
} else if (Fields.MAX_INPUT_LENGTH.match(fieldName)) {
builder.maxInputLength(Integer.parseInt(fieldNode.toString()));
} else if ("fields".equals(fieldName) || "path".equals(fieldName)) {
parseMultiField(builder, name, node, parserContext, fieldName, fieldNode);
} else {
throw new MapperParsingException("Unknown field [" + fieldName + "]");
}
}
if (builder.searchAnalyzer == null) {
builder.searchAnalyzer(parserContext.analysisService().analyzer("simple"));
}
if (builder.indexAnalyzer == null) {
builder.indexAnalyzer(parserContext.analysisService().analyzer("simple"));
}
// we are just using this as the default to be wrapped by the CompletionPostingsFormatProvider in the SuggesteFieldMapper ctor
builder.postingsFormat(parserContext.postingFormatService().get("default"));
return builder;
}
private NamedAnalyzer getNamedAnalyzer(ParserContext parserContext, String name) {
NamedAnalyzer analyzer = parserContext.analysisService().analyzer(name);
if (analyzer == null) {
throw new ElasticsearchIllegalArgumentException("Can't find default or mapped analyzer with name [" + name + "]");
}
return analyzer;
}
}
private static final BytesRef EMPTY = new BytesRef();
private final CompletionPostingsFormatProvider completionPostingsFormatProvider;
private final AnalyzingCompletionLookupProvider analyzingSuggestLookupProvider;
private final boolean payloads;
private final boolean preservePositionIncrements;
private final boolean preserveSeparators;
private int maxInputLength;
public CompletionFieldMapper(Names names, NamedAnalyzer indexAnalyzer, NamedAnalyzer searchAnalyzer, PostingsFormatProvider postingsProvider, SimilarityProvider similarity, boolean payloads,
boolean preserveSeparators, boolean preservePositionIncrements, int maxInputLength, MultiFields multiFields, CopyTo copyTo) {
super(names, 1.0f, Defaults.FIELD_TYPE, null, indexAnalyzer, searchAnalyzer, postingsProvider, null, similarity, null, null, null, multiFields, copyTo);
analyzingSuggestLookupProvider = new AnalyzingCompletionLookupProvider(preserveSeparators, false, preservePositionIncrements, payloads);
this.completionPostingsFormatProvider = new CompletionPostingsFormatProvider("completion", postingsProvider, analyzingSuggestLookupProvider);
this.preserveSeparators = preserveSeparators;
this.payloads = payloads;
this.preservePositionIncrements = preservePositionIncrements;
this.maxInputLength = maxInputLength;
}
@Override
public PostingsFormatProvider postingsFormatProvider() {
return this.completionPostingsFormatProvider;
}
@Override
public void parse(ParseContext context) throws IOException {
XContentParser parser = context.parser();
XContentParser.Token token = parser.currentToken();
String surfaceForm = null;
BytesRef payload = null;
long weight = -1;
List<String> inputs = Lists.newArrayListWithExpectedSize(4);
if (token == XContentParser.Token.VALUE_STRING) {
inputs.add(parser.text());
multiFields.parse(this, context);
} else {
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
if (!ALLOWED_CONTENT_FIELD_NAMES.contains(currentFieldName)) {
throw new ElasticsearchIllegalArgumentException("Unknown field name[" + currentFieldName + "], must be one of " + ALLOWED_CONTENT_FIELD_NAMES);
}
} else if (Fields.CONTENT_FIELD_NAME_PAYLOAD.equals(currentFieldName)) {
if (!isStoringPayloads()) {
throw new MapperException("Payloads disabled in mapping");
}
if (token == XContentParser.Token.START_OBJECT) {
XContentBuilder payloadBuilder = XContentFactory.contentBuilder(parser.contentType()).copyCurrentStructure(parser);
payload = payloadBuilder.bytes().toBytesRef();
payloadBuilder.close();
} else if (token.isValue()) {
payload = parser.bytesOrNull();
} else {
throw new MapperException("payload doesn't support type " + token);
}
} else if (token == XContentParser.Token.VALUE_STRING) {
if (Fields.CONTENT_FIELD_NAME_OUTPUT.equals(currentFieldName)) {
surfaceForm = parser.text();
}
if (Fields.CONTENT_FIELD_NAME_INPUT.equals(currentFieldName)) {
inputs.add(parser.text());
}
} else if (token == XContentParser.Token.VALUE_NUMBER) {
if (Fields.CONTENT_FIELD_NAME_WEIGHT.equals(currentFieldName)) {
NumberType numberType = parser.numberType();
if (NumberType.LONG != numberType && NumberType.INT != numberType) {
throw new ElasticsearchIllegalArgumentException("Weight must be an integer, but was [" + parser.numberValue() + "]");
}
weight = parser.longValue(); // always parse a long to make sure we don't get the overflow value
if (weight < 0 || weight > Integer.MAX_VALUE) {
throw new ElasticsearchIllegalArgumentException("Weight must be in the interval [0..2147483647], but was [" + weight + "]");
}
}
} else if (token == XContentParser.Token.START_ARRAY) {
if (Fields.CONTENT_FIELD_NAME_INPUT.equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
inputs.add(parser.text());
}
}
}
}
}
payload = payload == null ? EMPTY : payload;
if (surfaceForm == null) { // no surface form use the input
for (String input : inputs) {
BytesRef suggestPayload = analyzingSuggestLookupProvider.buildPayload(new BytesRef(
input), weight, payload);
context.doc().add(getCompletionField(input, suggestPayload));
}
} else {
BytesRef suggestPayload = analyzingSuggestLookupProvider.buildPayload(new BytesRef(
surfaceForm), weight, payload);
for (String input : inputs) {
context.doc().add(getCompletionField(input, suggestPayload));
}
}
}
public Field getCompletionField(String input, BytesRef payload) {
final String originalInput = input;
if (input.length() > maxInputLength) {
final int len = correctSubStringLen(input, Math.min(maxInputLength, input.length()));
input = input.substring(0, len);
}
for (int i = 0; i < input.length(); i++) {
if (isReservedChar(input.charAt(i))) {
throw new ElasticsearchIllegalArgumentException("Illegal input [" + originalInput + "] UTF-16 codepoint [0x"
+ Integer.toHexString((int) input.charAt(i)).toUpperCase(Locale.ROOT)
+ "] at position " + i + " is a reserved character");
}
}
return new SuggestField(names.indexName(), input, this.fieldType, payload, analyzingSuggestLookupProvider);
}
public static int correctSubStringLen(String input, int len) {
if (Character.isHighSurrogate(input.charAt(len - 1))) {
assert input.length() >= len + 1 && Character.isLowSurrogate(input.charAt(len));
return len + 1;
}
return len;
}
public BytesRef buildPayload(BytesRef surfaceForm, long weight, BytesRef payload) throws IOException {
return analyzingSuggestLookupProvider.buildPayload(
surfaceForm, weight, payload);
}
private static final class SuggestField extends Field {
private final BytesRef payload;
private final CompletionTokenStream.ToFiniteStrings toFiniteStrings;
public SuggestField(String name, Reader value, FieldType type, BytesRef payload, CompletionTokenStream.ToFiniteStrings toFiniteStrings) {
super(name, value, type);
this.payload = payload;
this.toFiniteStrings = toFiniteStrings;
}
public SuggestField(String name, String value, FieldType type, BytesRef payload, CompletionTokenStream.ToFiniteStrings toFiniteStrings) {
super(name, value, type);
this.payload = payload;
this.toFiniteStrings = toFiniteStrings;
}
@Override
public TokenStream tokenStream(Analyzer analyzer) throws IOException {
TokenStream ts = super.tokenStream(analyzer);
return new CompletionTokenStream(ts, payload, toFiniteStrings);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(name())
.field(Fields.TYPE, CONTENT_TYPE);
if (indexAnalyzer.name().equals(searchAnalyzer.name())) {
builder.field(Fields.ANALYZER, indexAnalyzer.name());
} else {
builder.field(Fields.INDEX_ANALYZER.getPreferredName(), indexAnalyzer.name())
.field(Fields.SEARCH_ANALYZER.getPreferredName(), searchAnalyzer.name());
}
builder.field(Fields.PAYLOADS, this.payloads);
builder.field(Fields.PRESERVE_SEPARATORS.getPreferredName(), this.preserveSeparators);
builder.field(Fields.PRESERVE_POSITION_INCREMENTS.getPreferredName(), this.preservePositionIncrements);
builder.field(Fields.MAX_INPUT_LENGTH.getPreferredName(), this.maxInputLength);
multiFields.toXContent(builder, params);
return builder.endObject();
}
@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
}
@Override
protected String contentType() {
return CONTENT_TYPE;
}
@Override
public boolean isSortable() {
return false;
}
@Override
public boolean hasDocValues() {
return false;
}
@Override
public FieldType defaultFieldType() {
return Defaults.FIELD_TYPE;
}
@Override
public FieldDataType defaultFieldDataType() {
return null;
}
@Override
public String value(Object value) {
if (value == null) {
return null;
}
return value.toString();
}
public boolean isStoringPayloads() {
return payloads;
}
@Override
public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
super.merge(mergeWith, mergeContext);
CompletionFieldMapper fieldMergeWith = (CompletionFieldMapper) mergeWith;
if (payloads != fieldMergeWith.payloads) {
mergeContext.addConflict("mapper [" + names.fullName() + "] has different payload values");
}
if (preservePositionIncrements != fieldMergeWith.preservePositionIncrements) {
mergeContext.addConflict("mapper [" + names.fullName() + "] has different 'preserve_position_increments' values");
}
if (preserveSeparators != fieldMergeWith.preserveSeparators) {
mergeContext.addConflict("mapper [" + names.fullName() + "] has different 'preserve_separators' values");
}
if (!mergeContext.mergeFlags().simulate()) {
this.maxInputLength = fieldMergeWith.maxInputLength;
}
}
// this should be package private but our tests don't allow it.
public static boolean isReservedChar(char character) {
/* we use 0x001F as a SEP_LABEL in the suggester but we can use the UTF-16 representation since they
* are equivalent. We also don't need to convert the input character to UTF-8 here to check for
* the 0x00 end label since all multi-byte UTF-8 chars start with 0x10 binary so if the UTF-16 CP is == 0x00
* it's the single byte UTF-8 CP */
assert XAnalyzingSuggester.PAYLOAD_SEP == XAnalyzingSuggester.SEP_LABEL; // ensure they are the same!
switch(character) {
case XAnalyzingSuggester.END_BYTE:
case XAnalyzingSuggester.SEP_LABEL:
case XAnalyzingSuggester.HOLE_CHARACTER:
return true;
default:
return false;
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_mapper_core_CompletionFieldMapper.java
|
173 |
public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
/*
* See the internal documentation of class ForkJoinPool for a
* general implementation overview. ForkJoinTasks are mainly
* responsible for maintaining their "status" field amidst relays
* to methods in ForkJoinWorkerThread and ForkJoinPool.
*
* The methods of this class are more-or-less layered into
* (1) basic status maintenance
* (2) execution and awaiting completion
* (3) user-level methods that additionally report results.
* This is sometimes hard to see because this file orders exported
* methods in a way that flows well in javadocs.
*/
/*
* The status field holds run control status bits packed into a
* single int to minimize footprint and to ensure atomicity (via
* CAS). Status is initially zero, and takes on nonnegative
* values until completed, upon which status (anded with
* DONE_MASK) holds value NORMAL, CANCELLED, or EXCEPTIONAL. Tasks
* undergoing blocking waits by other threads have the SIGNAL bit
* set. Completion of a stolen task with SIGNAL set awakens any
* waiters via notifyAll. Even though suboptimal for some
* purposes, we use basic builtin wait/notify to take advantage of
* "monitor inflation" in JVMs that we would otherwise need to
* emulate to avoid adding further per-task bookkeeping overhead.
* We want these monitors to be "fat", i.e., not use biasing or
* thin-lock techniques, so use some odd coding idioms that tend
* to avoid them, mainly by arranging that every synchronized
* block performs a wait, notifyAll or both.
*
* These control bits occupy only (some of) the upper half (16
* bits) of status field. The lower bits are used for user-defined
* tags.
*/
/** The run status of this task */
volatile int status; // accessed directly by pool and workers
static final int DONE_MASK = 0xf0000000; // mask out non-completion bits
static final int NORMAL = 0xf0000000; // must be negative
static final int CANCELLED = 0xc0000000; // must be < NORMAL
static final int EXCEPTIONAL = 0x80000000; // must be < CANCELLED
static final int SIGNAL = 0x00010000; // must be >= 1 << 16
static final int SMASK = 0x0000ffff; // short bits for tags
/**
* Marks completion and wakes up threads waiting to join this
* task.
*
* @param completion one of NORMAL, CANCELLED, EXCEPTIONAL
* @return completion status on exit
*/
private int setCompletion(int completion) {
for (int s;;) {
if ((s = status) < 0)
return s;
if (U.compareAndSwapInt(this, STATUS, s, s | completion)) {
if ((s >>> 16) != 0)
synchronized (this) { notifyAll(); }
return completion;
}
}
}
/**
* Primary execution method for stolen tasks. Unless done, calls
* exec and records status if completed, but doesn't wait for
* completion otherwise.
*
* @return status on exit from this method
*/
final int doExec() {
int s; boolean completed;
if ((s = status) >= 0) {
try {
completed = exec();
} catch (Throwable rex) {
return setExceptionalCompletion(rex);
}
if (completed)
s = setCompletion(NORMAL);
}
return s;
}
/**
* Tries to set SIGNAL status unless already completed. Used by
* ForkJoinPool. Other variants are directly incorporated into
* externalAwaitDone etc.
*
* @return true if successful
*/
final boolean trySetSignal() {
int s = status;
return s >= 0 && U.compareAndSwapInt(this, STATUS, s, s | SIGNAL);
}
/**
* Blocks a non-worker-thread until completion.
* @return status upon completion
*/
private int externalAwaitDone() {
int s;
ForkJoinPool.externalHelpJoin(this);
boolean interrupted = false;
while ((s = status) >= 0) {
if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
synchronized (this) {
if (status >= 0) {
try {
wait();
} catch (InterruptedException ie) {
interrupted = true;
}
}
else
notifyAll();
}
}
}
if (interrupted)
Thread.currentThread().interrupt();
return s;
}
/**
* Blocks a non-worker-thread until completion or interruption.
*/
private int externalInterruptibleAwaitDone() throws InterruptedException {
int s;
if (Thread.interrupted())
throw new InterruptedException();
ForkJoinPool.externalHelpJoin(this);
while ((s = status) >= 0) {
if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
synchronized (this) {
if (status >= 0)
wait();
else
notifyAll();
}
}
}
return s;
}
/**
* Implementation for join, get, quietlyJoin. Directly handles
* only cases of already-completed, external wait, and
* unfork+exec. Others are relayed to ForkJoinPool.awaitJoin.
*
* @return status upon completion
*/
private int doJoin() {
int s; Thread t; ForkJoinWorkerThread wt; ForkJoinPool.WorkQueue w;
return (s = status) < 0 ? s :
((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
(w = (wt = (ForkJoinWorkerThread)t).workQueue).
tryUnpush(this) && (s = doExec()) < 0 ? s :
wt.pool.awaitJoin(w, this) :
externalAwaitDone();
}
/**
* Implementation for invoke, quietlyInvoke.
*
* @return status upon completion
*/
private int doInvoke() {
int s; Thread t; ForkJoinWorkerThread wt;
return (s = doExec()) < 0 ? s :
((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
(wt = (ForkJoinWorkerThread)t).pool.awaitJoin(wt.workQueue, this) :
externalAwaitDone();
}
// Exception table support
/**
* Table of exceptions thrown by tasks, to enable reporting by
* callers. Because exceptions are rare, we don't directly keep
* them with task objects, but instead use a weak ref table. Note
* that cancellation exceptions don't appear in the table, but are
* instead recorded as status values.
*
* Note: These statics are initialized below in static block.
*/
private static final ExceptionNode[] exceptionTable;
private static final ReentrantLock exceptionTableLock;
private static final ReferenceQueue<Object> exceptionTableRefQueue;
/**
* Fixed capacity for exceptionTable.
*/
private static final int EXCEPTION_MAP_CAPACITY = 32;
/**
* Key-value nodes for exception table. The chained hash table
* uses identity comparisons, full locking, and weak references
* for keys. The table has a fixed capacity because it only
* maintains task exceptions long enough for joiners to access
* them, so should never become very large for sustained
* periods. However, since we do not know when the last joiner
* completes, we must use weak references and expunge them. We do
* so on each operation (hence full locking). Also, some thread in
* any ForkJoinPool will call helpExpungeStaleExceptions when its
* pool becomes isQuiescent.
*/
static final class ExceptionNode extends WeakReference<ForkJoinTask<?>> {
final Throwable ex;
ExceptionNode next;
final long thrower; // use id not ref to avoid weak cycles
final int hashCode; // store task hashCode before weak ref disappears
ExceptionNode(ForkJoinTask<?> task, Throwable ex, ExceptionNode next) {
super(task, exceptionTableRefQueue);
this.ex = ex;
this.next = next;
this.thrower = Thread.currentThread().getId();
this.hashCode = System.identityHashCode(task);
}
}
/**
* Records exception and sets status.
*
* @return status on exit
*/
final int recordExceptionalCompletion(Throwable ex) {
int s;
if ((s = status) >= 0) {
int h = System.identityHashCode(this);
final ReentrantLock lock = exceptionTableLock;
lock.lock();
try {
expungeStaleExceptions();
ExceptionNode[] t = exceptionTable;
int i = h & (t.length - 1);
for (ExceptionNode e = t[i]; ; e = e.next) {
if (e == null) {
t[i] = new ExceptionNode(this, ex, t[i]);
break;
}
if (e.get() == this) // already present
break;
}
} finally {
lock.unlock();
}
s = setCompletion(EXCEPTIONAL);
}
return s;
}
/**
* Records exception and possibly propagates.
*
* @return status on exit
*/
private int setExceptionalCompletion(Throwable ex) {
int s = recordExceptionalCompletion(ex);
if ((s & DONE_MASK) == EXCEPTIONAL)
internalPropagateException(ex);
return s;
}
/**
* Hook for exception propagation support for tasks with completers.
*/
void internalPropagateException(Throwable ex) {
}
/**
* Cancels, ignoring any exceptions thrown by cancel. Used during
* worker and pool shutdown. Cancel is spec'ed not to throw any
* exceptions, but if it does anyway, we have no recourse during
* shutdown, so guard against this case.
*/
static final void cancelIgnoringExceptions(ForkJoinTask<?> t) {
if (t != null && t.status >= 0) {
try {
t.cancel(false);
} catch (Throwable ignore) {
}
}
}
/**
* Removes exception node and clears status.
*/
private void clearExceptionalCompletion() {
int h = System.identityHashCode(this);
final ReentrantLock lock = exceptionTableLock;
lock.lock();
try {
ExceptionNode[] t = exceptionTable;
int i = h & (t.length - 1);
ExceptionNode e = t[i];
ExceptionNode pred = null;
while (e != null) {
ExceptionNode next = e.next;
if (e.get() == this) {
if (pred == null)
t[i] = next;
else
pred.next = next;
break;
}
pred = e;
e = next;
}
expungeStaleExceptions();
status = 0;
} finally {
lock.unlock();
}
}
/**
* Returns a rethrowable exception for the given task, if
* available. To provide accurate stack traces, if the exception
* was not thrown by the current thread, we try to create a new
* exception of the same type as the one thrown, but with the
* recorded exception as its cause. If there is no such
* constructor, we instead try to use a no-arg constructor,
* followed by initCause, to the same effect. If none of these
* apply, or any fail due to other exceptions, we return the
* recorded exception, which is still correct, although it may
* contain a misleading stack trace.
*
* @return the exception, or null if none
*/
private Throwable getThrowableException() {
if ((status & DONE_MASK) != EXCEPTIONAL)
return null;
int h = System.identityHashCode(this);
ExceptionNode e;
final ReentrantLock lock = exceptionTableLock;
lock.lock();
try {
expungeStaleExceptions();
ExceptionNode[] t = exceptionTable;
e = t[h & (t.length - 1)];
while (e != null && e.get() != this)
e = e.next;
} finally {
lock.unlock();
}
Throwable ex;
if (e == null || (ex = e.ex) == null)
return null;
if (false && e.thrower != Thread.currentThread().getId()) {
Class<? extends Throwable> ec = ex.getClass();
try {
Constructor<?> noArgCtor = null;
Constructor<?>[] cs = ec.getConstructors();// public ctors only
for (int i = 0; i < cs.length; ++i) {
Constructor<?> c = cs[i];
Class<?>[] ps = c.getParameterTypes();
if (ps.length == 0)
noArgCtor = c;
else if (ps.length == 1 && ps[0] == Throwable.class)
return (Throwable)(c.newInstance(ex));
}
if (noArgCtor != null) {
Throwable wx = (Throwable)(noArgCtor.newInstance());
wx.initCause(ex);
return wx;
}
} catch (Exception ignore) {
}
}
return ex;
}
/**
* Poll stale refs and remove them. Call only while holding lock.
*/
private static void expungeStaleExceptions() {
for (Object x; (x = exceptionTableRefQueue.poll()) != null;) {
if (x instanceof ExceptionNode) {
int hashCode = ((ExceptionNode)x).hashCode;
ExceptionNode[] t = exceptionTable;
int i = hashCode & (t.length - 1);
ExceptionNode e = t[i];
ExceptionNode pred = null;
while (e != null) {
ExceptionNode next = e.next;
if (e == x) {
if (pred == null)
t[i] = next;
else
pred.next = next;
break;
}
pred = e;
e = next;
}
}
}
}
/**
* If lock is available, poll stale refs and remove them.
* Called from ForkJoinPool when pools become quiescent.
*/
static final void helpExpungeStaleExceptions() {
final ReentrantLock lock = exceptionTableLock;
if (lock.tryLock()) {
try {
expungeStaleExceptions();
} finally {
lock.unlock();
}
}
}
/**
* A version of "sneaky throw" to relay exceptions
*/
static void rethrow(final Throwable ex) {
if (ex != null) {
if (ex instanceof Error)
throw (Error)ex;
if (ex instanceof RuntimeException)
throw (RuntimeException)ex;
ForkJoinTask.<RuntimeException>uncheckedThrow(ex);
}
}
/**
* The sneaky part of sneaky throw, relying on generics
* limitations to evade compiler complaints about rethrowing
* unchecked exceptions
*/
@SuppressWarnings("unchecked") static <T extends Throwable>
void uncheckedThrow(Throwable t) throws T {
if (t != null)
throw (T)t; // rely on vacuous cast
}
/**
* Throws exception, if any, associated with the given status.
*/
private void reportException(int s) {
if (s == CANCELLED)
throw new CancellationException();
if (s == EXCEPTIONAL)
rethrow(getThrowableException());
}
// public methods
/**
* Arranges to asynchronously execute this task in the pool the
* current task is running in, if applicable, or using the {@link
* ForkJoinPool#commonPool()} if not {@link #inForkJoinPool}. While
* it is not necessarily enforced, it is a usage error to fork a
* task more than once unless it has completed and been
* reinitialized. Subsequent modifications to the state of this
* task or any data it operates on are not necessarily
* consistently observable by any thread other than the one
* executing it unless preceded by a call to {@link #join} or
* related methods, or a call to {@link #isDone} returning {@code
* true}.
*
* @return {@code this}, to simplify usage
*/
public final ForkJoinTask<V> fork() {
Thread t;
if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)
((ForkJoinWorkerThread)t).workQueue.push(this);
else
ForkJoinPool.common.externalPush(this);
return this;
}
/**
* Returns the result of the computation when it {@link #isDone is
* done}. This method differs from {@link #get()} in that
* abnormal completion results in {@code RuntimeException} or
* {@code Error}, not {@code ExecutionException}, and that
* interrupts of the calling thread do <em>not</em> cause the
* method to abruptly return by throwing {@code
* InterruptedException}.
*
* @return the computed result
*/
public final V join() {
int s;
if ((s = doJoin() & DONE_MASK) != NORMAL)
reportException(s);
return getRawResult();
}
/**
* Commences performing this task, awaits its completion if
* necessary, and returns its result, or throws an (unchecked)
* {@code RuntimeException} or {@code Error} if the underlying
* computation did so.
*
* @return the computed result
*/
public final V invoke() {
int s;
if ((s = doInvoke() & DONE_MASK) != NORMAL)
reportException(s);
return getRawResult();
}
/**
* Forks the given tasks, returning when {@code isDone} holds for
* each task or an (unchecked) exception is encountered, in which
* case the exception is rethrown. If more than one task
* encounters an exception, then this method throws any one of
* these exceptions. If any task encounters an exception, the
* other may be cancelled. However, the execution status of
* individual tasks is not guaranteed upon exceptional return. The
* status of each task may be obtained using {@link
* #getException()} and related methods to check if they have been
* cancelled, completed normally or exceptionally, or left
* unprocessed.
*
* @param t1 the first task
* @param t2 the second task
* @throws NullPointerException if any task is null
*/
public static void invokeAll(ForkJoinTask<?> t1, ForkJoinTask<?> t2) {
int s1, s2;
t2.fork();
if ((s1 = t1.doInvoke() & DONE_MASK) != NORMAL)
t1.reportException(s1);
if ((s2 = t2.doJoin() & DONE_MASK) != NORMAL)
t2.reportException(s2);
}
/**
* Forks the given tasks, returning when {@code isDone} holds for
* each task or an (unchecked) exception is encountered, in which
* case the exception is rethrown. If more than one task
* encounters an exception, then this method throws any one of
* these exceptions. If any task encounters an exception, others
* may be cancelled. However, the execution status of individual
* tasks is not guaranteed upon exceptional return. The status of
* each task may be obtained using {@link #getException()} and
* related methods to check if they have been cancelled, completed
* normally or exceptionally, or left unprocessed.
*
* @param tasks the tasks
* @throws NullPointerException if any task is null
*/
public static void invokeAll(ForkJoinTask<?>... tasks) {
Throwable ex = null;
int last = tasks.length - 1;
for (int i = last; i >= 0; --i) {
ForkJoinTask<?> t = tasks[i];
if (t == null) {
if (ex == null)
ex = new NullPointerException();
}
else if (i != 0)
t.fork();
else if (t.doInvoke() < NORMAL && ex == null)
ex = t.getException();
}
for (int i = 1; i <= last; ++i) {
ForkJoinTask<?> t = tasks[i];
if (t != null) {
if (ex != null)
t.cancel(false);
else if (t.doJoin() < NORMAL)
ex = t.getException();
}
}
if (ex != null)
rethrow(ex);
}
/**
* Forks all tasks in the specified collection, returning when
* {@code isDone} holds for each task or an (unchecked) exception
* is encountered, in which case the exception is rethrown. If
* more than one task encounters an exception, then this method
* throws any one of these exceptions. If any task encounters an
* exception, others may be cancelled. However, the execution
* status of individual tasks is not guaranteed upon exceptional
* return. The status of each task may be obtained using {@link
* #getException()} and related methods to check if they have been
* cancelled, completed normally or exceptionally, or left
* unprocessed.
*
* @param tasks the collection of tasks
* @return the tasks argument, to simplify usage
* @throws NullPointerException if tasks or any element are null
*/
public static <T extends ForkJoinTask<?>> Collection<T> invokeAll(Collection<T> tasks) {
if (!(tasks instanceof RandomAccess) || !(tasks instanceof List<?>)) {
invokeAll(tasks.toArray(new ForkJoinTask<?>[tasks.size()]));
return tasks;
}
@SuppressWarnings("unchecked")
List<? extends ForkJoinTask<?>> ts =
(List<? extends ForkJoinTask<?>>) tasks;
Throwable ex = null;
int last = ts.size() - 1;
for (int i = last; i >= 0; --i) {
ForkJoinTask<?> t = ts.get(i);
if (t == null) {
if (ex == null)
ex = new NullPointerException();
}
else if (i != 0)
t.fork();
else if (t.doInvoke() < NORMAL && ex == null)
ex = t.getException();
}
for (int i = 1; i <= last; ++i) {
ForkJoinTask<?> t = ts.get(i);
if (t != null) {
if (ex != null)
t.cancel(false);
else if (t.doJoin() < NORMAL)
ex = t.getException();
}
}
if (ex != null)
rethrow(ex);
return tasks;
}
/**
* Attempts to cancel execution of this task. This attempt will
* fail if the task has already completed or could not be
* cancelled for some other reason. If successful, and this task
* has not started when {@code cancel} is called, execution of
* this task is suppressed. After this method returns
* successfully, unless there is an intervening call to {@link
* #reinitialize}, subsequent calls to {@link #isCancelled},
* {@link #isDone}, and {@code cancel} will return {@code true}
* and calls to {@link #join} and related methods will result in
* {@code CancellationException}.
*
* <p>This method may be overridden in subclasses, but if so, must
* still ensure that these properties hold. In particular, the
* {@code cancel} method itself must not throw exceptions.
*
* <p>This method is designed to be invoked by <em>other</em>
* tasks. To terminate the current task, you can just return or
* throw an unchecked exception from its computation method, or
* invoke {@link #completeExceptionally}.
*
* @param mayInterruptIfRunning this value has no effect in the
* default implementation because interrupts are not used to
* control cancellation.
*
* @return {@code true} if this task is now cancelled
*/
public boolean cancel(boolean mayInterruptIfRunning) {
return (setCompletion(CANCELLED) & DONE_MASK) == CANCELLED;
}
public final boolean isDone() {
return status < 0;
}
public final boolean isCancelled() {
return (status & DONE_MASK) == CANCELLED;
}
/**
* Returns {@code true} if this task threw an exception or was cancelled.
*
* @return {@code true} if this task threw an exception or was cancelled
*/
public final boolean isCompletedAbnormally() {
return status < NORMAL;
}
/**
* Returns {@code true} if this task completed without throwing an
* exception and was not cancelled.
*
* @return {@code true} if this task completed without throwing an
* exception and was not cancelled
*/
public final boolean isCompletedNormally() {
return (status & DONE_MASK) == NORMAL;
}
/**
* Returns the exception thrown by the base computation, or a
* {@code CancellationException} if cancelled, or {@code null} if
* none or if the method has not yet completed.
*
* @return the exception, or {@code null} if none
*/
public final Throwable getException() {
int s = status & DONE_MASK;
return ((s >= NORMAL) ? null :
(s == CANCELLED) ? new CancellationException() :
getThrowableException());
}
/**
* Completes this task abnormally, and if not already aborted or
* cancelled, causes it to throw the given exception upon
* {@code join} and related operations. This method may be used
* to induce exceptions in asynchronous tasks, or to force
* completion of tasks that would not otherwise complete. Its use
* in other situations is discouraged. This method is
* overridable, but overridden versions must invoke {@code super}
* implementation to maintain guarantees.
*
* @param ex the exception to throw. If this exception is not a
* {@code RuntimeException} or {@code Error}, the actual exception
* thrown will be a {@code RuntimeException} with cause {@code ex}.
*/
public void completeExceptionally(Throwable ex) {
setExceptionalCompletion((ex instanceof RuntimeException) ||
(ex instanceof Error) ? ex :
new RuntimeException(ex));
}
/**
* Completes this task, and if not already aborted or cancelled,
* returning the given value as the result of subsequent
* invocations of {@code join} and related operations. This method
* may be used to provide results for asynchronous tasks, or to
* provide alternative handling for tasks that would not otherwise
* complete normally. Its use in other situations is
* discouraged. This method is overridable, but overridden
* versions must invoke {@code super} implementation to maintain
* guarantees.
*
* @param value the result value for this task
*/
public void complete(V value) {
try {
setRawResult(value);
} catch (Throwable rex) {
setExceptionalCompletion(rex);
return;
}
setCompletion(NORMAL);
}
/**
* Completes this task normally without setting a value. The most
* recent value established by {@link #setRawResult} (or {@code
* null} by default) will be returned as the result of subsequent
* invocations of {@code join} and related operations.
*
* @since 1.8
*/
public final void quietlyComplete() {
setCompletion(NORMAL);
}
/**
* Waits if necessary for the computation to complete, and then
* retrieves its result.
*
* @return the computed result
* @throws CancellationException if the computation was cancelled
* @throws ExecutionException if the computation threw an
* exception
* @throws InterruptedException if the current thread is not a
* member of a ForkJoinPool and was interrupted while waiting
*/
public final V get() throws InterruptedException, ExecutionException {
int s = (Thread.currentThread() instanceof ForkJoinWorkerThread) ?
doJoin() : externalInterruptibleAwaitDone();
Throwable ex;
if ((s &= DONE_MASK) == CANCELLED)
throw new CancellationException();
if (s == EXCEPTIONAL && (ex = getThrowableException()) != null)
throw new ExecutionException(ex);
return getRawResult();
}
/**
* Waits if necessary for at most the given time for the computation
* to complete, and then retrieves its result, if available.
*
* @param timeout the maximum time to wait
* @param unit the time unit of the timeout argument
* @return the computed result
* @throws CancellationException if the computation was cancelled
* @throws ExecutionException if the computation threw an
* exception
* @throws InterruptedException if the current thread is not a
* member of a ForkJoinPool and was interrupted while waiting
* @throws TimeoutException if the wait timed out
*/
public final V get(long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
if (Thread.interrupted())
throw new InterruptedException();
// Messy in part because we measure in nanosecs, but wait in millisecs
int s; long ms;
long ns = unit.toNanos(timeout);
if ((s = status) >= 0 && ns > 0L) {
long deadline = System.nanoTime() + ns;
ForkJoinPool p = null;
ForkJoinPool.WorkQueue w = null;
Thread t = Thread.currentThread();
if (t instanceof ForkJoinWorkerThread) {
ForkJoinWorkerThread wt = (ForkJoinWorkerThread)t;
p = wt.pool;
w = wt.workQueue;
p.helpJoinOnce(w, this); // no retries on failure
}
else
ForkJoinPool.externalHelpJoin(this);
boolean canBlock = false;
boolean interrupted = false;
try {
while ((s = status) >= 0) {
if (w != null && w.qlock < 0)
cancelIgnoringExceptions(this);
else if (!canBlock) {
if (p == null || p.tryCompensate())
canBlock = true;
}
else {
if ((ms = TimeUnit.NANOSECONDS.toMillis(ns)) > 0L &&
U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
synchronized (this) {
if (status >= 0) {
try {
wait(ms);
} catch (InterruptedException ie) {
if (p == null)
interrupted = true;
}
}
else
notifyAll();
}
}
if ((s = status) < 0 || interrupted ||
(ns = deadline - System.nanoTime()) <= 0L)
break;
}
}
} finally {
if (p != null && canBlock)
p.incrementActiveCount();
}
if (interrupted)
throw new InterruptedException();
}
if ((s &= DONE_MASK) != NORMAL) {
Throwable ex;
if (s == CANCELLED)
throw new CancellationException();
if (s != EXCEPTIONAL)
throw new TimeoutException();
if ((ex = getThrowableException()) != null)
throw new ExecutionException(ex);
}
return getRawResult();
}
/**
* Joins this task, without returning its result or throwing its
* exception. This method may be useful when processing
* collections of tasks when some have been cancelled or otherwise
* known to have aborted.
*/
public final void quietlyJoin() {
doJoin();
}
/**
* Commences performing this task and awaits its completion if
* necessary, without returning its result or throwing its
* exception.
*/
public final void quietlyInvoke() {
doInvoke();
}
/**
* Possibly executes tasks until the pool hosting the current task
* {@link ForkJoinPool#isQuiescent is quiescent}. This method may
* be of use in designs in which many tasks are forked, but none
* are explicitly joined, instead executing them until all are
* processed.
*/
public static void helpQuiesce() {
Thread t;
if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) {
ForkJoinWorkerThread wt = (ForkJoinWorkerThread)t;
wt.pool.helpQuiescePool(wt.workQueue);
}
else
ForkJoinPool.quiesceCommonPool();
}
/**
* Resets the internal bookkeeping state of this task, allowing a
* subsequent {@code fork}. This method allows repeated reuse of
* this task, but only if reuse occurs when this task has either
* never been forked, or has been forked, then completed and all
* outstanding joins of this task have also completed. Effects
* under any other usage conditions are not guaranteed.
* This method may be useful when executing
* pre-constructed trees of subtasks in loops.
*
* <p>Upon completion of this method, {@code isDone()} reports
* {@code false}, and {@code getException()} reports {@code
* null}. However, the value returned by {@code getRawResult} is
* unaffected. To clear this value, you can invoke {@code
* setRawResult(null)}.
*/
public void reinitialize() {
if ((status & DONE_MASK) == EXCEPTIONAL)
clearExceptionalCompletion();
else
status = 0;
}
/**
* Returns the pool hosting the current task execution, or null
* if this task is executing outside of any ForkJoinPool.
*
* @see #inForkJoinPool
* @return the pool, or {@code null} if none
*/
public static ForkJoinPool getPool() {
Thread t = Thread.currentThread();
return (t instanceof ForkJoinWorkerThread) ?
((ForkJoinWorkerThread) t).pool : null;
}
/**
* Returns {@code true} if the current thread is a {@link
* ForkJoinWorkerThread} executing as a ForkJoinPool computation.
*
* @return {@code true} if the current thread is a {@link
* ForkJoinWorkerThread} executing as a ForkJoinPool computation,
* or {@code false} otherwise
*/
public static boolean inForkJoinPool() {
return Thread.currentThread() instanceof ForkJoinWorkerThread;
}
/**
* Tries to unschedule this task for execution. This method will
* typically (but is not guaranteed to) succeed if this task is
* the most recently forked task by the current thread, and has
* not commenced executing in another thread. This method may be
* useful when arranging alternative local processing of tasks
* that could have been, but were not, stolen.
*
* @return {@code true} if unforked
*/
public boolean tryUnfork() {
Thread t;
return (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
((ForkJoinWorkerThread)t).workQueue.tryUnpush(this) :
ForkJoinPool.tryExternalUnpush(this));
}
/**
* Returns an estimate of the number of tasks that have been
* forked by the current worker thread but not yet executed. This
* value may be useful for heuristic decisions about whether to
* fork other tasks.
*
* @return the number of tasks
*/
public static int getQueuedTaskCount() {
Thread t; ForkJoinPool.WorkQueue q;
if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)
q = ((ForkJoinWorkerThread)t).workQueue;
else
q = ForkJoinPool.commonSubmitterQueue();
return (q == null) ? 0 : q.queueSize();
}
/**
* Returns an estimate of how many more locally queued tasks are
* held by the current worker thread than there are other worker
* threads that might steal them, or zero if this thread is not
* operating in a ForkJoinPool. This value may be useful for
* heuristic decisions about whether to fork other tasks. In many
* usages of ForkJoinTasks, at steady state, each worker should
* aim to maintain a small constant surplus (for example, 3) of
* tasks, and to process computations locally if this threshold is
* exceeded.
*
* @return the surplus number of tasks, which may be negative
*/
public static int getSurplusQueuedTaskCount() {
return ForkJoinPool.getSurplusQueuedTaskCount();
}
// Extension methods
/**
* Returns the result that would be returned by {@link #join}, even
* if this task completed abnormally, or {@code null} if this task
* is not known to have been completed. This method is designed
* to aid debugging, as well as to support extensions. Its use in
* any other context is discouraged.
*
* @return the result, or {@code null} if not completed
*/
public abstract V getRawResult();
/**
* Forces the given value to be returned as a result. This method
* is designed to support extensions, and should not in general be
* called otherwise.
*
* @param value the value
*/
protected abstract void setRawResult(V value);
/**
* Immediately performs the base action of this task and returns
* true if, upon return from this method, this task is guaranteed
* to have completed normally. This method may return false
* otherwise, to indicate that this task is not necessarily
* complete (or is not known to be complete), for example in
* asynchronous actions that require explicit invocations of
* completion methods. This method may also throw an (unchecked)
* exception to indicate abnormal exit. This method is designed to
* support extensions, and should not in general be called
* otherwise.
*
* @return {@code true} if this task is known to have completed normally
*/
protected abstract boolean exec();
/**
* Returns, but does not unschedule or execute, a task queued by
* the current thread but not yet executed, if one is immediately
* available. There is no guarantee that this task will actually
* be polled or executed next. Conversely, this method may return
* null even if a task exists but cannot be accessed without
* contention with other threads. This method is designed
* primarily to support extensions, and is unlikely to be useful
* otherwise.
*
* @return the next task, or {@code null} if none are available
*/
protected static ForkJoinTask<?> peekNextLocalTask() {
Thread t; ForkJoinPool.WorkQueue q;
if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)
q = ((ForkJoinWorkerThread)t).workQueue;
else
q = ForkJoinPool.commonSubmitterQueue();
return (q == null) ? null : q.peek();
}
/**
* Unschedules and returns, without executing, the next task
* queued by the current thread but not yet executed, if the
* current thread is operating in a ForkJoinPool. This method is
* designed primarily to support extensions, and is unlikely to be
* useful otherwise.
*
* @return the next task, or {@code null} if none are available
*/
protected static ForkJoinTask<?> pollNextLocalTask() {
Thread t;
return ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
((ForkJoinWorkerThread)t).workQueue.nextLocalTask() :
null;
}
/**
* If the current thread is operating in a ForkJoinPool,
* unschedules and returns, without executing, the next task
* queued by the current thread but not yet executed, if one is
* available, or if not available, a task that was forked by some
* other thread, if available. Availability may be transient, so a
* {@code null} result does not necessarily imply quiescence of
* the pool this task is operating in. This method is designed
* primarily to support extensions, and is unlikely to be useful
* otherwise.
*
* @return a task, or {@code null} if none are available
*/
protected static ForkJoinTask<?> pollTask() {
Thread t; ForkJoinWorkerThread wt;
return ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
(wt = (ForkJoinWorkerThread)t).pool.nextTaskFor(wt.workQueue) :
null;
}
// tag operations
/**
* Returns the tag for this task.
*
* @return the tag for this task
* @since 1.8
*/
public final short getForkJoinTaskTag() {
return (short)status;
}
/**
* Atomically sets the tag value for this task.
*
* @param tag the tag value
* @return the previous value of the tag
* @since 1.8
*/
public final short setForkJoinTaskTag(short tag) {
for (int s;;) {
if (U.compareAndSwapInt(this, STATUS, s = status,
(s & ~SMASK) | (tag & SMASK)))
return (short)s;
}
}
/**
* Atomically conditionally sets the tag value for this task.
* Among other applications, tags can be used as visit markers
* in tasks operating on graphs, as in methods that check: {@code
* if (task.compareAndSetForkJoinTaskTag((short)0, (short)1))}
* before processing, otherwise exiting because the node has
* already been visited.
*
* @param e the expected tag value
* @param tag the new tag value
* @return true if successful; i.e., the current value was
* equal to e and is now tag.
* @since 1.8
*/
public final boolean compareAndSetForkJoinTaskTag(short e, short tag) {
for (int s;;) {
if ((short)(s = status) != e)
return false;
if (U.compareAndSwapInt(this, STATUS, s,
(s & ~SMASK) | (tag & SMASK)))
return true;
}
}
/**
* Adaptor for Runnables. This implements RunnableFuture
* to be compliant with AbstractExecutorService constraints
* when used in ForkJoinPool.
*/
static final class AdaptedRunnable<T> extends ForkJoinTask<T>
implements RunnableFuture<T> {
final Runnable runnable;
T result;
AdaptedRunnable(Runnable runnable, T result) {
if (runnable == null) throw new NullPointerException();
this.runnable = runnable;
this.result = result; // OK to set this even before completion
}
public final T getRawResult() { return result; }
public final void setRawResult(T v) { result = v; }
public final boolean exec() { runnable.run(); return true; }
public final void run() { invoke(); }
private static final long serialVersionUID = 5232453952276885070L;
}
/**
* Adaptor for Runnables without results
*/
static final class AdaptedRunnableAction extends ForkJoinTask<Void>
implements RunnableFuture<Void> {
final Runnable runnable;
AdaptedRunnableAction(Runnable runnable) {
if (runnable == null) throw new NullPointerException();
this.runnable = runnable;
}
public final Void getRawResult() { return null; }
public final void setRawResult(Void v) { }
public final boolean exec() { runnable.run(); return true; }
public final void run() { invoke(); }
private static final long serialVersionUID = 5232453952276885070L;
}
/**
* Adaptor for Callables
*/
static final class AdaptedCallable<T> extends ForkJoinTask<T>
implements RunnableFuture<T> {
final Callable<? extends T> callable;
T result;
AdaptedCallable(Callable<? extends T> callable) {
if (callable == null) throw new NullPointerException();
this.callable = callable;
}
public final T getRawResult() { return result; }
public final void setRawResult(T v) { result = v; }
public final boolean exec() {
try {
result = callable.call();
return true;
} catch (Error err) {
throw err;
} catch (RuntimeException rex) {
throw rex;
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
public final void run() { invoke(); }
private static final long serialVersionUID = 2838392045355241008L;
}
/**
* Returns a new {@code ForkJoinTask} that performs the {@code run}
* method of the given {@code Runnable} as its action, and returns
* a null result upon {@link #join}.
*
* @param runnable the runnable action
* @return the task
*/
public static ForkJoinTask<?> adapt(Runnable runnable) {
return new AdaptedRunnableAction(runnable);
}
/**
* Returns a new {@code ForkJoinTask} that performs the {@code run}
* method of the given {@code Runnable} as its action, and returns
* the given result upon {@link #join}.
*
* @param runnable the runnable action
* @param result the result upon completion
* @return the task
*/
public static <T> ForkJoinTask<T> adapt(Runnable runnable, T result) {
return new AdaptedRunnable<T>(runnable, result);
}
/**
* Returns a new {@code ForkJoinTask} that performs the {@code call}
* method of the given {@code Callable} as its action, and returns
* its result upon {@link #join}, translating any checked exceptions
* encountered into {@code RuntimeException}.
*
* @param callable the callable action
* @return the task
*/
public static <T> ForkJoinTask<T> adapt(Callable<? extends T> callable) {
return new AdaptedCallable<T>(callable);
}
// Serialization support
private static final long serialVersionUID = -7721805057305804111L;
/**
* Saves this task to a stream (that is, serializes it).
*
* @serialData the current run status and the exception thrown
* during execution, or {@code null} if none
*/
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
s.defaultWriteObject();
s.writeObject(getException());
}
/**
* Reconstitutes this task from a stream (that is, deserializes it).
*/
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
Object ex = s.readObject();
if (ex != null)
setExceptionalCompletion((Throwable)ex);
}
// Unsafe mechanics
private static final sun.misc.Unsafe U;
private static final long STATUS;
static {
exceptionTableLock = new ReentrantLock();
exceptionTableRefQueue = new ReferenceQueue<Object>();
exceptionTable = new ExceptionNode[EXCEPTION_MAP_CAPACITY];
try {
U = getUnsafe();
Class<?> k = ForkJoinTask.class;
STATUS = U.objectFieldOffset
(k.getDeclaredField("status"));
} catch (Exception e) {
throw new Error(e);
}
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
private static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException tryReflectionInstead) {}
try {
return java.security.AccessController.doPrivileged
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
}
| 0true
|
src_main_java_jsr166y_ForkJoinTask.java
|
3,125 |
class ApplySettings implements IndexSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
long gcDeletesInMillis = settings.getAsTime(INDEX_GC_DELETES, TimeValue.timeValueMillis(InternalEngine.this.gcDeletesInMillis)).millis();
if (gcDeletesInMillis != InternalEngine.this.gcDeletesInMillis) {
logger.info("updating index.gc_deletes from [{}] to [{}]", TimeValue.timeValueMillis(InternalEngine.this.gcDeletesInMillis), TimeValue.timeValueMillis(gcDeletesInMillis));
InternalEngine.this.gcDeletesInMillis = gcDeletesInMillis;
}
final boolean compoundOnFlush = settings.getAsBoolean(INDEX_COMPOUND_ON_FLUSH, InternalEngine.this.compoundOnFlush);
if (compoundOnFlush != InternalEngine.this.compoundOnFlush) {
logger.info("updating {} from [{}] to [{}]", InternalEngine.INDEX_COMPOUND_ON_FLUSH, InternalEngine.this.compoundOnFlush, compoundOnFlush);
InternalEngine.this.compoundOnFlush = compoundOnFlush;
indexWriter.getConfig().setUseCompoundFile(compoundOnFlush);
}
int indexConcurrency = settings.getAsInt(INDEX_INDEX_CONCURRENCY, InternalEngine.this.indexConcurrency);
boolean failOnMergeFailure = settings.getAsBoolean(INDEX_FAIL_ON_MERGE_FAILURE, InternalEngine.this.failOnMergeFailure);
String codecName = settings.get(INDEX_CODEC, InternalEngine.this.codecName);
final boolean codecBloomLoad = settings.getAsBoolean(CodecService.INDEX_CODEC_BLOOM_LOAD, codecService.isLoadBloomFilter());
boolean requiresFlushing = false;
if (indexConcurrency != InternalEngine.this.indexConcurrency ||
!codecName.equals(InternalEngine.this.codecName) ||
failOnMergeFailure != InternalEngine.this.failOnMergeFailure ||
codecBloomLoad != codecService.isLoadBloomFilter()) {
rwl.readLock().lock();
try {
if (indexConcurrency != InternalEngine.this.indexConcurrency) {
logger.info("updating index.index_concurrency from [{}] to [{}]", InternalEngine.this.indexConcurrency, indexConcurrency);
InternalEngine.this.indexConcurrency = indexConcurrency;
// we have to flush in this case, since it only applies on a new index writer
requiresFlushing = true;
}
if (!codecName.equals(InternalEngine.this.codecName)) {
logger.info("updating index.codec from [{}] to [{}]", InternalEngine.this.codecName, codecName);
InternalEngine.this.codecName = codecName;
// we want to flush in this case, so the new codec will be reflected right away...
requiresFlushing = true;
}
if (failOnMergeFailure != InternalEngine.this.failOnMergeFailure) {
logger.info("updating {} from [{}] to [{}]", InternalEngine.INDEX_FAIL_ON_MERGE_FAILURE, InternalEngine.this.failOnMergeFailure, failOnMergeFailure);
InternalEngine.this.failOnMergeFailure = failOnMergeFailure;
}
if (codecBloomLoad != codecService.isLoadBloomFilter()) {
logger.info("updating {} from [{}] to [{}]", CodecService.INDEX_CODEC_BLOOM_LOAD, codecService.isLoadBloomFilter(), codecBloomLoad);
codecService.setLoadBloomFilter(codecBloomLoad);
// we need to flush in this case, to load/unload the bloom filters
requiresFlushing = true;
}
} finally {
rwl.readLock().unlock();
}
if (requiresFlushing) {
flush(new Flush().type(Flush.Type.NEW_WRITER));
}
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_engine_internal_InternalEngine.java
|
420 |
public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder<RestoreSnapshotRequest, RestoreSnapshotResponse, RestoreSnapshotRequestBuilder> {
/**
* Constructs new restore snapshot request builder
*
* @param clusterAdminClient cluster admin client
*/
public RestoreSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient) {
super((InternalClusterAdminClient) clusterAdminClient, new RestoreSnapshotRequest());
}
/**
* Constructs new restore snapshot request builder with specified repository and snapshot names
*
* @param clusterAdminClient cluster admin client
* @param repository reposiory name
* @param name snapshot name
*/
public RestoreSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient, String repository, String name) {
super((InternalClusterAdminClient) clusterAdminClient, new RestoreSnapshotRequest(repository, name));
}
/**
* Sets snapshot name
*
* @param snapshot snapshot name
* @return this builder
*/
public RestoreSnapshotRequestBuilder setSnapshot(String snapshot) {
request.snapshot(snapshot);
return this;
}
/**
* Sets repository name
*
* @param repository repository name
* @return this builder
*/
public RestoreSnapshotRequestBuilder setRepository(String repository) {
request.repository(repository);
return this;
}
/**
* Sets the list of indices that should be restored from snapshot
* <p/>
* The list of indices supports multi-index syntax. For example: "+test*" ,"-test42" will index all indices with
* prefix "test" except index "test42". Aliases are not supported. An empty list or {"_all"} will restore all open
* indices in the snapshot.
*
* @param indices list of indices
* @return this builder
*/
public RestoreSnapshotRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}
/**
* Specifies what type of requested indices to ignore and how to deal with wildcard expressions.
* For example indices that don't exist.
*
* @param indicesOptions the desired behaviour regarding indices to ignore and wildcard indices expressions
* @return this request
*/
public RestoreSnapshotRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
request.indicesOptions(indicesOptions);
return this;
}
/**
* Sets rename pattern that should be applied to restored indices.
* <p/>
* Indices that match the rename pattern will be renamed according to {@link #setRenameReplacement(String)}. The
* rename pattern is applied according to the {@link java.util.regex.Matcher#appendReplacement(StringBuffer, String)}
* The request will fail if two or more indices will be renamed into the same name.
*
* @param renamePattern rename pattern
* @return this builder
*/
public RestoreSnapshotRequestBuilder setRenamePattern(String renamePattern) {
request.renamePattern(renamePattern);
return this;
}
/**
* Sets rename replacement
* <p/>
* See {@link #setRenamePattern(String)} for more information.
*
* @param renameReplacement rename replacement
* @return
*/
public RestoreSnapshotRequestBuilder setRenameReplacement(String renameReplacement) {
request.renameReplacement(renameReplacement);
return this;
}
/**
* Sets repository-specific restore settings.
* <p/>
* See repository documentation for more information.
*
* @param settings repository-specific snapshot settings
* @return this builder
*/
public RestoreSnapshotRequestBuilder setSettings(Settings settings) {
request.settings(settings);
return this;
}
/**
* Sets repository-specific restore settings.
* <p/>
* See repository documentation for more information.
*
* @param settings repository-specific snapshot settings
* @return this builder
*/
public RestoreSnapshotRequestBuilder setSettings(Settings.Builder settings) {
request.settings(settings);
return this;
}
/**
* Sets repository-specific restore settings in JSON, YAML or properties format
* <p/>
* See repository documentation for more information.
*
* @param source repository-specific snapshot settings
* @return this builder
*/
public RestoreSnapshotRequestBuilder setSettings(String source) {
request.settings(source);
return this;
}
/**
* Sets repository-specific restore settings
* <p/>
* See repository documentation for more information.
*
* @param source repository-specific snapshot settings
* @return this builder
*/
public RestoreSnapshotRequestBuilder setSettings(Map<String, Object> source) {
request.settings(source);
return this;
}
/**
* If this parameter is set to true the operation will wait for completion of restore process before returning.
*
* @param waitForCompletion if true the operation will wait for completion
* @return this builder
*/
public RestoreSnapshotRequestBuilder setWaitForCompletion(boolean waitForCompletion) {
request.waitForCompletion(waitForCompletion);
return this;
}
/**
* If set to true the restore procedure will restore global cluster state.
* <p/>
* The global cluster state includes persistent settings and index template definitions.
*
* @param restoreGlobalState true if global state should be restored from the snapshot
* @return this request
*/
public RestoreSnapshotRequestBuilder setRestoreGlobalState(boolean restoreGlobalState) {
request.includeGlobalState(restoreGlobalState);
return this;
}
@Override
protected void doExecute(ActionListener<RestoreSnapshotResponse> listener) {
((ClusterAdminClient) client).restoreSnapshot(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_snapshots_restore_RestoreSnapshotRequestBuilder.java
|
3,472 |
public class CacheMapLoader implements MapStore, MapLoaderLifecycleSupport {
private String type;
public void init(final HazelcastInstance hazelcastInstance,
final Properties properties, final String mapName) {
type = mapName;
}
public void destroy() {
}
public Object load(final Object key) {
return type + ":" + key;
}
public Map loadAll(final Collection keys) {
return null;
}
public Set loadAllKeys() {
return null;
}
@Override
public void store(Object key, Object value) {
//To change body of implemented methods use File | Settings | File Templates.
}
@Override
public void storeAll(Map map) {
//To change body of implemented methods use File | Settings | File Templates.
}
@Override
public void delete(Object key) {
//To change body of implemented methods use File | Settings | File Templates.
}
@Override
public void deleteAll(Collection keys) {
//To change body of implemented methods use File | Settings | File Templates.
}
}
| 1no label
|
hazelcast-spring_src_test_java_com_hazelcast_spring_cache_CacheMapLoader.java
|
168 |
public abstract class SpeedTestMonoThread extends SpeedTestAbstract {
protected SpeedTestMonoThread() {
}
protected SpeedTestMonoThread(final long iCycles) {
super(iCycles);
}
}
| 0true
|
commons_src_test_java_com_orientechnologies_common_test_SpeedTestMonoThread.java
|
192 |
public interface CLibrary {
void memoryMove(long src, long dest, long len);
}
| 0true
|
nativeos_src_main_java_com_orientechnologies_nio_CLibrary.java
|
1,158 |
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_AMOUNT_ITEM")
@AdminPresentationMergeOverrides(
{
@AdminPresentationMergeOverride(name = "", mergeEntries =
@AdminPresentationMergeEntry(propertyType = PropertyType.AdminPresentation.READONLY,
booleanOverrideValue = true))
}
)
public class AmountItemImpl implements AmountItem, CurrencyCodeIdentifiable {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "AmountItemId")
@GenericGenerator(
name="AmountItemId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="AmountItemImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.core.payment.domain.AmountItemImpl")
}
)
@Column(name = "AMOUNT_ITEM_ID")
protected Long id;
@Column(name = "SHORT_DESCRIPTION", nullable=true)
@Index(name="SHORT_DESCRIPTION_INDEX", columnNames={"SHORT_DESCRIPTION"})
@AdminPresentation(friendlyName = "AmountItemImpl_Short_Description", order=1000, prominent=true, gridOrder = 1000)
protected String shortDescription;
@Column(name = "DESCRIPTION")
@AdminPresentation(friendlyName = "AmountItemImpl_Description", order=2000)
protected String description;
@Column(name = "UNIT_PRICE", nullable=false, precision=19, scale=5)
@AdminPresentation(friendlyName = "AmountItemImpl_Unit_Price", order=3000, gridOrder = 2000, prominent=true, fieldType=
SupportedFieldType.MONEY)
protected BigDecimal unitPrice;
@Column(name = "QUANTITY", nullable=false)
@AdminPresentation(friendlyName = "AmountItemImpl_Quantity", order=4000, prominent=true, gridOrder = 3000)
protected Long quantity;
@Column(name = "SYSTEM_ID")
@AdminPresentation(friendlyName = "AmountItemImpl_SystemId", order=5000)
protected String systemId;
@ManyToOne(targetEntity = PaymentInfoImpl.class, optional = true)
@JoinColumn(name = "PAYMENT_ID")
@Index(name="AMOUNTITEM_PAYMENTINFO_INDEX", columnNames={"PAYMENT_ID"})
@AdminPresentation(excluded = true)
protected PaymentInfo paymentInfo;
/* (non-Javadoc)
* @see org.broadleafcommerce.core.payment.domain.AmountItem#getId()
*/
@Override
public Long getId() {
return id;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.payment.domain.AmountItem#setId(java.lang.Long)
*/
@Override
public void setId(Long id) {
this.id = id;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.payment.domain.AmountItem#getShortDescription()
*/
@Override
public String getShortDescription() {
return shortDescription;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.payment.domain.AmountItem#setShortDescription(java.lang.String)
*/
@Override
public void setShortDescription(String shortDescription) {
this.shortDescription = shortDescription;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.payment.domain.AmountItem#getDescription()
*/
@Override
public String getDescription() {
return description;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.payment.domain.AmountItem#setDescription(java.lang.String)
*/
@Override
public void setDescription(String description) {
this.description = description;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.payment.domain.AmountItem#getUnitPrice()
*/
@Override
public BigDecimal getUnitPrice() {
return unitPrice;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.payment.domain.AmountItem#setUnitPrice(java.math.BigDecimal)
*/
@Override
public void setUnitPrice(BigDecimal unitPrice) {
this.unitPrice = unitPrice;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.payment.domain.AmountItem#getQuantity()
*/
@Override
public Long getQuantity() {
return quantity;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.payment.domain.AmountItem#setQuantity(java.lang.Long)
*/
@Override
public void setQuantity(Long quantity) {
this.quantity = quantity;
}
@Override
public PaymentInfo getPaymentInfo() {
return paymentInfo;
}
@Override
public void setPaymentInfo(PaymentInfo paymentInfo) {
this.paymentInfo = paymentInfo;
}
@Override
public String getSystemId() {
return systemId;
}
@Override
public void setSystemId(String systemId) {
this.systemId = systemId;
}
@Override
public String getCurrencyCode() {
return ((CurrencyCodeIdentifiable) paymentInfo).getCurrencyCode();
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((description == null) ? 0 : description.hashCode());
result = prime * result + ((id == null) ? 0 : id.hashCode());
result = prime * result + ((quantity == null) ? 0 : quantity.hashCode());
result = prime * result + ((shortDescription == null) ? 0 : shortDescription.hashCode());
result = prime * result + ((unitPrice == null) ? 0 : unitPrice.hashCode());
result = prime * result + ((systemId == null) ? 0 : systemId.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
AmountItemImpl other = (AmountItemImpl) obj;
if (id != null && other.id != null) {
return id.equals(other.id);
}
if (description == null) {
if (other.description != null)
return false;
} else if (!description.equals(other.description))
return false;
if (quantity == null) {
if (other.quantity != null)
return false;
} else if (!quantity.equals(other.quantity))
return false;
if (shortDescription == null) {
if (other.shortDescription != null)
return false;
} else if (!shortDescription.equals(other.shortDescription))
return false;
if (unitPrice == null) {
if (other.unitPrice != null)
return false;
} else if (!unitPrice.equals(other.unitPrice))
return false;
if (systemId == null) {
if (other.systemId != null)
return false;
} else if (!systemId.equals(other.systemId))
return false;
return true;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_payment_domain_AmountItemImpl.java
|
993 |
@Entity
@DiscriminatorColumn(name = "TYPE")
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_FULFILLMENT_GROUP_FEE")
@Cache(usage = CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region = "blOrderElements")
@AdminPresentationMergeOverrides(
{
@AdminPresentationMergeOverride(name = "", mergeEntries =
@AdminPresentationMergeEntry(propertyType = PropertyType.AdminPresentation.READONLY,
booleanOverrideValue = true))
}
)
public class FulfillmentGroupFeeImpl implements FulfillmentGroupFee, CurrencyCodeIdentifiable {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "FulfillmentGroupFeeId")
@GenericGenerator(
name="FulfillmentGroupFeeId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="FulfillmentGroupFeeImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.core.order.domain.FulfillmentGroupFeeImpl")
}
)
@Column(name = "FULFILLMENT_GROUP_FEE_ID")
protected Long id;
@ManyToOne(targetEntity = FulfillmentGroupImpl.class, optional = false)
@JoinColumn(name = "FULFILLMENT_GROUP_ID")
protected FulfillmentGroup fulfillmentGroup;
@Column(name = "AMOUNT", precision=19, scale=5)
@AdminPresentation(friendlyName = "FulfillmentGroupFeeImpl_Amount", prominent = true, gridOrder = 2000, order = 2000)
protected BigDecimal amount;
@Column(name = "NAME")
@AdminPresentation(friendlyName = "FulfillmentGroupFeeImpl_Name", prominent = true, gridOrder = 1000, order = 1000)
protected String name;
@Column(name = "REPORTING_CODE")
@AdminPresentation(friendlyName = "FulfillmentGroupFeeImpl_Reporting_Code", order = 3000)
protected String reportingCode;
@Column(name = "FEE_TAXABLE_FLAG")
@AdminPresentation(friendlyName = "FulfillmentGroupFeeImpl_Taxable", order = 5000)
protected Boolean feeTaxable = false;
@OneToMany(fetch = FetchType.LAZY, targetEntity = TaxDetailImpl.class, cascade = {CascadeType.ALL})
@JoinTable(name = "BLC_FG_FEE_TAX_XREF", joinColumns = @JoinColumn(name = "FULFILLMENT_GROUP_FEE_ID"), inverseJoinColumns = @JoinColumn(name = "TAX_DETAIL_ID"))
@Cascade(value={org.hibernate.annotations.CascadeType.ALL, org.hibernate.annotations.CascadeType.DELETE_ORPHAN})
@Cache(usage=CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blOrderElements")
protected List<TaxDetail> taxes = new ArrayList<TaxDetail>();
@Column(name = "TOTAL_FEE_TAX", precision=19, scale=5)
@AdminPresentation(friendlyName = "FulfillmentGroupFeeImpl_Total_Fee_Tax", order=4000, fieldType=SupportedFieldType.MONEY)
protected BigDecimal totalTax;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public FulfillmentGroup getFulfillmentGroup() {
return fulfillmentGroup;
}
@Override
public void setFulfillmentGroup(FulfillmentGroup fulfillmentGroup) {
this.fulfillmentGroup = fulfillmentGroup;
}
@Override
public Money getAmount() {
return amount == null ? null : BroadleafCurrencyUtils.getMoney(amount, getFulfillmentGroup().getOrder().getCurrency());
}
@Override
public void setAmount(Money amount) {
this.amount = Money.toAmount(amount);
}
@Override
public String getName() {
return name;
}
@Override
public void setName(String name) {
this.name = name;
}
@Override
public String getReportingCode() {
return reportingCode;
}
@Override
public void setReportingCode(String reportingCode) {
this.reportingCode = reportingCode;
}
@Override
public Boolean isTaxable() {
return feeTaxable == null ? true : feeTaxable;
}
@Override
public void setTaxable(Boolean taxable) {
this.feeTaxable = taxable;
}
@Override
public List<TaxDetail> getTaxes() {
return taxes;
}
@Override
public void setTaxes(List<TaxDetail> taxes) {
this.taxes = taxes;
}
@Override
public Money getTotalTax() {
return totalTax == null ? null : BroadleafCurrencyUtils.getMoney(totalTax, getFulfillmentGroup().getOrder().getCurrency());
}
@Override
public void setTotalTax(Money totalTax) {
this.totalTax = Money.toAmount(totalTax);
}
@Override
public String getCurrencyCode() {
return ((CurrencyCodeIdentifiable) fulfillmentGroup).getCurrencyCode();
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((amount == null) ? 0 : amount.hashCode());
result = prime * result + ((fulfillmentGroup == null) ? 0 : fulfillmentGroup.hashCode());
result = prime * result + ((id == null) ? 0 : id.hashCode());
result = prime * result + ((name == null) ? 0 : name.hashCode());
result = prime * result + ((reportingCode == null) ? 0 : reportingCode.hashCode());
result = prime * result + ((taxes == null) ? 0 : taxes.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
FulfillmentGroupFeeImpl other = (FulfillmentGroupFeeImpl) obj;
if (amount == null) {
if (other.amount != null) {
return false;
}
} else if (!amount.equals(other.amount)) {
return false;
}
if (fulfillmentGroup == null) {
if (other.fulfillmentGroup != null) {
return false;
}
} else if (!fulfillmentGroup.equals(other.fulfillmentGroup)) {
return false;
}
if (id == null) {
if (other.id != null) {
return false;
}
} else if (!id.equals(other.id)) {
return false;
}
if (name == null) {
if (other.name != null) {
return false;
}
} else if (!name.equals(other.name)) {
return false;
}
if (reportingCode == null) {
if (other.reportingCode != null) {
return false;
}
} else if (!reportingCode.equals(other.reportingCode)) {
return false;
}
return true;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_domain_FulfillmentGroupFeeImpl.java
|
719 |
public class DeleteRequest extends ShardReplicationOperationRequest<DeleteRequest> {
private String type;
private String id;
@Nullable
private String routing;
private boolean refresh;
private long version;
private VersionType versionType = VersionType.INTERNAL;
/**
* Constructs a new delete request against the specified index. The {@link #type(String)} and {@link #id(String)}
* must be set.
*/
public DeleteRequest(String index) {
this.index = index;
}
/**
* Constructs a new delete request against the specified index with the type and id.
*
* @param index The index to get the document from
* @param type The type of the document
* @param id The id of the document
*/
public DeleteRequest(String index, String type, String id) {
this.index = index;
this.type = type;
this.id = id;
}
public DeleteRequest(DeleteRequest request) {
super(request);
this.type = request.type();
this.id = request.id();
this.routing = request.routing();
this.refresh = request.refresh();
this.version = request.version();
this.versionType = request.versionType();
}
public DeleteRequest() {
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = super.validate();
if (type == null) {
validationException = addValidationError("type is missing", validationException);
}
if (id == null) {
validationException = addValidationError("id is missing", validationException);
}
return validationException;
}
/**
* The type of the document to delete.
*/
public String type() {
return type;
}
/**
* Sets the type of the document to delete.
*/
public DeleteRequest type(String type) {
this.type = type;
return this;
}
/**
* The id of the document to delete.
*/
public String id() {
return id;
}
/**
* Sets the id of the document to delete.
*/
public DeleteRequest id(String id) {
this.id = id;
return this;
}
/**
* Sets the parent id of this document. Will simply set the routing to this value, as it is only
* used for routing with delete requests.
*/
public DeleteRequest parent(String parent) {
if (routing == null) {
routing = parent;
}
return this;
}
/**
* Controls the shard routing of the request. Using this value to hash the shard
* and not the id.
*/
public DeleteRequest routing(String routing) {
if (routing != null && routing.length() == 0) {
this.routing = null;
} else {
this.routing = routing;
}
return this;
}
/**
* Controls the shard routing of the delete request. Using this value to hash the shard
* and not the id.
*/
public String routing() {
return this.routing;
}
/**
* Should a refresh be executed post this index operation causing the operation to
* be searchable. Note, heavy indexing should not set this to <tt>true</tt>. Defaults
* to <tt>false</tt>.
*/
public DeleteRequest refresh(boolean refresh) {
this.refresh = refresh;
return this;
}
public boolean refresh() {
return this.refresh;
}
/**
* Sets the version, which will cause the delete operation to only be performed if a matching
* version exists and no changes happened on the doc since then.
*/
public DeleteRequest version(long version) {
this.version = version;
return this;
}
public long version() {
return this.version;
}
public DeleteRequest versionType(VersionType versionType) {
this.versionType = versionType;
return this;
}
public VersionType versionType() {
return this.versionType;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
type = in.readSharedString();
id = in.readString();
routing = in.readOptionalString();
refresh = in.readBoolean();
version = in.readLong();
versionType = VersionType.fromValue(in.readByte());
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeSharedString(type);
out.writeString(id);
out.writeOptionalString(routing());
out.writeBoolean(refresh);
out.writeLong(version);
out.writeByte(versionType.getValue());
}
@Override
public String toString() {
return "delete {[" + index + "][" + type + "][" + id + "]}";
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_delete_DeleteRequest.java
|
705 |
class TransportHandler extends BaseTransportRequestHandler<BulkRequest> {
@Override
public BulkRequest newInstance() {
return new BulkRequest();
}
@Override
public void messageReceived(final BulkRequest request, final TransportChannel channel) throws Exception {
// no need to use threaded listener, since we just send a response
request.listenerThreaded(false);
execute(request, new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse result) {
try {
channel.sendResponse(result);
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(e);
} catch (Exception e1) {
logger.warn("Failed to send error response for action [" + BulkAction.NAME + "] and request [" + request + "]", e1);
}
}
});
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
}
| 0true
|
src_main_java_org_elasticsearch_action_bulk_TransportBulkAction.java
|
228 |
@Repository("blModuleConfigurationDao")
public class ModuleConfigurationDaoImpl implements ModuleConfigurationDao {
@PersistenceContext(unitName = "blPU")
protected EntityManager em;
@Resource(name = "blEntityConfiguration")
protected EntityConfiguration entityConfiguration;
protected Long currentDateResolution = 10000L;
protected Date cachedDate = SystemTime.asDate();
protected Date getCurrentDateAfterFactoringInDateResolution() {
Date returnDate = SystemTime.getCurrentDateWithinTimeResolution(cachedDate, currentDateResolution);
if (returnDate != cachedDate) {
if (SystemTime.shouldCacheDate()) {
cachedDate = returnDate;
}
}
return returnDate;
}
@Override
public ModuleConfiguration readById(Long id) {
return em.find(AbstractModuleConfiguration.class, id);
}
@Override
public ModuleConfiguration save(ModuleConfiguration config) {
if (config.getIsDefault()) {
Query batchUpdate = em.createNamedQuery("BC_BATCH_UPDATE_MODULE_CONFIG_DEFAULT");
batchUpdate.setParameter("configType", config.getModuleConfigurationType().getType());
batchUpdate.executeUpdate();
}
return em.merge(config);
}
@Override
public void delete(ModuleConfiguration config) {
((Status) config).setArchived('Y');
em.merge(config);
}
@SuppressWarnings("unchecked")
@Override
public List<ModuleConfiguration> readAllByType(ModuleConfigurationType type) {
Query query = em.createNamedQuery("BC_READ_MODULE_CONFIG_BY_TYPE");
query.setParameter("configType", type.getType());
query.setHint(QueryHints.CACHEABLE, true);
return query.getResultList();
}
@SuppressWarnings("unchecked")
@Override
public List<ModuleConfiguration> readActiveByType(ModuleConfigurationType type) {
Query query = em.createNamedQuery("BC_READ_ACTIVE_MODULE_CONFIG_BY_TYPE");
query.setParameter("configType", type.getType());
Date myDate = getCurrentDateAfterFactoringInDateResolution();
query.setParameter("currentDate", myDate);
query.setHint(QueryHints.CACHEABLE, true);
return query.getResultList();
}
@SuppressWarnings("unchecked")
@Override
public List<ModuleConfiguration> readByType(Class<? extends ModuleConfiguration> type) {
//TODO change this to a JPA criteria expression
Query query = em.createQuery("SELECT config FROM " + type.getName() + " config");
query.setHint(QueryHints.CACHEABLE, true);
return query.getResultList();
}
@Override
public Long getCurrentDateResolution() {
return currentDateResolution;
}
@Override
public void setCurrentDateResolution(Long currentDateResolution) {
this.currentDateResolution = currentDateResolution;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_config_dao_ModuleConfigurationDaoImpl.java
|
102 |
public class TestTransactionImpl
{
@Test
public void shouldBeAbleToAccessAllExceptionsOccurringInSynchronizationsBeforeCompletion()
throws IllegalStateException, RollbackException
{
TxManager mockedTxManager = mock( TxManager.class );
TransactionImpl tx = new TransactionImpl( getNewGlobalId( DEFAULT_SEED, 0 ), mockedTxManager, ForceMode.forced,
TransactionStateFactory.noStateFactory( new DevNullLoggingService() ),
new SystemOutLogging().getMessagesLog( TxManager.class ) );
// Evil synchronizations
final RuntimeException firstException = new RuntimeException( "Ex1" );
Synchronization meanSync1 = new Synchronization()
{
@Override
public void beforeCompletion()
{
throw firstException;
}
@Override
public void afterCompletion( int status )
{
}
};
final RuntimeException secondException = new RuntimeException( "Ex1" );
Synchronization meanSync2 = new Synchronization()
{
@Override
public void beforeCompletion()
{
throw secondException;
}
@Override
public void afterCompletion( int status )
{
}
};
tx.registerSynchronization( meanSync1 );
tx.registerSynchronization( meanSync2 );
tx.doBeforeCompletion();
assertThat( tx.getRollbackCause(),
is( instanceOf( MultipleCauseException.class ) ) );
MultipleCauseException error = (MultipleCauseException) tx.getRollbackCause();
assertThat( error.getCause(), is( (Throwable) firstException ) );
assertThat( error.getCauses().size(), is( 2 ) );
assertThat( error.getCauses().get( 0 ), is( (Throwable) firstException ) );
assertThat( error.getCauses().get( 1 ), is( (Throwable) secondException ) );
}
@Test
public void shouldNotThrowMultipleCauseIfOnlyOneErrorOccursInBeforeCompletion() throws IllegalStateException,
RollbackException
{
TxManager mockedTxManager = mock( TxManager.class );
TransactionImpl tx = new TransactionImpl( getNewGlobalId( DEFAULT_SEED, 0 ), mockedTxManager, ForceMode.forced,
TransactionStateFactory.noStateFactory( new DevNullLoggingService() ),
new SystemOutLogging().getMessagesLog( TxManager.class ) );
// Evil synchronizations
final RuntimeException firstException = new RuntimeException( "Ex1" );
Synchronization meanSync1 = new Synchronization()
{
@Override
public void beforeCompletion()
{
throw firstException;
}
@Override
public void afterCompletion( int status )
{
}
};
tx.registerSynchronization( meanSync1 );
tx.doBeforeCompletion();
assertThat( tx.getRollbackCause(), is( (Throwable) firstException ) );
}
}
| 0true
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestTransactionImpl.java
|
2,897 |
private static class InstanceOfPredicate implements Predicate, DataSerializable {
private Class klass;
public InstanceOfPredicate(Class klass) {
this.klass = klass;
}
@Override
public boolean apply(Map.Entry mapEntry) {
Object value = mapEntry.getValue();
if (value == null) {
return false;
}
return klass.isAssignableFrom(value.getClass());
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeUTF(klass.getName());
}
@Override
public void readData(ObjectDataInput in) throws IOException {
String klassName = in.readUTF();
try {
klass = in.getClassLoader().loadClass(klassName);
} catch (ClassNotFoundException e) {
throw new HazelcastSerializationException("Failed to load class: " + klass, e);
}
}
@Override
public String toString() {
return " instanceOf (" + klass.getName() + ")";
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_query_Predicates.java
|
4,964 |
public static PathTrie.Decoder REST_DECODER = new PathTrie.Decoder() {
@Override
public String decode(String value) {
return RestUtils.decodeComponent(value);
}
};
| 1no label
|
src_main_java_org_elasticsearch_rest_support_RestUtils.java
|
7 |
@RunWith(HazelcastSerialClassRunner.class)
@Category(SlowTest.class)
public class RestTest {
final static Config config = new XmlConfigBuilder().build();
@Before
@After
public void killAllHazelcastInstances() throws IOException {
Hazelcast.shutdownAll();
}
@Test
public void testTtl_issue1783() throws IOException, InterruptedException {
final Config conf = new Config();
String name = "map";
final CountDownLatch latch = new CountDownLatch(1);
final MapConfig mapConfig = conf.getMapConfig(name);
mapConfig.setTimeToLiveSeconds(3);
mapConfig.addEntryListenerConfig(new EntryListenerConfig()
.setImplementation(new EntryAdapter() {
@Override
public void entryEvicted(EntryEvent event) {
latch.countDown();
}
}));
final HazelcastInstance instance = Hazelcast.newHazelcastInstance(conf);
final HTTPCommunicator communicator = new HTTPCommunicator(instance);
communicator.put(name, "key", "value");
String value = communicator.get(name, "key");
assertNotNull(value);
assertEquals("value", value);
assertTrue(latch.await(30, TimeUnit.SECONDS));
value = communicator.get(name, "key");
assertTrue(value.isEmpty());
}
@Test
public void testRestSimple() throws IOException {
final HazelcastInstance instance = Hazelcast.newHazelcastInstance(config);
final HTTPCommunicator communicator = new HTTPCommunicator(instance);
final String name = "testRestSimple";
for (int i = 0; i < 100; i++) {
assertEquals(HttpURLConnection.HTTP_OK, communicator.put(name, String.valueOf(i), String.valueOf(i * 10)));
}
for (int i = 0; i < 100; i++) {
String actual = communicator.get(name, String.valueOf(i));
assertEquals(String.valueOf(i * 10), actual);
}
communicator.deleteAll(name);
for (int i = 0; i < 100; i++) {
String actual = communicator.get(name, String.valueOf(i));
assertEquals("", actual);
}
for (int i = 0; i < 100; i++) {
assertEquals(HttpURLConnection.HTTP_OK, communicator.put(name, String.valueOf(i), String.valueOf(i * 10)));
}
for (int i = 0; i < 100; i++) {
assertEquals(String.valueOf(i * 10), communicator.get(name, String.valueOf(i)));
}
for (int i = 0; i < 100; i++) {
assertEquals(HttpURLConnection.HTTP_OK, communicator.delete(name, String.valueOf(i)));
}
for (int i = 0; i < 100; i++) {
assertEquals("", communicator.get(name, String.valueOf(i)));
}
for (int i = 0; i < 100; i++) {
assertEquals(HttpURLConnection.HTTP_OK, communicator.offer(name, String.valueOf(i)));
}
for (int i = 0; i < 100; i++) {
assertEquals(String.valueOf(i), communicator.poll(name, 2));
}
}
@Test
public void testQueueSizeEmpty() throws IOException {
final HazelcastInstance instance = Hazelcast.newHazelcastInstance(config);
final HTTPCommunicator communicator = new HTTPCommunicator(instance);
final String name = "testQueueSizeEmpty";
IQueue queue = instance.getQueue(name);
Assert.assertEquals(queue.size(), communicator.size(name));
}
@Test
public void testQueueSizeNonEmpty() throws IOException {
final HazelcastInstance instance = Hazelcast.newHazelcastInstance(config);
final HTTPCommunicator communicator = new HTTPCommunicator(instance);
final String name = "testQueueSizeNotEmpty";
final int num_items = 100;
IQueue queue = instance.getQueue(name);
for (int i = 0; i < num_items; i++) {
queue.add(i);
}
Assert.assertEquals(queue.size(), communicator.size(name));
}
private class HTTPCommunicator {
final HazelcastInstance instance;
final String address;
HTTPCommunicator(HazelcastInstance instance) {
this.instance = instance;
this.address = "http:/" + instance.getCluster().getLocalMember().getInetSocketAddress().toString() + "/hazelcast/rest/";
}
public String poll(String queueName, long timeout) {
String url = address + "queues/" + queueName + "/" + String.valueOf(timeout);
String result = doGet(url);
return result;
}
public int size(String queueName) {
String url = address + "queues/" + queueName + "/size";
Integer result = Integer.parseInt(doGet(url));
return result;
}
public int offer(String queueName, String data) throws IOException {
String url = address + "queues/" + queueName;
/** set up the http connection parameters */
HttpURLConnection urlConnection = (HttpURLConnection) (new URL(url)).openConnection();
urlConnection.setRequestMethod("POST");
urlConnection.setDoOutput(true);
urlConnection.setDoInput(true);
urlConnection.setUseCaches(false);
urlConnection.setAllowUserInteraction(false);
urlConnection.setRequestProperty("Content-type", "text/xml; charset=" + "UTF-8");
/** post the data */
OutputStream out = null;
out = urlConnection.getOutputStream();
Writer writer = new OutputStreamWriter(out, "UTF-8");
writer.write(data);
writer.close();
out.close();
return urlConnection.getResponseCode();
}
public String get(String mapName, String key) {
String url = address + "maps/" + mapName + "/" + key;
String result = doGet(url);
return result;
}
public int put(String mapName, String key, String value) throws IOException {
String url = address + "maps/" + mapName + "/" + key;
/** set up the http connection parameters */
HttpURLConnection urlConnection = (HttpURLConnection) (new URL(url)).openConnection();
urlConnection.setRequestMethod("POST");
urlConnection.setDoOutput(true);
urlConnection.setDoInput(true);
urlConnection.setUseCaches(false);
urlConnection.setAllowUserInteraction(false);
urlConnection.setRequestProperty("Content-type", "text/xml; charset=" + "UTF-8");
/** post the data */
OutputStream out = urlConnection.getOutputStream();
Writer writer = new OutputStreamWriter(out, "UTF-8");
writer.write(value);
writer.close();
out.close();
return urlConnection.getResponseCode();
}
public int deleteAll(String mapName) throws IOException {
String url = address + "maps/" + mapName;
/** set up the http connection parameters */
HttpURLConnection urlConnection = (HttpURLConnection) (new URL(url)).openConnection();
urlConnection.setRequestMethod("DELETE");
urlConnection.setDoOutput(true);
urlConnection.setDoInput(true);
urlConnection.setUseCaches(false);
urlConnection.setAllowUserInteraction(false);
urlConnection.setRequestProperty("Content-type", "text/xml; charset=" + "UTF-8");
return urlConnection.getResponseCode();
}
public int delete(String mapName, String key) throws IOException {
String url = address + "maps/" + mapName + "/" + key;
/** set up the http connection parameters */
HttpURLConnection urlConnection = (HttpURLConnection) (new URL(url)).openConnection();
urlConnection.setRequestMethod("DELETE");
urlConnection.setDoOutput(true);
urlConnection.setDoInput(true);
urlConnection.setUseCaches(false);
urlConnection.setAllowUserInteraction(false);
urlConnection.setRequestProperty("Content-type", "text/xml; charset=" + "UTF-8");
return urlConnection.getResponseCode();
}
private String doGet(final String url) {
String result = null;
try {
HttpURLConnection httpUrlConnection = (HttpURLConnection) (new URL(url)).openConnection();
BufferedReader rd = new BufferedReader(new InputStreamReader(httpUrlConnection.getInputStream()));
StringBuilder data = new StringBuilder(150);
String line;
while ((line = rd.readLine()) != null) data.append(line);
rd.close();
result = data.toString();
httpUrlConnection.disconnect();
} catch (Exception e) {
e.printStackTrace();
}
return result;
}
}
}
| 0true
|
hazelcast_src_test_java_com_hazelcast_ascii_RestTest.java
|
3,541 |
public abstract class AbstractFieldMapper<T> implements FieldMapper<T> {
public static class Defaults {
public static final FieldType FIELD_TYPE = new FieldType();
public static final boolean DOC_VALUES = false;
static {
FIELD_TYPE.setIndexed(true);
FIELD_TYPE.setTokenized(true);
FIELD_TYPE.setStored(false);
FIELD_TYPE.setStoreTermVectors(false);
FIELD_TYPE.setOmitNorms(false);
FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
FIELD_TYPE.freeze();
}
public static final float BOOST = 1.0f;
public static final ContentPath.Type PATH_TYPE = ContentPath.Type.FULL;
}
public abstract static class Builder<T extends Builder, Y extends AbstractFieldMapper> extends Mapper.Builder<T, Y> {
protected final FieldType fieldType;
protected Boolean docValues;
protected float boost = Defaults.BOOST;
protected boolean omitNormsSet = false;
protected String indexName;
protected NamedAnalyzer indexAnalyzer;
protected NamedAnalyzer searchAnalyzer;
protected Boolean includeInAll;
protected boolean indexOptionsSet = false;
protected PostingsFormatProvider postingsProvider;
protected DocValuesFormatProvider docValuesProvider;
protected SimilarityProvider similarity;
protected Loading normsLoading;
@Nullable
protected Settings fieldDataSettings;
protected final MultiFields.Builder multiFieldsBuilder;
protected CopyTo copyTo;
protected Builder(String name, FieldType fieldType) {
super(name);
this.fieldType = fieldType;
multiFieldsBuilder = new MultiFields.Builder();
}
public T index(boolean index) {
this.fieldType.setIndexed(index);
return builder;
}
public T store(boolean store) {
this.fieldType.setStored(store);
return builder;
}
public T docValues(boolean docValues) {
this.docValues = docValues;
return builder;
}
public T storeTermVectors(boolean termVectors) {
if (termVectors) {
this.fieldType.setStoreTermVectors(termVectors);
} // don't set it to false, it is default and might be flipped by a more specific option
return builder;
}
public T storeTermVectorOffsets(boolean termVectorOffsets) {
if (termVectorOffsets) {
this.fieldType.setStoreTermVectors(termVectorOffsets);
}
this.fieldType.setStoreTermVectorOffsets(termVectorOffsets);
return builder;
}
public T storeTermVectorPositions(boolean termVectorPositions) {
if (termVectorPositions) {
this.fieldType.setStoreTermVectors(termVectorPositions);
}
this.fieldType.setStoreTermVectorPositions(termVectorPositions);
return builder;
}
public T storeTermVectorPayloads(boolean termVectorPayloads) {
if (termVectorPayloads) {
this.fieldType.setStoreTermVectors(termVectorPayloads);
}
this.fieldType.setStoreTermVectorPayloads(termVectorPayloads);
return builder;
}
public T tokenized(boolean tokenized) {
this.fieldType.setTokenized(tokenized);
return builder;
}
public T boost(float boost) {
this.boost = boost;
return builder;
}
public T omitNorms(boolean omitNorms) {
this.fieldType.setOmitNorms(omitNorms);
this.omitNormsSet = true;
return builder;
}
public T indexOptions(IndexOptions indexOptions) {
this.fieldType.setIndexOptions(indexOptions);
this.indexOptionsSet = true;
return builder;
}
public T indexName(String indexName) {
this.indexName = indexName;
return builder;
}
public T indexAnalyzer(NamedAnalyzer indexAnalyzer) {
this.indexAnalyzer = indexAnalyzer;
return builder;
}
public T searchAnalyzer(NamedAnalyzer searchAnalyzer) {
this.searchAnalyzer = searchAnalyzer;
return builder;
}
public T includeInAll(Boolean includeInAll) {
this.includeInAll = includeInAll;
return builder;
}
public T postingsFormat(PostingsFormatProvider postingsFormat) {
this.postingsProvider = postingsFormat;
return builder;
}
public T docValuesFormat(DocValuesFormatProvider docValuesFormat) {
this.docValuesProvider = docValuesFormat;
return builder;
}
public T similarity(SimilarityProvider similarity) {
this.similarity = similarity;
return builder;
}
public T normsLoading(Loading normsLoading) {
this.normsLoading = normsLoading;
return builder;
}
public T fieldDataSettings(Settings settings) {
this.fieldDataSettings = settings;
return builder;
}
public T multiFieldPathType(ContentPath.Type pathType) {
multiFieldsBuilder.pathType(pathType);
return builder;
}
public T addMultiField(Mapper.Builder mapperBuilder) {
multiFieldsBuilder.add(mapperBuilder);
return builder;
}
public T copyTo(CopyTo copyTo) {
this.copyTo = copyTo;
return builder;
}
public Names buildNames(BuilderContext context) {
return new Names(name, buildIndexName(context), indexName == null ? name : indexName, buildFullName(context), context.path().sourcePath());
}
public String buildIndexName(BuilderContext context) {
String actualIndexName = indexName == null ? name : indexName;
return context.path().pathAsText(actualIndexName);
}
public String buildFullName(BuilderContext context) {
return context.path().fullPathAsText(name);
}
}
private static final ThreadLocal<List<Field>> FIELD_LIST = new ThreadLocal<List<Field>>() {
protected List<Field> initialValue() {
return new ArrayList<Field>(2);
}
};
protected final Names names;
protected float boost;
protected final FieldType fieldType;
private final boolean docValues;
protected final NamedAnalyzer indexAnalyzer;
protected NamedAnalyzer searchAnalyzer;
protected PostingsFormatProvider postingsFormat;
protected DocValuesFormatProvider docValuesFormat;
protected final SimilarityProvider similarity;
protected Loading normsLoading;
protected Settings customFieldDataSettings;
protected FieldDataType fieldDataType;
protected final MultiFields multiFields;
protected CopyTo copyTo;
protected AbstractFieldMapper(Names names, float boost, FieldType fieldType, Boolean docValues, NamedAnalyzer indexAnalyzer,
NamedAnalyzer searchAnalyzer, PostingsFormatProvider postingsFormat,
DocValuesFormatProvider docValuesFormat, SimilarityProvider similarity,
Loading normsLoading, @Nullable Settings fieldDataSettings, Settings indexSettings) {
this(names, boost, fieldType, docValues, indexAnalyzer, searchAnalyzer, postingsFormat, docValuesFormat, similarity,
normsLoading, fieldDataSettings, indexSettings, MultiFields.empty(), null);
}
protected AbstractFieldMapper(Names names, float boost, FieldType fieldType, Boolean docValues, NamedAnalyzer indexAnalyzer,
NamedAnalyzer searchAnalyzer, PostingsFormatProvider postingsFormat,
DocValuesFormatProvider docValuesFormat, SimilarityProvider similarity,
Loading normsLoading, @Nullable Settings fieldDataSettings, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
this.names = names;
this.boost = boost;
this.fieldType = fieldType;
this.fieldType.freeze();
// automatically set to keyword analyzer if its indexed and not analyzed
if (indexAnalyzer == null && !this.fieldType.tokenized() && this.fieldType.indexed()) {
this.indexAnalyzer = Lucene.KEYWORD_ANALYZER;
} else {
this.indexAnalyzer = indexAnalyzer;
}
// automatically set to keyword analyzer if its indexed and not analyzed
if (searchAnalyzer == null && !this.fieldType.tokenized() && this.fieldType.indexed()) {
this.searchAnalyzer = Lucene.KEYWORD_ANALYZER;
} else {
this.searchAnalyzer = searchAnalyzer;
}
if (postingsFormat == null) {
if (defaultPostingFormat() != null) {
postingsFormat = PostingFormats.getAsProvider(defaultPostingFormat());
}
}
this.postingsFormat = postingsFormat;
this.docValuesFormat = docValuesFormat;
this.similarity = similarity;
this.normsLoading = normsLoading;
this.customFieldDataSettings = fieldDataSettings;
if (fieldDataSettings == null) {
this.fieldDataType = defaultFieldDataType();
} else {
// create a new field data type, with the default settings as well as the "new ones"
this.fieldDataType = new FieldDataType(defaultFieldDataType().getType(),
ImmutableSettings.builder().put(defaultFieldDataType().getSettings()).put(fieldDataSettings)
);
}
if (docValues != null) {
this.docValues = docValues;
} else if (fieldDataType == null) {
this.docValues = false;
} else {
this.docValues = FieldDataType.DOC_VALUES_FORMAT_VALUE.equals(fieldDataType.getFormat(indexSettings));
}
this.multiFields = multiFields;
this.copyTo = copyTo;
}
@Nullable
protected String defaultPostingFormat() {
return null;
}
@Nullable
protected String defaultDocValuesFormat() {
return null;
}
@Override
public String name() {
return names.name();
}
@Override
public Names names() {
return this.names;
}
public abstract FieldType defaultFieldType();
public abstract FieldDataType defaultFieldDataType();
@Override
public final FieldDataType fieldDataType() {
return fieldDataType;
}
@Override
public FieldType fieldType() {
return fieldType;
}
@Override
public float boost() {
return this.boost;
}
@Override
public Analyzer indexAnalyzer() {
return this.indexAnalyzer;
}
@Override
public Analyzer searchAnalyzer() {
return this.searchAnalyzer;
}
@Override
public Analyzer searchQuoteAnalyzer() {
return this.searchAnalyzer;
}
@Override
public SimilarityProvider similarity() {
return similarity;
}
@Override
public CopyTo copyTo() {
return copyTo;
}
@Override
public void parse(ParseContext context) throws IOException {
final List<Field> fields = FIELD_LIST.get();
assert fields.isEmpty();
try {
parseCreateField(context, fields);
for (Field field : fields) {
if (!customBoost()) {
field.setBoost(boost);
}
if (context.listener().beforeFieldAdded(this, field, context)) {
context.doc().add(field);
}
}
} catch (Exception e) {
throw new MapperParsingException("failed to parse [" + names.fullName() + "]", e);
} finally {
fields.clear();
}
multiFields.parse(this, context);
if (copyTo != null) {
copyTo.parse(context);
}
}
/**
* Parse the field value and populate <code>fields</code>.
*/
protected abstract void parseCreateField(ParseContext context, List<Field> fields) throws IOException;
/**
* Derived classes can override it to specify that boost value is set by derived classes.
*/
protected boolean customBoost() {
return false;
}
@Override
public void traverse(FieldMapperListener fieldMapperListener) {
fieldMapperListener.fieldMapper(this);
multiFields.traverse(fieldMapperListener);
}
@Override
public void traverse(ObjectMapperListener objectMapperListener) {
// nothing to do here...
}
@Override
public Object valueForSearch(Object value) {
return value;
}
@Override
public BytesRef indexedValueForSearch(Object value) {
return BytesRefs.toBytesRef(value);
}
@Override
public Query queryStringTermQuery(Term term) {
return null;
}
@Override
public boolean useTermQueryWithQueryString() {
return false;
}
@Override
public Query termQuery(Object value, @Nullable QueryParseContext context) {
return new TermQuery(names().createIndexNameTerm(indexedValueForSearch(value)));
}
@Override
public Filter termFilter(Object value, @Nullable QueryParseContext context) {
return new TermFilter(names().createIndexNameTerm(indexedValueForSearch(value)));
}
@Override
public Filter termsFilter(List values, @Nullable QueryParseContext context) {
BytesRef[] bytesRefs = new BytesRef[values.size()];
for (int i = 0; i < bytesRefs.length; i++) {
bytesRefs[i] = indexedValueForSearch(values.get(i));
}
return new TermsFilter(names.indexName(), bytesRefs);
}
/**
* A terms filter based on the field data cache
*/
@Override
public Filter termsFilter(IndexFieldDataService fieldDataService, List values, @Nullable QueryParseContext context) {
// create with initial size large enough to avoid rehashing
ObjectOpenHashSet<BytesRef> terms =
new ObjectOpenHashSet<BytesRef>((int) (values.size() * (1 + ObjectOpenHashSet.DEFAULT_LOAD_FACTOR)));
for (int i = 0, len = values.size(); i < len; i++) {
terms.add(indexedValueForSearch(values.get(i)));
}
return FieldDataTermsFilter.newBytes(fieldDataService.getForField(this), terms);
}
@Override
public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
return new TermRangeQuery(names.indexName(),
lowerTerm == null ? null : indexedValueForSearch(lowerTerm),
upperTerm == null ? null : indexedValueForSearch(upperTerm),
includeLower, includeUpper);
}
@Override
public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
return new TermRangeFilter(names.indexName(),
lowerTerm == null ? null : indexedValueForSearch(lowerTerm),
upperTerm == null ? null : indexedValueForSearch(upperTerm),
includeLower, includeUpper);
}
@Override
public Query fuzzyQuery(String value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
return new FuzzyQuery(names.createIndexNameTerm(indexedValueForSearch(value)), fuzziness.asDistance(value), prefixLength, maxExpansions, transpositions);
}
@Override
public Query prefixQuery(Object value, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryParseContext context) {
PrefixQuery query = new PrefixQuery(names().createIndexNameTerm(indexedValueForSearch(value)));
if (method != null) {
query.setRewriteMethod(method);
}
return query;
}
@Override
public Filter prefixFilter(Object value, @Nullable QueryParseContext context) {
return new PrefixFilter(names().createIndexNameTerm(indexedValueForSearch(value)));
}
@Override
public Query regexpQuery(Object value, int flags, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryParseContext context) {
RegexpQuery query = new RegexpQuery(names().createIndexNameTerm(indexedValueForSearch(value)), flags);
if (method != null) {
query.setRewriteMethod(method);
}
return query;
}
@Override
public Filter regexpFilter(Object value, int flags, @Nullable QueryParseContext parseContext) {
return new RegexpFilter(names().createIndexNameTerm(indexedValueForSearch(value)), flags);
}
@Override
public Filter nullValueFilter() {
return null;
}
@Override
public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
if (!this.getClass().equals(mergeWith.getClass())) {
String mergedType = mergeWith.getClass().getSimpleName();
if (mergeWith instanceof AbstractFieldMapper) {
mergedType = ((AbstractFieldMapper) mergeWith).contentType();
}
mergeContext.addConflict("mapper [" + names.fullName() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]");
// different types, return
return;
}
AbstractFieldMapper fieldMergeWith = (AbstractFieldMapper) mergeWith;
if (this.fieldType().indexed() != fieldMergeWith.fieldType().indexed() || this.fieldType().tokenized() != fieldMergeWith.fieldType().tokenized()) {
mergeContext.addConflict("mapper [" + names.fullName() + "] has different index values");
}
if (this.fieldType().stored() != fieldMergeWith.fieldType().stored()) {
mergeContext.addConflict("mapper [" + names.fullName() + "] has different store values");
}
if (!this.hasDocValues() && fieldMergeWith.hasDocValues()) {
// don't add conflict if this mapper has doc values while the mapper to merge doesn't since doc values are implicitely set
// when the doc_values field data format is configured
mergeContext.addConflict("mapper [" + names.fullName() + "] has different " + TypeParsers.DOC_VALUES + " values");
}
if (this.fieldType().omitNorms() != fieldMergeWith.fieldType.omitNorms()) {
mergeContext.addConflict("mapper [" + names.fullName() + "] has different `norms.enabled` values");
}
if (this.fieldType().tokenized() != fieldMergeWith.fieldType().tokenized()) {
mergeContext.addConflict("mapper [" + names.fullName() + "] has different tokenize values");
}
if (this.fieldType().storeTermVectors() != fieldMergeWith.fieldType().storeTermVectors()) {
mergeContext.addConflict("mapper [" + names.fullName() + "] has different store_term_vector values");
}
if (this.fieldType().storeTermVectorOffsets() != fieldMergeWith.fieldType().storeTermVectorOffsets()) {
mergeContext.addConflict("mapper [" + names.fullName() + "] has different store_term_vector_offsets values");
}
if (this.fieldType().storeTermVectorPositions() != fieldMergeWith.fieldType().storeTermVectorPositions()) {
mergeContext.addConflict("mapper [" + names.fullName() + "] has different store_term_vector_positions values");
}
if (this.fieldType().storeTermVectorPayloads() != fieldMergeWith.fieldType().storeTermVectorPayloads()) {
mergeContext.addConflict("mapper [" + names.fullName() + "] has different store_term_vector_payloads values");
}
if (this.indexAnalyzer == null) {
if (fieldMergeWith.indexAnalyzer != null) {
mergeContext.addConflict("mapper [" + names.fullName() + "] has different index_analyzer");
}
} else if (fieldMergeWith.indexAnalyzer == null) {
mergeContext.addConflict("mapper [" + names.fullName() + "] has different index_analyzer");
} else if (!this.indexAnalyzer.name().equals(fieldMergeWith.indexAnalyzer.name())) {
mergeContext.addConflict("mapper [" + names.fullName() + "] has different index_analyzer");
}
if (this.similarity == null) {
if (fieldMergeWith.similarity() != null) {
mergeContext.addConflict("mapper [" + names.fullName() + "] has different similarity");
}
} else if (fieldMergeWith.similarity() == null) {
mergeContext.addConflict("mapper [" + names.fullName() + "] has different similarity");
} else if (!this.similarity().equals(fieldMergeWith.similarity())) {
mergeContext.addConflict("mapper [" + names.fullName() + "] has different similarity");
}
multiFields.merge(mergeWith, mergeContext);
if (!mergeContext.mergeFlags().simulate()) {
// apply changeable values
this.boost = fieldMergeWith.boost;
this.normsLoading = fieldMergeWith.normsLoading;
this.copyTo = fieldMergeWith.copyTo;
if (fieldMergeWith.postingsFormat != null) {
this.postingsFormat = fieldMergeWith.postingsFormat;
}
if (fieldMergeWith.docValuesFormat != null) {
this.docValuesFormat = fieldMergeWith.docValuesFormat;
}
if (fieldMergeWith.searchAnalyzer != null) {
this.searchAnalyzer = fieldMergeWith.searchAnalyzer;
}
if (fieldMergeWith.customFieldDataSettings != null) {
if (!Objects.equal(fieldMergeWith.customFieldDataSettings, this.customFieldDataSettings)) {
this.customFieldDataSettings = fieldMergeWith.customFieldDataSettings;
this.fieldDataType = new FieldDataType(defaultFieldDataType().getType(),
ImmutableSettings.builder().put(defaultFieldDataType().getSettings()).put(this.customFieldDataSettings)
);
}
}
}
}
@Override
public PostingsFormatProvider postingsFormatProvider() {
return postingsFormat;
}
@Override
public DocValuesFormatProvider docValuesFormatProvider() {
return docValuesFormat;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(names.name());
boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
doXContentBody(builder, includeDefaults, params);
return builder.endObject();
}
protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
builder.field("type", contentType());
if (includeDefaults || !names.name().equals(names.indexNameClean())) {
builder.field("index_name", names.indexNameClean());
}
if (includeDefaults || boost != 1.0f) {
builder.field("boost", boost);
}
FieldType defaultFieldType = defaultFieldType();
if (includeDefaults || fieldType.indexed() != defaultFieldType.indexed() ||
fieldType.tokenized() != defaultFieldType.tokenized()) {
builder.field("index", indexTokenizeOptionToString(fieldType.indexed(), fieldType.tokenized()));
}
if (includeDefaults || fieldType.stored() != defaultFieldType.stored()) {
builder.field("store", fieldType.stored());
}
if (includeDefaults || hasDocValues() != Defaults.DOC_VALUES) {
builder.field(TypeParsers.DOC_VALUES, docValues);
}
if (includeDefaults || fieldType.storeTermVectors() != defaultFieldType.storeTermVectors()) {
builder.field("term_vector", termVectorOptionsToString(fieldType));
}
if (includeDefaults || fieldType.omitNorms() != defaultFieldType.omitNorms() || normsLoading != null) {
builder.startObject("norms");
if (includeDefaults || fieldType.omitNorms() != defaultFieldType.omitNorms()) {
builder.field("enabled", !fieldType.omitNorms());
}
if (normsLoading != null) {
builder.field(Loading.KEY, normsLoading);
}
builder.endObject();
}
if (includeDefaults || fieldType.indexOptions() != defaultFieldType.indexOptions()) {
builder.field("index_options", indexOptionToString(fieldType.indexOptions()));
}
if (indexAnalyzer == null && searchAnalyzer == null) {
if (includeDefaults) {
builder.field("analyzer", "default");
}
} else if (indexAnalyzer == null) {
// searchAnalyzer != null
if (includeDefaults || (!searchAnalyzer.name().startsWith("_") && !searchAnalyzer.name().equals("default"))) {
builder.field("search_analyzer", searchAnalyzer.name());
}
} else if (searchAnalyzer == null) {
// indexAnalyzer != null
if (includeDefaults || (!indexAnalyzer.name().startsWith("_") && !indexAnalyzer.name().equals("default"))) {
builder.field("index_analyzer", indexAnalyzer.name());
}
} else if (indexAnalyzer.name().equals(searchAnalyzer.name())) {
// indexAnalyzer == searchAnalyzer
if (includeDefaults || (!indexAnalyzer.name().startsWith("_") && !indexAnalyzer.name().equals("default"))) {
builder.field("analyzer", indexAnalyzer.name());
}
} else {
// both are there but different
if (includeDefaults || (!indexAnalyzer.name().startsWith("_") && !indexAnalyzer.name().equals("default"))) {
builder.field("index_analyzer", indexAnalyzer.name());
}
if (includeDefaults || (!searchAnalyzer.name().startsWith("_") && !searchAnalyzer.name().equals("default"))) {
builder.field("search_analyzer", searchAnalyzer.name());
}
}
if (postingsFormat != null) {
if (includeDefaults || !postingsFormat.name().equals(defaultPostingFormat())) {
builder.field("postings_format", postingsFormat.name());
}
} else if (includeDefaults) {
String format = defaultPostingFormat();
if (format == null) {
format = PostingsFormatService.DEFAULT_FORMAT;
}
builder.field("postings_format", format);
}
if (docValuesFormat != null) {
if (includeDefaults || !docValuesFormat.name().equals(defaultDocValuesFormat())) {
builder.field(DOC_VALUES_FORMAT, docValuesFormat.name());
}
} else if (includeDefaults) {
String format = defaultDocValuesFormat();
if (format == null) {
format = DocValuesFormatService.DEFAULT_FORMAT;
}
builder.field(DOC_VALUES_FORMAT, format);
}
if (similarity() != null) {
builder.field("similarity", similarity().name());
} else if (includeDefaults) {
builder.field("similariry", SimilarityLookupService.DEFAULT_SIMILARITY);
}
if (customFieldDataSettings != null) {
builder.field("fielddata", (Map) customFieldDataSettings.getAsMap());
} else if (includeDefaults) {
builder.field("fielddata", (Map) fieldDataType.getSettings().getAsMap());
}
multiFields.toXContent(builder, params);
if (copyTo != null) {
copyTo.toXContent(builder, params);
}
}
protected static String indexOptionToString(IndexOptions indexOption) {
switch (indexOption) {
case DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS:
return TypeParsers.INDEX_OPTIONS_OFFSETS;
case DOCS_AND_FREQS:
return TypeParsers.INDEX_OPTIONS_FREQS;
case DOCS_AND_FREQS_AND_POSITIONS:
return TypeParsers.INDEX_OPTIONS_POSITIONS;
case DOCS_ONLY:
return TypeParsers.INDEX_OPTIONS_DOCS;
default:
throw new ElasticsearchIllegalArgumentException("Unknown IndexOptions [" + indexOption + "]");
}
}
public static String termVectorOptionsToString(FieldType fieldType) {
if (!fieldType.storeTermVectors()) {
return "no";
} else if (!fieldType.storeTermVectorOffsets() && !fieldType.storeTermVectorPositions()) {
return "yes";
} else if (fieldType.storeTermVectorOffsets() && !fieldType.storeTermVectorPositions()) {
return "with_offsets";
} else {
StringBuilder builder = new StringBuilder("with");
if (fieldType.storeTermVectorPositions()) {
builder.append("_positions");
}
if (fieldType.storeTermVectorOffsets()) {
builder.append("_offsets");
}
if (fieldType.storeTermVectorPayloads()) {
builder.append("_payloads");
}
return builder.toString();
}
}
protected static String indexTokenizeOptionToString(boolean indexed, boolean tokenized) {
if (!indexed) {
return "no";
} else if (tokenized) {
return "analyzed";
} else {
return "not_analyzed";
}
}
protected abstract String contentType();
@Override
public void close() {
multiFields.close();
}
@Override
public boolean isNumeric() {
return false;
}
@Override
public boolean isSortable() {
return true;
}
public boolean hasDocValues() {
return docValues;
}
@Override
public Loading normsLoading(Loading defaultLoading) {
return normsLoading == null ? defaultLoading : normsLoading;
}
public static class MultiFields {
public static MultiFields empty() {
return new MultiFields(Defaults.PATH_TYPE, ImmutableOpenMap.<String, Mapper>of());
}
public static class Builder {
private final ImmutableOpenMap.Builder<String, Mapper.Builder> mapperBuilders = ImmutableOpenMap.builder();
private ContentPath.Type pathType = Defaults.PATH_TYPE;
public Builder pathType(ContentPath.Type pathType) {
this.pathType = pathType;
return this;
}
public Builder add(Mapper.Builder builder) {
mapperBuilders.put(builder.name(), builder);
return this;
}
@SuppressWarnings("unchecked")
public MultiFields build(AbstractFieldMapper.Builder mainFieldBuilder, BuilderContext context) {
if (pathType == Defaults.PATH_TYPE && mapperBuilders.isEmpty()) {
return empty();
} else if (mapperBuilders.isEmpty()) {
return new MultiFields(pathType, ImmutableOpenMap.<String, Mapper>of());
} else {
ContentPath.Type origPathType = context.path().pathType();
context.path().pathType(pathType);
context.path().add(mainFieldBuilder.name());
ImmutableOpenMap.Builder mapperBuilders = this.mapperBuilders;
for (ObjectObjectCursor<String, Mapper.Builder> cursor : this.mapperBuilders) {
String key = cursor.key;
Mapper.Builder value = cursor.value;
mapperBuilders.put(key, value.build(context));
}
context.path().remove();
context.path().pathType(origPathType);
ImmutableOpenMap.Builder<String, Mapper> mappers = mapperBuilders.cast();
return new MultiFields(pathType, mappers.build());
}
}
}
private final ContentPath.Type pathType;
private volatile ImmutableOpenMap<String, Mapper> mappers;
public MultiFields(ContentPath.Type pathType, ImmutableOpenMap<String, Mapper> mappers) {
this.pathType = pathType;
this.mappers = mappers;
// we disable the all in multi-field mappers
for (ObjectCursor<Mapper> cursor : mappers.values()) {
Mapper mapper = cursor.value;
if (mapper instanceof AllFieldMapper.IncludeInAll) {
((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll();
}
}
}
public void parse(AbstractFieldMapper mainField, ParseContext context) throws IOException {
if (mappers.isEmpty()) {
return;
}
ContentPath.Type origPathType = context.path().pathType();
context.path().pathType(pathType);
context.path().add(mainField.name());
for (ObjectCursor<Mapper> cursor : mappers.values()) {
cursor.value.parse(context);
}
context.path().remove();
context.path().pathType(origPathType);
}
// No need for locking, because locking is taken care of in ObjectMapper#merge and DocumentMapper#merge
public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
AbstractFieldMapper mergeWithMultiField = (AbstractFieldMapper) mergeWith;
List<FieldMapper> newFieldMappers = null;
ImmutableOpenMap.Builder<String, Mapper> newMappersBuilder = null;
for (ObjectCursor<Mapper> cursor : mergeWithMultiField.multiFields.mappers.values()) {
Mapper mergeWithMapper = cursor.value;
Mapper mergeIntoMapper = mappers.get(mergeWithMapper.name());
if (mergeIntoMapper == null) {
// no mapping, simply add it if not simulating
if (!mergeContext.mergeFlags().simulate()) {
// we disable the all in multi-field mappers
if (mergeWithMapper instanceof AllFieldMapper.IncludeInAll) {
((AllFieldMapper.IncludeInAll) mergeWithMapper).unsetIncludeInAll();
}
if (newMappersBuilder == null) {
newMappersBuilder = ImmutableOpenMap.builder(mappers);
}
newMappersBuilder.put(mergeWithMapper.name(), mergeWithMapper);
if (mergeWithMapper instanceof AbstractFieldMapper) {
if (newFieldMappers == null) {
newFieldMappers = new ArrayList<FieldMapper>(2);
}
newFieldMappers.add((FieldMapper) mergeWithMapper);
}
}
} else {
mergeIntoMapper.merge(mergeWithMapper, mergeContext);
}
}
// first add all field mappers
if (newFieldMappers != null) {
mergeContext.docMapper().addFieldMappers(newFieldMappers);
}
// now publish mappers
if (newMappersBuilder != null) {
mappers = newMappersBuilder.build();
}
}
public void traverse(FieldMapperListener fieldMapperListener) {
for (ObjectCursor<Mapper> cursor : mappers.values()) {
cursor.value.traverse(fieldMapperListener);
}
}
public void close() {
for (ObjectCursor<Mapper> cursor : mappers.values()) {
cursor.value.close();
}
}
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (pathType != Defaults.PATH_TYPE) {
builder.field("path", pathType.name().toLowerCase(Locale.ROOT));
}
if (!mappers.isEmpty()) {
builder.startObject("fields");
for (ObjectCursor<Mapper> cursor : mappers.values()) {
cursor.value.toXContent(builder, params);
}
builder.endObject();
}
return builder;
}
}
/**
* Represents a list of fields with optional boost factor where the current field should be copied to
*/
public static class CopyTo {
private final ImmutableList<String> copyToFields;
private CopyTo(ImmutableList<String> copyToFields) {
this.copyToFields = copyToFields;
}
/**
* Creates instances of the fields that the current field should be copied to
*/
public void parse(ParseContext context) throws IOException {
if (!context.isWithinCopyTo()) {
for (String field : copyToFields) {
parse(field, context);
}
}
}
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (!copyToFields.isEmpty()) {
builder.startArray("copy_to");
for (String field : copyToFields) {
builder.value(field);
}
builder.endArray();
}
return builder;
}
public static class Builder {
private final ImmutableList.Builder<String> copyToBuilders = ImmutableList.builder();
public Builder add(String field) {
copyToBuilders.add(field);
return this;
}
public CopyTo build() {
return new CopyTo(copyToBuilders.build());
}
}
public ImmutableList<String> copyToFields() {
return copyToFields;
}
/**
* Creates an copy of the current field with given field name and boost
*/
public void parse(String field, ParseContext context) throws IOException {
context.setWithinCopyTo();
FieldMappers mappers = context.docMapper().mappers().indexName(field);
if (mappers != null && !mappers.isEmpty()) {
mappers.mapper().parse(context);
} else {
int posDot = field.lastIndexOf('.');
if (posDot > 0) {
// Compound name
String objectPath = field.substring(0, posDot);
String fieldPath = field.substring(posDot + 1);
ObjectMapper mapper = context.docMapper().objectMappers().get(objectPath);
if (mapper == null) {
//TODO: Create an object dynamically?
throw new MapperParsingException("attempt to copy value to non-existing object [" + field + "]");
}
ContentPath.Type origPathType = context.path().pathType();
context.path().pathType(ContentPath.Type.FULL);
context.path().add(objectPath);
// We might be in dynamically created field already, so need to clean withinNewMapper flag
// and then restore it, so we wouldn't miss new mappers created from copy_to fields
boolean origWithinNewMapper = context.isWithinNewMapper();
context.clearWithinNewMapper();
try {
mapper.parseDynamicValue(context, fieldPath, context.parser().currentToken());
} finally {
if (origWithinNewMapper) {
context.setWithinNewMapper();
} else {
context.clearWithinNewMapper();
}
context.path().remove();
context.path().pathType(origPathType);
}
} else {
// We might be in dynamically created field already, so need to clean withinNewMapper flag
// and then restore it, so we wouldn't miss new mappers created from copy_to fields
boolean origWithinNewMapper = context.isWithinNewMapper();
context.clearWithinNewMapper();
try {
context.docMapper().root().parseDynamicValue(context, field, context.parser().currentToken());
} finally {
if (origWithinNewMapper) {
context.setWithinNewMapper();
} else {
context.clearWithinNewMapper();
}
}
}
}
context.clearWithinCopyTo();
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_mapper_core_AbstractFieldMapper.java
|
50 |
@Component("blTimeDTOCustomPersistenceHandler")
public class TimeDTOCustomPersistenceHandler extends CustomPersistenceHandlerAdapter {
private static final Log LOG = LogFactory.getLog(TimeDTOCustomPersistenceHandler.class);
@Override
public Boolean canHandleFetch(PersistencePackage persistencePackage) {
return canHandleInspect(persistencePackage);
}
@Override
public Boolean canHandleAdd(PersistencePackage persistencePackage) {
return canHandleInspect(persistencePackage);
}
@Override
public Boolean canHandleRemove(PersistencePackage persistencePackage) {
return canHandleInspect(persistencePackage);
}
@Override
public Boolean canHandleUpdate(PersistencePackage persistencePackage) {
return canHandleInspect(persistencePackage);
}
@Override
public Boolean canHandleInspect(PersistencePackage persistencePackage) {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
return TimeDTO.class.getName().equals(ceilingEntityFullyQualifiedClassname);
}
@Override
public DynamicResultSet inspect(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, InspectHelper helper) throws ServiceException {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
try {
Map<MergedPropertyType, Map<String, FieldMetadata>> allMergedProperties = new HashMap<MergedPropertyType, Map<String, FieldMetadata>>();
Map<String, FieldMetadata> mergedProperties = dynamicEntityDao.getSimpleMergedProperties(ceilingEntityFullyQualifiedClassname, persistencePackage.getPersistencePerspective());
allMergedProperties.put(MergedPropertyType.PRIMARY, mergedProperties);
ClassMetadata mergedMetadata = helper.getMergedClassMetadata(new Class<?>[]{Class.forName(ceilingEntityFullyQualifiedClassname)}, allMergedProperties);
DynamicResultSet results = new DynamicResultSet(mergedMetadata);
return results;
} catch (Exception e) {
ServiceException ex = new ServiceException("Unable to retrieve inspection results for " + persistencePackage.getCeilingEntityFullyQualifiedClassname(), e);
throw ex;
}
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_admin_server_handler_TimeDTOCustomPersistenceHandler.java
|
9 |
private class HTTPCommunicator {
final HazelcastInstance instance;
final String address;
HTTPCommunicator(HazelcastInstance instance) {
this.instance = instance;
this.address = "http:/" + instance.getCluster().getLocalMember().getInetSocketAddress().toString() + "/hazelcast/rest/";
}
public String poll(String queueName, long timeout) {
String url = address + "queues/" + queueName + "/" + String.valueOf(timeout);
String result = doGet(url);
return result;
}
public int size(String queueName) {
String url = address + "queues/" + queueName + "/size";
Integer result = Integer.parseInt(doGet(url));
return result;
}
public int offer(String queueName, String data) throws IOException {
String url = address + "queues/" + queueName;
/** set up the http connection parameters */
HttpURLConnection urlConnection = (HttpURLConnection) (new URL(url)).openConnection();
urlConnection.setRequestMethod("POST");
urlConnection.setDoOutput(true);
urlConnection.setDoInput(true);
urlConnection.setUseCaches(false);
urlConnection.setAllowUserInteraction(false);
urlConnection.setRequestProperty("Content-type", "text/xml; charset=" + "UTF-8");
/** post the data */
OutputStream out = null;
out = urlConnection.getOutputStream();
Writer writer = new OutputStreamWriter(out, "UTF-8");
writer.write(data);
writer.close();
out.close();
return urlConnection.getResponseCode();
}
public String get(String mapName, String key) {
String url = address + "maps/" + mapName + "/" + key;
String result = doGet(url);
return result;
}
public int put(String mapName, String key, String value) throws IOException {
String url = address + "maps/" + mapName + "/" + key;
/** set up the http connection parameters */
HttpURLConnection urlConnection = (HttpURLConnection) (new URL(url)).openConnection();
urlConnection.setRequestMethod("POST");
urlConnection.setDoOutput(true);
urlConnection.setDoInput(true);
urlConnection.setUseCaches(false);
urlConnection.setAllowUserInteraction(false);
urlConnection.setRequestProperty("Content-type", "text/xml; charset=" + "UTF-8");
/** post the data */
OutputStream out = urlConnection.getOutputStream();
Writer writer = new OutputStreamWriter(out, "UTF-8");
writer.write(value);
writer.close();
out.close();
return urlConnection.getResponseCode();
}
public int deleteAll(String mapName) throws IOException {
String url = address + "maps/" + mapName;
/** set up the http connection parameters */
HttpURLConnection urlConnection = (HttpURLConnection) (new URL(url)).openConnection();
urlConnection.setRequestMethod("DELETE");
urlConnection.setDoOutput(true);
urlConnection.setDoInput(true);
urlConnection.setUseCaches(false);
urlConnection.setAllowUserInteraction(false);
urlConnection.setRequestProperty("Content-type", "text/xml; charset=" + "UTF-8");
return urlConnection.getResponseCode();
}
public int delete(String mapName, String key) throws IOException {
String url = address + "maps/" + mapName + "/" + key;
/** set up the http connection parameters */
HttpURLConnection urlConnection = (HttpURLConnection) (new URL(url)).openConnection();
urlConnection.setRequestMethod("DELETE");
urlConnection.setDoOutput(true);
urlConnection.setDoInput(true);
urlConnection.setUseCaches(false);
urlConnection.setAllowUserInteraction(false);
urlConnection.setRequestProperty("Content-type", "text/xml; charset=" + "UTF-8");
return urlConnection.getResponseCode();
}
private String doGet(final String url) {
String result = null;
try {
HttpURLConnection httpUrlConnection = (HttpURLConnection) (new URL(url)).openConnection();
BufferedReader rd = new BufferedReader(new InputStreamReader(httpUrlConnection.getInputStream()));
StringBuilder data = new StringBuilder(150);
String line;
while ((line = rd.readLine()) != null) data.append(line);
rd.close();
result = data.toString();
httpUrlConnection.disconnect();
} catch (Exception e) {
e.printStackTrace();
}
return result;
}
}
| 0true
|
hazelcast_src_test_java_com_hazelcast_ascii_RestTest.java
|
2,363 |
private class SingleExecutionProcessor
implements Runnable {
@Override
public void run() {
try {
RequestPartitionResult result = mapReduceService
.processRequest(supervisor.getJobOwner(), new RequestMemberIdAssignment(name, jobId), name);
// JobSupervisor doesn't exists anymore on jobOwner, job done?
if (result.getResultState() == NO_SUPERVISOR) {
return;
} else if (result.getResultState() == NO_MORE_PARTITIONS) {
return;
}
int partitionId = result.getPartitionId();
KeyValueSource<KeyIn, ValueIn> delegate = keyValueSource;
if (supervisor.getConfiguration().isCommunicateStats()) {
delegate = new KeyValueSourceFacade<KeyIn, ValueIn>(keyValueSource, supervisor);
}
delegate.reset();
if (delegate.open(nodeEngine)) {
DefaultContext<KeyOut, ValueOut> context = supervisor.getOrCreateContext(MapCombineTask.this);
processMapping(partitionId, context, delegate);
delegate.close();
finalizeMapping(partitionId, context);
} else {
// Partition assignment might not be ready yet, postpone the processing and retry later
postponePartitionProcessing(partitionId);
}
} catch (Throwable t) {
handleProcessorThrowable(t);
}
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_mapreduce_impl_task_MapCombineTask.java
|
720 |
public class DeleteRequestBuilder extends ShardReplicationOperationRequestBuilder<DeleteRequest, DeleteResponse, DeleteRequestBuilder> {
public DeleteRequestBuilder(Client client) {
super((InternalClient) client, new DeleteRequest());
}
public DeleteRequestBuilder(Client client, @Nullable String index) {
super((InternalClient) client, new DeleteRequest(index));
}
/**
* Sets the type of the document to delete.
*/
public DeleteRequestBuilder setType(String type) {
request.type(type);
return this;
}
/**
* Sets the id of the document to delete.
*/
public DeleteRequestBuilder setId(String id) {
request.id(id);
return this;
}
/**
* Sets the parent id of this document. Will simply set the routing to this value, as it is only
* used for routing with delete requests.
*/
public DeleteRequestBuilder setParent(String parent) {
request.parent(parent);
return this;
}
/**
* Controls the shard routing of the delete request. Using this value to hash the shard
* and not the id.
*/
public DeleteRequestBuilder setRouting(String routing) {
request.routing(routing);
return this;
}
/**
* Should a refresh be executed post this index operation causing the operation to
* be searchable. Note, heavy indexing should not set this to <tt>true</tt>. Defaults
* to <tt>false</tt>.
*/
public DeleteRequestBuilder setRefresh(boolean refresh) {
request.refresh(refresh);
return this;
}
/**
* Sets the version, which will cause the delete operation to only be performed if a matching
* version exists and no changes happened on the doc since then.
*/
public DeleteRequestBuilder setVersion(long version) {
request.version(version);
return this;
}
/**
* Sets the type of versioning to use. Defaults to {@link VersionType#INTERNAL}.
*/
public DeleteRequestBuilder setVersionType(VersionType versionType) {
request.versionType(versionType);
return this;
}
/**
* Set the replication type for this operation.
*/
public DeleteRequestBuilder setReplicationType(ReplicationType replicationType) {
request.replicationType(replicationType);
return this;
}
/**
* Sets the consistency level. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT}.
*/
public DeleteRequestBuilder setConsistencyLevel(WriteConsistencyLevel consistencyLevel) {
request.consistencyLevel(consistencyLevel);
return this;
}
@Override
protected void doExecute(ActionListener<DeleteResponse> listener) {
((Client) client).delete(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_delete_DeleteRequestBuilder.java
|
127 |
public class PageRuleType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, PageRuleType> TYPES = new LinkedHashMap<String, PageRuleType>();
public static final PageRuleType REQUEST = new PageRuleType("REQUEST", "Request");
public static final PageRuleType TIME = new PageRuleType("TIME", "Time");
public static final PageRuleType PRODUCT = new PageRuleType("PRODUCT", "Product");
public static final PageRuleType CUSTOMER = new PageRuleType("CUSTOMER", "Customer");
/**
* Allows translation from the passed in String to a <code>PageRuleType</code>
* @param type
* @return The matching rule type
*/
public static PageRuleType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public PageRuleType() {
//do nothing
}
/**
* Initialize the type and friendlyType
* @param <code>type</code>
* @param <code>friendlyType</code>
*/
public PageRuleType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
/**
* Sets the type
* @param type
*/
public void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
/**
* Gets the type
* @return
*/
@Override
public String getType() {
return type;
}
/**
* Gets the name of the type
* @return
*/
@Override
public String getFriendlyType() {
return friendlyType;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
PageRuleType other = (PageRuleType) obj;
if (type == null) {
if (other.type != null) {
return false;
}
} else if (!type.equals(other.type)) {
return false;
}
return true;
}
}
| 1no label
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_service_type_PageRuleType.java
|
178 |
@Controller
@RequestMapping(PreviewTemplateController.REQUEST_MAPPING_PREFIX + "**")
public class PreviewTemplateController {
private String templatePathPrefix = "templates";
public static final String REQUEST_MAPPING_PREFIX = "/preview/";
@RequestMapping
public String displayPreview(HttpServletRequest httpServletRequest) {
String requestURIPrefix = httpServletRequest.getContextPath() + REQUEST_MAPPING_PREFIX;
String templatePath = httpServletRequest.getRequestURI().substring(requestURIPrefix.length() - 1);
return templatePathPrefix + templatePath;
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_web_PreviewTemplateController.java
|
1,106 |
public class OSQLFunctionSet extends OSQLFunctionMultiValueAbstract<Set<Object>> {
public static final String NAME = "set";
public OSQLFunctionSet() {
super(NAME, 1, -1);
}
public Object execute(final OIdentifiable iCurrentRecord, Object iCurrentResult, final Object[] iParameters,
OCommandContext iContext) {
if (iParameters.length > 1)
// IN LINE MODE
context = new HashSet<Object>();
for (Object value : iParameters) {
if (value != null) {
if (iParameters.length == 1 && context == null)
// AGGREGATION MODE (STATEFULL)
context = new HashSet<Object>();
OMultiValue.add(context, value);
}
}
return prepareResult(context);
}
public String getSyntax() {
return "Syntax error: set(<value>*)";
}
public boolean aggregateResults(final Object[] configuredParameters) {
return configuredParameters.length == 1;
}
@Override
public Set<Object> getResult() {
final Set<Object> res = context;
context = null;
return prepareResult(res);
}
protected Set<Object> prepareResult(Set<Object> res) {
if (returnDistributedResult()) {
final Map<String, Object> doc = new HashMap<String, Object>();
doc.put("node", getDistributedStorageId());
doc.put("context", context);
return Collections.<Object> singleton(doc);
} else {
return res;
}
}
@SuppressWarnings("unchecked")
@Override
public Object mergeDistributedResult(List<Object> resultsToMerge) {
final Map<Long, Collection<Object>> chunks = new HashMap<Long, Collection<Object>>();
for (Object iParameter : resultsToMerge) {
final Map<String, Object> container = (Map<String, Object>) ((Collection<?>) iParameter).iterator().next();
chunks.put((Long) container.get("node"), (Collection<Object>) container.get("context"));
}
final Collection<Object> result = new HashSet<Object>();
for (Collection<Object> chunk : chunks.values()) {
result.addAll(chunk);
}
return result;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_functions_coll_OSQLFunctionSet.java
|
521 |
public class TypesExistsRequest extends MasterNodeReadOperationRequest<TypesExistsRequest> {
private String[] indices;
private String[] types;
private IndicesOptions indicesOptions = IndicesOptions.strict();
TypesExistsRequest() {
}
public TypesExistsRequest(String[] indices, String... types) {
this.indices = indices;
this.types = types;
}
public String[] indices() {
return indices;
}
public void indices(String[] indices) {
this.indices = indices;
}
public String[] types() {
return types;
}
public void types(String[] types) {
this.types = types;
}
public IndicesOptions indicesOptions() {
return indicesOptions;
}
public TypesExistsRequest indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (indices == null) { // Specifying '*' via rest api results in an empty array
validationException = addValidationError("index/indices is missing", validationException);
}
if (types == null || types.length == 0) {
validationException = addValidationError("type/types is missing", validationException);
}
return validationException;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArray(indices);
out.writeStringArray(types);
indicesOptions.writeIndicesOptions(out);
writeLocal(out, Version.V_1_0_0_RC2);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
indices = in.readStringArray();
types = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
readLocal(in, Version.V_1_0_0_RC2);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_exists_types_TypesExistsRequest.java
|
784 |
private static class ListenerWrapper {
final Listener listener;
private ListenerWrapper(Listener listener) {
this.listener = listener;
}
@Override
public boolean equals(final Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
final ListenerWrapper that = (ListenerWrapper) o;
return listener == that.listener;
}
@Override
public int hashCode() {
return listener != null ? System.identityHashCode(listener) : 0;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_memory_OMemoryWatchDog.java
|
765 |
public class OSysBucket extends OBonsaiBucketAbstract {
private static final int SYS_MAGIC_OFFSET = WAL_POSITION_OFFSET + OLongSerializer.LONG_SIZE;
private static final int FREE_SPACE_OFFSET = SYS_MAGIC_OFFSET + OByteSerializer.BYTE_SIZE;
private static final int FREE_LIST_HEAD_OFFSET = FREE_SPACE_OFFSET + OBonsaiBucketPointer.SIZE;
private static final int FREE_LIST_LENGTH_OFFSET = FREE_LIST_HEAD_OFFSET + OBonsaiBucketPointer.SIZE;
private static final byte SYS_MAGIC = (byte) 41;
public OSysBucket(ODirectMemoryPointer pagePointer, TrackMode trackMode) {
super(pagePointer, trackMode);
}
public void init() throws IOException {
setByteValue(SYS_MAGIC_OFFSET, SYS_MAGIC);
setBucketPointer(FREE_SPACE_OFFSET, new OBonsaiBucketPointer(0, OSBTreeBonsaiBucket.MAX_BUCKET_SIZE_BYTES));
setBucketPointer(FREE_LIST_HEAD_OFFSET, OBonsaiBucketPointer.NULL);
setLongValue(FREE_LIST_LENGTH_OFFSET, 0L);
}
public boolean isInitialized() {
return getByteValue(SYS_MAGIC_OFFSET) != 41;
}
public long freeListLength() {
return getLongValue(FREE_LIST_LENGTH_OFFSET);
}
public void setFreeListLength(long length) throws IOException {
setLongValue(FREE_LIST_LENGTH_OFFSET, length);
}
public OBonsaiBucketPointer getFreeSpacePointer() {
return getBucketPointer(FREE_SPACE_OFFSET);
}
public void setFreeSpacePointer(OBonsaiBucketPointer pointer) throws IOException {
setBucketPointer(FREE_SPACE_OFFSET, pointer);
}
public OBonsaiBucketPointer getFreeListHead() {
return getBucketPointer(FREE_LIST_HEAD_OFFSET);
}
public void setFreeListHead(OBonsaiBucketPointer pointer) throws IOException {
setBucketPointer(FREE_LIST_HEAD_OFFSET, pointer);
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_index_sbtreebonsai_local_OSysBucket.java
|
319 |
public class NodesHotThreadsRequestBuilder extends NodesOperationRequestBuilder<NodesHotThreadsRequest, NodesHotThreadsResponse, NodesHotThreadsRequestBuilder> {
public NodesHotThreadsRequestBuilder(ClusterAdminClient clusterClient) {
super((InternalClusterAdminClient) clusterClient, new NodesHotThreadsRequest());
}
public NodesHotThreadsRequestBuilder setThreads(int threads) {
request.threads(threads);
return this;
}
public NodesHotThreadsRequestBuilder setType(String type) {
request.type(type);
return this;
}
public NodesHotThreadsRequestBuilder setInterval(TimeValue interval) {
request.interval(interval);
return this;
}
@Override
protected void doExecute(ActionListener<NodesHotThreadsResponse> listener) {
((ClusterAdminClient) client).nodesHotThreads(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_hotthreads_NodesHotThreadsRequestBuilder.java
|
69 |
public interface OResourcePoolListener<K, V> {
/**
* Creates a new resource to be used and to be pooled when the client finishes with it.
*
* @return The new resource
*/
public V createNewResource(K iKey, Object... iAdditionalArgs);
/**
* Reuses the pooled resource.
*
* @return true if can be reused, otherwise false. In this case the resource will be removed from the pool
*/
public boolean reuseResource(K iKey, Object[] iAdditionalArgs, V iValue);
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_concur_resource_OResourcePoolListener.java
|
461 |
public class CeylonNavigatorContentProvider implements
IPipelinedTreeContentProvider2, ICeylonModelListener {
private org.eclipse.ui.navigator.IExtensionStateModel javaNavigatorStateModel;
private boolean isFlatLayout() {
return javaNavigatorStateModel.getBooleanProperty(Values.IS_LAYOUT_FLAT);
}
@SuppressWarnings({"rawtypes", "unchecked"})
@Override
public void getPipelinedChildren(Object aParent, Set theCurrentChildren) {
if (aParent instanceof IJavaProject) {
aParent = ((IJavaProject) aParent).getProject();
}
if (aParent instanceof IProject) {
IProject project = (IProject) aParent;
Map<String, RepositoryNode> repositories = getProjectRepositoryNodes(project);
List<Object> toRemove = new ArrayList<>();
for (Object child : theCurrentChildren) {
if (child instanceof ClassPathContainer) {
toRemove.add(child);
ClassPathContainer cpContainer = (ClassPathContainer) child;
for (IAdaptable entry : cpContainer.getChildren()) {
if (entry instanceof IPackageFragmentRoot) {
IPackageFragmentRoot pfr = (IPackageFragmentRoot) entry;
for (RepositoryNode rn : repositories.values()) {
for (ExternalModuleNode emn : rn.getModules()) {
if (emn.getModule() != null &&
emn.getModule()
.getPackageFragmentRoots()
.contains(pfr)) {
emn.getBinaryArchives().add(pfr);
}
}
}
}
if (entry instanceof RequiredProjectWrapper) {
System.out.print("");
}
}
}
if (child instanceof LibraryContainer) {
toRemove.add(child);
}
}
theCurrentChildren.removeAll(toRemove);
for (RepositoryNode repoNode : repositories.values()) {
theCurrentChildren.add(repoNode);
}
}
if (aParent instanceof IPackageFragmentRoot) {
IPackageFragmentRoot root = (IPackageFragmentRoot) aParent;
if (CeylonBuilder.isSourceFolder(root)) {
Map<String, SourceModuleNode> moduleNodes = getSourceDirectoryModules(root);
List<Object> toRemove = new ArrayList<Object>();
for (Object child : theCurrentChildren) {
if (child instanceof IPackageFragment) {
toRemove.add(child);
} else {
if (child instanceof IFile) {
toRemove.add(child);
}
}
}
theCurrentChildren.removeAll(toRemove);
try {
for (IJavaElement pfElement : root.getChildren()) {
IPackageFragment child = (IPackageFragment) pfElement;
IFolder pkgFolder = (IFolder) ((IPackageFragment) child).getResource();
Package pkg = CeylonBuilder.getPackage(pkgFolder);
if (pkg != null) {
Module module = pkg.getModule();
String signature = module.getSignature();
SourceModuleNode moduleNode = moduleNodes.get(signature);
if (moduleNode != null) {
if (! isFlatLayout()
&& ! module.isDefault()
&& ! pkg.getNameAsString().equals(module.getNameAsString())) {
continue;
}
moduleNode.getPackageFragments().add((IPackageFragment) child);
}
}
}
} catch (JavaModelException e) {
e.printStackTrace();
}
for (SourceModuleNode moduleNode : moduleNodes.values()) {
theCurrentChildren.add(moduleNode);
}
}
}
if (aParent instanceof IPackageFragment) {
if (!(aParent instanceof SourceModuleNode)) {
IPackageFragment pkgFragment = (IPackageFragment) aParent;
IPackageFragmentRoot root = (IPackageFragmentRoot) pkgFragment.getAncestor(IJavaElement.PACKAGE_FRAGMENT_ROOT);
if (root != null) {
IFolder rootFolder = null;
try {
rootFolder = (IFolder) root.getCorrespondingResource();
} catch (JavaModelException e) {
e.printStackTrace();
}
if (rootFolder != null && RootFolderType.SOURCE.equals(CeylonBuilder.getRootFolderType(root))) {
if (pkgFragment.isDefaultPackage()) {
try {
for (IResource r : rootFolder.members()) {
if (r instanceof IFile && ! JavaCore.isJavaLikeFileName(r.getName())) {
theCurrentChildren.add((IFile)r);
}
}
} catch (CoreException e) {
e.printStackTrace();
}
} else {
JDTModule fragmentModule = CeylonBuilder.getModule(pkgFragment);
if (fragmentModule != null) {
for (Iterator<Object> itr = theCurrentChildren.iterator(); itr.hasNext(); ) {
Object child = itr.next();
if (child instanceof IPackageFragment) {
IPackageFragment childPkg = (IPackageFragment) child;
if (! fragmentModule.equals(CeylonBuilder.getModule(childPkg))) {
itr.remove();
}
}
}
}
}
}
}
} else {
theCurrentChildren.clear();
theCurrentChildren.addAll(((SourceModuleNode)aParent).getPackageFragments());
}
}
}
private synchronized Map<String, RepositoryNode> getProjectRepositoryNodes(IProject project) {
RepositoryManager repoManager = CeylonBuilder.getProjectRepositoryManager(project);
Map<String, RepositoryNode> repositories = new LinkedHashMap<>();
for (String displayString : repoManager.getRepositoriesDisplayString()) {
repositories.put(displayString, new RepositoryNode(project, displayString));
}
RepositoryNode unknownRepositoryNode = new RepositoryNode(project, NodeUtils.UNKNOWN_REPOSITORY);
repositories.put(NodeUtils.UNKNOWN_REPOSITORY, unknownRepositoryNode);
for (JDTModule externalModule : CeylonBuilder.getProjectExternalModules(project)) {
if (! externalModule.isAvailable()) {
continue;
}
String repoDisplayString = externalModule.getRepositoryDisplayString();
if (repositories.containsKey(repoDisplayString)) {
repositories.get(repoDisplayString).addModule(externalModule);
} else {
unknownRepositoryNode.addModule(externalModule);
}
}
return repositories;
}
private synchronized Map<String, SourceModuleNode> getSourceDirectoryModules(IPackageFragmentRoot sourceRoot) {
Map<String, SourceModuleNode> sourceDirectoryModules = new LinkedHashMap<>();
for (Module m : CeylonBuilder.getProjectSourceModules(sourceRoot.getJavaProject().getProject())) {
if (m instanceof JDTModule) {
JDTModule module = (JDTModule) m;
if (module.getPackageFragmentRoots().contains(sourceRoot)) {
String signature = module.getSignature();
SourceModuleNode sourceModuleNode = sourceDirectoryModules.get(signature);
if (sourceModuleNode == null) {
sourceModuleNode = SourceModuleNode.createSourceModuleNode(sourceRoot, signature);
sourceDirectoryModules.put(signature, sourceModuleNode);
}
}
}
}
return sourceDirectoryModules;
}
@Override
@SuppressWarnings("rawtypes")
public void getPipelinedElements(Object anInput, Set theCurrentElements) {}
@Override
public Object getPipelinedParent(Object anObject, Object aSuggestedParent) {
if (anObject instanceof IPackageFragmentRoot) {
IPackageFragmentRoot pfr = (IPackageFragmentRoot) anObject;
if (aSuggestedParent instanceof ClassPathContainer) {
IProject project = pfr.getJavaProject().getProject();
Map<String, RepositoryNode> repositories = getProjectRepositoryNodes(project);
for (RepositoryNode rn : repositories.values()) {
for (ExternalModuleNode emn : rn.getModules()) {
if (emn.getModule()
.getPackageFragmentRoots()
.contains(pfr)) {
return rn;
}
}
}
return null;
}
}
if (anObject instanceof IPackageFragment) {
if ( !(anObject instanceof SourceModuleNode)) {
IPackageFragment pkgFragment = (IPackageFragment) anObject;
IPackageFragmentRoot root = (IPackageFragmentRoot) pkgFragment.getAncestor(IJavaElement.PACKAGE_FRAGMENT_ROOT);
Map<String, SourceModuleNode> moduleNodes = getSourceDirectoryModules(root);
if (CeylonBuilder.isSourceFolder(root)) {
if (aSuggestedParent instanceof IPackageFragmentRoot) {
JDTModule module = CeylonBuilder.getModule(pkgFragment);
if (module != null) {
return moduleNodes.get(module.getSignature());
}
}
if (aSuggestedParent instanceof IPackageFragment) {
JDTModule module = CeylonBuilder.getModule(pkgFragment);
if (module != null) {
JDTModule parentModule = CeylonBuilder.getModule((IPackageFragment)aSuggestedParent);
if (! module.equals(parentModule)) {
String signature = module.getSignature();
return moduleNodes.get(signature);
}
}
}
}
} else {
return ((SourceModuleNode)anObject).getSourceFolder();
}
}
if (anObject instanceof IFile && aSuggestedParent instanceof IPackageFragmentRoot) {
IPackageFragmentRoot root = (IPackageFragmentRoot) aSuggestedParent;
try {
for (IJavaElement je : root.getChildren()) {
if (((IPackageFragment)je).isDefaultPackage()) {
return je;
}
}
} catch (JavaModelException e) {
e.printStackTrace();
}
}
return aSuggestedParent;
}
/* (non-Javadoc)
* @see org.eclipse.ui.navigator.IPipelinedTreeContentProvider#interceptAdd(org.eclipse.ui.navigator.PipelinedShapeModification)
*/
@Override
public PipelinedShapeModification interceptAdd(
PipelinedShapeModification aShapeModification) {
Object aParent = aShapeModification.getParent();
@SuppressWarnings("rawtypes")
Set changedChildren = aShapeModification.getChildren();
/*
* IProject - ClassPathContainer * => remove the modification and refresh project
*/
if (aParent instanceof IProject) {
for (Object child : changedChildren) {
if (child instanceof ClassPathContainer) {
aShapeModification.getChildren().clear();
scheduleRefresh(aParent);
return aShapeModification;
}
}
}
/*
* ClassPathContainer - IPackageFragmentRoot * =>
* Calculate the parent module for each root.
* - If only on parent module => add ExternalModuleNode - IPackageFragmentRoot (What happens if the module didn't exist before ??)
* - Else refresh on project
*
*/
if (aParent instanceof ClassPathContainer) {
replaceParentOrScheduleRefresh(aShapeModification, aParent, changedChildren,
((ClassPathContainer)aParent).getJavaProject().getProject());
return aShapeModification;
}
/*
IPackageFragmentRoot - IPackageFragment * =>
Calculate the parent source module for each fragment.
- If only on parent module => add sourceModule - IPackageFragment * (What happens if the module didn't exist before ??)
- Else refresh on the IPackageFragmentRoot
IPackageFragmentRoot - IFile * => add defaultPackage - IFile (What happens if the default module wasn't displayed before ??)
*/
if (aParent instanceof IPackageFragmentRoot) {
IPackageFragmentRoot root = (IPackageFragmentRoot) aParent;
if (CeylonBuilder.isSourceFolder(root)) {
replaceParentOrScheduleRefresh(aShapeModification, aParent,
changedChildren, aParent);
}
return aShapeModification;
}
return aShapeModification;
}
@SuppressWarnings("rawtypes")
private void replaceParentOrScheduleRefresh(
PipelinedShapeModification shapeModification, Object parent,
Set addedChildren, Object nodeToRefresh) {
Object newParent = null;
for (Object child : addedChildren) {
Object currentParent = getPipelinedParent(child, parent);
if (currentParent == null) {
currentParent = getParent(child);
}
if (newParent == null) {
newParent = currentParent;
} else {
if (! newParent.equals(currentParent)) {
// Several new parents
// Cancel the addition and refresh the project
newParent = null;
break;
}
}
}
if (newParent == null) {
shapeModification.getChildren().clear();
scheduleRefresh(nodeToRefresh);
} else {
shapeModification.setParent(newParent);
}
}
private void scheduleRefresh(final Object aParent) {
if (viewer != null) {
UIJob refreshJob = new UIJob("Refresh Viewer") {
@Override
public IStatus runInUIThread(IProgressMonitor monitor) {
viewer.refresh(aParent);
return Status.OK_STATUS;
}
};
refreshJob.setSystem(true);
refreshJob.schedule();
}
}
@Override
public PipelinedShapeModification interceptRemove(
PipelinedShapeModification aRemoveModification) {
return aRemoveModification;
}
@Override
@SuppressWarnings("unchecked")
public boolean interceptRefresh(
PipelinedViewerUpdate aRefreshSynchronization) {
ClassPathContainer aClassPathContainer = null;
for (Object target : aRefreshSynchronization.getRefreshTargets()) {
if (target instanceof ClassPathContainer) {
aClassPathContainer = (ClassPathContainer)target;
break;
}
}
if (aClassPathContainer != null) {
aRefreshSynchronization.getRefreshTargets().clear();
aRefreshSynchronization.getRefreshTargets().addAll(getProjectRepositoryNodes(aClassPathContainer.getJavaProject().getProject()).values());
return true;
}
return false;
}
@Override
public boolean interceptUpdate(PipelinedViewerUpdate anUpdateSynchronization) {
return false;
}
@Override
public void init(ICommonContentExtensionSite aConfig) {
CeylonBuilder.addModelListener(this);
INavigatorContentExtension javaNavigatorExtension = null;
@SuppressWarnings("unchecked")
Set<INavigatorContentExtension> set = aConfig.getService().findContentExtensionsByTriggerPoint(JavaCore.create(ResourcesPlugin.getWorkspace().getRoot()));
for (INavigatorContentExtension extension : set) {
if (extension.getDescriptor().equals(aConfig.getExtension().getDescriptor().getOverriddenDescriptor())) {
javaNavigatorExtension = extension;
break;
}
}
ITreeContentProvider javaContentProvider = javaNavigatorExtension.getContentProvider();
if (javaContentProvider instanceof PackageExplorerContentProvider) {
((PackageExplorerContentProvider) javaContentProvider).setShowLibrariesNode(true);
}
javaNavigatorStateModel = javaNavigatorExtension.getStateModel();
final INavigatorFilterService filterService = aConfig.getService().getFilterService();
final List<String> filtersToActivate = new ArrayList<>();
for (ICommonFilterDescriptor descriptor : filterService.getVisibleFilterDescriptors()) {
String filterId = descriptor.getId();
if (filterService.isActive(filterId)) {
if (filterId.equals("org.eclipse.jdt.java.ui.filters.HideEmptyPackages")) {
filtersToActivate.add("com.redhat.ceylon.eclipse.ui.navigator.filters.HideEmptyPackages");
} else if (filterId.equals("org.eclipse.jdt.java.ui.filters.HideEmptyInnerPackages")) {
filtersToActivate.add("com.redhat.ceylon.eclipse.ui.navigator.filters.HideEmptyInnerPackages");
} else {
filtersToActivate.add(filterId);
}
}
}
UIJob changeJDTEmptyFiltersJob = new UIJob("Change JDT Empty Filters") {
@Override
public IStatus runInUIThread(IProgressMonitor monitor) {
filterService.activateFilterIdsAndUpdateViewer(filtersToActivate.toArray(new String[0]));
return Status.OK_STATUS;
}
};
changeJDTEmptyFiltersJob.setSystem(true);
changeJDTEmptyFiltersJob.schedule();
}
@Override
public Object[] getElements(Object inputElement) {
System.out.print("");
return new Object[0];
}
@Override
public Object[] getChildren(Object parentElement) {
if (parentElement instanceof RepositoryNode) {
return ((RepositoryNode)parentElement).getModules().toArray();
}
if (parentElement instanceof ExternalModuleNode) {
ExternalModuleNode moduleNode = (ExternalModuleNode) parentElement;
ArrayList<Object> result = new ArrayList<Object>(moduleNode.getBinaryArchives().size() + (moduleNode.getSourceArchive() != null ? 1 : 0));
if (moduleNode.getSourceArchive() != null) {
result.add(moduleNode.getSourceArchive());
}
result.addAll(moduleNode.getBinaryArchives());
return result.toArray();
}
if (parentElement instanceof SourceModuleNode) {
return ((SourceModuleNode)parentElement).getPackageFragments().toArray();
}
if (parentElement instanceof CeylonArchiveFileStore) {
CeylonArchiveFileStore archiveFileStore = (CeylonArchiveFileStore)parentElement;
List<Object> children = new ArrayList<Object>();
try {
for (IFileStore child : archiveFileStore.childStores(EFS.NONE, null)) {
CeylonArchiveFileStore childFileStore = (CeylonArchiveFileStore)child;
children.add(childFileStore);
}
} catch (CoreException e) {
e.printStackTrace();
}
return children.toArray();
}
return new Object[0];
}
@Override
public Object getParent(Object element) {
if (element instanceof RepositoryNode) {
return ((RepositoryNode)element).project;
}
if (element instanceof ExternalModuleNode) {
return ((ExternalModuleNode)element).getRepositoryNode();
}
if (element instanceof SourceModuleNode) {
return ((SourceModuleNode)element).getSourceFolder();
}
if (element instanceof CeylonArchiveFileStore) {
CeylonArchiveFileStore archiveFileStore = (CeylonArchiveFileStore) element;
if (archiveFileStore.getParent() == null) {
// it's the archive root
for (IProject project: CeylonBuilder.getProjects()) {
for (RepositoryNode repoNode: getProjectRepositoryNodes(project).values()) {
for (ExternalModuleNode moduleNode: repoNode.getModules()) {
CeylonArchiveFileStore sourceArchive = moduleNode.getSourceArchive();
if (sourceArchive!=null &&
sourceArchive.equals(archiveFileStore)) {
return moduleNode;
}
}
}
}
} else {
return ((CeylonArchiveFileStore) element).getParent();
}
}
return null;
}
@Override
public boolean hasChildren(Object element) {
if (element instanceof RepositoryNode) {
return ! ((RepositoryNode)element).getModules().isEmpty();
}
if (element instanceof ExternalModuleNode) {
return ! ((ExternalModuleNode)element).getBinaryArchives().isEmpty() ||
((ExternalModuleNode)element).getSourceArchive() != null;
}
if (element instanceof SourceModuleNode) {
SourceModuleNode sourceModuleNode = (SourceModuleNode) element;
return sourceModuleNode.getPackageFragments().size() > 0;
}
if (element instanceof CeylonArchiveFileStore) {
try {
return ((CeylonArchiveFileStore) element).childNames(EFS.NONE, null).length > 0;
} catch (CoreException e) {
e.printStackTrace();
}
}
return false;
}
@Override
public void dispose() {
CeylonBuilder.removeModelListener(this);
}
private StructuredViewer viewer = null;
@Override
public void inputChanged(Viewer viewer, Object oldInput, Object newInput) {
this.viewer = (StructuredViewer) viewer;
}
@Override
public void restoreState(IMemento aMemento) {
}
@Override
public void saveState(IMemento aMemento) {
}
@Override
public boolean hasPipelinedChildren(Object anInput,
boolean currentHasChildren) {
if (anInput instanceof SourceModuleNode) {
SourceModuleNode sourceModuleNode = (SourceModuleNode) anInput;
return sourceModuleNode.getPackageFragments().size() > 0;
}
if (anInput instanceof IPackageFragment) {
IPackageFragment pkgFragment = (IPackageFragment) anInput;
IPackageFragmentRoot root = (IPackageFragmentRoot) pkgFragment.getAncestor(IJavaElement.PACKAGE_FRAGMENT_ROOT);
if (pkgFragment.isDefaultPackage() && root != null) {
IFolder rootFolder = null;
try {
rootFolder = (IFolder) root.getCorrespondingResource();
} catch (JavaModelException e) {
e.printStackTrace();
}
if (rootFolder != null && CeylonBuilder.isSourceFolder(root)) {
try {
for (IResource r : rootFolder.members()) {
if (r instanceof IFile) {
return true;
}
}
} catch (JavaModelException e) {
e.printStackTrace();
} catch (CoreException e) {
e.printStackTrace();
}
}
}
}
return currentHasChildren;
}
@Override
public void modelParsed(IProject project) {
if (project != null) {
try {
for (IPackageFragmentRoot pfr : JavaCore.create(project).getAllPackageFragmentRoots()) {
if (CeylonBuilder.isSourceFolder(pfr)) {
scheduleRefresh(pfr);
}
}
} catch (JavaModelException e) {
e.printStackTrace();
}
}
}
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_navigator_CeylonNavigatorContentProvider.java
|
6,449 |
public class TribeService extends AbstractLifecycleComponent<TribeService> {
public static final ClusterBlock TRIBE_METADATA_BLOCK = new ClusterBlock(10, "tribe node, metadata not allowed", false, false, RestStatus.BAD_REQUEST, ClusterBlockLevel.METADATA);
public static final ClusterBlock TRIBE_WRITE_BLOCK = new ClusterBlock(11, "tribe node, write not allowed", false, false, RestStatus.BAD_REQUEST, ClusterBlockLevel.WRITE);
public static Settings processSettings(Settings settings) {
if (settings.get(TRIBE_NAME) != null) {
// if its a node client started by this service as tribe, remove any tribe group setting
// to avoid recursive configuration
ImmutableSettings.Builder sb = ImmutableSettings.builder().put(settings);
for (String s : settings.getAsMap().keySet()) {
if (s.startsWith("tribe.") && !s.equals(TRIBE_NAME)) {
sb.remove(s);
}
}
return sb.build();
}
Map<String, Settings> nodesSettings = settings.getGroups("tribe", true);
if (nodesSettings.isEmpty()) {
return settings;
}
// its a tribe configured node..., force settings
ImmutableSettings.Builder sb = ImmutableSettings.builder().put(settings);
sb.put("node.client", true); // this node should just act as a node client
sb.put("discovery.type", "local"); // a tribe node should not use zen discovery
sb.put("discovery.initial_state_timeout", 0); // nothing is going to be discovered, since no master will be elected
if (sb.get("cluster.name") == null) {
sb.put("cluster.name", "tribe_" + Strings.randomBase64UUID()); // make sure it won't join other tribe nodes in the same JVM
}
sb.put("gateway.type", "none"); // we shouldn't store anything locally...
sb.put(TransportMasterNodeReadOperationAction.FORCE_LOCAL_SETTING, true);
return sb.build();
}
public static final String TRIBE_NAME = "tribe.name";
private final ClusterService clusterService;
private final List<InternalNode> nodes = Lists.newCopyOnWriteArrayList();
@Inject
public TribeService(Settings settings, ClusterService clusterService) {
super(settings);
this.clusterService = clusterService;
Map<String, Settings> nodesSettings = settings.getGroups("tribe", true);
for (Map.Entry<String, Settings> entry : nodesSettings.entrySet()) {
ImmutableSettings.Builder sb = ImmutableSettings.builder().put(entry.getValue());
sb.put("node.name", settings.get("name") + "/" + entry.getKey());
sb.put(TRIBE_NAME, entry.getKey());
if (sb.get("http.enabled") == null) {
sb.put("http.enabled", false);
}
nodes.add((InternalNode) NodeBuilder.nodeBuilder().settings(sb).client(true).build());
}
if (!nodes.isEmpty()) {
// remove the initial election / recovery blocks since we are not going to have a
// master elected in this single tribe node local "cluster"
clusterService.removeInitialStateBlock(Discovery.NO_MASTER_BLOCK);
clusterService.removeInitialStateBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK);
if (settings.getAsBoolean("tribe.blocks.write", false)) {
clusterService.addInitialStateBlock(TRIBE_WRITE_BLOCK);
}
if (settings.getAsBoolean("tribe.blocks.metadata", false)) {
clusterService.addInitialStateBlock(TRIBE_METADATA_BLOCK);
}
for (InternalNode node : nodes) {
node.injector().getInstance(ClusterService.class).add(new TribeClusterStateListener(node));
}
}
}
@Override
protected void doStart() throws ElasticsearchException {
final CountDownLatch latch = new CountDownLatch(1);
clusterService.submitStateUpdateTask("updating local node id", new ProcessedClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
// add our local node to the mix...
return ClusterState.builder(currentState)
.nodes(DiscoveryNodes.builder(currentState.nodes()).put(clusterService.localNode()).localNodeId(clusterService.localNode().id()))
.build();
}
@Override
public void onFailure(String source, Throwable t) {
try {
logger.error("{}", t, source);
} finally {
latch.countDown();
}
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
});
try {
latch.await();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new ElasticsearchIllegalStateException("Interrupted while starting [" + this.getClass().getSimpleName()+ "]", e);
}
for (InternalNode node : nodes) {
try {
node.start();
} catch (Throwable e) {
// calling close is safe for non started nodes, we can just iterate over all
for (InternalNode otherNode : nodes) {
try {
otherNode.close();
} catch (Throwable t) {
logger.warn("failed to close node {} on failed start", otherNode, t);
}
}
if (e instanceof RuntimeException) {
throw (RuntimeException) e;
}
throw new ElasticsearchException(e.getMessage(), e);
}
}
}
@Override
protected void doStop() throws ElasticsearchException {
for (InternalNode node : nodes) {
try {
node.stop();
} catch (Throwable t) {
logger.warn("failed to stop node {}", t, node);
}
}
}
@Override
protected void doClose() throws ElasticsearchException {
for (InternalNode node : nodes) {
try {
node.close();
} catch (Throwable t) {
logger.warn("failed to close node {}", t, node);
}
}
}
class TribeClusterStateListener implements ClusterStateListener {
private final InternalNode tribeNode;
private final String tribeName;
TribeClusterStateListener(InternalNode tribeNode) {
this.tribeNode = tribeNode;
this.tribeName = tribeNode.settings().get(TRIBE_NAME);
}
@Override
public void clusterChanged(final ClusterChangedEvent event) {
logger.debug("[{}] received cluster event, [{}]", tribeName, event.source());
clusterService.submitStateUpdateTask("cluster event from " + tribeName + ", " + event.source(), new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
ClusterState tribeState = event.state();
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(currentState.nodes());
// -- merge nodes
// go over existing nodes, and see if they need to be removed
for (DiscoveryNode discoNode : currentState.nodes()) {
String markedTribeName = discoNode.attributes().get(TRIBE_NAME);
if (markedTribeName != null && markedTribeName.equals(tribeName)) {
if (tribeState.nodes().get(discoNode.id()) == null) {
logger.info("[{}] removing node [{}]", tribeName, discoNode);
nodes.remove(discoNode.id());
}
}
}
// go over tribe nodes, and see if they need to be added
for (DiscoveryNode tribe : tribeState.nodes()) {
if (currentState.nodes().get(tribe.id()) == null) {
// a new node, add it, but also add the tribe name to the attributes
ImmutableMap<String, String> tribeAttr = MapBuilder.newMapBuilder(tribe.attributes()).put(TRIBE_NAME, tribeName).immutableMap();
DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), tribe.getHostAddress(), tribe.address(), tribeAttr, tribe.version());
logger.info("[{}] adding node [{}]", tribeName, discoNode);
nodes.put(discoNode);
}
}
// -- merge metadata
MetaData.Builder metaData = MetaData.builder(currentState.metaData());
RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable());
// go over existing indices, and see if they need to be removed
for (IndexMetaData index : currentState.metaData()) {
String markedTribeName = index.settings().get(TRIBE_NAME);
if (markedTribeName != null && markedTribeName.equals(tribeName)) {
IndexMetaData tribeIndex = tribeState.metaData().index(index.index());
if (tribeIndex == null) {
logger.info("[{}] removing index [{}]", tribeName, index.index());
metaData.remove(index.index());
routingTable.remove(index.index());
} else {
// always make sure to update the metadata and routing table, in case
// there are changes in them (new mapping, shards moving from initializing to started)
routingTable.add(tribeState.routingTable().index(index.index()));
Settings tribeSettings = ImmutableSettings.builder().put(tribeIndex.settings()).put(TRIBE_NAME, tribeName).build();
metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
}
}
}
// go over tribe one, and see if they need to be added
for (IndexMetaData tribeIndex : tribeState.metaData()) {
if (!currentState.metaData().hasIndex(tribeIndex.index())) {
// a new index, add it, and add the tribe name as a setting
logger.info("[{}] adding index [{}]", tribeName, tribeIndex.index());
Settings tribeSettings = ImmutableSettings.builder().put(tribeIndex.settings()).put(TRIBE_NAME, tribeName).build();
metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
routingTable.add(tribeState.routingTable().index(tribeIndex.index()));
}
}
return ClusterState.builder(currentState).nodes(nodes).metaData(metaData).routingTable(routingTable).build();
}
@Override
public void onFailure(String source, Throwable t) {
logger.warn("failed to process [{}]", t, source);
}
});
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_tribe_TribeService.java
|
643 |
public class DeleteIndexTemplateAction extends IndicesAction<DeleteIndexTemplateRequest, DeleteIndexTemplateResponse, DeleteIndexTemplateRequestBuilder> {
public static final DeleteIndexTemplateAction INSTANCE = new DeleteIndexTemplateAction();
public static final String NAME = "indices/template/delete";
private DeleteIndexTemplateAction() {
super(NAME);
}
@Override
public DeleteIndexTemplateResponse newResponse() {
return new DeleteIndexTemplateResponse();
}
@Override
public DeleteIndexTemplateRequestBuilder newRequestBuilder(IndicesAdminClient client) {
return new DeleteIndexTemplateRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_template_delete_DeleteIndexTemplateAction.java
|
1,448 |
public abstract class TitanInputFormat extends InputFormat<NullWritable, FaunusVertex> implements Configurable {
private static final String SETUP_PACKAGE_PREFIX = "com.thinkaurelius.titan.hadoop.formats.util.input.";
private static final String SETUP_CLASS_NAME = ".TitanHadoopSetupImpl";
protected FaunusVertexQueryFilter vertexQuery;
protected boolean trackPaths;
protected TitanHadoopSetup titanSetup;
protected ModifiableHadoopConfiguration faunusConf;
protected ModifiableConfiguration inputConf;
// TODO why does this class even implement setConf? It doesn't save any overhead. Might as well make all the state final, delete setConf, and construct instances instead
@Override
public void setConf(final Configuration config) {
this.faunusConf = ModifiableHadoopConfiguration.of(config);
this.vertexQuery = FaunusVertexQueryFilter.create(faunusConf);
this.inputConf = faunusConf.getInputConf();
final String titanVersion = faunusConf.get(TITAN_INPUT_VERSION);
this.trackPaths = faunusConf.get(PIPELINE_TRACK_PATHS);
final String className = SETUP_PACKAGE_PREFIX + titanVersion + SETUP_CLASS_NAME;
this.titanSetup = ConfigurationUtil.instantiate(className, new Object[]{config}, new Class[]{Configuration.class});
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_formats_util_TitanInputFormat.java
|
5,210 |
public class DateHistogramParser implements Aggregator.Parser {
private final ImmutableMap<String, DateTimeUnit> dateFieldUnits;
public DateHistogramParser() {
dateFieldUnits = MapBuilder.<String, DateTimeUnit>newMapBuilder()
.put("year", DateTimeUnit.YEAR_OF_CENTURY)
.put("1y", DateTimeUnit.YEAR_OF_CENTURY)
.put("quarter", DateTimeUnit.QUARTER)
.put("1q", DateTimeUnit.QUARTER)
.put("month", DateTimeUnit.MONTH_OF_YEAR)
.put("1M", DateTimeUnit.MONTH_OF_YEAR)
.put("week", DateTimeUnit.WEEK_OF_WEEKYEAR)
.put("1w", DateTimeUnit.WEEK_OF_WEEKYEAR)
.put("day", DateTimeUnit.DAY_OF_MONTH)
.put("1d", DateTimeUnit.DAY_OF_MONTH)
.put("hour", DateTimeUnit.HOUR_OF_DAY)
.put("1h", DateTimeUnit.HOUR_OF_DAY)
.put("minute", DateTimeUnit.MINUTES_OF_HOUR)
.put("1m", DateTimeUnit.MINUTES_OF_HOUR)
.put("second", DateTimeUnit.SECOND_OF_MINUTE)
.put("1s", DateTimeUnit.SECOND_OF_MINUTE)
.immutableMap();
}
@Override
public String type() {
return InternalDateHistogram.TYPE.name();
}
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
ValuesSourceConfig<NumericValuesSource> config = new ValuesSourceConfig<NumericValuesSource>(NumericValuesSource.class);
String field = null;
String script = null;
String scriptLang = null;
Map<String, Object> scriptParams = null;
boolean keyed = false;
long minDocCount = 1;
InternalOrder order = (InternalOrder) Histogram.Order.KEY_ASC;
String interval = null;
boolean preZoneAdjustLargeInterval = false;
DateTimeZone preZone = DateTimeZone.UTC;
DateTimeZone postZone = DateTimeZone.UTC;
String format = null;
long preOffset = 0;
long postOffset = 0;
boolean assumeSorted = false;
XContentParser.Token token;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.VALUE_STRING) {
if ("field".equals(currentFieldName)) {
field = parser.text();
} else if ("script".equals(currentFieldName)) {
script = parser.text();
} else if ("lang".equals(currentFieldName)) {
scriptLang = parser.text();
} else if ("time_zone".equals(currentFieldName) || "timeZone".equals(currentFieldName)) {
preZone = parseZone(parser, token);
} else if ("pre_zone".equals(currentFieldName) || "preZone".equals(currentFieldName)) {
preZone = parseZone(parser, token);
} else if ("pre_zone_adjust_large_interval".equals(currentFieldName) || "preZoneAdjustLargeInterval".equals(currentFieldName)) {
preZoneAdjustLargeInterval = parser.booleanValue();
} else if ("post_zone".equals(currentFieldName) || "postZone".equals(currentFieldName)) {
postZone = parseZone(parser, token);
} else if ("pre_offset".equals(currentFieldName) || "preOffset".equals(currentFieldName)) {
preOffset = parseOffset(parser.text());
} else if ("post_offset".equals(currentFieldName) || "postOffset".equals(currentFieldName)) {
postOffset = parseOffset(parser.text());
} else if ("interval".equals(currentFieldName)) {
interval = parser.text();
} else if ("format".equals(currentFieldName)) {
format = parser.text();
} else {
throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
}
} else if (token == XContentParser.Token.VALUE_BOOLEAN) {
if ("keyed".equals(currentFieldName)) {
keyed = parser.booleanValue();
} else if ("script_values_sorted".equals(currentFieldName)) {
assumeSorted = parser.booleanValue();
} else {
throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
}
} else if (token == XContentParser.Token.VALUE_NUMBER) {
if ("min_doc_count".equals(currentFieldName) || "minDocCount".equals(currentFieldName)) {
minDocCount = parser.longValue();
} else {
throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
}
} else if (token == XContentParser.Token.START_OBJECT) {
if ("params".equals(currentFieldName)) {
scriptParams = parser.map();
} else if ("order".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.VALUE_STRING) {
String dir = parser.text();
boolean asc = "asc".equals(dir);
order = resolveOrder(currentFieldName, asc);
//TODO should we throw an error if the value is not "asc" or "desc"???
}
}
} else {
throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
}
} else {
throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
}
}
if (interval == null) {
throw new SearchParseException(context, "Missing required field [interval] for histogram aggregation [" + aggregationName + "]");
}
SearchScript searchScript = null;
if (script != null) {
searchScript = context.scriptService().search(context.lookup(), scriptLang, script, scriptParams);
config.script(searchScript);
}
if (!assumeSorted) {
// we need values to be sorted and unique for efficiency
config.ensureSorted(true);
}
TimeZoneRounding.Builder tzRoundingBuilder;
DateTimeUnit dateTimeUnit = dateFieldUnits.get(interval);
if (dateTimeUnit != null) {
tzRoundingBuilder = TimeZoneRounding.builder(dateTimeUnit);
} else {
// the interval is a time value?
tzRoundingBuilder = TimeZoneRounding.builder(TimeValue.parseTimeValue(interval, null));
}
TimeZoneRounding rounding = tzRoundingBuilder
.preZone(preZone).postZone(postZone)
.preZoneAdjustLargeInterval(preZoneAdjustLargeInterval)
.preOffset(preOffset).postOffset(postOffset)
.build();
if (format != null) {
config.formatter(new ValueFormatter.DateTime(format));
}
if (field == null) {
if (searchScript != null) {
ValueParser valueParser = new ValueParser.DateMath(new DateMathParser(DateFieldMapper.Defaults.DATE_TIME_FORMATTER, DateFieldMapper.Defaults.TIME_UNIT));
config.parser(valueParser);
return new HistogramAggregator.Factory(aggregationName, config, rounding, order, keyed, minDocCount, InternalDateHistogram.FACTORY);
}
// falling back on the get field data context
return new HistogramAggregator.Factory(aggregationName, config, rounding, order, keyed, minDocCount, InternalDateHistogram.FACTORY);
}
FieldMapper<?> mapper = context.smartNameFieldMapper(field);
if (mapper == null) {
config.unmapped(true);
return new HistogramAggregator.Factory(aggregationName, config, rounding, order, keyed, minDocCount, InternalDateHistogram.FACTORY);
}
if (!(mapper instanceof DateFieldMapper)) {
throw new SearchParseException(context, "date histogram can only be aggregated on date fields but [" + field + "] is not a date field");
}
IndexFieldData<?> indexFieldData = context.fieldData().getForField(mapper);
config.fieldContext(new FieldContext(field, indexFieldData));
return new HistogramAggregator.Factory(aggregationName, config, rounding, order, keyed, minDocCount, InternalDateHistogram.FACTORY);
}
private static InternalOrder resolveOrder(String key, boolean asc) {
if ("_key".equals(key) || "_time".equals(key)) {
return (InternalOrder) (asc ? InternalOrder.KEY_ASC : InternalOrder.KEY_DESC);
}
if ("_count".equals(key)) {
return (InternalOrder) (asc ? InternalOrder.COUNT_ASC : InternalOrder.COUNT_DESC);
}
int i = key.indexOf('.');
if (i < 0) {
return new InternalOrder.Aggregation(key, null, asc);
}
return new InternalOrder.Aggregation(key.substring(0, i), key.substring(i + 1), asc);
}
private long parseOffset(String offset) throws IOException {
if (offset.charAt(0) == '-') {
return -TimeValue.parseTimeValue(offset.substring(1), null).millis();
}
int beginIndex = offset.charAt(0) == '+' ? 1 : 0;
return TimeValue.parseTimeValue(offset.substring(beginIndex), null).millis();
}
private DateTimeZone parseZone(XContentParser parser, XContentParser.Token token) throws IOException {
if (token == XContentParser.Token.VALUE_NUMBER) {
return DateTimeZone.forOffsetHours(parser.intValue());
} else {
String text = parser.text();
int index = text.indexOf(':');
if (index != -1) {
int beginIndex = text.charAt(0) == '+' ? 1 : 0;
// format like -02:30
return DateTimeZone.forOffsetHoursMinutes(
Integer.parseInt(text.substring(beginIndex, index)),
Integer.parseInt(text.substring(index + 1))
);
} else {
// id, listed here: http://joda-time.sourceforge.net/timezones.html
return DateTimeZone.forID(text);
}
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_bucket_histogram_DateHistogramParser.java
|
422 |
static final class Fields {
static final XContentBuilderString SNAPSHOT = new XContentBuilderString("snapshot");
static final XContentBuilderString ACCEPTED = new XContentBuilderString("accepted");
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_snapshots_restore_RestoreSnapshotResponse.java
|
234 |
@Entity
@Table(name="BLC_SYSTEM_PROPERTY")
@Inheritance(strategy = InheritanceType.JOINED)
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@AdminPresentationClass(friendlyName = "SystemPropertyImpl")
public class SystemPropertyImpl implements SystemProperty {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "SystemPropertyId")
@GenericGenerator(
name="SystemPropertyId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="SystemPropertyImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.common.config.domain.SystemPropertyImpl")
}
)
@Column(name = "BLC_SYSTEM_PROPERTY_ID")
protected Long id;
@Column(name= "PROPERTY_NAME", unique = true, nullable = false)
protected String name;
@Column(name= "PROPERTY_VALUE", nullable = false)
protected String value;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public String getName() {
return name;
}
@Override
public void setName(String name) {
this.name = name;
}
@Override
public String getValue() {
return value;
}
@Override
public void setValue(String value) {
this.value = value;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_config_domain_SystemPropertyImpl.java
|
297 |
new Thread() {
public void run() {
try {
if (l.tryLock(20, TimeUnit.SECONDS)) {
latch2.countDown();
}
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_lock_ClientLockTest.java
|
186 |
@RunWith(HazelcastSerialClassRunner.class)
@Category(QuickTest.class)
public class ClientSetTest {
static final String name = "test";
static HazelcastInstance hz;
static HazelcastInstance server;
static ISet set;
@BeforeClass
public static void init(){
Config config = new Config();
server = Hazelcast.newHazelcastInstance(config);
hz = HazelcastClient.newHazelcastClient(null);
set = hz.getSet(name);
}
@AfterClass
public static void destroy() {
hz.shutdown();
Hazelcast.shutdownAll();
}
@Before
@After
public void clear() throws IOException {
set.clear();
}
@Test
public void testAddAll() {
List l = new ArrayList();
l.add("item1");
l.add("item2");
assertTrue(set.addAll(l));
assertEquals(2, set.size());
assertFalse(set.addAll(l));
assertEquals(2, set.size());
}
@Test
public void testAddRemove() {
assertTrue(set.add("item1"));
assertTrue(set.add("item2"));
assertTrue(set.add("item3"));
assertEquals(3, set.size());
assertFalse(set.add("item3"));
assertEquals(3, set.size());
assertFalse(set.remove("item4"));
assertTrue(set.remove("item3"));
}
@Test
public void testIterator(){
assertTrue(set.add("item1"));
assertTrue(set.add("item2"));
assertTrue(set.add("item3"));
assertTrue(set.add("item4"));
Iterator iter = set.iterator();
assertTrue(((String)iter.next()).startsWith("item"));
assertTrue(((String)iter.next()).startsWith("item"));
assertTrue(((String)iter.next()).startsWith("item"));
assertTrue(((String)iter.next()).startsWith("item"));
assertFalse(iter.hasNext());
}
@Test
public void testContains(){
assertTrue(set.add("item1"));
assertTrue(set.add("item2"));
assertTrue(set.add("item3"));
assertTrue(set.add("item4"));
assertFalse(set.contains("item5"));
assertTrue(set.contains("item2"));
List l = new ArrayList();
l.add("item6");
l.add("item3");
assertFalse(set.containsAll(l));
assertTrue(set.add("item6"));
assertTrue(set.containsAll(l));
}
@Test
public void removeRetainAll(){
assertTrue(set.add("item1"));
assertTrue(set.add("item2"));
assertTrue(set.add("item3"));
assertTrue(set.add("item4"));
List l = new ArrayList();
l.add("item4");
l.add("item3");
assertTrue(set.removeAll(l));
assertEquals(2, set.size());
assertFalse(set.removeAll(l));
assertEquals(2, set.size());
l.clear();
l.add("item1");
l.add("item2");
assertFalse(set.retainAll(l));
assertEquals(2, set.size());
l.clear();
assertTrue(set.retainAll(l));
assertEquals(0, set.size());
}
@Test
public void testListener() throws Exception {
// final ISet tempSet = server.getSet(name);
final ISet tempSet = set;
final CountDownLatch latch = new CountDownLatch(6);
ItemListener listener = new ItemListener() {
public void itemAdded(ItemEvent itemEvent) {
latch.countDown();
}
public void itemRemoved(ItemEvent item) {
}
};
String registrationId = tempSet.addItemListener(listener, true);
new Thread(){
public void run() {
for (int i=0; i<5; i++){
tempSet.add("item" + i);
}
tempSet.add("done");
}
}.start();
assertTrue(latch.await(20, TimeUnit.SECONDS));
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_collections_ClientSetTest.java
|
571 |
public class ODefaultIndexFactory implements OIndexFactory {
public static final String SBTREE_ALGORITHM = "SBTREE";
public static final String MVRBTREE_ALGORITHM = "MVRBTREE";
public static final String MVRBTREE_VALUE_CONTAINER = "MVRBTREESET";
public static final String SBTREEBONSAI_VALUE_CONTAINER = "SBTREEBONSAISET";
public static final String NONE_VALUE_CONTAINER = "NONE";
private static final Set<String> TYPES;
static {
final Set<String> types = new HashSet<String>();
types.add(OClass.INDEX_TYPE.UNIQUE.toString());
types.add(OClass.INDEX_TYPE.NOTUNIQUE.toString());
types.add(OClass.INDEX_TYPE.FULLTEXT.toString());
types.add(OClass.INDEX_TYPE.DICTIONARY.toString());
TYPES = Collections.unmodifiableSet(types);
}
/**
* Index types :
* <ul>
* <li>UNIQUE</li>
* <li>NOTUNIQUE</li>
* <li>FULLTEXT</li>
* <li>DICTIONARY</li>
* </ul>
*/
public Set<String> getTypes() {
return TYPES;
}
public OIndexInternal<?> createIndex(ODatabaseRecord database, String indexType, String algorithm, String valueContainerAlgorithm)
throws OConfigurationException {
if (valueContainerAlgorithm == null) {
if (OClass.INDEX_TYPE.NOTUNIQUE.toString().equals(indexType)
|| OClass.INDEX_TYPE.NOTUNIQUE_HASH_INDEX.toString().equals(indexType)
|| OClass.INDEX_TYPE.FULLTEXT_HASH_INDEX.toString().equals(indexType)
|| OClass.INDEX_TYPE.FULLTEXT.toString().equals(indexType))
valueContainerAlgorithm = MVRBTREE_VALUE_CONTAINER;
else
valueContainerAlgorithm = NONE_VALUE_CONTAINER;
}
if ((database.getStorage().getType().equals(OEngineLocalPaginated.NAME) || database.getStorage().getType()
.equals(OEngineLocal.NAME))
&& valueContainerAlgorithm.equals(ODefaultIndexFactory.MVRBTREE_VALUE_CONTAINER)
&& OGlobalConfiguration.INDEX_NOTUNIQUE_USE_SBTREE_CONTAINER_BY_DEFAULT.getValueAsBoolean()) {
OLogManager
.instance()
.warn(
this,
"Index was created using %s as values container. "
+ "This container is deprecated and is not supported any more. To avoid this message please drop and recreate indexes or perform DB export/import.",
valueContainerAlgorithm);
}
if (SBTREE_ALGORITHM.equals(algorithm))
return createSBTreeIndex(indexType, valueContainerAlgorithm);
if (MVRBTREE_ALGORITHM.equals(algorithm) || algorithm == null)
return createMRBTreeIndex(indexType, valueContainerAlgorithm);
throw new OConfigurationException("Unsupported type : " + indexType);
}
private OIndexInternal<?> createMRBTreeIndex(String indexType, String valueContainerAlgorithm) {
if (OClass.INDEX_TYPE.UNIQUE.toString().equals(indexType)) {
return new OIndexUnique(indexType, MVRBTREE_ALGORITHM, new OMVRBTreeIndexEngine<OIdentifiable>(), valueContainerAlgorithm);
} else if (OClass.INDEX_TYPE.NOTUNIQUE.toString().equals(indexType)) {
return new OIndexNotUnique(indexType, MVRBTREE_ALGORITHM, new OMVRBTreeIndexEngine<Set<OIdentifiable>>(),
valueContainerAlgorithm);
} else if (OClass.INDEX_TYPE.FULLTEXT.toString().equals(indexType)) {
return new OIndexFullText(indexType, MVRBTREE_ALGORITHM, new OMVRBTreeIndexEngine<Set<OIdentifiable>>(),
valueContainerAlgorithm);
} else if (OClass.INDEX_TYPE.DICTIONARY.toString().equals(indexType)) {
return new OIndexDictionary(indexType, MVRBTREE_ALGORITHM, new OMVRBTreeIndexEngine<OIdentifiable>(), valueContainerAlgorithm);
}
throw new OConfigurationException("Unsupported type : " + indexType);
}
private OIndexInternal<?> createSBTreeIndex(String indexType, String valueContainerAlgorithm) {
if (OClass.INDEX_TYPE.UNIQUE.toString().equals(indexType)) {
return new OIndexUnique(indexType, SBTREE_ALGORITHM, new OSBTreeIndexEngine<OIdentifiable>(), valueContainerAlgorithm);
} else if (OClass.INDEX_TYPE.NOTUNIQUE.toString().equals(indexType)) {
return new OIndexNotUnique(indexType, SBTREE_ALGORITHM, new OSBTreeIndexEngine<Set<OIdentifiable>>(), valueContainerAlgorithm);
} else if (OClass.INDEX_TYPE.FULLTEXT.toString().equals(indexType)) {
return new OIndexFullText(indexType, SBTREE_ALGORITHM, new OSBTreeIndexEngine<Set<OIdentifiable>>(), valueContainerAlgorithm);
} else if (OClass.INDEX_TYPE.DICTIONARY.toString().equals(indexType)) {
return new OIndexDictionary(indexType, SBTREE_ALGORITHM, new OSBTreeIndexEngine<OIdentifiable>(), valueContainerAlgorithm);
}
throw new OConfigurationException("Unsupported type : " + indexType);
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_index_ODefaultIndexFactory.java
|
157 |
public class ConcurrentLinkedDeque<E>
extends AbstractCollection<E>
implements Deque<E>, java.io.Serializable {
/*
* This is an implementation of a concurrent lock-free deque
* supporting interior removes but not interior insertions, as
* required to support the entire Deque interface.
*
* We extend the techniques developed for ConcurrentLinkedQueue and
* LinkedTransferQueue (see the internal docs for those classes).
* Understanding the ConcurrentLinkedQueue implementation is a
* prerequisite for understanding the implementation of this class.
*
* The data structure is a symmetrical doubly-linked "GC-robust"
* linked list of nodes. We minimize the number of volatile writes
* using two techniques: advancing multiple hops with a single CAS
* and mixing volatile and non-volatile writes of the same memory
* locations.
*
* A node contains the expected E ("item") and links to predecessor
* ("prev") and successor ("next") nodes:
*
* class Node<E> { volatile Node<E> prev, next; volatile E item; }
*
* A node p is considered "live" if it contains a non-null item
* (p.item != null). When an item is CASed to null, the item is
* atomically logically deleted from the collection.
*
* At any time, there is precisely one "first" node with a null
* prev reference that terminates any chain of prev references
* starting at a live node. Similarly there is precisely one
* "last" node terminating any chain of next references starting at
* a live node. The "first" and "last" nodes may or may not be live.
* The "first" and "last" nodes are always mutually reachable.
*
* A new element is added atomically by CASing the null prev or
* next reference in the first or last node to a fresh node
* containing the element. The element's node atomically becomes
* "live" at that point.
*
* A node is considered "active" if it is a live node, or the
* first or last node. Active nodes cannot be unlinked.
*
* A "self-link" is a next or prev reference that is the same node:
* p.prev == p or p.next == p
* Self-links are used in the node unlinking process. Active nodes
* never have self-links.
*
* A node p is active if and only if:
*
* p.item != null ||
* (p.prev == null && p.next != p) ||
* (p.next == null && p.prev != p)
*
* The deque object has two node references, "head" and "tail".
* The head and tail are only approximations to the first and last
* nodes of the deque. The first node can always be found by
* following prev pointers from head; likewise for tail. However,
* it is permissible for head and tail to be referring to deleted
* nodes that have been unlinked and so may not be reachable from
* any live node.
*
* There are 3 stages of node deletion;
* "logical deletion", "unlinking", and "gc-unlinking".
*
* 1. "logical deletion" by CASing item to null atomically removes
* the element from the collection, and makes the containing node
* eligible for unlinking.
*
* 2. "unlinking" makes a deleted node unreachable from active
* nodes, and thus eventually reclaimable by GC. Unlinked nodes
* may remain reachable indefinitely from an iterator.
*
* Physical node unlinking is merely an optimization (albeit a
* critical one), and so can be performed at our convenience. At
* any time, the set of live nodes maintained by prev and next
* links are identical, that is, the live nodes found via next
* links from the first node is equal to the elements found via
* prev links from the last node. However, this is not true for
* nodes that have already been logically deleted - such nodes may
* be reachable in one direction only.
*
* 3. "gc-unlinking" takes unlinking further by making active
* nodes unreachable from deleted nodes, making it easier for the
* GC to reclaim future deleted nodes. This step makes the data
* structure "gc-robust", as first described in detail by Boehm
* (http://portal.acm.org/citation.cfm?doid=503272.503282).
*
* GC-unlinked nodes may remain reachable indefinitely from an
* iterator, but unlike unlinked nodes, are never reachable from
* head or tail.
*
* Making the data structure GC-robust will eliminate the risk of
* unbounded memory retention with conservative GCs and is likely
* to improve performance with generational GCs.
*
* When a node is dequeued at either end, e.g. via poll(), we would
* like to break any references from the node to active nodes. We
* develop further the use of self-links that was very effective in
* other concurrent collection classes. The idea is to replace
* prev and next pointers with special values that are interpreted
* to mean off-the-list-at-one-end. These are approximations, but
* good enough to preserve the properties we want in our
* traversals, e.g. we guarantee that a traversal will never visit
* the same element twice, but we don't guarantee whether a
* traversal that runs out of elements will be able to see more
* elements later after enqueues at that end. Doing gc-unlinking
* safely is particularly tricky, since any node can be in use
* indefinitely (for example by an iterator). We must ensure that
* the nodes pointed at by head/tail never get gc-unlinked, since
* head/tail are needed to get "back on track" by other nodes that
* are gc-unlinked. gc-unlinking accounts for much of the
* implementation complexity.
*
* Since neither unlinking nor gc-unlinking are necessary for
* correctness, there are many implementation choices regarding
* frequency (eagerness) of these operations. Since volatile
* reads are likely to be much cheaper than CASes, saving CASes by
* unlinking multiple adjacent nodes at a time may be a win.
* gc-unlinking can be performed rarely and still be effective,
* since it is most important that long chains of deleted nodes
* are occasionally broken.
*
* The actual representation we use is that p.next == p means to
* goto the first node (which in turn is reached by following prev
* pointers from head), and p.next == null && p.prev == p means
* that the iteration is at an end and that p is a (static final)
* dummy node, NEXT_TERMINATOR, and not the last active node.
* Finishing the iteration when encountering such a TERMINATOR is
* good enough for read-only traversals, so such traversals can use
* p.next == null as the termination condition. When we need to
* find the last (active) node, for enqueueing a new node, we need
* to check whether we have reached a TERMINATOR node; if so,
* restart traversal from tail.
*
* The implementation is completely directionally symmetrical,
* except that most public methods that iterate through the list
* follow next pointers ("forward" direction).
*
* We believe (without full proof) that all single-element deque
* operations (e.g., addFirst, peekLast, pollLast) are linearizable
* (see Herlihy and Shavit's book). However, some combinations of
* operations are known not to be linearizable. In particular,
* when an addFirst(A) is racing with pollFirst() removing B, it is
* possible for an observer iterating over the elements to observe
* A B C and subsequently observe A C, even though no interior
* removes are ever performed. Nevertheless, iterators behave
* reasonably, providing the "weakly consistent" guarantees.
*
* Empirically, microbenchmarks suggest that this class adds about
* 40% overhead relative to ConcurrentLinkedQueue, which feels as
* good as we can hope for.
*/
private static final long serialVersionUID = 876323262645176354L;
/**
* A node from which the first node on list (that is, the unique node p
* with p.prev == null && p.next != p) can be reached in O(1) time.
* Invariants:
* - the first node is always O(1) reachable from head via prev links
* - all live nodes are reachable from the first node via succ()
* - head != null
* - (tmp = head).next != tmp || tmp != head
* - head is never gc-unlinked (but may be unlinked)
* Non-invariants:
* - head.item may or may not be null
* - head may not be reachable from the first or last node, or from tail
*/
private transient volatile Node<E> head;
/**
* A node from which the last node on list (that is, the unique node p
* with p.next == null && p.prev != p) can be reached in O(1) time.
* Invariants:
* - the last node is always O(1) reachable from tail via next links
* - all live nodes are reachable from the last node via pred()
* - tail != null
* - tail is never gc-unlinked (but may be unlinked)
* Non-invariants:
* - tail.item may or may not be null
* - tail may not be reachable from the first or last node, or from head
*/
private transient volatile Node<E> tail;
private static final Node<Object> PREV_TERMINATOR, NEXT_TERMINATOR;
@SuppressWarnings("unchecked")
Node<E> prevTerminator() {
return (Node<E>) PREV_TERMINATOR;
}
@SuppressWarnings("unchecked")
Node<E> nextTerminator() {
return (Node<E>) NEXT_TERMINATOR;
}
static final class Node<E> {
volatile Node<E> prev;
volatile E item;
volatile Node<E> next;
Node() { // default constructor for NEXT_TERMINATOR, PREV_TERMINATOR
}
/**
* Constructs a new node. Uses relaxed write because item can
* only be seen after publication via casNext or casPrev.
*/
Node(E item) {
UNSAFE.putObject(this, itemOffset, item);
}
boolean casItem(E cmp, E val) {
return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
}
void lazySetNext(Node<E> val) {
UNSAFE.putOrderedObject(this, nextOffset, val);
}
boolean casNext(Node<E> cmp, Node<E> val) {
return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
}
void lazySetPrev(Node<E> val) {
UNSAFE.putOrderedObject(this, prevOffset, val);
}
boolean casPrev(Node<E> cmp, Node<E> val) {
return UNSAFE.compareAndSwapObject(this, prevOffset, cmp, val);
}
// Unsafe mechanics
private static final sun.misc.Unsafe UNSAFE;
private static final long prevOffset;
private static final long itemOffset;
private static final long nextOffset;
static {
try {
UNSAFE = getUnsafe();
Class<?> k = Node.class;
prevOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("prev"));
itemOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("item"));
nextOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("next"));
} catch (Exception e) {
throw new Error(e);
}
}
}
/**
* Links e as first element.
*/
private void linkFirst(E e) {
checkNotNull(e);
final Node<E> newNode = new Node<E>(e);
restartFromHead:
for (;;)
for (Node<E> h = head, p = h, q;;) {
if ((q = p.prev) != null &&
(q = (p = q).prev) != null)
// Check for head updates every other hop.
// If p == q, we are sure to follow head instead.
p = (h != (h = head)) ? h : q;
else if (p.next == p) // PREV_TERMINATOR
continue restartFromHead;
else {
// p is first node
newNode.lazySetNext(p); // CAS piggyback
if (p.casPrev(null, newNode)) {
// Successful CAS is the linearization point
// for e to become an element of this deque,
// and for newNode to become "live".
if (p != h) // hop two nodes at a time
casHead(h, newNode); // Failure is OK.
return;
}
// Lost CAS race to another thread; re-read prev
}
}
}
/**
* Links e as last element.
*/
private void linkLast(E e) {
checkNotNull(e);
final Node<E> newNode = new Node<E>(e);
restartFromTail:
for (;;)
for (Node<E> t = tail, p = t, q;;) {
if ((q = p.next) != null &&
(q = (p = q).next) != null)
// Check for tail updates every other hop.
// If p == q, we are sure to follow tail instead.
p = (t != (t = tail)) ? t : q;
else if (p.prev == p) // NEXT_TERMINATOR
continue restartFromTail;
else {
// p is last node
newNode.lazySetPrev(p); // CAS piggyback
if (p.casNext(null, newNode)) {
// Successful CAS is the linearization point
// for e to become an element of this deque,
// and for newNode to become "live".
if (p != t) // hop two nodes at a time
casTail(t, newNode); // Failure is OK.
return;
}
// Lost CAS race to another thread; re-read next
}
}
}
private static final int HOPS = 2;
/**
* Unlinks non-null node x.
*/
void unlink(Node<E> x) {
// assert x != null;
// assert x.item == null;
// assert x != PREV_TERMINATOR;
// assert x != NEXT_TERMINATOR;
final Node<E> prev = x.prev;
final Node<E> next = x.next;
if (prev == null) {
unlinkFirst(x, next);
} else if (next == null) {
unlinkLast(x, prev);
} else {
// Unlink interior node.
//
// This is the common case, since a series of polls at the
// same end will be "interior" removes, except perhaps for
// the first one, since end nodes cannot be unlinked.
//
// At any time, all active nodes are mutually reachable by
// following a sequence of either next or prev pointers.
//
// Our strategy is to find the unique active predecessor
// and successor of x. Try to fix up their links so that
// they point to each other, leaving x unreachable from
// active nodes. If successful, and if x has no live
// predecessor/successor, we additionally try to gc-unlink,
// leaving active nodes unreachable from x, by rechecking
// that the status of predecessor and successor are
// unchanged and ensuring that x is not reachable from
// tail/head, before setting x's prev/next links to their
// logical approximate replacements, self/TERMINATOR.
Node<E> activePred, activeSucc;
boolean isFirst, isLast;
int hops = 1;
// Find active predecessor
for (Node<E> p = prev; ; ++hops) {
if (p.item != null) {
activePred = p;
isFirst = false;
break;
}
Node<E> q = p.prev;
if (q == null) {
if (p.next == p)
return;
activePred = p;
isFirst = true;
break;
}
else if (p == q)
return;
else
p = q;
}
// Find active successor
for (Node<E> p = next; ; ++hops) {
if (p.item != null) {
activeSucc = p;
isLast = false;
break;
}
Node<E> q = p.next;
if (q == null) {
if (p.prev == p)
return;
activeSucc = p;
isLast = true;
break;
}
else if (p == q)
return;
else
p = q;
}
// TODO: better HOP heuristics
if (hops < HOPS
// always squeeze out interior deleted nodes
&& (isFirst | isLast))
return;
// Squeeze out deleted nodes between activePred and
// activeSucc, including x.
skipDeletedSuccessors(activePred);
skipDeletedPredecessors(activeSucc);
// Try to gc-unlink, if possible
if ((isFirst | isLast) &&
// Recheck expected state of predecessor and successor
(activePred.next == activeSucc) &&
(activeSucc.prev == activePred) &&
(isFirst ? activePred.prev == null : activePred.item != null) &&
(isLast ? activeSucc.next == null : activeSucc.item != null)) {
updateHead(); // Ensure x is not reachable from head
updateTail(); // Ensure x is not reachable from tail
// Finally, actually gc-unlink
x.lazySetPrev(isFirst ? prevTerminator() : x);
x.lazySetNext(isLast ? nextTerminator() : x);
}
}
}
/**
* Unlinks non-null first node.
*/
private void unlinkFirst(Node<E> first, Node<E> next) {
// assert first != null;
// assert next != null;
// assert first.item == null;
for (Node<E> o = null, p = next, q;;) {
if (p.item != null || (q = p.next) == null) {
if (o != null && p.prev != p && first.casNext(next, p)) {
skipDeletedPredecessors(p);
if (first.prev == null &&
(p.next == null || p.item != null) &&
p.prev == first) {
updateHead(); // Ensure o is not reachable from head
updateTail(); // Ensure o is not reachable from tail
// Finally, actually gc-unlink
o.lazySetNext(o);
o.lazySetPrev(prevTerminator());
}
}
return;
}
else if (p == q)
return;
else {
o = p;
p = q;
}
}
}
/**
* Unlinks non-null last node.
*/
private void unlinkLast(Node<E> last, Node<E> prev) {
// assert last != null;
// assert prev != null;
// assert last.item == null;
for (Node<E> o = null, p = prev, q;;) {
if (p.item != null || (q = p.prev) == null) {
if (o != null && p.next != p && last.casPrev(prev, p)) {
skipDeletedSuccessors(p);
if (last.next == null &&
(p.prev == null || p.item != null) &&
p.next == last) {
updateHead(); // Ensure o is not reachable from head
updateTail(); // Ensure o is not reachable from tail
// Finally, actually gc-unlink
o.lazySetPrev(o);
o.lazySetNext(nextTerminator());
}
}
return;
}
else if (p == q)
return;
else {
o = p;
p = q;
}
}
}
/**
* Guarantees that any node which was unlinked before a call to
* this method will be unreachable from head after it returns.
* Does not guarantee to eliminate slack, only that head will
* point to a node that was active while this method was running.
*/
private final void updateHead() {
// Either head already points to an active node, or we keep
// trying to cas it to the first node until it does.
Node<E> h, p, q;
restartFromHead:
while ((h = head).item == null && (p = h.prev) != null) {
for (;;) {
if ((q = p.prev) == null ||
(q = (p = q).prev) == null) {
// It is possible that p is PREV_TERMINATOR,
// but if so, the CAS is guaranteed to fail.
if (casHead(h, p))
return;
else
continue restartFromHead;
}
else if (h != head)
continue restartFromHead;
else
p = q;
}
}
}
/**
* Guarantees that any node which was unlinked before a call to
* this method will be unreachable from tail after it returns.
* Does not guarantee to eliminate slack, only that tail will
* point to a node that was active while this method was running.
*/
private final void updateTail() {
// Either tail already points to an active node, or we keep
// trying to cas it to the last node until it does.
Node<E> t, p, q;
restartFromTail:
while ((t = tail).item == null && (p = t.next) != null) {
for (;;) {
if ((q = p.next) == null ||
(q = (p = q).next) == null) {
// It is possible that p is NEXT_TERMINATOR,
// but if so, the CAS is guaranteed to fail.
if (casTail(t, p))
return;
else
continue restartFromTail;
}
else if (t != tail)
continue restartFromTail;
else
p = q;
}
}
}
private void skipDeletedPredecessors(Node<E> x) {
whileActive:
do {
Node<E> prev = x.prev;
// assert prev != null;
// assert x != NEXT_TERMINATOR;
// assert x != PREV_TERMINATOR;
Node<E> p = prev;
findActive:
for (;;) {
if (p.item != null)
break findActive;
Node<E> q = p.prev;
if (q == null) {
if (p.next == p)
continue whileActive;
break findActive;
}
else if (p == q)
continue whileActive;
else
p = q;
}
// found active CAS target
if (prev == p || x.casPrev(prev, p))
return;
} while (x.item != null || x.next == null);
}
private void skipDeletedSuccessors(Node<E> x) {
whileActive:
do {
Node<E> next = x.next;
// assert next != null;
// assert x != NEXT_TERMINATOR;
// assert x != PREV_TERMINATOR;
Node<E> p = next;
findActive:
for (;;) {
if (p.item != null)
break findActive;
Node<E> q = p.next;
if (q == null) {
if (p.prev == p)
continue whileActive;
break findActive;
}
else if (p == q)
continue whileActive;
else
p = q;
}
// found active CAS target
if (next == p || x.casNext(next, p))
return;
} while (x.item != null || x.prev == null);
}
/**
* Returns the successor of p, or the first node if p.next has been
* linked to self, which will only be true if traversing with a
* stale pointer that is now off the list.
*/
final Node<E> succ(Node<E> p) {
// TODO: should we skip deleted nodes here?
Node<E> q = p.next;
return (p == q) ? first() : q;
}
/**
* Returns the predecessor of p, or the last node if p.prev has been
* linked to self, which will only be true if traversing with a
* stale pointer that is now off the list.
*/
final Node<E> pred(Node<E> p) {
Node<E> q = p.prev;
return (p == q) ? last() : q;
}
/**
* Returns the first node, the unique node p for which:
* p.prev == null && p.next != p
* The returned node may or may not be logically deleted.
* Guarantees that head is set to the returned node.
*/
Node<E> first() {
restartFromHead:
for (;;)
for (Node<E> h = head, p = h, q;;) {
if ((q = p.prev) != null &&
(q = (p = q).prev) != null)
// Check for head updates every other hop.
// If p == q, we are sure to follow head instead.
p = (h != (h = head)) ? h : q;
else if (p == h
// It is possible that p is PREV_TERMINATOR,
// but if so, the CAS is guaranteed to fail.
|| casHead(h, p))
return p;
else
continue restartFromHead;
}
}
/**
* Returns the last node, the unique node p for which:
* p.next == null && p.prev != p
* The returned node may or may not be logically deleted.
* Guarantees that tail is set to the returned node.
*/
Node<E> last() {
restartFromTail:
for (;;)
for (Node<E> t = tail, p = t, q;;) {
if ((q = p.next) != null &&
(q = (p = q).next) != null)
// Check for tail updates every other hop.
// If p == q, we are sure to follow tail instead.
p = (t != (t = tail)) ? t : q;
else if (p == t
// It is possible that p is NEXT_TERMINATOR,
// but if so, the CAS is guaranteed to fail.
|| casTail(t, p))
return p;
else
continue restartFromTail;
}
}
// Minor convenience utilities
/**
* Throws NullPointerException if argument is null.
*
* @param v the element
*/
private static void checkNotNull(Object v) {
if (v == null)
throw new NullPointerException();
}
/**
* Returns element unless it is null, in which case throws
* NoSuchElementException.
*
* @param v the element
* @return the element
*/
private E screenNullResult(E v) {
if (v == null)
throw new NoSuchElementException();
return v;
}
/**
* Creates an array list and fills it with elements of this list.
* Used by toArray.
*
* @return the array list
*/
private ArrayList<E> toArrayList() {
ArrayList<E> list = new ArrayList<E>();
for (Node<E> p = first(); p != null; p = succ(p)) {
E item = p.item;
if (item != null)
list.add(item);
}
return list;
}
/**
* Constructs an empty deque.
*/
public ConcurrentLinkedDeque() {
head = tail = new Node<E>(null);
}
/**
* Constructs a deque initially containing the elements of
* the given collection, added in traversal order of the
* collection's iterator.
*
* @param c the collection of elements to initially contain
* @throws NullPointerException if the specified collection or any
* of its elements are null
*/
public ConcurrentLinkedDeque(Collection<? extends E> c) {
// Copy c into a private chain of Nodes
Node<E> h = null, t = null;
for (E e : c) {
checkNotNull(e);
Node<E> newNode = new Node<E>(e);
if (h == null)
h = t = newNode;
else {
t.lazySetNext(newNode);
newNode.lazySetPrev(t);
t = newNode;
}
}
initHeadTail(h, t);
}
/**
* Initializes head and tail, ensuring invariants hold.
*/
private void initHeadTail(Node<E> h, Node<E> t) {
if (h == t) {
if (h == null)
h = t = new Node<E>(null);
else {
// Avoid edge case of a single Node with non-null item.
Node<E> newNode = new Node<E>(null);
t.lazySetNext(newNode);
newNode.lazySetPrev(t);
t = newNode;
}
}
head = h;
tail = t;
}
/**
* Inserts the specified element at the front of this deque.
* As the deque is unbounded, this method will never throw
* {@link IllegalStateException}.
*
* @throws NullPointerException if the specified element is null
*/
public void addFirst(E e) {
linkFirst(e);
}
/**
* Inserts the specified element at the end of this deque.
* As the deque is unbounded, this method will never throw
* {@link IllegalStateException}.
*
* <p>This method is equivalent to {@link #add}.
*
* @throws NullPointerException if the specified element is null
*/
public void addLast(E e) {
linkLast(e);
}
/**
* Inserts the specified element at the front of this deque.
* As the deque is unbounded, this method will never return {@code false}.
*
* @return {@code true} (as specified by {@link Deque#offerFirst})
* @throws NullPointerException if the specified element is null
*/
public boolean offerFirst(E e) {
linkFirst(e);
return true;
}
/**
* Inserts the specified element at the end of this deque.
* As the deque is unbounded, this method will never return {@code false}.
*
* <p>This method is equivalent to {@link #add}.
*
* @return {@code true} (as specified by {@link Deque#offerLast})
* @throws NullPointerException if the specified element is null
*/
public boolean offerLast(E e) {
linkLast(e);
return true;
}
public E peekFirst() {
for (Node<E> p = first(); p != null; p = succ(p)) {
E item = p.item;
if (item != null)
return item;
}
return null;
}
public E peekLast() {
for (Node<E> p = last(); p != null; p = pred(p)) {
E item = p.item;
if (item != null)
return item;
}
return null;
}
/**
* @throws NoSuchElementException {@inheritDoc}
*/
public E getFirst() {
return screenNullResult(peekFirst());
}
/**
* @throws NoSuchElementException {@inheritDoc}
*/
public E getLast() {
return screenNullResult(peekLast());
}
public E pollFirst() {
for (Node<E> p = first(); p != null; p = succ(p)) {
E item = p.item;
if (item != null && p.casItem(item, null)) {
unlink(p);
return item;
}
}
return null;
}
public E pollLast() {
for (Node<E> p = last(); p != null; p = pred(p)) {
E item = p.item;
if (item != null && p.casItem(item, null)) {
unlink(p);
return item;
}
}
return null;
}
/**
* @throws NoSuchElementException {@inheritDoc}
*/
public E removeFirst() {
return screenNullResult(pollFirst());
}
/**
* @throws NoSuchElementException {@inheritDoc}
*/
public E removeLast() {
return screenNullResult(pollLast());
}
// *** Queue and stack methods ***
/**
* Inserts the specified element at the tail of this deque.
* As the deque is unbounded, this method will never return {@code false}.
*
* @return {@code true} (as specified by {@link Queue#offer})
* @throws NullPointerException if the specified element is null
*/
public boolean offer(E e) {
return offerLast(e);
}
/**
* Inserts the specified element at the tail of this deque.
* As the deque is unbounded, this method will never throw
* {@link IllegalStateException} or return {@code false}.
*
* @return {@code true} (as specified by {@link Collection#add})
* @throws NullPointerException if the specified element is null
*/
public boolean add(E e) {
return offerLast(e);
}
public E poll() { return pollFirst(); }
public E remove() { return removeFirst(); }
public E peek() { return peekFirst(); }
public E element() { return getFirst(); }
public void push(E e) { addFirst(e); }
public E pop() { return removeFirst(); }
/**
* Removes the first element {@code e} such that
* {@code o.equals(e)}, if such an element exists in this deque.
* If the deque does not contain the element, it is unchanged.
*
* @param o element to be removed from this deque, if present
* @return {@code true} if the deque contained the specified element
* @throws NullPointerException if the specified element is null
*/
public boolean removeFirstOccurrence(Object o) {
checkNotNull(o);
for (Node<E> p = first(); p != null; p = succ(p)) {
E item = p.item;
if (item != null && o.equals(item) && p.casItem(item, null)) {
unlink(p);
return true;
}
}
return false;
}
/**
* Removes the last element {@code e} such that
* {@code o.equals(e)}, if such an element exists in this deque.
* If the deque does not contain the element, it is unchanged.
*
* @param o element to be removed from this deque, if present
* @return {@code true} if the deque contained the specified element
* @throws NullPointerException if the specified element is null
*/
public boolean removeLastOccurrence(Object o) {
checkNotNull(o);
for (Node<E> p = last(); p != null; p = pred(p)) {
E item = p.item;
if (item != null && o.equals(item) && p.casItem(item, null)) {
unlink(p);
return true;
}
}
return false;
}
/**
* Returns {@code true} if this deque contains at least one
* element {@code e} such that {@code o.equals(e)}.
*
* @param o element whose presence in this deque is to be tested
* @return {@code true} if this deque contains the specified element
*/
public boolean contains(Object o) {
if (o == null) return false;
for (Node<E> p = first(); p != null; p = succ(p)) {
E item = p.item;
if (item != null && o.equals(item))
return true;
}
return false;
}
/**
* Returns {@code true} if this collection contains no elements.
*
* @return {@code true} if this collection contains no elements
*/
public boolean isEmpty() {
return peekFirst() == null;
}
/**
* Returns the number of elements in this deque. If this deque
* contains more than {@code Integer.MAX_VALUE} elements, it
* returns {@code Integer.MAX_VALUE}.
*
* <p>Beware that, unlike in most collections, this method is
* <em>NOT</em> a constant-time operation. Because of the
* asynchronous nature of these deques, determining the current
* number of elements requires traversing them all to count them.
* Additionally, it is possible for the size to change during
* execution of this method, in which case the returned result
* will be inaccurate. Thus, this method is typically not very
* useful in concurrent applications.
*
* @return the number of elements in this deque
*/
public int size() {
int count = 0;
for (Node<E> p = first(); p != null; p = succ(p))
if (p.item != null)
// Collection.size() spec says to max out
if (++count == Integer.MAX_VALUE)
break;
return count;
}
/**
* Removes the first element {@code e} such that
* {@code o.equals(e)}, if such an element exists in this deque.
* If the deque does not contain the element, it is unchanged.
*
* @param o element to be removed from this deque, if present
* @return {@code true} if the deque contained the specified element
* @throws NullPointerException if the specified element is null
*/
public boolean remove(Object o) {
return removeFirstOccurrence(o);
}
/**
* Appends all of the elements in the specified collection to the end of
* this deque, in the order that they are returned by the specified
* collection's iterator. Attempts to {@code addAll} of a deque to
* itself result in {@code IllegalArgumentException}.
*
* @param c the elements to be inserted into this deque
* @return {@code true} if this deque changed as a result of the call
* @throws NullPointerException if the specified collection or any
* of its elements are null
* @throws IllegalArgumentException if the collection is this deque
*/
public boolean addAll(Collection<? extends E> c) {
if (c == this)
// As historically specified in AbstractQueue#addAll
throw new IllegalArgumentException();
// Copy c into a private chain of Nodes
Node<E> beginningOfTheEnd = null, last = null;
for (E e : c) {
checkNotNull(e);
Node<E> newNode = new Node<E>(e);
if (beginningOfTheEnd == null)
beginningOfTheEnd = last = newNode;
else {
last.lazySetNext(newNode);
newNode.lazySetPrev(last);
last = newNode;
}
}
if (beginningOfTheEnd == null)
return false;
// Atomically append the chain at the tail of this collection
restartFromTail:
for (;;)
for (Node<E> t = tail, p = t, q;;) {
if ((q = p.next) != null &&
(q = (p = q).next) != null)
// Check for tail updates every other hop.
// If p == q, we are sure to follow tail instead.
p = (t != (t = tail)) ? t : q;
else if (p.prev == p) // NEXT_TERMINATOR
continue restartFromTail;
else {
// p is last node
beginningOfTheEnd.lazySetPrev(p); // CAS piggyback
if (p.casNext(null, beginningOfTheEnd)) {
// Successful CAS is the linearization point
// for all elements to be added to this deque.
if (!casTail(t, last)) {
// Try a little harder to update tail,
// since we may be adding many elements.
t = tail;
if (last.next == null)
casTail(t, last);
}
return true;
}
// Lost CAS race to another thread; re-read next
}
}
}
/**
* Removes all of the elements from this deque.
*/
public void clear() {
while (pollFirst() != null)
;
}
/**
* Returns an array containing all of the elements in this deque, in
* proper sequence (from first to last element).
*
* <p>The returned array will be "safe" in that no references to it are
* maintained by this deque. (In other words, this method must allocate
* a new array). The caller is thus free to modify the returned array.
*
* <p>This method acts as bridge between array-based and collection-based
* APIs.
*
* @return an array containing all of the elements in this deque
*/
public Object[] toArray() {
return toArrayList().toArray();
}
/**
* Returns an array containing all of the elements in this deque,
* in proper sequence (from first to last element); the runtime
* type of the returned array is that of the specified array. If
* the deque fits in the specified array, it is returned therein.
* Otherwise, a new array is allocated with the runtime type of
* the specified array and the size of this deque.
*
* <p>If this deque fits in the specified array with room to spare
* (i.e., the array has more elements than this deque), the element in
* the array immediately following the end of the deque is set to
* {@code null}.
*
* <p>Like the {@link #toArray()} method, this method acts as
* bridge between array-based and collection-based APIs. Further,
* this method allows precise control over the runtime type of the
* output array, and may, under certain circumstances, be used to
* save allocation costs.
*
* <p>Suppose {@code x} is a deque known to contain only strings.
* The following code can be used to dump the deque into a newly
* allocated array of {@code String}:
*
* <pre> {@code String[] y = x.toArray(new String[0]);}</pre>
*
* Note that {@code toArray(new Object[0])} is identical in function to
* {@code toArray()}.
*
* @param a the array into which the elements of the deque are to
* be stored, if it is big enough; otherwise, a new array of the
* same runtime type is allocated for this purpose
* @return an array containing all of the elements in this deque
* @throws ArrayStoreException if the runtime type of the specified array
* is not a supertype of the runtime type of every element in
* this deque
* @throws NullPointerException if the specified array is null
*/
public <T> T[] toArray(T[] a) {
return toArrayList().toArray(a);
}
/**
* Returns an iterator over the elements in this deque in proper sequence.
* The elements will be returned in order from first (head) to last (tail).
*
* <p>The returned iterator is a "weakly consistent" iterator that
* will never throw {@link java.util.ConcurrentModificationException
* ConcurrentModificationException}, and guarantees to traverse
* elements as they existed upon construction of the iterator, and
* may (but is not guaranteed to) reflect any modifications
* subsequent to construction.
*
* @return an iterator over the elements in this deque in proper sequence
*/
public Iterator<E> iterator() {
return new Itr();
}
/**
* Returns an iterator over the elements in this deque in reverse
* sequential order. The elements will be returned in order from
* last (tail) to first (head).
*
* <p>The returned iterator is a "weakly consistent" iterator that
* will never throw {@link java.util.ConcurrentModificationException
* ConcurrentModificationException}, and guarantees to traverse
* elements as they existed upon construction of the iterator, and
* may (but is not guaranteed to) reflect any modifications
* subsequent to construction.
*
* @return an iterator over the elements in this deque in reverse order
*/
public Iterator<E> descendingIterator() {
return new DescendingItr();
}
private abstract class AbstractItr implements Iterator<E> {
/**
* Next node to return item for.
*/
private Node<E> nextNode;
/**
* nextItem holds on to item fields because once we claim
* that an element exists in hasNext(), we must return it in
* the following next() call even if it was in the process of
* being removed when hasNext() was called.
*/
private E nextItem;
/**
* Node returned by most recent call to next. Needed by remove.
* Reset to null if this element is deleted by a call to remove.
*/
private Node<E> lastRet;
abstract Node<E> startNode();
abstract Node<E> nextNode(Node<E> p);
AbstractItr() {
advance();
}
/**
* Sets nextNode and nextItem to next valid node, or to null
* if no such.
*/
private void advance() {
lastRet = nextNode;
Node<E> p = (nextNode == null) ? startNode() : nextNode(nextNode);
for (;; p = nextNode(p)) {
if (p == null) {
// p might be active end or TERMINATOR node; both are OK
nextNode = null;
nextItem = null;
break;
}
E item = p.item;
if (item != null) {
nextNode = p;
nextItem = item;
break;
}
}
}
public boolean hasNext() {
return nextItem != null;
}
public E next() {
E item = nextItem;
if (item == null) throw new NoSuchElementException();
advance();
return item;
}
public void remove() {
Node<E> l = lastRet;
if (l == null) throw new IllegalStateException();
l.item = null;
unlink(l);
lastRet = null;
}
}
/** Forward iterator */
private class Itr extends AbstractItr {
Node<E> startNode() { return first(); }
Node<E> nextNode(Node<E> p) { return succ(p); }
}
/** Descending iterator */
private class DescendingItr extends AbstractItr {
Node<E> startNode() { return last(); }
Node<E> nextNode(Node<E> p) { return pred(p); }
}
/**
* Saves the state to a stream (that is, serializes it).
*
* @serialData All of the elements (each an {@code E}) in
* the proper order, followed by a null
* @param s the stream
*/
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
// Write out any hidden stuff
s.defaultWriteObject();
// Write out all elements in the proper order.
for (Node<E> p = first(); p != null; p = succ(p)) {
E item = p.item;
if (item != null)
s.writeObject(item);
}
// Use trailing null as sentinel
s.writeObject(null);
}
/**
* Reconstitutes the instance from a stream (that is, deserializes it).
* @param s the stream
*/
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
// Read in elements until trailing null sentinel found
Node<E> h = null, t = null;
Object item;
while ((item = s.readObject()) != null) {
@SuppressWarnings("unchecked")
Node<E> newNode = new Node<E>((E) item);
if (h == null)
h = t = newNode;
else {
t.lazySetNext(newNode);
newNode.lazySetPrev(t);
t = newNode;
}
}
initHeadTail(h, t);
}
private boolean casHead(Node<E> cmp, Node<E> val) {
return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val);
}
private boolean casTail(Node<E> cmp, Node<E> val) {
return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val);
}
// Unsafe mechanics
private static final sun.misc.Unsafe UNSAFE;
private static final long headOffset;
private static final long tailOffset;
static {
PREV_TERMINATOR = new Node<Object>();
PREV_TERMINATOR.next = PREV_TERMINATOR;
NEXT_TERMINATOR = new Node<Object>();
NEXT_TERMINATOR.prev = NEXT_TERMINATOR;
try {
UNSAFE = getUnsafe();
Class<?> k = ConcurrentLinkedDeque.class;
headOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("head"));
tailOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("tail"));
} catch (Exception e) {
throw new Error(e);
}
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException tryReflectionInstead) {}
try {
return java.security.AccessController.doPrivileged
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
}
| 0true
|
src_main_java_jsr166y_ConcurrentLinkedDeque.java
|
4,916 |
public class RestGetAction extends BaseRestHandler {
@Inject
public RestGetAction(Settings settings, Client client, RestController controller) {
super(settings, client);
controller.registerHandler(GET, "/{index}/{type}/{id}", this);
}
@Override
public void handleRequest(final RestRequest request, final RestChannel channel) {
final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id"));
getRequest.listenerThreaded(false);
getRequest.operationThreaded(true);
getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh()));
getRequest.routing(request.param("routing")); // order is important, set it after routing, so it will set the routing
getRequest.parent(request.param("parent"));
getRequest.preference(request.param("preference"));
getRequest.realtime(request.paramAsBoolean("realtime", null));
String sField = request.param("fields");
if (sField != null) {
String[] sFields = Strings.splitStringByCommaToArray(sField);
if (sFields != null) {
getRequest.fields(sFields);
}
}
getRequest.version(RestActions.parseVersion(request));
getRequest.versionType(VersionType.fromString(request.param("version_type"), getRequest.versionType()));
getRequest.fetchSourceContext(FetchSourceContext.parseFromRestRequest(request));
client.get(getRequest, new ActionListener<GetResponse>() {
@Override
public void onResponse(GetResponse response) {
try {
XContentBuilder builder = restContentBuilder(request);
response.toXContent(builder, request);
if (!response.isExists()) {
channel.sendResponse(new XContentRestResponse(request, NOT_FOUND, builder));
} else {
channel.sendResponse(new XContentRestResponse(request, OK, builder));
}
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(new XContentThrowableRestResponse(request, e));
} catch (IOException e1) {
logger.error("Failed to send failure response", e1);
}
}
});
}
}
| 1no label
|
src_main_java_org_elasticsearch_rest_action_get_RestGetAction.java
|
2,590 |
private static class NodeComparator implements Comparator<DiscoveryNode> {
@Override
public int compare(DiscoveryNode o1, DiscoveryNode o2) {
return o1.id().compareTo(o2.id());
}
}
| 1no label
|
src_main_java_org_elasticsearch_discovery_zen_elect_ElectMasterService.java
|
37 |
@Component("blFulfillmentTypeOptionsExtensionListener")
public class FulfillmentTypeEnumOptionsExtensionListener extends AbstractRuleBuilderEnumOptionsExtensionListener {
@Override
protected Map<String, Class<? extends BroadleafEnumerationType>> getValuesToGenerate() {
Map<String, Class<? extends BroadleafEnumerationType>> map =
new HashMap<String, Class<? extends BroadleafEnumerationType>>();
map.put("blcOptions_FulfillmentType", FulfillmentType.class);
return map;
}
}
| 0true
|
admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_web_rulebuilder_service_options_FulfillmentTypeEnumOptionsExtensionListener.java
|
299 |
public class OTraverseMultiValueDepthFirstProcess extends OTraverseAbstractProcess<Iterator<Object>> {
protected Object value;
protected int index = -1;
public OTraverseMultiValueDepthFirstProcess(final OTraverse iCommand, final Iterator<Object> iTarget) {
super(iCommand, iTarget);
}
public OIdentifiable process() {
while (target.hasNext()) {
value = target.next();
index++;
if (value instanceof OIdentifiable) {
final ORecord<?> rec = ((OIdentifiable) value).getRecord();
if (rec instanceof ODocument) {
final OTraverseRecordProcess subProcess = new OTraverseRecordProcess(command, (ODocument) rec);
final OIdentifiable subValue = subProcess.process();
if (subValue != null)
return subValue;
}
}
}
return drop();
}
@Override
public String getStatus() {
return toString();
}
@Override
public String toString() {
return "[idx:" + index + "]";
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_command_traverse_OTraverseMultiValueDepthFirstProcess.java
|
238 |
public interface OCache {
/**
* All operations running at cache initialization stage
*/
void startup();
/**
* All operations running at cache destruction stage
*/
void shutdown();
/**
* Tell whether cache is enabled
*
* @return {@code true} if cache enabled at call time, otherwise - {@code false}
*/
boolean isEnabled();
/**
* Enable cache
*
* @return {@code true} - if enabled, {@code false} - otherwise (already enabled)
*/
boolean enable();
/**
* Disable cache. None of record management methods will cause effect on cache in disabled state.
* Only cache info methods available at that state.
*
* @return {@code true} - if disabled, {@code false} - otherwise (already disabled)
*/
boolean disable();
/**
* Look up for record in cache by it's identifier
*
* @param id unique identifier of record
* @return record stored in cache if any, otherwise - {@code null}
*/
ORecordInternal<?> get(ORID id);
/**
* Push record to cache. Identifier of record used as access key
*
* @param record record that should be cached
* @return previous version of record
*/
ORecordInternal<?> put(ORecordInternal<?> record);
/**
* Remove record with specified identifier
*
* @param id unique identifier of record
* @return record stored in cache if any, otherwise - {@code null}
*/
ORecordInternal<?> remove(ORID id);
/**
* Remove all records from cache
*/
void clear();
/**
* Total number of stored records
*
* @return non-negative number
*/
int size();
/**
* Maximum number of items cache should keep
*
* @return non-negative number
*/
int limit();
/**
* Keys of all stored in cache records
*
* @return keys of records
*/
Collection<ORID> keys();
/**
* Lock the item with given id, even if item does not exist all read/update
* operations for given item should be locked.
*
* @param id Item to lock.
*/
void lock(ORID id);
/**
* Unlock item.
*
* @param id item to unlock;
*/
void unlock(ORID id);
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_cache_OCache.java
|
92 |
INTERSECT {
@Override
public boolean evaluate(Object value, Object condition) {
Preconditions.checkArgument(condition instanceof Geoshape);
if (value == null) return false;
Preconditions.checkArgument(value instanceof Geoshape);
return ((Geoshape) value).intersect((Geoshape) condition);
}
@Override
public String toString() {
return "intersect";
}
@Override
public boolean hasNegation() {
return true;
}
@Override
public TitanPredicate negate() {
return DISJOINT;
}
},
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_attribute_Geo.java
|
1,356 |
public class JDTAnnotation implements AnnotationMirror {
private Map<String, Object> values;
public JDTAnnotation(AnnotationBinding annotation) {
values = new HashMap<String, Object>();
ElementValuePair[] annotationVaues = annotation.getElementValuePairs();
for (ElementValuePair annotationValue : annotationVaues) {
String name = new String(annotationValue.getName());
MethodBinding elementMethod = annotationValue.getMethodBinding();
Object value = null;
if (elementMethod != null) {
value = convertValue(annotationValue.getMethodBinding().returnType, annotationValue.getValue());
} else {
value = JDTType.UNKNOWN_TYPE;
}
values.put(name, value);
}
}
@Override
public Object getValue(String fieldName) {
return values.get(fieldName);
}
private Object convertValue(TypeBinding returnType, Object value) {
if(value.getClass().isArray()){
Object[] array = (Object[])value;
List<Object> values = new ArrayList<Object>(array.length);
TypeBinding elementType = ((ArrayBinding)returnType).elementsType();
for(Object val : array)
values.add(convertValue(elementType, val));
return values;
}
if(returnType.isArrayType()){
// got a single value but expecting array
List<Object> values = new ArrayList<Object>(1);
TypeBinding elementType = ((ArrayBinding)returnType).elementsType();
values.add(convertValue(elementType, value));
return values;
}
if(value instanceof AnnotationBinding){
return new JDTAnnotation((AnnotationBinding) value);
}
if(value instanceof TypeBinding){
return new JDTType((TypeBinding) value);
}
if(value instanceof FieldBinding){
return new String(((FieldBinding) value).name);
}
if(value instanceof Constant){
Constant constant = (Constant) value;
return JDTUtils.fromConstant(constant);
}
return value;
}
@Override
public Object getValue() {
return getValue("value");
}
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_core_model_mirror_JDTAnnotation.java
|
653 |
public final class OMVRBTreeIndexEngine<V> extends OSharedResourceAdaptiveExternal implements OIndexEngine<V> {
private int maxUpdatesBeforeSave;
private OMemoryWatchDog.Listener watchDog;
private OMVRBTreeDatabaseLazySave<Object, V> map;
public OMVRBTreeIndexEngine() {
super(OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean(), OGlobalConfiguration.MVRBTREE_TIMEOUT
.getValueAsInteger(), true);
}
@Override
public void init() {
acquireExclusiveLock();
try {
watchDog = new OMemoryWatchDog.Listener() {
public void memoryUsageLow(final long iFreeMemory, final long iFreeMemoryPercentage) {
map.setOptimization(iFreeMemoryPercentage < 10 ? 2 : 1);
}
};
} finally {
releaseExclusiveLock();
}
}
@Override
public void flush() {
acquireExclusiveLock();
try {
map.lazySave();
} finally {
releaseExclusiveLock();
}
}
@Override
public void create(String indexName, OIndexDefinition indexDefinition, String clusterIndexName,
OStreamSerializer valueSerializer, boolean isAutomatic) {
acquireExclusiveLock();
try {
maxUpdatesBeforeSave = lazyUpdates(isAutomatic);
if (indexDefinition != null) {
if (indexDefinition instanceof ORuntimeKeyIndexDefinition) {
map = new OMVRBTreeDatabaseLazySave<Object, V>(clusterIndexName,
((ORuntimeKeyIndexDefinition) indexDefinition).getSerializer(), valueSerializer, 1, maxUpdatesBeforeSave);
} else {
final OBinarySerializer<?> keySerializer;
if (indexDefinition.getTypes().length > 1) {
keySerializer = OCompositeKeySerializer.INSTANCE;
} else {
keySerializer = OBinarySerializerFactory.INSTANCE.getObjectSerializer(indexDefinition.getTypes()[0]);
}
map = new OMVRBTreeDatabaseLazySave<Object, V>(clusterIndexName, (OBinarySerializer<Object>) keySerializer,
valueSerializer, indexDefinition.getTypes().length, maxUpdatesBeforeSave);
}
} else
map = new OMVRBTreeDatabaseLazySave<Object, V>(clusterIndexName, new OSimpleKeySerializer(), valueSerializer, 1,
maxUpdatesBeforeSave);
installHooks(indexName);
} finally {
releaseExclusiveLock();
}
}
private void installHooks(String indexName) {
final OProfilerMBean profiler = Orient.instance().getProfiler();
final String profilerPrefix = profiler.getDatabaseMetric(getDatabase().getName(), "index." + indexName + '.');
final String profilerMetadataPrefix = "db.*.index.*.";
profiler.registerHookValue(profilerPrefix + "items", "Index size", OProfiler.METRIC_TYPE.SIZE,
new OProfiler.OProfilerHookValue() {
public Object getValue() {
acquireSharedLock();
try {
return map != null ? map.size() : "-";
} finally {
releaseSharedLock();
}
}
}, profilerMetadataPrefix + "items");
profiler.registerHookValue(profilerPrefix + "entryPointSize", "Number of entrypoints in an index", OProfiler.METRIC_TYPE.SIZE,
new OProfiler.OProfilerHookValue() {
public Object getValue() {
return map != null ? map.getEntryPointSize() : "-";
}
}, profilerMetadataPrefix + "items");
profiler.registerHookValue(profilerPrefix + "maxUpdateBeforeSave", "Maximum number of updates in a index before force saving",
OProfiler.METRIC_TYPE.SIZE, new OProfiler.OProfilerHookValue() {
public Object getValue() {
return map != null ? map.getMaxUpdatesBeforeSave() : "-";
}
}, profilerMetadataPrefix + "maxUpdateBeforeSave");
Orient.instance().getMemoryWatchDog().addListener(watchDog);
}
@Override
public void delete() {
acquireExclusiveLock();
try {
if (map != null)
map.delete();
} finally {
releaseExclusiveLock();
}
}
@Override
public void deleteWithoutLoad(String indexName) {
throw new UnsupportedOperationException("deleteWithoutLoad");
}
public void load(ORID indexRid, String indexName, OIndexDefinition indexDefinition, boolean isAutomatic) {
acquireExclusiveLock();
try {
maxUpdatesBeforeSave = lazyUpdates(isAutomatic);
map = new OMVRBTreeDatabaseLazySave<Object, V>(getDatabase(), indexRid, maxUpdatesBeforeSave);
map.load();
installHooks(indexName);
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean contains(Object key) {
acquireExclusiveLock();
try {
return map.containsKey(key);
} finally {
releaseExclusiveLock();
}
}
@Override
public ORID getIdentity() {
acquireSharedLock();
try {
return ((OMVRBTreeProviderAbstract<Object, ?>) map.getProvider()).getRecord().getIdentity();
} finally {
releaseSharedLock();
}
}
@Override
public void clear() {
acquireExclusiveLock();
try {
map.clear();
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean remove(Object key) {
acquireExclusiveLock();
try {
return map.remove(key) != null;
} finally {
releaseExclusiveLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> iterator() {
acquireExclusiveLock();
try {
return map.entrySet().iterator();
} finally {
releaseExclusiveLock();
}
}
@Override
public Iterator<Map.Entry<Object, V>> inverseIterator() {
acquireExclusiveLock();
try {
return ((OMVRBTree.EntrySet) map.entrySet()).inverseIterator();
} finally {
releaseExclusiveLock();
}
}
@Override
public Iterable<Object> keys() {
acquireExclusiveLock();
try {
return map.keySet();
} finally {
releaseExclusiveLock();
}
}
@Override
public void unload() {
acquireExclusiveLock();
try {
map.unload();
} finally {
releaseExclusiveLock();
}
}
@Override
public void startTransaction() {
acquireExclusiveLock();
try {
map.setRunningTransaction(true);
} finally {
releaseExclusiveLock();
}
}
@Override
public void stopTransaction() {
acquireExclusiveLock();
try {
map.setRunningTransaction(false);
} finally {
releaseExclusiveLock();
}
}
@Override
public void afterTxRollback() {
acquireExclusiveLock();
try {
map.unload();
} finally {
releaseExclusiveLock();
}
}
@Override
public void afterTxCommit() {
acquireExclusiveLock();
try {
map.onAfterTxCommit();
} finally {
releaseExclusiveLock();
}
}
@Override
public void closeDb() {
acquireExclusiveLock();
try {
map.commitChanges(true);
// TODO: GO IN DEEP WHY THE UNLOAD CAUSE LOOSE OF INDEX ENTRIES!
// map.unload();
} finally {
releaseExclusiveLock();
}
}
@Override
public void close() {
}
@Override
public void beforeTxBegin() {
acquireExclusiveLock();
try {
map.commitChanges(true);
} finally {
releaseExclusiveLock();
}
}
@Override
public V get(Object key) {
acquireExclusiveLock();
try {
return map.get(key);
} finally {
releaseExclusiveLock();
}
}
@Override
public void put(Object key, V value) {
acquireExclusiveLock();
try {
map.put(key, value);
} finally {
releaseExclusiveLock();
}
}
@Override
public void getValuesBetween(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive,
ValuesTransformer<V> transformer, ValuesResultListener valuesResultListener) {
acquireExclusiveLock();
try {
final OMVRBTreeEntry<Object, V> firstEntry;
if (fromInclusive)
firstEntry = map.getCeilingEntry(rangeFrom, OMVRBTree.PartialSearchMode.LOWEST_BOUNDARY);
else
firstEntry = map.getHigherEntry(rangeFrom);
if (firstEntry == null)
return;
final int firstEntryIndex = map.getPageIndex();
final OMVRBTreeEntry<Object, V> lastEntry;
if (toInclusive)
lastEntry = map.getHigherEntry(rangeTo);
else
lastEntry = map.getCeilingEntry(rangeTo, OMVRBTree.PartialSearchMode.LOWEST_BOUNDARY);
final int lastEntryIndex;
if (lastEntry != null)
lastEntryIndex = map.getPageIndex();
else
lastEntryIndex = -1;
OMVRBTreeEntry<Object, V> entry = firstEntry;
map.setPageIndex(firstEntryIndex);
while (entry != null && !(entry == lastEntry && map.getPageIndex() == lastEntryIndex)) {
final V value = entry.getValue();
boolean cont = addToResult(transformer, valuesResultListener, value);
if (!cont)
return;
entry = OMVRBTree.next(entry);
}
} finally {
releaseExclusiveLock();
}
}
@Override
public void getValuesMajor(Object fromKey, boolean isInclusive, ValuesTransformer<V> transformer,
ValuesResultListener valuesResultListener) {
acquireExclusiveLock();
try {
final OMVRBTreeEntry<Object, V> firstEntry;
if (isInclusive)
firstEntry = map.getCeilingEntry(fromKey, OMVRBTree.PartialSearchMode.LOWEST_BOUNDARY);
else
firstEntry = map.getHigherEntry(fromKey);
if (firstEntry == null)
return;
OMVRBTreeEntry<Object, V> entry = firstEntry;
while (entry != null) {
final V value = entry.getValue();
boolean cont = addToResult(transformer, valuesResultListener, value);
if (!cont)
return;
entry = OMVRBTree.next(entry);
}
} finally {
releaseExclusiveLock();
}
}
@Override
public void getValuesMinor(Object toKey, boolean isInclusive, ValuesTransformer<V> transformer,
ValuesResultListener valuesResultListener) {
acquireExclusiveLock();
try {
final OMVRBTreeEntry<Object, V> lastEntry;
if (isInclusive)
lastEntry = map.getFloorEntry(toKey, OMVRBTree.PartialSearchMode.HIGHEST_BOUNDARY);
else
lastEntry = map.getLowerEntry(toKey);
if (lastEntry == null)
return;
OMVRBTreeEntry<Object, V> entry = lastEntry;
while (entry != null) {
V value = entry.getValue();
boolean cont = addToResult(transformer, valuesResultListener, value);
if (!cont)
return;
entry = OMVRBTree.previous(entry);
}
} finally {
releaseExclusiveLock();
}
}
@Override
public void getEntriesMajor(Object fromKey, boolean isInclusive, ValuesTransformer<V> transformer,
EntriesResultListener entriesResultListener) {
acquireExclusiveLock();
try {
final OMVRBTreeEntry<Object, V> firstEntry;
if (isInclusive)
firstEntry = map.getCeilingEntry(fromKey, OMVRBTree.PartialSearchMode.LOWEST_BOUNDARY);
else
firstEntry = map.getHigherEntry(fromKey);
if (firstEntry == null)
return;
OMVRBTreeEntry<Object, V> entry = firstEntry;
while (entry != null) {
final Object key = entry.getKey();
final V value = entry.getValue();
boolean cont = addToEntriesResult(transformer, key, value, entriesResultListener);
if (!cont)
return;
entry = OMVRBTree.next(entry);
}
} finally {
releaseExclusiveLock();
}
}
@Override
public void getEntriesMinor(Object toKey, boolean isInclusive, ValuesTransformer<V> transformer,
EntriesResultListener entriesResultListener) {
acquireExclusiveLock();
try {
final OMVRBTreeEntry<Object, V> lastEntry;
if (isInclusive)
lastEntry = map.getFloorEntry(toKey, OMVRBTree.PartialSearchMode.HIGHEST_BOUNDARY);
else
lastEntry = map.getLowerEntry(toKey);
if (lastEntry == null)
return;
OMVRBTreeEntry<Object, V> entry = lastEntry;
while (entry != null) {
final Object key = entry.getKey();
final V value = entry.getValue();
boolean cont = addToEntriesResult(transformer, key, value, entriesResultListener);
if (!cont)
return;
entry = OMVRBTree.previous(entry);
}
} finally {
releaseExclusiveLock();
}
}
@Override
public void getEntriesBetween(Object iRangeFrom, Object iRangeTo, boolean iInclusive, ValuesTransformer<V> transformer,
EntriesResultListener entriesResultListener) {
acquireExclusiveLock();
try {
final OMVRBTreeEntry<Object, V> firstEntry;
if (iInclusive)
firstEntry = map.getCeilingEntry(iRangeFrom, OMVRBTree.PartialSearchMode.LOWEST_BOUNDARY);
else
firstEntry = map.getHigherEntry(iRangeFrom);
if (firstEntry == null)
return;
final int firstEntryIndex = map.getPageIndex();
final OMVRBTreeEntry<Object, V> lastEntry;
if (iInclusive)
lastEntry = map.getHigherEntry(iRangeTo);
else
lastEntry = map.getCeilingEntry(iRangeTo, OMVRBTree.PartialSearchMode.LOWEST_BOUNDARY);
final int lastEntryIndex;
if (lastEntry != null)
lastEntryIndex = map.getPageIndex();
else
lastEntryIndex = -1;
OMVRBTreeEntry<Object, V> entry = firstEntry;
map.setPageIndex(firstEntryIndex);
final Set<ODocument> result = new ODocumentFieldsHashSet();
while (entry != null && !(entry == lastEntry && map.getPageIndex() == lastEntryIndex)) {
final Object key = entry.getKey();
final V value = entry.getValue();
boolean cont = addToEntriesResult(transformer, key, value, entriesResultListener);
if (!cont)
return;
entry = OMVRBTree.next(entry);
}
} finally {
releaseExclusiveLock();
}
}
@Override
public long size(ValuesTransformer<V> valuesTransformer) {
acquireExclusiveLock();
try {
if (valuesTransformer == null)
return map.size();
OMVRBTreeEntry<Object, V> rootEntry = map.getRoot();
long size = 0;
OMVRBTreeEntry<Object, V> currentEntry = rootEntry;
map.setPageIndex(0);
while (currentEntry != null) {
size += valuesTransformer.transformFromValue(currentEntry.getValue()).size();
currentEntry = OMVRBTree.next(currentEntry);
}
map.setPageIndex(0);
currentEntry = OMVRBTree.previous(rootEntry);
while (currentEntry != null) {
size += valuesTransformer.transformFromValue(currentEntry.getValue()).size();
currentEntry = OMVRBTree.previous(currentEntry);
}
return size;
} finally {
releaseExclusiveLock();
}
}
@Override
public long count(Object rangeFrom, boolean fromInclusive, Object rangeTo, boolean toInclusive, int maxValuesToFetch,
ValuesTransformer<V> transformer) {
acquireExclusiveLock();
try {
final OMVRBTreeEntry<Object, V> firstEntry;
if (rangeFrom == null)
firstEntry = (OMVRBTreeEntry<Object, V>) map.firstEntry();
else if (fromInclusive)
firstEntry = map.getCeilingEntry(rangeFrom, OMVRBTree.PartialSearchMode.LOWEST_BOUNDARY);
else
firstEntry = map.getHigherEntry(rangeFrom);
if (firstEntry == null)
return 0;
long count = 0;
final int firstEntryIndex = map.getPageIndex();
final OMVRBTreeEntry<Object, V> lastEntry;
if (rangeFrom == null)
lastEntry = (OMVRBTreeEntry<Object, V>) map.lastEntry();
else if (toInclusive)
lastEntry = map.getHigherEntry(rangeTo);
else
lastEntry = map.getCeilingEntry(rangeTo, OMVRBTree.PartialSearchMode.LOWEST_BOUNDARY);
final int lastEntryIndex;
if (lastEntry != null)
lastEntryIndex = map.getPageIndex();
else
lastEntryIndex = -1;
OMVRBTreeEntry<Object, V> entry = firstEntry;
map.setPageIndex(firstEntryIndex);
while (entry != null && !(entry == lastEntry && map.getPageIndex() == lastEntryIndex)) {
final V value = entry.getValue();
if (transformer != null)
count += transformer.transformFromValue(value).size();
else
count++;
if (maxValuesToFetch > -1 && maxValuesToFetch == count)
return maxValuesToFetch;
entry = OMVRBTree.next(entry);
}
return count;
} finally {
releaseExclusiveLock();
}
}
@Override
public Iterator<V> valuesIterator() {
acquireExclusiveLock();
try {
return map.values().iterator();
} finally {
releaseExclusiveLock();
}
}
@Override
public Iterator<V> inverseValuesIterator() {
acquireExclusiveLock();
try {
return ((OMVRBTree.Values) map.values()).inverseIterator();
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean hasRangeQuerySupport() {
return true;
}
private boolean addToResult(ValuesTransformer<V> transformer, ValuesResultListener valuesResultListener, V value) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
boolean cont = valuesResultListener.addResult(transformedValue);
if (!cont)
return false;
}
return true;
} else
return valuesResultListener.addResult((OIdentifiable) value);
}
private boolean addToEntriesResult(ValuesTransformer<V> transformer, Object key, V value,
EntriesResultListener entriesResultListener) {
if (transformer != null) {
Collection<OIdentifiable> transformResult = transformer.transformFromValue(value);
for (OIdentifiable transformedValue : transformResult) {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", transformedValue.getIdentity());
document.unsetDirty();
boolean cont = entriesResultListener.addResult(document);
if (!cont)
return false;
}
return true;
} else {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", ((OIdentifiable) value).getIdentity());
document.unsetDirty();
return entriesResultListener.addResult(document);
}
}
private ODatabaseRecord getDatabase() {
return ODatabaseRecordThreadLocal.INSTANCE.get();
}
private int lazyUpdates(boolean isAutomatic) {
return isAutomatic ? OGlobalConfiguration.INDEX_AUTO_LAZY_UPDATES.getValueAsInteger()
: OGlobalConfiguration.INDEX_MANUAL_LAZY_UPDATES.getValueAsInteger();
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_index_engine_OMVRBTreeIndexEngine.java
|
251 |
service.submitToMembers(callable, collection, new MultiExecutionCallback() {
public void onResponse(Member member, Object value) {
if (value.equals(msg + AppendCallable.APPENDAGE)) {
responseLatch.countDown();
}
}
public void onComplete(Map<Member, Object> values) {
for (Member member : values.keySet()) {
Object value = values.get(member);
if (value.equals(msg + AppendCallable.APPENDAGE)) {
completeLatch.countDown();
}
}
}
});
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_executor_ClientExecutorServiceSubmitTest.java
|
4,922 |
public class RestMultiGetAction extends BaseRestHandler {
private final boolean allowExplicitIndex;
@Inject
public RestMultiGetAction(Settings settings, Client client, RestController controller) {
super(settings, client);
controller.registerHandler(GET, "/_mget", this);
controller.registerHandler(POST, "/_mget", this);
controller.registerHandler(GET, "/{index}/_mget", this);
controller.registerHandler(POST, "/{index}/_mget", this);
controller.registerHandler(GET, "/{index}/{type}/_mget", this);
controller.registerHandler(POST, "/{index}/{type}/_mget", this);
this.allowExplicitIndex = settings.getAsBoolean("rest.action.multi.allow_explicit_index", true);
}
@Override
public void handleRequest(final RestRequest request, final RestChannel channel) {
MultiGetRequest multiGetRequest = new MultiGetRequest();
multiGetRequest.listenerThreaded(false);
multiGetRequest.refresh(request.paramAsBoolean("refresh", multiGetRequest.refresh()));
multiGetRequest.preference(request.param("preference"));
multiGetRequest.realtime(request.paramAsBoolean("realtime", null));
String[] sFields = null;
String sField = request.param("fields");
if (sField != null) {
sFields = Strings.splitStringByCommaToArray(sField);
}
FetchSourceContext defaultFetchSource = FetchSourceContext.parseFromRestRequest(request);
try {
multiGetRequest.add(request.param("index"), request.param("type"), sFields, defaultFetchSource, request.param("routing"), RestActions.getRestContent(request), allowExplicitIndex);
} catch (Exception e) {
try {
XContentBuilder builder = restContentBuilder(request);
channel.sendResponse(new XContentRestResponse(request, BAD_REQUEST, builder.startObject().field("error", e.getMessage()).endObject()));
} catch (IOException e1) {
logger.error("Failed to send failure response", e1);
}
return;
}
client.multiGet(multiGetRequest, new ActionListener<MultiGetResponse>() {
@Override
public void onResponse(MultiGetResponse response) {
try {
XContentBuilder builder = restContentBuilder(request);
response.toXContent(builder, request);
channel.sendResponse(new XContentRestResponse(request, OK, builder));
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(new XContentThrowableRestResponse(request, e));
} catch (IOException e1) {
logger.error("Failed to send failure response", e1);
}
}
});
}
}
| 1no label
|
src_main_java_org_elasticsearch_rest_action_get_RestMultiGetAction.java
|
487 |
public class ODatabaseExport extends ODatabaseImpExpAbstract {
protected OJSONWriter writer;
protected long recordExported;
public static final int VERSION = 6;
public ODatabaseExport(final ODatabaseRecord iDatabase, final String iFileName, final OCommandOutputListener iListener)
throws IOException {
super(iDatabase, iFileName, iListener);
if (fileName == null)
throw new IllegalArgumentException("file name missing");
if (!fileName.endsWith(".gz")) {
fileName += ".gz";
}
final File f = new File(fileName);
f.mkdirs();
if (f.exists())
f.delete();
writer = new OJSONWriter(new OutputStreamWriter(new GZIPOutputStream(new FileOutputStream(fileName), 16384))); // 16KB
writer.beginObject();
iDatabase.getLevel1Cache().setEnable(false);
iDatabase.getLevel2Cache().setEnable(false);
}
public ODatabaseExport(final ODatabaseRecord iDatabase, final OutputStream iOutputStream, final OCommandOutputListener iListener)
throws IOException {
super(iDatabase, "streaming", iListener);
writer = new OJSONWriter(new OutputStreamWriter(iOutputStream));
writer.beginObject();
iDatabase.getLevel1Cache().setEnable(false);
iDatabase.getLevel2Cache().setEnable(false);
}
@Override
public ODatabaseExport setOptions(final String s) {
super.setOptions(s);
return this;
}
public ODatabaseExport exportDatabase() {
try {
listener.onMessage("\nStarted export of database '" + database.getName() + "' to " + fileName + "...");
database.getLevel1Cache().setEnable(false);
database.getLevel2Cache().setEnable(false);
long time = System.currentTimeMillis();
if (includeInfo)
exportInfo();
if (includeClusterDefinitions)
exportClusters();
if (includeSchema)
exportSchema();
if (includeRecords)
exportRecords();
if (includeIndexDefinitions)
exportIndexDefinitions();
if (includeManualIndexes)
exportManualIndexes();
listener.onMessage("\n\nDatabase export completed in " + (System.currentTimeMillis() - time) + "ms");
writer.flush();
} catch (Exception e) {
e.printStackTrace();
throw new ODatabaseExportException("Error on exporting database '" + database.getName() + "' to: " + fileName, e);
} finally {
close();
}
return this;
}
public long exportRecords() throws IOException {
long totalFoundRecords = 0;
long totalExportedRecords = 0;
int level = 1;
listener.onMessage("\nExporting records...");
writer.beginCollection(level, true, "records");
int exportedClusters = 0;
int maxClusterId = getMaxClusterId();
for (int i = 0; exportedClusters <= maxClusterId; ++i) {
String clusterName = database.getClusterNameById(i);
exportedClusters++;
long clusterExportedRecordsTot = 0;
if (clusterName != null) {
// CHECK IF THE CLUSTER IS INCLUDED
if (includeClusters != null) {
if (!includeClusters.contains(clusterName.toUpperCase()))
continue;
} else if (excludeClusters != null) {
if (excludeClusters.contains(clusterName.toUpperCase()))
continue;
}
if (excludeClusters != null && excludeClusters.contains(clusterName.toUpperCase()))
continue;
clusterExportedRecordsTot = database.countClusterElements(clusterName);
} else if (includeClusters != null && !includeClusters.isEmpty())
continue;
listener.onMessage("\n- Cluster " + (clusterName != null ? "'" + clusterName + "'" : "NULL") + " (id=" + i + ")...");
long clusterExportedRecordsCurrent = 0;
if (clusterName != null) {
ORecordInternal<?> rec = null;
try {
for (ORecordIteratorCluster<ORecordInternal<?>> it = database.browseCluster(clusterName); it.hasNext();) {
rec = it.next();
if (rec instanceof ODocument) {
// CHECK IF THE CLASS OF THE DOCUMENT IS INCLUDED
ODocument doc = (ODocument) rec;
final String className = doc.getClassName() != null ? doc.getClassName().toUpperCase() : null;
if (includeClasses != null) {
if (!includeClasses.contains(className))
continue;
} else if (excludeClasses != null) {
if (excludeClasses.contains(className))
continue;
}
} else if (includeClasses != null && !includeClasses.isEmpty())
continue;
if (exportRecord(clusterExportedRecordsTot, clusterExportedRecordsCurrent, rec))
clusterExportedRecordsCurrent++;
}
} catch (IOException e) {
OLogManager.instance().error(this, "\nError on exporting record %s because of I/O problems", e, rec.getIdentity());
// RE-THROW THE EXCEPTION UP
throw e;
} catch (OIOException e) {
OLogManager.instance().error(this, "\nError on exporting record %s because of I/O problems", e, rec.getIdentity());
// RE-THROW THE EXCEPTION UP
throw e;
} catch (Throwable t) {
if (rec != null) {
final byte[] buffer = rec.toStream();
OLogManager
.instance()
.error(
this,
"\nError on exporting record %s. It seems corrupted; size: %d bytes, raw content (as string):\n==========\n%s\n==========",
t, rec.getIdentity(), buffer.length, new String(buffer));
}
}
}
listener.onMessage("OK (records=" + clusterExportedRecordsCurrent + "/" + clusterExportedRecordsTot + ")");
totalExportedRecords += clusterExportedRecordsCurrent;
totalFoundRecords += clusterExportedRecordsTot;
}
writer.endCollection(level, true);
listener.onMessage("\n\nDone. Exported " + totalExportedRecords + " of total " + totalFoundRecords + " records\n");
return totalFoundRecords;
}
public void close() {
database.declareIntent(null);
if (writer == null)
return;
try {
writer.endObject();
writer.close();
writer = null;
} catch (IOException e) {
}
}
private void exportClusters() throws IOException {
listener.onMessage("\nExporting clusters...");
writer.beginCollection(1, true, "clusters");
int exportedClusters = 0;
int maxClusterId = getMaxClusterId();
for (int clusterId = 0; clusterId <= maxClusterId; ++clusterId) {
final String clusterName = database.getClusterNameById(clusterId);
// exclude removed clusters
if (clusterName == null)
continue;
// CHECK IF THE CLUSTER IS INCLUDED
if (includeClusters != null) {
if (!includeClusters.contains(clusterName.toUpperCase()))
continue;
} else if (excludeClusters != null) {
if (excludeClusters.contains(clusterName.toUpperCase()))
continue;
}
writer.beginObject(2, true, null);
writer.writeAttribute(0, false, "name", clusterName);
writer.writeAttribute(0, false, "id", clusterId);
writer.writeAttribute(0, false, "type", database.getClusterType(clusterName));
exportedClusters++;
writer.endObject(2, false);
}
listener.onMessage("OK (" + exportedClusters + " clusters)");
writer.endCollection(1, true);
}
protected int getMaxClusterId() {
int totalCluster = -1;
for (String clusterName : database.getClusterNames()) {
if (database.getClusterIdByName(clusterName) > totalCluster)
totalCluster = database.getClusterIdByName(clusterName);
}
return totalCluster;
}
private void exportInfo() throws IOException {
listener.onMessage("\nExporting database info...");
writer.beginObject(1, true, "info");
writer.writeAttribute(2, true, "name", database.getName().replace('\\', '/'));
writer.writeAttribute(2, true, "default-cluster-id", database.getDefaultClusterId());
writer.writeAttribute(2, true, "exporter-version", VERSION);
writer.writeAttribute(2, true, "engine-version", OConstants.ORIENT_VERSION);
final String engineBuild = OConstants.getBuildNumber();
if (engineBuild != null)
writer.writeAttribute(2, true, "engine-build", engineBuild);
writer.writeAttribute(2, true, "storage-config-version", OStorageConfiguration.CURRENT_VERSION);
writer.writeAttribute(2, true, "schema-version", OSchemaShared.CURRENT_VERSION_NUMBER);
writer.writeAttribute(2, true, "mvrbtree-version", OMVRBTreeMapProvider.CURRENT_PROTOCOL_VERSION);
writer.writeAttribute(2, true, "schemaRecordId", database.getStorage().getConfiguration().schemaRecordId);
writer.writeAttribute(2, true, "indexMgrRecordId", database.getStorage().getConfiguration().indexMgrRecordId);
writer.endObject(1, true);
listener.onMessage("OK");
}
private void exportIndexDefinitions() throws IOException {
listener.onMessage("\nExporting index info...");
writer.beginCollection(1, true, "indexes");
final OIndexManagerProxy indexManager = database.getMetadata().getIndexManager();
indexManager.reload();
final Collection<? extends OIndex<?>> indexes = indexManager.getIndexes();
for (OIndex<?> index : indexes) {
if (index.getName().equals(ODatabaseImport.EXPORT_IMPORT_MAP_NAME))
continue;
listener.onMessage("\n- Index " + index.getName() + "...");
writer.beginObject(2, true, null);
writer.writeAttribute(3, true, "name", index.getName());
writer.writeAttribute(3, true, "type", index.getType());
if (!index.getClusters().isEmpty())
writer.writeAttribute(3, true, "clustersToIndex", index.getClusters());
if (index.getDefinition() != null) {
writer.beginObject(4, true, "definition");
writer.writeAttribute(5, true, "defClass", index.getDefinition().getClass().getName());
writer.writeAttribute(5, true, "stream", index.getDefinition().toStream());
writer.endObject(4, true);
}
writer.endObject(2, true);
listener.onMessage("OK");
}
writer.endCollection(1, true);
listener.onMessage("\nOK (" + indexes.size() + " indexes)");
}
@SuppressWarnings({ "rawtypes", "unchecked" })
private void exportManualIndexes() throws IOException {
listener.onMessage("\nExporting manual indexes content...");
final OIndexManagerProxy indexManager = database.getMetadata().getIndexManager();
indexManager.reload();
final Collection<? extends OIndex<?>> indexes = indexManager.getIndexes();
ODocument exportEntry = new ODocument();
int manualIndexes = 0;
writer.beginCollection(1, true, "manualIndexes");
for (OIndex<?> index : indexes) {
if (index.getName().equals(ODatabaseImport.EXPORT_IMPORT_MAP_NAME))
continue;
if (!index.isAutomatic()) {
listener.onMessage("\n- Exporting index " + index.getName() + " ...");
writer.beginObject(2, true, null);
writer.writeAttribute(3, true, "name", index.getName());
List<ODocument> indexContent = database.query(new OSQLSynchQuery<ODocument>("select from index:" + index.getName()));
writer.beginCollection(3, true, "content");
int i = 0;
for (ODocument indexEntry : indexContent) {
if (i > 0)
writer.append(",");
final OIndexDefinition indexDefinition = index.getDefinition();
exportEntry.reset();
exportEntry.setLazyLoad(false);
if (indexDefinition instanceof ORuntimeKeyIndexDefinition
&& ((ORuntimeKeyIndexDefinition) indexDefinition).getSerializer() != null) {
final OBinarySerializer binarySerializer = ((ORuntimeKeyIndexDefinition) indexDefinition).getSerializer();
final int dataSize = binarySerializer.getObjectSize(indexEntry.field("key"));
final byte[] binaryContent = new byte[dataSize];
binarySerializer.serialize(indexEntry.field("key"), binaryContent, 0);
exportEntry.field("binary", true);
exportEntry.field("key", binaryContent);
} else {
exportEntry.field("binary", false);
exportEntry.field("key", indexEntry.field("key"));
}
exportEntry.field("rid", indexEntry.field("rid"));
i++;
writer.append(exportEntry.toJSON());
final long percent = indexContent.size() / 10;
if (percent > 0 && (i % percent) == 0)
listener.onMessage(".");
}
writer.endCollection(3, true);
writer.endObject(2, true);
listener.onMessage("OK (entries=" + index.getSize() + ")");
manualIndexes++;
}
}
writer.endCollection(1, true);
listener.onMessage("\nOK (" + manualIndexes + " manual indexes)");
}
private void exportSchema() throws IOException {
listener.onMessage("\nExporting schema...");
writer.beginObject(1, true, "schema");
OSchemaProxy s = (OSchemaProxy) database.getMetadata().getSchema();
writer.writeAttribute(2, true, "version", s.getVersion());
if (!s.getClasses().isEmpty()) {
writer.beginCollection(2, true, "classes");
final List<OClass> classes = new ArrayList<OClass>(s.getClasses());
Collections.sort(classes);
for (OClass cls : classes) {
writer.beginObject(3, true, null);
writer.writeAttribute(0, false, "name", cls.getName());
writer.writeAttribute(0, false, "default-cluster-id", cls.getDefaultClusterId());
writer.writeAttribute(0, false, "cluster-ids", cls.getClusterIds());
if (((OClassImpl) cls).getOverSizeInternal() > 1)
writer.writeAttribute(0, false, "oversize", ((OClassImpl) cls).getOverSizeInternal());
if (cls.isStrictMode())
writer.writeAttribute(0, false, "strictMode", cls.isStrictMode());
if (cls.getSuperClass() != null)
writer.writeAttribute(0, false, "super-class", cls.getSuperClass().getName());
if (cls.getShortName() != null)
writer.writeAttribute(0, false, "short-name", cls.getShortName());
if (cls.isAbstract())
writer.writeAttribute(0, false, "abstract", cls.isAbstract());
if (!cls.properties().isEmpty()) {
writer.beginCollection(4, true, "properties");
final List<OProperty> properties = new ArrayList<OProperty>(cls.declaredProperties());
Collections.sort(properties);
for (OProperty p : properties) {
writer.beginObject(5, true, null);
writer.writeAttribute(0, false, "name", p.getName());
writer.writeAttribute(0, false, "type", p.getType().toString());
if (p.isMandatory())
writer.writeAttribute(0, false, "mandatory", p.isMandatory());
if (p.isReadonly())
writer.writeAttribute(0, false, "readonly", p.isReadonly());
if (p.isNotNull())
writer.writeAttribute(0, false, "not-null", p.isNotNull());
if (p.getLinkedClass() != null)
writer.writeAttribute(0, false, "linked-class", p.getLinkedClass().getName());
if (p.getLinkedType() != null)
writer.writeAttribute(0, false, "linked-type", p.getLinkedType().toString());
if (p.getMin() != null)
writer.writeAttribute(0, false, "min", p.getMin());
if (p.getMax() != null)
writer.writeAttribute(0, false, "max", p.getMax());
if (((OPropertyImpl) p).getCustomInternal() != null)
writer.writeAttribute(0, false, "customFields", ((OPropertyImpl) p).getCustomInternal());
writer.endObject(0, false);
}
writer.endCollection(4, true);
}
writer.endObject(3, true);
}
writer.endCollection(2, true);
}
writer.endObject(1, true);
listener.onMessage("OK (" + s.getClasses().size() + " classes)");
}
private boolean exportRecord(long recordTot, long recordNum, ORecordInternal<?> rec) throws IOException {
if (rec != null)
try {
if (rec.getIdentity().isValid())
rec.reload();
if (useLineFeedForRecords)
writer.append("\n");
if (recordExported > 0)
writer.append(",");
writer.append(rec.toJSON("rid,type,version,class,attribSameRow,keepTypes,alwaysFetchEmbedded,dateAsLong"));
recordExported++;
recordNum++;
if (recordTot > 10 && (recordNum + 1) % (recordTot / 10) == 0)
listener.onMessage(".");
return true;
} catch (Throwable t) {
if (rec != null) {
final byte[] buffer = rec.toStream();
OLogManager
.instance()
.error(
this,
"\nError on exporting record %s. It seems corrupted; size: %d bytes, raw content (as string):\n==========\n%s\n==========",
t, rec.getIdentity(), buffer.length, new String(buffer));
}
}
return false;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_db_tool_ODatabaseExport.java
|
356 |
public class NodesStatsAction extends ClusterAction<NodesStatsRequest, NodesStatsResponse, NodesStatsRequestBuilder> {
public static final NodesStatsAction INSTANCE = new NodesStatsAction();
public static final String NAME = "cluster/nodes/stats";
private NodesStatsAction() {
super(NAME);
}
@Override
public NodesStatsResponse newResponse() {
return new NodesStatsResponse();
}
@Override
public NodesStatsRequestBuilder newRequestBuilder(ClusterAdminClient client) {
return new NodesStatsRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_stats_NodesStatsAction.java
|
1,447 |
static private class EvictionEntry implements Comparable<EvictionEntry> {
final Object key;
final Value value;
private EvictionEntry(final Object key, final Value value) {
this.key = key;
this.value = value;
}
public int compareTo(final EvictionEntry o) {
final long thisVal = this.value.getCreationTime();
final long anotherVal = o.value.getCreationTime();
return (thisVal < anotherVal ? -1 : (thisVal == anotherVal ? 0 : 1));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
EvictionEntry that = (EvictionEntry) o;
if (key != null ? !key.equals(that.key) : that.key != null) return false;
if (value != null ? !value.equals(that.value) : that.value != null) return false;
return true;
}
@Override
public int hashCode() {
return key != null ? key.hashCode() : 0;
}
}
| 1no label
|
hazelcast-hibernate_hazelcast-hibernate3_src_main_java_com_hazelcast_hibernate_local_LocalRegionCache.java
|
294 |
public enum STRATEGY {
DEPTH_FIRST, BREADTH_FIRST
};
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_command_traverse_OTraverse.java
|
119 |
public class ClientPortableHook implements PortableHook {
public static final int ID = FactoryIdHelper.getFactoryId(FactoryIdHelper.CLIENT_PORTABLE_FACTORY, -3);
public static final int GENERIC_ERROR = 1;
public static final int AUTH = 2;
public static final int PRINCIPAL = 3;
public static final int GET_DISTRIBUTED_OBJECT_INFO = 4;
public static final int DISTRIBUTED_OBJECT_INFO = 6;
public static final int CREATE_PROXY = 7;
public static final int DESTROY_PROXY = 8;
public static final int LISTENER = 9;
public static final int MEMBERSHIP_LISTENER = 10;
public static final int CLIENT_PING = 11;
public static final int GET_PARTITIONS = 12;
public static final int REMOVE_LISTENER = 13;
public int getFactoryId() {
return ID;
}
public PortableFactory createFactory() {
return new ClientPortableFactory();
}
public Collection<ClassDefinition> getBuiltinDefinitions() {
ClassDefinitionBuilder builder = new ClassDefinitionBuilder(ID, PRINCIPAL);
builder.addUTFField("uuid").addUTFField("ownerUuid");
return Collections.singleton(builder.build());
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_client_ClientPortableHook.java
|
150 |
public interface Action<A> { void apply(A a); }
| 0true
|
src_main_java_jsr166e_extra_ReadMostlyVector.java
|
96 |
public static class GeoshapeSerializer implements AttributeSerializer<Geoshape> {
@Override
public void verifyAttribute(Geoshape value) {
//All values of Geoshape are valid
}
@Override
public Geoshape convert(Object value) {
if (value.getClass().isArray() && (value.getClass().getComponentType().isPrimitive() ||
Number.class.isAssignableFrom(value.getClass().getComponentType())) ) {
Geoshape shape = null;
int len= Array.getLength(value);
double[] arr = new double[len];
for (int i=0;i<len;i++) arr[i]=((Number)Array.get(value,i)).doubleValue();
if (len==2) shape= point(arr[0],arr[1]);
else if (len==3) shape= circle(arr[0],arr[1],arr[2]);
else if (len==4) shape= box(arr[0],arr[1],arr[2],arr[3]);
else throw new IllegalArgumentException("Expected 2-4 coordinates to create Geoshape, but given: " + value);
return shape;
} else if (value instanceof String) {
String[] components=null;
for (String delimiter : new String[]{",",";"}) {
components = ((String)value).split(delimiter);
if (components.length>=2 && components.length<=4) break;
else components=null;
}
Preconditions.checkArgument(components!=null,"Could not parse coordinates from string: %s",value);
double[] coords = new double[components.length];
try {
for (int i=0;i<components.length;i++) {
coords[i]=Double.parseDouble(components[i]);
}
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Could not parse coordinates from string: " + value, e);
}
return convert(coords);
} else return null;
}
@Override
public Geoshape read(ScanBuffer buffer) {
long l = VariableLong.readPositive(buffer);
assert l>0 && l<Integer.MAX_VALUE;
int length = (int)l;
float[][] coordinates = new float[2][];
for (int i = 0; i < 2; i++) {
coordinates[i]=buffer.getFloats(length);
}
return new Geoshape(coordinates);
}
@Override
public void write(WriteBuffer buffer, Geoshape attribute) {
float[][] coordinates = attribute.coordinates;
assert (coordinates.length==2);
assert (coordinates[0].length==coordinates[1].length && coordinates[0].length>0);
int length = coordinates[0].length;
VariableLong.writePositive(buffer,length);
for (int i = 0; i < 2; i++) {
for (int j = 0; j < length; j++) {
buffer.putFloat(coordinates[i][j]);
}
}
}
}
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_attribute_Geoshape.java
|
157 |
class MockConnection implements Connection {
volatile boolean live = true;
final int port;
MockConnection(int port) {
this.port = port;
}
BlockingQueue<SocketWritable> q = new LinkedBlockingQueue<SocketWritable>();
public boolean write(SocketWritable packet) {
return q.offer(packet);
}
@Override
public Address getEndPoint() {
return null;
}
@Override
public boolean live() {
return live;
}
@Override
public long lastReadTime() {
return 0;
}
@Override
public long lastWriteTime() {
return 0;
}
@Override
public void close() {
live = false;
}
@Override
public boolean isClient() {
return true;
}
@Override
public ConnectionType getType() {
return ConnectionType.BINARY_CLIENT;
}
@Override
public InetAddress getInetAddress() {
return null;
}
@Override
public InetSocketAddress getRemoteSocketAddress() {
return null;
}
@Override
public int getPort() {
return port;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof MockConnection)) return false;
MockConnection that = (MockConnection) o;
if (port != that.port) return false;
return true;
}
@Override
public int hashCode() {
return port;
}
}
| 0true
|
hazelcast_src_test_java_com_hazelcast_client_MockSimpleClient.java
|
496 |
return scheduledExecutor.scheduleAtFixedRate(new Runnable() {
public void run() {
executeInternal(command);
}
}, initialDelay, period, unit);
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_spi_impl_ClientExecutionServiceImpl.java
|
4,663 |
private final PercolatorType queryCountPercolator = new PercolatorType() {
@Override
public byte id() {
return 0x02;
}
@Override
public ReduceResult reduce(List<PercolateShardResponse> shardResults) {
return countPercolator.reduce(shardResults);
}
@Override
public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context) {
long count = 0;
Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate");
try {
Count countCollector = count(logger, context);
queryBasedPercolating(percolatorSearcher, context, countCollector);
count = countCollector.counter();
} catch (Throwable e) {
logger.warn("failed to execute", e);
} finally {
percolatorSearcher.release();
}
return new PercolateShardResponse(count, context, request.index(), request.shardId());
}
};
| 1no label
|
src_main_java_org_elasticsearch_percolator_PercolatorService.java
|
3,681 |
public class SizeFieldMapper extends IntegerFieldMapper implements RootMapper {
public static final String NAME = "_size";
public static final String CONTENT_TYPE = "_size";
public static class Defaults extends IntegerFieldMapper.Defaults {
public static final String NAME = CONTENT_TYPE;
public static final EnabledAttributeMapper ENABLED_STATE = EnabledAttributeMapper.DISABLED;
public static final FieldType SIZE_FIELD_TYPE = new FieldType(IntegerFieldMapper.Defaults.FIELD_TYPE);
static {
SIZE_FIELD_TYPE.freeze();
}
}
public static class Builder extends NumberFieldMapper.Builder<Builder, IntegerFieldMapper> {
protected EnabledAttributeMapper enabledState = EnabledAttributeMapper.UNSET_DISABLED;
public Builder() {
super(Defaults.NAME, new FieldType(Defaults.SIZE_FIELD_TYPE));
builder = this;
}
public Builder enabled(EnabledAttributeMapper enabled) {
this.enabledState = enabled;
return builder;
}
@Override
public SizeFieldMapper build(BuilderContext context) {
return new SizeFieldMapper(enabledState, fieldType, postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());
}
}
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
SizeFieldMapper.Builder builder = size();
for (Map.Entry<String, Object> entry : node.entrySet()) {
String fieldName = Strings.toUnderscoreCase(entry.getKey());
Object fieldNode = entry.getValue();
if (fieldName.equals("enabled")) {
builder.enabled(nodeBooleanValue(fieldNode) ? EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED);
} else if (fieldName.equals("store")) {
builder.store(parseStore(fieldName, fieldNode.toString()));
}
}
return builder;
}
}
private EnabledAttributeMapper enabledState;
public SizeFieldMapper() {
this(Defaults.ENABLED_STATE, new FieldType(Defaults.SIZE_FIELD_TYPE), null, null, null, ImmutableSettings.EMPTY);
}
public SizeFieldMapper(EnabledAttributeMapper enabled, FieldType fieldType, PostingsFormatProvider postingsProvider,
DocValuesFormatProvider docValuesProvider, @Nullable Settings fieldDataSettings, Settings indexSettings) {
super(new Names(Defaults.NAME), Defaults.PRECISION_STEP, Defaults.BOOST, fieldType, null, Defaults.NULL_VALUE,
Defaults.IGNORE_MALFORMED, Defaults.COERCE, postingsProvider, docValuesProvider, null, null, fieldDataSettings,
indexSettings, MultiFields.empty(), null);
this.enabledState = enabled;
}
@Override
public boolean hasDocValues() {
return false;
}
@Override
protected String contentType() {
return Defaults.NAME;
}
public boolean enabled() {
return this.enabledState.enabled;
}
@Override
public void validate(ParseContext context) throws MapperParsingException {
}
@Override
public void preParse(ParseContext context) throws IOException {
}
@Override
public void postParse(ParseContext context) throws IOException {
// we post parse it so we get the size stored, possibly compressed (source will be preParse)
super.parse(context);
}
@Override
public void parse(ParseContext context) throws IOException {
// nothing to do here, we call the parent in postParse
}
@Override
public boolean includeInObject() {
return false;
}
@Override
protected void innerParseCreateField(ParseContext context, List<Field> fields) throws IOException {
if (!enabledState.enabled) {
return;
}
if (context.flyweight()) {
return;
}
fields.add(new CustomIntegerNumericField(this, context.source().length(), fieldType));
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
// all are defaults, no need to write it at all
if (!includeDefaults && enabledState == Defaults.ENABLED_STATE && fieldType().stored() == Defaults.SIZE_FIELD_TYPE.stored()) {
return builder;
}
builder.startObject(contentType());
if (includeDefaults || enabledState != Defaults.ENABLED_STATE) {
builder.field("enabled", enabledState.enabled);
}
if (includeDefaults || fieldType().stored() != Defaults.SIZE_FIELD_TYPE.stored() && enabledState.enabled) {
builder.field("store", fieldType().stored());
}
builder.endObject();
return builder;
}
@Override
public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
SizeFieldMapper sizeFieldMapperMergeWith = (SizeFieldMapper) mergeWith;
if (!mergeContext.mergeFlags().simulate()) {
if (sizeFieldMapperMergeWith.enabledState != enabledState && !sizeFieldMapperMergeWith.enabledState.unset()) {
this.enabledState = sizeFieldMapperMergeWith.enabledState;
}
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_mapper_internal_SizeFieldMapper.java
|
1,377 |
database.getStorage().callInLock(new Callable<Object>() {
@Override
public Object call() throws Exception {
database.getStorage().commit(OTransactionOptimistic.this, null);
callback.run();
return null;
}
}, true);
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_tx_OTransactionOptimistic.java
|
1,163 |
public class OSQLMethodTrim extends OAbstractSQLMethod {
public static final String NAME = "trim";
public OSQLMethodTrim() {
super(NAME);
}
@Override
public Object execute(OIdentifiable iCurrentRecord, OCommandContext iContext, Object ioResult, Object[] iMethodParams) {
ioResult = ioResult != null ? ioResult.toString().trim() : null;
return ioResult;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_method_misc_OSQLMethodTrim.java
|
484 |
public static class AnalyzeToken implements Streamable {
private String term;
private int startOffset;
private int endOffset;
private int position;
private String type;
AnalyzeToken() {
}
public AnalyzeToken(String term, int position, int startOffset, int endOffset, String type) {
this.term = term;
this.position = position;
this.startOffset = startOffset;
this.endOffset = endOffset;
this.type = type;
}
public String getTerm() {
return this.term;
}
public int getStartOffset() {
return this.startOffset;
}
public int getEndOffset() {
return this.endOffset;
}
public int getPosition() {
return this.position;
}
public String getType() {
return this.type;
}
public static AnalyzeToken readAnalyzeToken(StreamInput in) throws IOException {
AnalyzeToken analyzeToken = new AnalyzeToken();
analyzeToken.readFrom(in);
return analyzeToken;
}
@Override
public void readFrom(StreamInput in) throws IOException {
term = in.readString();
startOffset = in.readInt();
endOffset = in.readInt();
position = in.readVInt();
type = in.readOptionalString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(term);
out.writeInt(startOffset);
out.writeInt(endOffset);
out.writeVInt(position);
out.writeOptionalString(type);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_analyze_AnalyzeResponse.java
|
318 |
new Thread() {
public void run() {
map.lock(key);
lockedLatch.countDown();
}
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapLockTest.java
|
2,662 |
public class PublishClusterStateAction extends AbstractComponent {
public static interface NewClusterStateListener {
static interface NewStateProcessed {
void onNewClusterStateProcessed();
void onNewClusterStateFailed(Throwable t);
}
void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed);
}
private final TransportService transportService;
private final DiscoveryNodesProvider nodesProvider;
private final NewClusterStateListener listener;
private final TimeValue publishTimeout;
public PublishClusterStateAction(Settings settings, TransportService transportService, DiscoveryNodesProvider nodesProvider,
NewClusterStateListener listener) {
super(settings);
this.transportService = transportService;
this.nodesProvider = nodesProvider;
this.listener = listener;
this.publishTimeout = settings.getAsTime("discovery.zen.publish_timeout", Discovery.DEFAULT_PUBLISH_TIMEOUT);
transportService.registerHandler(PublishClusterStateRequestHandler.ACTION, new PublishClusterStateRequestHandler());
}
public void close() {
transportService.removeHandler(PublishClusterStateRequestHandler.ACTION);
}
public void publish(ClusterState clusterState, final Discovery.AckListener ackListener) {
publish(clusterState, new AckClusterStatePublishResponseHandler(clusterState.nodes().size() - 1, ackListener));
}
private void publish(ClusterState clusterState, final ClusterStatePublishResponseHandler publishResponseHandler) {
DiscoveryNode localNode = nodesProvider.nodes().localNode();
Map<Version, BytesReference> serializedStates = Maps.newHashMap();
for (final DiscoveryNode node : clusterState.nodes()) {
if (node.equals(localNode)) {
continue;
}
// try and serialize the cluster state once (or per version), so we don't serialize it
// per node when we send it over the wire, compress it while we are at it...
BytesReference bytes = serializedStates.get(node.version());
if (bytes == null) {
try {
BytesStreamOutput bStream = new BytesStreamOutput();
StreamOutput stream = new HandlesStreamOutput(CompressorFactory.defaultCompressor().streamOutput(bStream));
stream.setVersion(node.version());
ClusterState.Builder.writeTo(clusterState, stream);
stream.close();
bytes = bStream.bytes();
serializedStates.put(node.version(), bytes);
} catch (Throwable e) {
logger.warn("failed to serialize cluster_state before publishing it to node {}", e, node);
publishResponseHandler.onFailure(node, e);
continue;
}
}
try {
TransportRequestOptions options = TransportRequestOptions.options().withType(TransportRequestOptions.Type.STATE).withCompress(false);
// no need to put a timeout on the options here, because we want the response to eventually be received
// and not log an error if it arrives after the timeout
transportService.sendRequest(node, PublishClusterStateRequestHandler.ACTION,
new BytesTransportRequest(bytes, node.version()),
options, // no need to compress, we already compressed the bytes
new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
public void handleResponse(TransportResponse.Empty response) {
publishResponseHandler.onResponse(node);
}
@Override
public void handleException(TransportException exp) {
logger.debug("failed to send cluster state to [{}]", exp, node);
publishResponseHandler.onFailure(node, exp);
}
});
} catch (Throwable t) {
logger.debug("error sending cluster state to [{}]", t, node);
publishResponseHandler.onFailure(node, t);
}
}
if (publishTimeout.millis() > 0) {
// only wait if the publish timeout is configured...
try {
boolean awaited = publishResponseHandler.awaitAllNodes(publishTimeout);
if (!awaited) {
logger.debug("awaiting all nodes to process published state {} timed out, timeout {}", clusterState.version(), publishTimeout);
}
} catch (InterruptedException e) {
// ignore & restore interrupt
Thread.currentThread().interrupt();
}
}
}
private class PublishClusterStateRequestHandler extends BaseTransportRequestHandler<BytesTransportRequest> {
static final String ACTION = "discovery/zen/publish";
@Override
public BytesTransportRequest newInstance() {
return new BytesTransportRequest();
}
@Override
public void messageReceived(BytesTransportRequest request, final TransportChannel channel) throws Exception {
Compressor compressor = CompressorFactory.compressor(request.bytes());
StreamInput in;
if (compressor != null) {
in = CachedStreamInput.cachedHandlesCompressed(compressor, request.bytes().streamInput());
} else {
in = CachedStreamInput.cachedHandles(request.bytes().streamInput());
}
in.setVersion(request.version());
ClusterState clusterState = ClusterState.Builder.readFrom(in, nodesProvider.nodes().localNode());
logger.debug("received cluster state version {}", clusterState.version());
listener.onNewClusterState(clusterState, new NewClusterStateListener.NewStateProcessed() {
@Override
public void onNewClusterStateProcessed() {
try {
channel.sendResponse(TransportResponse.Empty.INSTANCE);
} catch (Throwable e) {
logger.debug("failed to send response on cluster state processed", e);
}
}
@Override
public void onNewClusterStateFailed(Throwable t) {
try {
channel.sendResponse(t);
} catch (Throwable e) {
logger.debug("failed to send response on cluster state processed", e);
}
}
});
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_discovery_zen_publish_PublishClusterStateAction.java
|
76 |
public interface StaticAssetDescription extends Serializable {
public Long getId();
public void setId(Long id);
public String getDescription();
public void setDescription(String description);
public String getLongDescription();
public void setLongDescription(String longDescription);
public StaticAssetDescription cloneEntity();
public AdminAuditable getAuditable();
public void setAuditable(AdminAuditable auditable);
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_file_domain_StaticAssetDescription.java
|
101 |
static final class TableStack<K,V> {
int length;
int index;
Node<K,V>[] tab;
TableStack<K,V> next;
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
1,048 |
@SuppressWarnings("unchecked")
public abstract class OCommandExecutorSQLResultsetAbstract extends OCommandExecutorSQLAbstract implements Iterator<OIdentifiable>,
Iterable<OIdentifiable> {
protected static final String KEYWORD_FROM_2FIND = " " + KEYWORD_FROM + " ";
protected static final String KEYWORD_LET_2FIND = " " + KEYWORD_LET + " ";
protected OSQLAsynchQuery<ORecordSchemaAware<?>> request;
protected OSQLTarget parsedTarget;
protected OSQLFilter compiledFilter;
protected Map<String, Object> let = null;
protected Iterator<? extends OIdentifiable> target;
protected Iterable<OIdentifiable> tempResult;
protected int resultCount;
protected int skip = 0;
/**
* Compile the filter conditions only the first time.
*/
public OCommandExecutorSQLResultsetAbstract parse(final OCommandRequest iRequest) {
final OCommandRequestText textRequest = (OCommandRequestText) iRequest;
init(textRequest);
if (iRequest instanceof OSQLSynchQuery) {
request = (OSQLSynchQuery<ORecordSchemaAware<?>>) iRequest;
} else if (iRequest instanceof OSQLAsynchQuery)
request = (OSQLAsynchQuery<ORecordSchemaAware<?>>) iRequest;
else {
// BUILD A QUERY OBJECT FROM THE COMMAND REQUEST
request = new OSQLSynchQuery<ORecordSchemaAware<?>>(textRequest.getText());
if (textRequest.getResultListener() != null)
request.setResultListener(textRequest.getResultListener());
}
return this;
}
@Override
public boolean isReplicated() {
return true;
}
@Override
public boolean isIdempotent() {
return true;
}
/**
* Assign the right TARGET if found.
*
* @param iArgs
* Parameters to bind
* @return true if the target has been recognized, otherwise false
*/
protected boolean assignTarget(final Map<Object, Object> iArgs) {
parameters = iArgs;
if (parsedTarget == null)
return true;
if (iArgs != null && iArgs.size() > 0 && compiledFilter != null)
compiledFilter.bindParameters(iArgs);
if (target == null)
if (parsedTarget.getTargetClasses() != null)
searchInClasses();
else if (parsedTarget.getTargetClusters() != null)
searchInClusters();
else if (parsedTarget.getTargetRecords() != null)
target = parsedTarget.getTargetRecords().iterator();
else if (parsedTarget.getTargetVariable() != null) {
final Object var = getContext().getVariable(parsedTarget.getTargetVariable());
if (var == null) {
target = Collections.EMPTY_LIST.iterator();
return true;
} else if (var instanceof OIdentifiable) {
final ArrayList<OIdentifiable> list = new ArrayList<OIdentifiable>();
list.add((OIdentifiable) var);
target = list.iterator();
} else if (var instanceof Iterable<?>)
target = ((Iterable<? extends OIdentifiable>) var).iterator();
} else
return false;
return true;
}
protected Object getResult() {
if (tempResult != null) {
for (Object d : tempResult)
if (d != null) {
if (!(d instanceof OIdentifiable))
// NON-DOCUMENT AS RESULT, COMES FROM EXPAND? CREATE A DOCUMENT AT THE FLY
d = new ODocument().field("value", d);
request.getResultListener().result(d);
}
}
if (request instanceof OSQLSynchQuery)
return ((OSQLSynchQuery<ORecordSchemaAware<?>>) request).getResult();
return null;
}
protected boolean handleResult(final OIdentifiable iRecord, boolean iCloneIt) {
if (iRecord != null) {
resultCount++;
OIdentifiable recordCopy = iRecord instanceof ORecord<?> ? ((ORecord<?>) iRecord).copy() : iRecord.getIdentity().copy();
if (recordCopy != null)
// CALL THE LISTENER NOW
if (request.getResultListener() != null)
request.getResultListener().result(recordCopy);
if (limit > -1 && resultCount >= limit)
// BREAK THE EXECUTION
return false;
}
return true;
}
protected void parseLet() {
let = new LinkedHashMap<String, Object>();
boolean stop = false;
while (!stop) {
// PARSE THE KEY
parserNextWord(false);
final String letName = parserGetLastWord();
parserOptionalKeyword("=");
parserNextWord(false, " =><,\r\n");
// PARSE THE VALUE
String letValueAsString = parserGetLastWord();
final Object letValue;
// TRY TO PARSE AS FUNCTION
final Object func = OSQLHelper.getFunction(parsedTarget, letValueAsString);
if (func != null)
letValue = func;
else if (letValueAsString.startsWith("(")) {
letValue = new OSQLSynchQuery<Object>(letValueAsString.substring(1, letValueAsString.length() - 1));
} else
letValue = letValueAsString;
let.put(letName, letValue);
stop = parserGetLastSeparator() == ' ';
}
}
/**
* Parses the limit keyword if found.
*
* @param w
*
* @return
* @return the limit found as integer, or -1 if no limit is found. -1 means no limits.
* @throws OCommandSQLParsingException
* if no valid limit has been found
*/
protected int parseLimit(final String w) throws OCommandSQLParsingException {
if (!w.equals(KEYWORD_LIMIT))
return -1;
parserNextWord(true);
final String word = parserGetLastWord();
try {
limit = Integer.parseInt(word);
} catch (Exception e) {
throwParsingException("Invalid LIMIT value setted to '" + word + "' but it should be a valid integer. Example: LIMIT 10");
}
if (limit == 0)
throwParsingException("Invalid LIMIT value setted to ZERO. Use -1 to ignore the limit or use a positive number. Example: LIMIT 10");
return limit;
}
/**
* Parses the skip keyword if found.
*
* @param w
*
* @return
* @return the skip found as integer, or -1 if no skip is found. -1 means no skip.
* @throws OCommandSQLParsingException
* if no valid skip has been found
*/
protected int parseSkip(final String w) throws OCommandSQLParsingException {
if (!w.equals(KEYWORD_SKIP))
return -1;
parserNextWord(true);
final String word = parserGetLastWord();
try {
skip = Integer.parseInt(word);
} catch (Exception e) {
throwParsingException("Invalid SKIP value setted to '" + word
+ "' but it should be a valid positive integer. Example: SKIP 10");
}
if (skip < 0)
throwParsingException("Invalid SKIP value setted to the negative number '" + word
+ "'. Only positive numbers are valid. Example: SKIP 10");
return skip;
}
protected boolean filter(final ORecordInternal<?> iRecord) {
context.setVariable("current", iRecord);
if (iRecord instanceof ORecordSchemaAware<?>) {
// CHECK THE TARGET CLASS
final ORecordSchemaAware<?> recordSchemaAware = (ORecordSchemaAware<?>) iRecord;
Map<OClass, String> targetClasses = parsedTarget.getTargetClasses();
// check only classes that specified in query will go to result set
if ((targetClasses != null) && (!targetClasses.isEmpty())) {
for (OClass targetClass : targetClasses.keySet()) {
if (!targetClass.isSuperClassOf(recordSchemaAware.getSchemaClass()))
return false;
}
context.updateMetric("documentAnalyzedCompatibleClass", +1);
}
}
return evaluateRecord(iRecord);
}
protected boolean evaluateRecord(final ORecord<?> iRecord) {
assignLetClauses(iRecord);
if (compiledFilter == null)
return true;
return (Boolean) compiledFilter.evaluate(iRecord, null, context);
}
protected void assignLetClauses(final ORecord<?> iRecord) {
if (let != null && !let.isEmpty()) {
// BIND CONTEXT VARIABLES
for (Entry<String, Object> entry : let.entrySet()) {
String varName = entry.getKey();
if (varName.startsWith("$"))
varName = varName.substring(1);
final Object letValue = entry.getValue();
Object varValue;
if (letValue instanceof OSQLSynchQuery<?>) {
final OSQLSynchQuery<Object> subQuery = (OSQLSynchQuery<Object>) letValue;
subQuery.reset();
subQuery.resetPagination();
subQuery.getContext().setParent(context);
subQuery.getContext().setVariable("current", iRecord);
varValue = ODatabaseRecordThreadLocal.INSTANCE.get().query(subQuery);
} else if (letValue instanceof OSQLFunctionRuntime) {
final OSQLFunctionRuntime f = (OSQLFunctionRuntime) letValue;
if (f.getFunction().aggregateResults()) {
f.execute(iRecord, null, context);
varValue = f.getFunction().getResult();
} else
varValue = f.execute(iRecord, null, context);
} else
varValue = ODocumentHelper.getFieldValue(iRecord, ((String) letValue).trim(), context);
context.setVariable(varName, varValue);
}
}
}
protected void searchInClasses() {
final OClass cls = parsedTarget.getTargetClasses().keySet().iterator().next();
final ODatabaseRecord database = getDatabase();
database.checkSecurity(ODatabaseSecurityResources.CLASS, ORole.PERMISSION_READ, cls.getName().toLowerCase());
// NO INDEXES: SCAN THE ENTIRE CLUSTER
final ORID[] range = getRange();
target = new ORecordIteratorClass<ORecordInternal<?>>(database, (ODatabaseRecordAbstract) database, cls.getName(), true,
request.isUseCache(), false).setRange(range[0], range[1]);
}
protected void searchInClusters() {
final ODatabaseRecord database = getDatabase();
final Set<Integer> clusterIds = new HashSet<Integer>();
for (String clusterName : parsedTarget.getTargetClusters().keySet()) {
if (clusterName == null || clusterName.length() == 0)
throw new OCommandExecutionException("No cluster or schema class selected in query");
database.checkSecurity(ODatabaseSecurityResources.CLUSTER, ORole.PERMISSION_READ, clusterName.toLowerCase());
if (Character.isDigit(clusterName.charAt(0))) {
// GET THE CLUSTER NUMBER
for (int clusterId : OStringSerializerHelper.splitIntArray(clusterName)) {
if (clusterId == -1)
throw new OCommandExecutionException("Cluster '" + clusterName + "' not found");
clusterIds.add(clusterId);
}
} else {
// GET THE CLUSTER NUMBER BY THE CLASS NAME
final int clusterId = database.getClusterIdByName(clusterName.toLowerCase());
if (clusterId == -1)
throw new OCommandExecutionException("Cluster '" + clusterName + "' not found");
clusterIds.add(clusterId);
}
}
// CREATE CLUSTER AS ARRAY OF INT
final int[] clIds = new int[clusterIds.size()];
int i = 0;
for (int c : clusterIds)
clIds[i++] = c;
final ORID[] range = getRange();
target = new ORecordIteratorClusters<ORecordInternal<?>>(database, database, clIds, request.isUseCache(), false).setRange(
range[0], range[1]);
}
protected void applyLimitAndSkip() {
if (tempResult != null && (limit > 0 || skip > 0)) {
final List<OIdentifiable> newList = new ArrayList<OIdentifiable>();
// APPLY LIMIT
if (tempResult instanceof List<?>) {
final List<OIdentifiable> t = (List<OIdentifiable>) tempResult;
final int start = Math.min(skip, t.size());
final int tot = Math.min(limit + start, t.size());
for (int i = start; i < tot; ++i)
newList.add(t.get(i));
t.clear();
tempResult = newList;
}
}
}
/**
* Optimizes the condition tree.
*
* @return
*/
protected void optimize() {
if (compiledFilter != null)
optimizeBranch(null, compiledFilter.getRootCondition());
}
/**
* Check function arguments and pre calculate it if possible
*
* @param function
* @return optimized function, same function if no change
*/
protected Object optimizeFunction(OSQLFunctionRuntime function) {
// boolean precalculate = true;
// for (int i = 0; i < function.configuredParameters.length; ++i) {
// if (function.configuredParameters[i] instanceof OSQLFilterItemField) {
// precalculate = false;
// } else if (function.configuredParameters[i] instanceof OSQLFunctionRuntime) {
// final Object res = optimizeFunction((OSQLFunctionRuntime) function.configuredParameters[i]);
// function.configuredParameters[i] = res;
// if (res instanceof OSQLFunctionRuntime || res instanceof OSQLFilterItemField) {
// // function might have been optimized but result is still not static
// precalculate = false;
// }
// }
// }
//
// if (precalculate) {
// // all fields are static, we can calculate it only once.
// return function.execute(null, null, null); // we can pass nulls here, they wont be used
// } else {
return function;
// }
}
protected void optimizeBranch(final OSQLFilterCondition iParentCondition, OSQLFilterCondition iCondition) {
if (iCondition == null)
return;
Object left = iCondition.getLeft();
if (left instanceof OSQLFilterCondition) {
// ANALYSE LEFT RECURSIVELY
optimizeBranch(iCondition, (OSQLFilterCondition) left);
} else if (left instanceof OSQLFunctionRuntime) {
left = optimizeFunction((OSQLFunctionRuntime) left);
iCondition.setLeft(left);
}
Object right = iCondition.getRight();
if (right instanceof OSQLFilterCondition) {
// ANALYSE RIGHT RECURSIVELY
optimizeBranch(iCondition, (OSQLFilterCondition) right);
} else if (right instanceof OSQLFunctionRuntime) {
right = optimizeFunction((OSQLFunctionRuntime) right);
iCondition.setRight(right);
}
final OQueryOperator oper = iCondition.getOperator();
Object result = null;
if (left instanceof OSQLFilterItemField && right instanceof OSQLFilterItemField) {
if (((OSQLFilterItemField) left).getRoot().equals(((OSQLFilterItemField) right).getRoot())) {
if (oper instanceof OQueryOperatorEquals)
result = Boolean.TRUE;
else if (oper instanceof OQueryOperatorNotEquals)
result = Boolean.FALSE;
}
}
if (result != null) {
if (iParentCondition != null)
if (iCondition == iParentCondition.getLeft())
// REPLACE LEFT
iCondition.setLeft(result);
else
// REPLACE RIGHT
iCondition.setRight(result);
else {
// REPLACE ROOT CONDITION
if (result instanceof Boolean && ((Boolean) result))
compiledFilter.setRootCondition(null);
}
}
}
protected ORID[] getRange() {
final ORID beginRange;
final ORID endRange;
final OSQLFilterCondition rootCondition = compiledFilter == null ? null : compiledFilter.getRootCondition();
if (compiledFilter == null || rootCondition == null) {
if (request instanceof OSQLSynchQuery)
beginRange = ((OSQLSynchQuery<ORecordSchemaAware<?>>) request).getNextPageRID();
else
beginRange = null;
endRange = null;
} else {
final ORID conditionBeginRange = rootCondition.getBeginRidRange();
final ORID conditionEndRange = rootCondition.getEndRidRange();
final ORID nextPageRid;
if (request instanceof OSQLSynchQuery)
nextPageRid = ((OSQLSynchQuery<ORecordSchemaAware<?>>) request).getNextPageRID();
else
nextPageRid = null;
if (conditionBeginRange != null && nextPageRid != null)
beginRange = conditionBeginRange.compareTo(nextPageRid) > 0 ? conditionBeginRange : nextPageRid;
else if (conditionBeginRange != null)
beginRange = conditionBeginRange;
else
beginRange = nextPageRid;
endRange = conditionEndRange;
}
return new ORID[] { beginRange, endRange };
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_OCommandExecutorSQLResultsetAbstract.java
|
204 |
public class OStorageRemote extends OStorageAbstract implements OStorageProxy, OChannelListener {
private static final String DEFAULT_HOST = "localhost";
private static final int DEFAULT_PORT = 2424;
private static final String ADDRESS_SEPARATOR = ";";
public static final String PARAM_MIN_POOL = "minpool";
public static final String PARAM_MAX_POOL = "maxpool";
public static final String PARAM_DB_TYPE = "dbtype";
private static final String DRIVER_NAME = "OrientDB Java";
private final ExecutorService asynchExecutor;
private OContextConfiguration clientConfiguration;
private int connectionRetry;
private int connectionRetryDelay;
private final List<OChannelBinaryAsynchClient> networkPool = new ArrayList<OChannelBinaryAsynchClient>();
private final OLock networkPoolLock = new OAdaptiveLock();
private int networkPoolCursor = 0;
protected final List<String> serverURLs = new ArrayList<String>();
private OCluster[] clusters = new OCluster[0];
protected final Map<String, OCluster> clusterMap = new ConcurrentHashMap<String, OCluster>();
private int defaultClusterId;
private int minPool;
private int maxPool;
private final ODocument clusterConfiguration = new ODocument();
private ORemoteServerEventListener asynchEventListener;
private String connectionDbType;
private String connectionUserName;
private String connectionUserPassword;
private Map<String, Object> connectionOptions;
private final String clientId;
private final int maxReadQueue;
public OStorageRemote(final String iClientId, final String iURL, final String iMode) throws IOException {
super(iURL, iURL, iMode, 0, new OCacheLevelTwoLocatorRemote()); // NO TIMEOUT @SINCE 1.5
clientId = iClientId;
configuration = null;
clientConfiguration = new OContextConfiguration();
connectionRetry = clientConfiguration.getValueAsInteger(OGlobalConfiguration.NETWORK_SOCKET_RETRY);
connectionRetryDelay = clientConfiguration.getValueAsInteger(OGlobalConfiguration.NETWORK_SOCKET_RETRY_DELAY);
asynchEventListener = new OStorageRemoteAsynchEventListener(this);
parseServerURLs();
asynchExecutor = Executors.newSingleThreadScheduledExecutor();
maxReadQueue = Runtime.getRuntime().availableProcessors() - 1;
}
public int getSessionId() {
return OStorageRemoteThreadLocal.INSTANCE.get().sessionId.intValue();
}
public String getServerURL() {
return OStorageRemoteThreadLocal.INSTANCE.get().serverURL;
}
public void setSessionId(final String iServerURL, final int iSessionId) {
final OStorageRemoteSession tl = OStorageRemoteThreadLocal.INSTANCE.get();
tl.serverURL = iServerURL;
tl.sessionId = iSessionId;
}
public ORemoteServerEventListener getAsynchEventListener() {
return asynchEventListener;
}
public void setAsynchEventListener(final ORemoteServerEventListener iListener) {
asynchEventListener = iListener;
}
public void removeRemoteServerEventListener() {
asynchEventListener = null;
}
public void open(final String iUserName, final String iUserPassword, final Map<String, Object> iOptions) {
addUser();
lock.acquireExclusiveLock();
try {
connectionUserName = iUserName;
connectionUserPassword = iUserPassword;
connectionOptions = iOptions != null ? new HashMap<String, Object>(iOptions) : null; // CREATE A COPY TO AVOID USER
// MANIPULATION
// POST OPEN
openRemoteDatabase();
configuration = new OStorageConfiguration(this);
configuration.load();
} catch (Exception e) {
if (!OGlobalConfiguration.STORAGE_KEEP_OPEN.getValueAsBoolean())
close();
if (e instanceof RuntimeException)
// PASS THROUGH
throw (RuntimeException) e;
else
throw new OStorageException("Cannot open the remote storage: " + name, e);
} finally {
lock.releaseExclusiveLock();
}
}
public void reload() {
checkConnection();
lock.acquireExclusiveLock();
try {
OChannelBinaryAsynchClient network = null;
do {
try {
try {
network = beginRequest(OChannelBinaryProtocol.REQUEST_DB_RELOAD);
} finally {
endRequest(network);
}
try {
beginResponse(network);
readDatabaseInformation(network);
break;
} finally {
endResponse(network);
}
} catch (Exception e) {
handleException(network, "Error on reloading database information", e);
}
} while (true);
} finally {
lock.releaseExclusiveLock();
}
}
public void create(final Map<String, Object> iOptions) {
throw new UnsupportedOperationException(
"Cannot create a database in a remote server. Please use the console or the OServerAdmin class.");
}
public boolean exists() {
throw new UnsupportedOperationException(
"Cannot check the existance of a database in a remote server. Please use the console or the OServerAdmin class.");
}
public void close(final boolean iForce) {
OChannelBinaryAsynchClient network = null;
lock.acquireExclusiveLock();
try {
networkPoolLock.lock();
try {
if (networkPool.size() > 0) {
try {
network = beginRequest(OChannelBinaryProtocol.REQUEST_DB_CLOSE);
} finally {
endRequest(network);
}
}
} finally {
networkPoolLock.unlock();
}
setSessionId(null, -1);
if (!checkForClose(iForce))
return;
networkPoolLock.lock();
try {
for (OChannelBinaryAsynchClient n : new ArrayList<OChannelBinaryAsynchClient>(networkPool))
n.close();
networkPool.clear();
} finally {
networkPoolLock.unlock();
}
level2Cache.shutdown();
super.close(iForce);
status = STATUS.CLOSED;
Orient.instance().unregisterStorage(this);
} catch (Exception e) {
OLogManager.instance().debug(this, "Error on closing remote connection: %s", network);
network.close();
} finally {
lock.releaseExclusiveLock();
}
}
public void delete() {
throw new UnsupportedOperationException(
"Cannot delete a database in a remote server. Please use the console or the OServerAdmin class.");
}
public Set<String> getClusterNames() {
lock.acquireSharedLock();
try {
return new HashSet<String>(clusterMap.keySet());
} finally {
lock.releaseSharedLock();
}
}
public OStorageOperationResult<OPhysicalPosition> createRecord(final int iDataSegmentId, final ORecordId iRid,
final byte[] iContent, ORecordVersion iRecordVersion, final byte iRecordType, int iMode,
final ORecordCallback<OClusterPosition> iCallback) {
checkConnection();
if (iMode == 1 && iCallback == null)
// ASYNCHRONOUS MODE NO ANSWER
iMode = 2;
final OPhysicalPosition ppos = new OPhysicalPosition(iDataSegmentId, -1, iRecordType);
OChannelBinaryAsynchClient lastNetworkUsed = null;
do {
try {
final OChannelBinaryAsynchClient network = beginRequest(OChannelBinaryProtocol.REQUEST_RECORD_CREATE);
lastNetworkUsed = network;
try {
if (network.getSrvProtocolVersion() >= 10)
// SEND THE DATA SEGMENT ID
network.writeInt(iDataSegmentId);
network.writeShort((short) iRid.clusterId);
network.writeBytes(iContent);
network.writeByte(iRecordType);
network.writeByte((byte) iMode);
} finally {
endRequest(network);
}
switch (iMode) {
case 0:
// SYNCHRONOUS
try {
beginResponse(network);
iRid.clusterPosition = network.readClusterPosition();
ppos.clusterPosition = iRid.clusterPosition;
if (network.getSrvProtocolVersion() >= 11) {
ppos.recordVersion = network.readVersion();
} else
ppos.recordVersion = OVersionFactory.instance().createVersion();
return new OStorageOperationResult<OPhysicalPosition>(ppos);
} finally {
endResponse(network);
}
case 1:
// ASYNCHRONOUS
if (iCallback != null) {
final int sessionId = getSessionId();
Callable<Object> response = new Callable<Object>() {
public Object call() throws Exception {
final OClusterPosition result;
try {
OStorageRemoteThreadLocal.INSTANCE.get().sessionId = sessionId;
beginResponse(network);
result = network.readClusterPosition();
if (network.getSrvProtocolVersion() >= 11)
network.readVersion();
} finally {
endResponse(network);
OStorageRemoteThreadLocal.INSTANCE.get().sessionId = -1;
}
iCallback.call(iRid, result);
return null;
}
};
asynchExecutor.submit(new FutureTask<Object>(response));
}
}
return new OStorageOperationResult<OPhysicalPosition>(ppos);
} catch (OModificationOperationProhibitedException mope) {
handleDBFreeze();
} catch (Exception e) {
handleException(lastNetworkUsed, "Error on create record in cluster: " + iRid.clusterId, e);
}
} while (true);
}
@Override
public boolean updateReplica(int dataSegmentId, ORecordId rid, byte[] content, ORecordVersion recordVersion, byte recordType)
throws IOException {
throw new UnsupportedOperationException("updateReplica()");
}
@Override
public <V> V callInRecordLock(Callable<V> iCallable, ORID rid, boolean iExclusiveLock) {
throw new UnsupportedOperationException("callInRecordLock()");
}
@Override
public ORecordMetadata getRecordMetadata(final ORID rid) {
OChannelBinaryAsynchClient network = null;
do {
try {
try {
network = beginRequest(OChannelBinaryProtocol.REQUEST_RECORD_METADATA);
network.writeRID(rid);
} finally {
endRequest(network);
}
try {
beginResponse(network);
final ORID responseRid = network.readRID();
final ORecordVersion responseVersion = network.readVersion();
return new ORecordMetadata(responseRid, responseVersion);
} finally {
endResponse(network);
}
} catch (Exception e) {
handleException(network, "Error on read record " + rid, e);
}
} while (true);
}
public OStorageOperationResult<ORawBuffer> readRecord(final ORecordId iRid, final String iFetchPlan, final boolean iIgnoreCache,
final ORecordCallback<ORawBuffer> iCallback, boolean loadTombstones) {
checkConnection();
if (OStorageRemoteThreadLocal.INSTANCE.get().commandExecuting)
// PENDING NETWORK OPERATION, CAN'T EXECUTE IT NOW
return new OStorageOperationResult<ORawBuffer>(null);
OChannelBinaryAsynchClient network = null;
do {
try {
try {
network = beginRequest(OChannelBinaryProtocol.REQUEST_RECORD_LOAD);
network.writeRID(iRid);
network.writeString(iFetchPlan != null ? iFetchPlan : "");
if (network.getSrvProtocolVersion() >= 9)
network.writeByte((byte) (iIgnoreCache ? 1 : 0));
if (network.getSrvProtocolVersion() >= 13)
network.writeByte(loadTombstones ? (byte) 1 : (byte) 0);
} finally {
endRequest(network);
}
try {
beginResponse(network);
if (network.readByte() == 0)
return new OStorageOperationResult<ORawBuffer>(null);
final ORawBuffer buffer = new ORawBuffer(network.readBytes(), network.readVersion(), network.readByte());
final ODatabaseRecord database = ODatabaseRecordThreadLocal.INSTANCE.getIfDefined();
ORecordInternal<?> record;
while (network.readByte() == 2) {
record = (ORecordInternal<?>) OChannelBinaryProtocol.readIdentifiable(network);
if (database != null)
// PUT IN THE CLIENT LOCAL CACHE
database.getLevel1Cache().updateRecord(record);
}
return new OStorageOperationResult<ORawBuffer>(buffer);
} finally {
endResponse(network);
}
} catch (Exception e) {
handleException(network, "Error on read record " + iRid, e);
}
} while (true);
}
public OStorageOperationResult<ORecordVersion> updateRecord(final ORecordId iRid, final byte[] iContent,
final ORecordVersion iVersion, final byte iRecordType, int iMode, final ORecordCallback<ORecordVersion> iCallback) {
checkConnection();
if (iMode == 1 && iCallback == null)
// ASYNCHRONOUS MODE NO ANSWER
iMode = 2;
OChannelBinaryAsynchClient lastNetworkUsed = null;
do {
try {
final OChannelBinaryAsynchClient network = beginRequest(OChannelBinaryProtocol.REQUEST_RECORD_UPDATE);
lastNetworkUsed = network;
try {
network.writeRID(iRid);
network.writeBytes(iContent);
network.writeVersion(iVersion);
network.writeByte(iRecordType);
network.writeByte((byte) iMode);
} finally {
endRequest(network);
}
switch (iMode) {
case 0:
// SYNCHRONOUS
try {
beginResponse(network);
return new OStorageOperationResult<ORecordVersion>(network.readVersion());
} finally {
endResponse(network);
}
case 1:
// ASYNCHRONOUS
if (iCallback != null) {
final int sessionId = getSessionId();
Callable<Object> response = new Callable<Object>() {
public Object call() throws Exception {
ORecordVersion result;
try {
OStorageRemoteThreadLocal.INSTANCE.get().sessionId = sessionId;
beginResponse(network);
result = network.readVersion();
} finally {
endResponse(network);
OStorageRemoteThreadLocal.INSTANCE.get().sessionId = -1;
}
iCallback.call(iRid, result);
return null;
}
};
asynchExecutor.submit(new FutureTask<Object>(response));
}
}
return new OStorageOperationResult<ORecordVersion>(iVersion);
} catch (OModificationOperationProhibitedException mope) {
handleDBFreeze();
} catch (Exception e) {
handleException(lastNetworkUsed, "Error on update record " + iRid, e);
}
} while (true);
}
public OStorageOperationResult<Boolean> deleteRecord(final ORecordId iRid, final ORecordVersion iVersion, int iMode,
final ORecordCallback<Boolean> iCallback) {
checkConnection();
if (iMode == 1 && iCallback == null)
// ASYNCHRONOUS MODE NO ANSWER
iMode = 2;
OChannelBinaryAsynchClient network = null;
do {
try {
network = beginRequest(OChannelBinaryProtocol.REQUEST_RECORD_DELETE);
return new OStorageOperationResult<Boolean>(deleteRecord(iRid, iVersion, iMode, iCallback, network));
} catch (OModificationOperationProhibitedException mope) {
handleDBFreeze();
} catch (Exception e) {
handleException(network, "Error on delete record " + iRid, e);
}
} while (true);
}
@Override
public boolean cleanOutRecord(ORecordId recordId, ORecordVersion recordVersion, int iMode, ORecordCallback<Boolean> callback) {
checkConnection();
if (iMode == 1 && callback == null)
// ASYNCHRONOUS MODE NO ANSWER
iMode = 2;
OChannelBinaryAsynchClient network = null;
do {
try {
network = beginRequest(OChannelBinaryProtocol.REQUEST_RECORD_CLEAN_OUT);
return deleteRecord(recordId, recordVersion, iMode, callback, network);
} catch (OModificationOperationProhibitedException mope) {
handleDBFreeze();
} catch (Exception e) {
handleException(network, "Error on clean out record " + recordId, e);
}
} while (true);
}
@Override
public void backup(OutputStream out, Map<String, Object> options, Callable<Object> callable) throws IOException {
throw new UnsupportedOperationException("backup");
}
@Override
public void restore(InputStream in, Map<String, Object> options, Callable<Object> callable) throws IOException {
throw new UnsupportedOperationException("restore");
}
public long count(final int iClusterId) {
return count(new int[] { iClusterId });
}
@Override
public long count(int iClusterId, boolean countTombstones) {
return count(new int[] { iClusterId }, countTombstones);
}
public OClusterPosition[] getClusterDataRange(final int iClusterId) {
checkConnection();
OChannelBinaryAsynchClient network = null;
do {
try {
try {
network = beginRequest(OChannelBinaryProtocol.REQUEST_DATACLUSTER_DATARANGE);
network.writeShort((short) iClusterId);
} finally {
endRequest(network);
}
try {
beginResponse(network);
return new OClusterPosition[] { network.readClusterPosition(), network.readClusterPosition() };
} finally {
endResponse(network);
}
} catch (Exception e) {
handleException(network, "Error on getting last entry position count in cluster: " + iClusterId, e);
}
} while (true);
}
@Override
public OPhysicalPosition[] higherPhysicalPositions(int iClusterId, OPhysicalPosition iClusterPosition) {
checkConnection();
OChannelBinaryAsynchClient network = null;
do {
try {
try {
network = beginRequest(OChannelBinaryProtocol.REQUEST_POSITIONS_HIGHER);
network.writeInt(iClusterId);
network.writeClusterPosition(iClusterPosition.clusterPosition);
} finally {
endRequest(network);
}
try {
beginResponse(network);
final int positionsCount = network.readInt();
if (positionsCount == 0) {
return new OPhysicalPosition[0];
} else {
return readPhysicalPositions(network, positionsCount);
}
} finally {
endResponse(network);
}
} catch (Exception e) {
handleException(network, "Error on retrieving higher positions after " + iClusterPosition.clusterPosition, e);
}
} while (true);
}
@Override
public OPhysicalPosition[] ceilingPhysicalPositions(int clusterId, OPhysicalPosition physicalPosition) {
checkConnection();
OChannelBinaryAsynchClient network = null;
do {
try {
try {
network = beginRequest(OChannelBinaryProtocol.REQUEST_POSITIONS_CEILING);
network.writeInt(clusterId);
network.writeClusterPosition(physicalPosition.clusterPosition);
} finally {
endRequest(network);
}
try {
beginResponse(network);
final int positionsCount = network.readInt();
if (positionsCount == 0) {
return new OPhysicalPosition[0];
} else {
return readPhysicalPositions(network, positionsCount);
}
} finally {
endResponse(network);
}
} catch (Exception e) {
handleException(network, "Error on retrieving ceiling positions after " + physicalPosition.clusterPosition, e);
}
} while (true);
}
private OPhysicalPosition[] readPhysicalPositions(OChannelBinaryAsynchClient network, int positionsCount) throws IOException {
final OPhysicalPosition[] physicalPositions = new OPhysicalPosition[positionsCount];
for (int i = 0; i < physicalPositions.length; i++) {
final OPhysicalPosition position = new OPhysicalPosition();
position.clusterPosition = network.readClusterPosition();
position.dataSegmentId = network.readInt();
position.dataSegmentPos = network.readLong();
position.recordSize = network.readInt();
position.recordVersion = network.readVersion();
physicalPositions[i] = position;
}
return physicalPositions;
}
@Override
public OPhysicalPosition[] lowerPhysicalPositions(int iClusterId, OPhysicalPosition physicalPosition) {
checkConnection();
OChannelBinaryAsynchClient network = null;
do {
try {
try {
network = beginRequest(OChannelBinaryProtocol.REQUEST_POSITIONS_LOWER);
network.writeInt(iClusterId);
network.writeClusterPosition(physicalPosition.clusterPosition);
} finally {
endRequest(network);
}
try {
beginResponse(network);
final int positionsCount = network.readInt();
if (positionsCount == 0) {
return new OPhysicalPosition[0];
} else {
return readPhysicalPositions(network, positionsCount);
}
} finally {
endResponse(network);
}
} catch (Exception e) {
handleException(network, "Error on retrieving lower positions after " + physicalPosition.clusterPosition, e);
}
} while (true);
}
@Override
public OPhysicalPosition[] floorPhysicalPositions(int clusterId, OPhysicalPosition physicalPosition) {
checkConnection();
OChannelBinaryAsynchClient network = null;
do {
try {
try {
network = beginRequest(OChannelBinaryProtocol.REQUEST_POSITIONS_FLOOR);
network.writeInt(clusterId);
network.writeClusterPosition(physicalPosition.clusterPosition);
} finally {
endRequest(network);
}
try {
beginResponse(network);
final int positionsCount = network.readInt();
if (positionsCount == 0) {
return new OPhysicalPosition[0];
} else {
return readPhysicalPositions(network, positionsCount);
}
} finally {
endResponse(network);
}
} catch (Exception e) {
handleException(network, "Error on retrieving floor positions after " + physicalPosition.clusterPosition, e);
}
} while (true);
}
public long getSize() {
checkConnection();
OChannelBinaryAsynchClient network = null;
do {
try {
try {
network = beginRequest(OChannelBinaryProtocol.REQUEST_DB_SIZE);
} finally {
endRequest(network);
}
try {
beginResponse(network);
return network.readLong();
} finally {
endResponse(network);
}
} catch (Exception e) {
handleException(network, "Error on read database size", e);
}
} while (true);
}
@Override
public long countRecords() {
checkConnection();
OChannelBinaryAsynchClient network = null;
do {
try {
try {
network = beginRequest(OChannelBinaryProtocol.REQUEST_DB_COUNTRECORDS);
} finally {
endRequest(network);
}
try {
beginResponse(network);
return network.readLong();
} finally {
endResponse(network);
}
} catch (Exception e) {
handleException(network, "Error on read database record count", e);
}
} while (true);
}
public long count(final int[] iClusterIds) {
return count(iClusterIds, false);
}
public long count(final int[] iClusterIds, boolean countTombstones) {
checkConnection();
OChannelBinaryAsynchClient network = null;
do {
try {
try {
network = beginRequest(OChannelBinaryProtocol.REQUEST_DATACLUSTER_COUNT);
network.writeShort((short) iClusterIds.length);
for (int iClusterId : iClusterIds)
network.writeShort((short) iClusterId);
if (network.getSrvProtocolVersion() >= 13)
network.writeByte(countTombstones ? (byte) 1 : (byte) 0);
} finally {
endRequest(network);
}
try {
beginResponse(network);
return network.readLong();
} finally {
endResponse(network);
}
} catch (Exception e) {
handleException(network, "Error on read record count in clusters: " + Arrays.toString(iClusterIds), e);
}
} while (true);
}
/**
* Execute the command remotely and get the results back.
*/
public Object command(final OCommandRequestText iCommand) {
checkConnection();
if (!(iCommand instanceof OSerializableStream))
throw new OCommandExecutionException("Cannot serialize the command to be executed to the server side.");
OSerializableStream command = iCommand;
Object result = null;
final ODatabaseRecord database = ODatabaseRecordThreadLocal.INSTANCE.get();
OChannelBinaryAsynchClient network = null;
do {
OStorageRemoteThreadLocal.INSTANCE.get().commandExecuting = true;
try {
final OCommandRequestText aquery = iCommand;
final boolean asynch = iCommand instanceof OCommandRequestAsynch && ((OCommandRequestAsynch) iCommand).isAsynchronous();
try {
network = beginRequest(OChannelBinaryProtocol.REQUEST_COMMAND);
network.writeByte((byte) (asynch ? 'a' : 's')); // ASYNC / SYNC
network.writeBytes(OStreamSerializerAnyStreamable.INSTANCE.toStream(command));
} finally {
endRequest(network);
}
try {
beginResponse(network);
if (asynch) {
byte status;
// ASYNCH: READ ONE RECORD AT TIME
while ((status = network.readByte()) > 0) {
final ORecordInternal<?> record = (ORecordInternal<?>) OChannelBinaryProtocol.readIdentifiable(network);
if (record == null)
continue;
switch (status) {
case 1:
// PUT AS PART OF THE RESULT SET. INVOKE THE LISTENER
try {
if (!aquery.getResultListener().result(record)) {
// EMPTY THE INPUT CHANNEL
while (network.in.available() > 0)
network.in.read();
break;
}
} catch (Throwable t) {
// ABSORBE ALL THE USER EXCEPTIONS
t.printStackTrace();
}
database.getLevel1Cache().updateRecord(record);
break;
case 2:
// PUT IN THE CLIENT LOCAL CACHE
database.getLevel1Cache().updateRecord(record);
}
}
} else {
final byte type = network.readByte();
switch (type) {
case 'n':
result = null;
break;
case 'r':
result = OChannelBinaryProtocol.readIdentifiable(network);
if (result instanceof ORecord<?>)
database.getLevel1Cache().updateRecord((ORecordInternal<?>) result);
break;
case 'l':
final int tot = network.readInt();
final Collection<OIdentifiable> list = new ArrayList<OIdentifiable>(tot);
for (int i = 0; i < tot; ++i) {
final OIdentifiable resultItem = OChannelBinaryProtocol.readIdentifiable(network);
if (resultItem instanceof ORecord<?>)
database.getLevel1Cache().updateRecord((ORecordInternal<?>) resultItem);
list.add(resultItem);
}
result = list;
break;
case 'a':
final String value = new String(network.readBytes());
result = ORecordSerializerStringAbstract.fieldTypeFromStream(null, ORecordSerializerStringAbstract.getType(value),
value);
break;
default:
OLogManager.instance().warn(this, "Received unexpected result from query: %d", type);
}
if (network.getSrvProtocolVersion() >= 17) {
// LOAD THE FETCHED RECORDS IN CACHE
byte status;
while ((status = network.readByte()) > 0) {
final ORecordInternal<?> record = (ORecordInternal<?>) OChannelBinaryProtocol.readIdentifiable(network);
if (record != null && status == 2)
// PUT IN THE CLIENT LOCAL CACHE
database.getLevel1Cache().updateRecord(record);
}
}
}
break;
} finally {
if (aquery.getResultListener() != null) {
aquery.getResultListener().end();
}
endResponse(network);
}
} catch (OModificationOperationProhibitedException mope) {
handleDBFreeze();
} catch (Exception e) {
handleException(network, "Error on executing command: " + iCommand, e);
} finally {
OStorageRemoteThreadLocal.INSTANCE.get().commandExecuting = false;
}
} while (true);
return result;
}
public void commit(final OTransaction iTx, Runnable callback) {
checkConnection();
final List<ORecordOperation> committedEntries = new ArrayList<ORecordOperation>();
OChannelBinaryAsynchClient network = null;
do {
try {
OStorageRemoteThreadLocal.INSTANCE.get().commandExecuting = true;
try {
network = beginRequest(OChannelBinaryProtocol.REQUEST_TX_COMMIT);
network.writeInt(iTx.getId());
network.writeByte((byte) (iTx.isUsingLog() ? 1 : 0));
final List<ORecordOperation> tmpEntries = new ArrayList<ORecordOperation>();
if (iTx.getCurrentRecordEntries().iterator().hasNext()) {
while (iTx.getCurrentRecordEntries().iterator().hasNext()) {
for (ORecordOperation txEntry : iTx.getCurrentRecordEntries())
tmpEntries.add(txEntry);
iTx.clearRecordEntries();
if (tmpEntries.size() > 0) {
for (ORecordOperation txEntry : tmpEntries) {
commitEntry(network, txEntry);
committedEntries.add(txEntry);
}
tmpEntries.clear();
}
}
} else if (committedEntries.size() > 0) {
for (ORecordOperation txEntry : committedEntries)
commitEntry(network, txEntry);
}
// END OF RECORD ENTRIES
network.writeByte((byte) 0);
// SEND INDEX ENTRIES
network.writeBytes(iTx.getIndexChanges().toStream());
} finally {
endRequest(network);
}
try {
beginResponse(network);
final int createdRecords = network.readInt();
ORecordId currentRid;
ORecordId createdRid;
for (int i = 0; i < createdRecords; i++) {
currentRid = network.readRID();
createdRid = network.readRID();
iTx.updateIdentityAfterCommit(currentRid, createdRid);
}
final int updatedRecords = network.readInt();
ORecordId rid;
for (int i = 0; i < updatedRecords; ++i) {
rid = network.readRID();
ORecordOperation rop = iTx.getRecordEntry(rid);
if (rop != null)
rop.getRecord().getRecordVersion().copyFrom(network.readVersion());
}
committedEntries.clear();
} finally {
endResponse(network);
}
// SET ALL THE RECORDS AS UNDIRTY
for (ORecordOperation txEntry : iTx.getAllRecordEntries())
txEntry.getRecord().unload();
// UPDATE THE CACHE ONLY IF THE ITERATOR ALLOWS IT. USE THE STRATEGY TO ALWAYS REMOVE ALL THE RECORDS SINCE THEY COULD BE
// CHANGED AS CONTENT IN CASE OF TREE AND GRAPH DUE TO CROSS REFERENCES
OTransactionAbstract.updateCacheFromEntries(iTx, iTx.getAllRecordEntries(), false);
break;
} catch (OModificationOperationProhibitedException mope) {
handleDBFreeze();
} catch (Exception e) {
handleException(network, "Error on commit", e);
} finally {
OStorageRemoteThreadLocal.INSTANCE.get().commandExecuting = false;
}
} while (true);
}
public void rollback(OTransaction iTx) {
}
public int getClusterIdByName(final String iClusterName) {
checkConnection();
if (iClusterName == null)
return -1;
if (Character.isDigit(iClusterName.charAt(0)))
return Integer.parseInt(iClusterName);
final OCluster cluster = clusterMap.get(iClusterName.toLowerCase());
if (cluster == null)
return -1;
return cluster.getId();
}
public String getClusterTypeByName(final String iClusterName) {
checkConnection();
if (iClusterName == null)
return null;
final OCluster cluster = clusterMap.get(iClusterName.toLowerCase());
if (cluster == null)
return null;
return cluster.getType();
}
public int getDefaultClusterId() {
return defaultClusterId;
}
public int addCluster(final String iClusterType, final String iClusterName, final String iLocation,
final String iDataSegmentName, boolean forceListBased, final Object... iArguments) {
return addCluster(iClusterType, iClusterName, -1, iLocation, iDataSegmentName, forceListBased, iArguments);
}
public int addCluster(String iClusterType, String iClusterName, int iRequestedId, String iLocation, String iDataSegmentName,
boolean forceListBased, Object... iParameters) {
checkConnection();
OChannelBinaryAsynchClient network = null;
do {
try {
try {
network = beginRequest(OChannelBinaryProtocol.REQUEST_DATACLUSTER_ADD);
network.writeString(iClusterType.toString());
network.writeString(iClusterName);
if (network.getSrvProtocolVersion() >= 10 || iClusterType.equalsIgnoreCase("PHYSICAL"))
network.writeString(iLocation);
if (network.getSrvProtocolVersion() >= 10)
network.writeString(iDataSegmentName);
else
network.writeInt(-1);
if (network.getSrvProtocolVersion() >= 18)
network.writeShort((short) iRequestedId);
} finally {
endRequest(network);
}
try {
beginResponse(network);
final int clusterId = network.readShort();
final OClusterRemote cluster = new OClusterRemote();
cluster.setType(iClusterType);
cluster.configure(this, clusterId, iClusterName.toLowerCase(), null, 0);
if (clusters.length <= clusterId)
clusters = Arrays.copyOf(clusters, clusterId + 1);
clusters[cluster.getId()] = cluster;
clusterMap.put(cluster.getName().toLowerCase(), cluster);
return clusterId;
} finally {
endResponse(network);
}
} catch (OModificationOperationProhibitedException mphe) {
handleDBFreeze();
} catch (Exception e) {
handleException(network, "Error on add new cluster", e);
}
} while (true);
}
public boolean dropCluster(final int iClusterId, final boolean iTruncate) {
checkConnection();
OChannelBinaryAsynchClient network = null;
do {
try {
try {
network = beginRequest(OChannelBinaryProtocol.REQUEST_DATACLUSTER_DROP);
network.writeShort((short) iClusterId);
} finally {
endRequest(network);
}
byte result = 0;
try {
beginResponse(network);
result = network.readByte();
} finally {
endResponse(network);
}
if (result == 1) {
// REMOVE THE CLUSTER LOCALLY
final OCluster cluster = clusters[iClusterId];
clusters[iClusterId] = null;
clusterMap.remove(cluster.getName());
if (configuration.clusters.size() > iClusterId)
configuration.dropCluster(iClusterId); // endResponse must be called before this line, which call updateRecord
getLevel2Cache().freeCluster(iClusterId);
return true;
}
return false;
} catch (OModificationOperationProhibitedException mope) {
handleDBFreeze();
} catch (Exception e) {
handleException(network, "Error on removing of cluster", e);
}
} while (true);
}
public int addDataSegment(final String iDataSegmentName) {
return addDataSegment(iDataSegmentName, null);
}
public int addDataSegment(final String iSegmentName, final String iLocation) {
checkConnection();
OChannelBinaryAsynchClient network = null;
do {
try {
try {
network = beginRequest(OChannelBinaryProtocol.REQUEST_DATASEGMENT_ADD);
network.writeString(iSegmentName).writeString(iLocation);
} finally {
endRequest(network);
}
try {
beginResponse(network);
return network.readInt();
} finally {
endResponse(network);
}
} catch (OModificationOperationProhibitedException mphe) {
handleDBFreeze();
} catch (Exception e) {
handleException(network, "Error on add new data segment", e);
}
} while (true);
}
public boolean dropDataSegment(final String iSegmentName) {
checkConnection();
OChannelBinaryAsynchClient network = null;
do {
try {
try {
network = beginRequest(OChannelBinaryProtocol.REQUEST_DATASEGMENT_DROP);
network.writeString(iSegmentName);
} finally {
endRequest(network);
}
try {
beginResponse(network);
return network.readByte() == 1;
} finally {
endResponse(network);
}
} catch (OModificationOperationProhibitedException mope) {
handleDBFreeze();
} catch (Exception e) {
handleException(network, "Error on remove data segment", e);
}
} while (true);
}
public void synch() {
}
public String getPhysicalClusterNameById(final int iClusterId) {
lock.acquireSharedLock();
try {
if (iClusterId >= clusters.length)
return null;
final OCluster cluster = clusters[iClusterId];
return cluster != null ? cluster.getName() : null;
} finally {
lock.releaseSharedLock();
}
}
public int getClusterMap() {
return clusterMap.size();
}
public Collection<OCluster> getClusterInstances() {
lock.acquireSharedLock();
try {
return Arrays.asList(clusters);
} finally {
lock.releaseSharedLock();
}
}
public OCluster getClusterById(int iClusterId) {
lock.acquireSharedLock();
try {
if (iClusterId == ORID.CLUSTER_ID_INVALID)
// GET THE DEFAULT CLUSTER
iClusterId = defaultClusterId;
return clusters[iClusterId];
} finally {
lock.releaseSharedLock();
}
}
@Override
public long getVersion() {
throw new UnsupportedOperationException("getVersion");
}
public ODocument getClusterConfiguration() {
return clusterConfiguration;
}
/**
* Handles exceptions. In case of IO errors retries to reconnect until the configured retry times has reached.
*
* @param message
* @param exception
*/
protected void handleException(final OChannelBinaryAsynchClient iNetwork, final String message, final Exception exception) {
if (exception instanceof OTimeoutException)
// TIMEOUT, AVOID LOOP, RE-THROW IT
throw (OTimeoutException) exception;
else if (exception instanceof OException)
// RE-THROW IT
throw (OException) exception;
else if (!(exception instanceof IOException))
throw new OStorageException(message, exception);
if (status != STATUS.OPEN)
// STORAGE CLOSED: DON'T HANDLE RECONNECTION
return;
OLogManager.instance().warn(this, "Caught I/O errors from %s (local socket=%s), trying to reconnect (error: %s)", iNetwork,
iNetwork.socket.getLocalSocketAddress(), exception);
try {
iNetwork.close();
} catch (Exception e) {
// IGNORE ANY EXCEPTION
}
final long lostConnectionTime = System.currentTimeMillis();
final int currentMaxRetry;
final int currentRetryDelay;
synchronized (clusterConfiguration) {
if (!clusterConfiguration.isEmpty()) {
// IN CLUSTER: NO RETRY AND 0 SLEEP TIME BETWEEN NODES
currentMaxRetry = 1;
currentRetryDelay = 0;
} else {
currentMaxRetry = connectionRetry;
currentRetryDelay = connectionRetryDelay;
}
}
for (int retry = 0; retry < currentMaxRetry; ++retry) {
// WAIT THE DELAY BEFORE TO RETRY
if (currentRetryDelay > 0)
try {
Thread.sleep(currentRetryDelay);
} catch (InterruptedException e) {
// THREAD INTERRUPTED: RETURN EXCEPTION
Thread.currentThread().interrupt();
break;
}
try {
if (OLogManager.instance().isDebugEnabled())
OLogManager.instance()
.debug(this, "Retrying to connect to remote server #" + (retry + 1) + "/" + currentMaxRetry + "...");
// FORCE RESET OF THREAD DATA (SERVER URL + SESSION ID)
setSessionId(null, -1);
if (createConnectionPool() == 0)
// NO CONNECTION!
break;
// REACQUIRE DB SESSION ID
openRemoteDatabase();
OLogManager.instance().warn(this,
"Connection re-acquired transparently after %dms and %d retries: no errors will be thrown at application level",
System.currentTimeMillis() - lostConnectionTime, retry + 1);
// RECONNECTED!
return;
} catch (Throwable t) {
// DO NOTHING BUT CONTINUE IN THE LOOP
}
}
// RECONNECTION FAILED: THROW+LOG THE ORIGINAL EXCEPTION
throw new OStorageException(message, exception);
}
protected OChannelBinaryAsynchClient openRemoteDatabase() throws IOException {
minPool = OGlobalConfiguration.CLIENT_CHANNEL_MIN_POOL.getValueAsInteger();
maxPool = OGlobalConfiguration.CLIENT_CHANNEL_MAX_POOL.getValueAsInteger();
connectionDbType = ODatabaseDocument.TYPE;
if (connectionOptions != null && connectionOptions.size() > 0) {
if (connectionOptions.containsKey(PARAM_MIN_POOL))
minPool = Integer.parseInt(connectionOptions.get(PARAM_MIN_POOL).toString());
if (connectionOptions.containsKey(PARAM_MAX_POOL))
maxPool = Integer.parseInt(connectionOptions.get(PARAM_MAX_POOL).toString());
if (connectionOptions.containsKey(PARAM_DB_TYPE))
connectionDbType = connectionOptions.get(PARAM_DB_TYPE).toString();
}
boolean availableConnections = true;
OChannelBinaryAsynchClient network = null;
while (availableConnections) {
try {
network = getAvailableNetwork();
try {
network.writeByte(OChannelBinaryProtocol.REQUEST_DB_OPEN);
network.writeInt(getSessionId());
// @SINCE 1.0rc8
sendClientInfo(network);
network.writeString(name);
if (network.getSrvProtocolVersion() >= 8)
network.writeString(connectionDbType);
network.writeString(connectionUserName);
network.writeString(connectionUserPassword);
} finally {
endRequest(network);
}
final int sessionId;
try {
beginResponse(network);
sessionId = network.readInt();
setSessionId(network.getServerURL(), sessionId);
OLogManager.instance().debug(this, "Client connected to %s with session id=%d", network.getServerURL(), sessionId);
readDatabaseInformation(network);
// READ CLUSTER CONFIGURATION
updateClusterConfiguration(network.readBytes());
// read OrientDB release info
if (network.getSrvProtocolVersion() >= 14)
network.readString();
status = STATUS.OPEN;
return network;
} finally {
endResponse(network);
}
} catch (Exception e) {
handleException(network, "Cannot create a connection to remote server address(es): " + serverURLs, e);
}
networkPoolLock.lock();
try {
availableConnections = !networkPool.isEmpty();
} finally {
networkPoolLock.unlock();
}
}
throw new OStorageException("Cannot create a connection to remote server address(es): " + serverURLs);
}
protected void sendClientInfo(OChannelBinaryAsynchClient network) throws IOException {
if (network.getSrvProtocolVersion() >= 7) {
// @COMPATIBILITY 1.0rc8
network.writeString(DRIVER_NAME).writeString(OConstants.ORIENT_VERSION)
.writeShort((short) OChannelBinaryProtocol.CURRENT_PROTOCOL_VERSION).writeString(clientId);
}
}
/**
* Parse the URL in the following formats:<br/>
*/
protected void parseServerURLs() {
int dbPos = url.indexOf('/');
if (dbPos == -1) {
// SHORT FORM
addHost(url);
name = url;
} else {
name = url.substring(url.lastIndexOf("/") + 1);
for (String host : url.substring(0, dbPos).split(ADDRESS_SEPARATOR))
addHost(host);
}
if (serverURLs.size() == 1 && OGlobalConfiguration.NETWORK_BINARY_DNS_LOADBALANCING_ENABLED.getValueAsBoolean()) {
// LOOK FOR LOAD BALANCING DNS TXT RECORD
final String primaryServer = serverURLs.get(0);
try {
final Hashtable<String, String> env = new Hashtable<String, String>();
env.put("java.naming.factory.initial", "com.sun.jndi.dns.DnsContextFactory");
env.put("com.sun.jndi.ldap.connect.timeout",
OGlobalConfiguration.NETWORK_BINARY_DNS_LOADBALANCING_TIMEOUT.getValueAsString());
final DirContext ictx = new InitialDirContext(env);
final String hostName = primaryServer.indexOf(":") == -1 ? primaryServer : primaryServer.substring(0,
primaryServer.indexOf(":"));
final Attributes attrs = ictx.getAttributes(hostName, new String[] { "TXT" });
final Attribute attr = attrs.get("TXT");
if (attr != null) {
String configuration = (String) attr.get();
if (configuration.startsWith(""))
configuration = configuration.substring(1, configuration.length() - 1);
if (configuration != null) {
final String[] parts = configuration.split(" ");
for (String part : parts) {
if (part.startsWith("s=")) {
addHost(part.substring("s=".length()));
}
}
}
}
} catch (NamingException e) {
}
}
}
/**
* Registers the remote server with port.
*/
protected String addHost(String host) {
if (host.startsWith("localhost"))
host = "127.0.0.1" + host.substring("localhost".length());
// REGISTER THE REMOTE SERVER+PORT
if (host.indexOf(":") == -1)
host += ":" + getDefaultPort();
if (!serverURLs.contains(host))
serverURLs.add(host);
return host;
}
protected String getDefaultHost() {
return DEFAULT_HOST;
}
protected int getDefaultPort() {
return DEFAULT_PORT;
}
protected OChannelBinaryAsynchClient createNetworkConnection() throws IOException, UnknownHostException {
final String currentServerURL = getServerURL();
if (currentServerURL != null) {
// TRY WITH CURRENT URL IF ANY
try {
return connect(currentServerURL);
} catch (Exception e) {
OLogManager.instance().debug(this, "Error on connecting to %s", e, currentServerURL);
}
}
for (int serverIdx = 0; serverIdx < serverURLs.size(); ++serverIdx) {
final String server = serverURLs.get(serverIdx);
try {
final OChannelBinaryAsynchClient ch = connect(server);
if (serverIdx > 0) {
// UPDATE SERVER LIST WITH THE REACHABLE ONE AS HEAD TO SPEED UP FURTHER CONNECTIONS
serverURLs.remove(serverIdx);
serverURLs.add(0, server);
OLogManager.instance().debug(this, "New server list priority: %s...", serverURLs);
}
return ch;
} catch (Exception e) {
OLogManager.instance().debug(this, "Error on connecting to %s", e, server);
}
}
// ERROR, NO URL IS REACHABLE
final StringBuilder buffer = new StringBuilder();
for (String server : serverURLs) {
if (buffer.length() > 0)
buffer.append(',');
buffer.append(server);
}
throw new OIOException("Cannot connect to any configured remote nodes: " + buffer);
}
protected OChannelBinaryAsynchClient connect(final String server) throws IOException {
OLogManager.instance().debug(this, "Trying to connect to the remote host %s...", server);
final int sepPos = server.indexOf(":");
final String remoteHost = server.substring(0, sepPos);
final int remotePort = Integer.parseInt(server.substring(sepPos + 1));
final OChannelBinaryAsynchClient ch = new OChannelBinaryAsynchClient(remoteHost, remotePort, clientConfiguration,
OChannelBinaryProtocol.CURRENT_PROTOCOL_VERSION, asynchEventListener);
// REGISTER MYSELF AS LISTENER TO REMOVE THE CHANNEL FROM THE POOL IN CASE OF CLOSING
ch.registerListener(this);
// REGISTER IT IN THE POOL
networkPool.add(ch);
return ch;
}
protected void checkConnection() {
// lock.acquireSharedLock();
//
// try {
// synchronized (networkPool) {
//
// if (networkPool.size() == 0)
// throw new ODatabaseException("Connection is closed");
// }
//
// } finally {
// lock.releaseSharedLock();
// }
}
/**
* Acquire a network channel from the pool. Don't lock the write stream since the connection usage is exclusive.
*
* @param iCommand
* @return
* @throws IOException
*/
protected OChannelBinaryAsynchClient beginRequest(final byte iCommand) throws IOException {
final OChannelBinaryAsynchClient network = getAvailableNetwork();
network.writeByte(iCommand);
network.writeInt(getSessionId());
return network;
}
protected OChannelBinaryAsynchClient getAvailableNetwork() throws IOException, UnknownHostException {
// FIND THE FIRST FREE CHANNEL AVAILABLE
OChannelBinaryAsynchClient network = null;
int beginCursor = networkPoolCursor;
while (network == null) {
networkPoolLock.lock();
try {
if (networkPoolCursor < 0)
networkPoolCursor = 0;
else if (networkPoolCursor >= networkPool.size())
// RESTART FROM THE BEGINNING
networkPoolCursor = 0;
if (networkPool.size() == 0) {
createConnectionPool();
networkPoolCursor = 0;
}
if (networkPool.size() == 0)
throw new ONetworkProtocolException("Connection pool closed");
network = networkPool.get(networkPoolCursor);
networkPoolCursor++;
final String serverURL = getServerURL();
if (serverURL == null || network.getServerURL().equals(serverURL)) {
if (network.getLockWrite().tryAcquireLock())
// WAS UNLOCKED! USE THIS
break;
}
network = null;
if (beginCursor >= networkPool.size())
// THE POOL HAS BEEN REDUCED: RSTART FROM CURRENT POSITION
beginCursor = networkPoolCursor;
if (networkPoolCursor == beginCursor) {
// COMPLETE ROUND AND NOT FREE CONNECTIONS FOUND
if (networkPool.size() < maxPool) {
// CREATE NEW CONNECTION
network = createNetworkConnection();
network.getLockWrite().lock();
} else {
OLogManager.instance().info(this,
"Network connection pool is full (max=%d): increase max size to avoid such bottleneck on connections", maxPool);
removeDeadConnections();
final long startToWait = System.currentTimeMillis();
// TEMPORARY UNLOCK
networkPoolLock.unlock();
try {
synchronized (networkPool) {
networkPool.wait(5000);
}
} catch (InterruptedException e) {
// THREAD INTERRUPTED: RETURN EXCEPTION
Thread.currentThread().interrupt();
throw new OStorageException("Cannot acquire a connection because the thread has been interrupted");
} finally {
networkPoolLock.lock();
}
Orient
.instance()
.getProfiler()
.stopChrono("system.network.connectionPool.waitingTime", "Waiting for a free connection from the pool of channels",
startToWait);
}
}
} finally {
networkPoolLock.unlock();
}
}
return network;
}
private void removeDeadConnections() {
// FREE DEAD CONNECTIONS
int removedDeadConnections = 0;
for (OChannelBinaryAsynchClient n : new ArrayList<OChannelBinaryAsynchClient>(networkPool)) {
if (n != null && !n.isConnected()) //Fixed issue with removing of network connections though connection is active.
{
try {
n.close();
} catch (Exception e) {
}
networkPool.remove(n);
removedDeadConnections++;
}
}
OLogManager.instance().debug(this, "Found and removed %d dead connections from the network pool", removedDeadConnections);
}
/**
* Ends the request and unlock the write lock
*/
public void endRequest(final OChannelBinaryAsynchClient iNetwork) throws IOException {
if (iNetwork == null)
return;
try {
iNetwork.flush();
} catch (IOException e) {
try {
iNetwork.close();
} catch (Exception e2) {
} finally {
networkPoolLock.lock();
try {
networkPool.remove(iNetwork);
} finally {
networkPoolLock.unlock();
}
}
throw e;
} finally {
iNetwork.releaseWriteLock();
networkPoolLock.lock();
try {
synchronized (networkPool) {
networkPool.notifyAll();
}
} finally {
networkPoolLock.unlock();
}
}
}
/**
* Starts listening the response.
*/
protected void beginResponse(final OChannelBinaryAsynchClient iNetwork) throws IOException {
iNetwork.beginResponse(getSessionId());
}
/**
* End response reached: release the channel in the pool to being reused
*/
public void endResponse(final OChannelBinaryAsynchClient iNetwork) {
iNetwork.endResponse();
}
public boolean isPermanentRequester() {
return false;
}
protected void getResponse(final OChannelBinaryAsynchClient iNetwork) throws IOException {
try {
beginResponse(iNetwork);
} finally {
endResponse(iNetwork);
}
}
@SuppressWarnings("unchecked")
public void updateClusterConfiguration(final byte[] obj) {
if (obj == null)
return;
// UPDATE IT
synchronized (clusterConfiguration) {
clusterConfiguration.fromStream(obj);
final List<ODocument> members = clusterConfiguration.field("members");
if (members != null) {
serverURLs.clear();
parseServerURLs();
for (ODocument m : members)
if (m != null && !serverURLs.contains((String) m.field("name"))) {
for (Map<String, Object> listener : ((Collection<Map<String, Object>>) m.field("listeners"))) {
if (((String) listener.get("protocol")).equals("ONetworkProtocolBinary")) {
String url = (String) listener.get("listen");
if (!serverURLs.contains(url))
addHost(url);
}
}
}
}
}
}
private void commitEntry(final OChannelBinaryAsynchClient iNetwork, final ORecordOperation txEntry) throws IOException {
if (txEntry.type == ORecordOperation.LOADED)
// JUMP LOADED OBJECTS
return;
// SERIALIZE THE RECORD IF NEEDED. THIS IS DONE HERE TO CATCH EXCEPTION AND SEND A -1 AS ERROR TO THE SERVER TO SIGNAL THE ABORT
// OF TX COMMIT
byte[] stream = null;
try {
switch (txEntry.type) {
case ORecordOperation.CREATED:
case ORecordOperation.UPDATED:
stream = txEntry.getRecord().toStream();
break;
}
} catch (Exception e) {
// ABORT TX COMMIT
iNetwork.writeByte((byte) -1);
throw new OTransactionException("Error on transaction commit", e);
}
iNetwork.writeByte((byte) 1);
iNetwork.writeByte(txEntry.type);
iNetwork.writeRID(txEntry.getRecord().getIdentity());
iNetwork.writeByte(txEntry.getRecord().getRecordType());
switch (txEntry.type) {
case ORecordOperation.CREATED:
iNetwork.writeBytes(stream);
break;
case ORecordOperation.UPDATED:
iNetwork.writeVersion(txEntry.getRecord().getRecordVersion());
iNetwork.writeBytes(stream);
break;
case ORecordOperation.DELETED:
iNetwork.writeVersion(txEntry.getRecord().getRecordVersion());
break;
}
}
protected int createConnectionPool() throws IOException, UnknownHostException {
networkPoolLock.lock();
try {
if (networkPool.isEmpty())
// ALWAYS CREATE THE FIRST CONNECTION
createNetworkConnection();
// CREATE THE MINIMUM POOL
for (int i = networkPool.size(); i < minPool; ++i)
createNetworkConnection();
return networkPool.size();
} finally {
networkPoolLock.unlock();
}
}
private boolean handleDBFreeze() {
boolean retry;
OLogManager.instance().warn(this,
"DB is frozen will wait for " + OGlobalConfiguration.CLIENT_DB_RELEASE_WAIT_TIMEOUT.getValue() + " ms. and then retry.");
retry = true;
try {
Thread.sleep(OGlobalConfiguration.CLIENT_DB_RELEASE_WAIT_TIMEOUT.getValueAsInteger());
} catch (InterruptedException ie) {
retry = false;
Thread.currentThread().interrupt();
}
return retry;
}
private void readDatabaseInformation(final OChannelBinaryAsynchClient network) throws IOException {
// @COMPATIBILITY 1.0rc8
final int tot = network.getSrvProtocolVersion() >= 7 ? network.readShort() : network.readInt();
clusters = new OCluster[tot];
clusterMap.clear();
for (int i = 0; i < tot; ++i) {
final OClusterRemote cluster = new OClusterRemote();
String clusterName = network.readString();
if (clusterName != null)
clusterName = clusterName.toLowerCase();
final int clusterId = network.readShort();
final String clusterType = network.readString();
final int dataSegmentId = network.getSrvProtocolVersion() >= 12 ? (int) network.readShort() : 0;
cluster.setType(clusterType);
cluster.configure(this, clusterId, clusterName, null, dataSegmentId);
if (clusterId >= clusters.length)
clusters = Arrays.copyOf(clusters, clusterId + 1);
clusters[clusterId] = cluster;
clusterMap.put(clusterName, cluster);
}
defaultClusterId = clusterMap.get(CLUSTER_DEFAULT_NAME).getId();
}
@Override
public String getURL() {
return OEngineRemote.NAME + ":" + url;
}
public String getClientId() {
return clientId;
}
public int getDataSegmentIdByName(final String iName) {
if (iName == null)
return 0;
throw new UnsupportedOperationException("getDataSegmentIdByName()");
}
public ODataSegment getDataSegmentById(final int iDataSegmentId) {
throw new UnsupportedOperationException("getDataSegmentById()");
}
public int getClusters() {
return clusterMap.size();
}
public void setDefaultClusterId(int defaultClusterId) {
this.defaultClusterId = defaultClusterId;
}
@Override
public String getType() {
return OEngineRemote.NAME;
}
@Override
public void onChannelClose(final OChannel iChannel) {
networkPoolLock.lock();
try {
networkPool.remove(iChannel);
} finally {
networkPoolLock.unlock();
}
}
private boolean deleteRecord(final ORecordId iRid, ORecordVersion iVersion, int iMode, final ORecordCallback<Boolean> iCallback,
final OChannelBinaryAsynchClient network) throws IOException {
try {
network.writeRID(iRid);
network.writeVersion(iVersion);
network.writeByte((byte) iMode);
} finally {
endRequest(network);
}
switch (iMode) {
case 0:
// SYNCHRONOUS
try {
beginResponse(network);
return network.readByte() == 1;
} finally {
endResponse(network);
}
case 1:
// ASYNCHRONOUS
if (iCallback != null) {
final int sessionId = getSessionId();
Callable<Object> response = new Callable<Object>() {
public Object call() throws Exception {
Boolean result;
try {
OStorageRemoteThreadLocal.INSTANCE.get().sessionId = sessionId;
beginResponse(network);
result = network.readByte() == 1;
} finally {
endResponse(network);
OStorageRemoteThreadLocal.INSTANCE.get().sessionId = -1;
}
iCallback.call(iRid, result);
return null;
}
};
asynchExecutor.submit(new FutureTask<Object>(response));
}
}
return false;
}
}
| 1no label
|
client_src_main_java_com_orientechnologies_orient_client_remote_OStorageRemote.java
|
1,549 |
public class OServer {
protected ReentrantLock lock = new ReentrantLock();
protected volatile boolean running = true;
protected OServerConfigurationLoaderXml configurationLoader;
protected OServerConfiguration configuration;
protected OContextConfiguration contextConfiguration;
protected OServerShutdownHook shutdownHook;
protected Map<String, Class<? extends ONetworkProtocol>> networkProtocols = new HashMap<String, Class<? extends ONetworkProtocol>>();
protected List<OServerNetworkListener> networkListeners = new ArrayList<OServerNetworkListener>();
protected List<OServerLifecycleListener> lifecycleListeners = new ArrayList<OServerLifecycleListener>();
protected OServerPluginManager pluginManager;
protected OConfigurableHooksManager hookManager;
protected ODistributedServerManager distributedManager;
private ODatabaseDocumentPool dbPool;
private final CountDownLatch startupLatch = new CountDownLatch(1);
private Random random = new Random();
private Map<String, Object> variables = new HashMap<String, Object>();
private String databaseDirectory;
private static ThreadGroup threadGroup;
private static Map<String, OServer> distributedServers = new ConcurrentHashMap<String, OServer>();
public OServer() throws ClassNotFoundException, MalformedObjectNameException, NullPointerException,
InstanceAlreadyExistsException, MBeanRegistrationException, NotCompliantMBeanException {
defaultSettings();
OLogManager.installCustomFormatter();
threadGroup = new ThreadGroup("OrientDB Server");
OGlobalConfiguration.STORAGE_KEEP_OPEN.setValue(true);
System.setProperty("com.sun.management.jmxremote", "true");
Orient.instance().startup();
if (OGlobalConfiguration.PROFILER_ENABLED.getValueAsBoolean() && !Orient.instance().getProfiler().isRecording())
Orient.instance().getProfiler().startRecording();
shutdownHook = new OServerShutdownHook(this);
}
public OServer startup() throws InstantiationException, IllegalAccessException, ClassNotFoundException, IllegalArgumentException,
SecurityException, InvocationTargetException, NoSuchMethodException {
String config = OServerConfiguration.DEFAULT_CONFIG_FILE;
if (System.getProperty(OServerConfiguration.PROPERTY_CONFIG_FILE) != null)
config = System.getProperty(OServerConfiguration.PROPERTY_CONFIG_FILE);
Orient.instance().startup();
startup(new File(config));
Orient
.instance()
.getProfiler()
.registerHookValue("system.databases", "List of databases configured in Server", METRIC_TYPE.TEXT,
new OProfilerHookValue() {
@Override
public Object getValue() {
final StringBuilder dbs = new StringBuilder();
for (String dbName : getAvailableStorageNames().keySet()) {
if (dbs.length() > 0)
dbs.append(',');
dbs.append(dbName);
}
return dbs.toString();
}
});
return this;
}
public OServer startup(final File iConfigurationFile) throws InstantiationException, IllegalAccessException,
ClassNotFoundException, IllegalArgumentException, SecurityException, InvocationTargetException, NoSuchMethodException {
// Startup function split to allow pre-activation changes
return startup(loadConfigurationFromFile(iConfigurationFile));
}
public OServer startup(final String iConfiguration) throws InstantiationException, IllegalAccessException,
ClassNotFoundException, IllegalArgumentException, SecurityException, InvocationTargetException, NoSuchMethodException,
IOException {
return startup(new ByteArrayInputStream(iConfiguration.getBytes()));
}
public OServer startup(final InputStream iInputStream) throws InstantiationException, IllegalAccessException,
ClassNotFoundException, IllegalArgumentException, SecurityException, InvocationTargetException, NoSuchMethodException,
IOException {
if (iInputStream == null)
throw new OConfigurationException("Configuration file is null");
configurationLoader = new OServerConfigurationLoaderXml(OServerConfiguration.class, iInputStream);
configuration = configurationLoader.load();
// Startup function split to allow pre-activation changes
return startup(configuration);
}
public OServer startup(final OServerConfiguration iConfiguration) throws IllegalArgumentException, SecurityException,
InvocationTargetException, NoSuchMethodException {
OLogManager.instance().info(this, "OrientDB Server v" + OConstants.getVersion() + " is starting up...");
Orient.instance();
loadConfiguration(iConfiguration);
if (OGlobalConfiguration.USE_NODE_ID_CLUSTER_POSITION.getValueAsBoolean())
OLogManager.instance().info(
this,
"ONodeId will be used as presentation of cluster position, " + " please do not forget to set "
+ OGlobalConfiguration.USE_NODE_ID_CLUSTER_POSITION.getKey() + " property to \"true\" value on client side ...");
if (OGlobalConfiguration.ENVIRONMENT_DUMP_CFG_AT_STARTUP.getValueAsBoolean()) {
System.out.println("Dumping environment after server startup...");
OGlobalConfiguration.dumpConfiguration(System.out);
}
dbPool = new ODatabaseDocumentPool();
dbPool.setup(contextConfiguration.getValueAsInteger(OGlobalConfiguration.DB_POOL_MIN),
contextConfiguration.getValueAsInteger(OGlobalConfiguration.DB_POOL_MAX),
contextConfiguration.getValueAsLong(OGlobalConfiguration.DB_POOL_IDLE_TIMEOUT),
contextConfiguration.getValueAsLong(OGlobalConfiguration.DB_POOL_IDLE_CHECK_DELAY));
databaseDirectory = contextConfiguration.getValue("server.database.path", "${" + Orient.ORIENTDB_HOME + "}/databases/");
databaseDirectory = OSystemVariableResolver.resolveSystemVariables(databaseDirectory);
databaseDirectory = databaseDirectory.replace("//", "/");
OLogManager.instance().info(this, "Databases directory: " + new File(databaseDirectory).getAbsolutePath());
return this;
}
@SuppressWarnings("unchecked")
public OServer activate() throws ClassNotFoundException, InstantiationException, IllegalAccessException {
for (OServerLifecycleListener l : lifecycleListeners)
l.onBeforeActivate();
// REGISTER PROTOCOLS
for (OServerNetworkProtocolConfiguration p : configuration.network.protocols)
networkProtocols.put(p.name, (Class<? extends ONetworkProtocol>) Class.forName(p.implementation));
// STARTUP LISTENERS
for (OServerNetworkListenerConfiguration l : configuration.network.listeners)
networkListeners.add(new OServerNetworkListener(this, l.ipAddress, l.portRange, l.protocol, networkProtocols.get(l.protocol),
l.parameters, l.commands));
registerPlugins();
for (OServerLifecycleListener l : lifecycleListeners)
l.onAfterActivate();
try {
loadStorages();
loadUsers();
} catch (IOException e) {
OLogManager.instance().error(this, "Error on reading server configuration.", OConfigurationException.class, e);
}
OLogManager.instance().info(this, "OrientDB Server v" + OConstants.ORIENT_VERSION + " is active.");
startupLatch.countDown();
return this;
}
public void shutdown() {
if (!running)
return;
running = false;
shutdownHook.cancel();
Orient.instance().getProfiler().unregisterHookValue("system.databases");
for (OServerLifecycleListener l : lifecycleListeners)
l.onBeforeDeactivate();
OLogManager.instance().info(this, "OrientDB Server is shutting down...");
if (!Orient.isRegisterDatabaseByPath())
try {
Orient.instance().shutdown();
} catch (Throwable e) {
OLogManager.instance().error(this, "Error during OrientDB shutdown", e);
}
lock.lock();
try {
final String[] plugins = pluginManager.getPluginNames();
if (plugins.length > 0) {
// SHUTDOWN HANDLERS
OLogManager.instance().info(this, "Shutting down plugins:");
for (String pluginName : plugins) {
OLogManager.instance().info(this, "- %s", pluginName);
final OServerPluginInfo plugin = pluginManager.getPluginByName(pluginName);
try {
plugin.shutdown();
} catch (Throwable t) {
OLogManager.instance().error(this, "Error during server plugin %s shutdown.", t, plugin);
}
}
}
if (networkProtocols.size() > 0) {
// PROTOCOL SHUTDOWN
OLogManager.instance().info(this, "Shutting down protocols");
networkProtocols.clear();
}
if (networkListeners.size() > 0) {
// SHUTDOWN LISTENERS
OLogManager.instance().info(this, "Shutting down listeners:");
// SHUTDOWN LISTENERS
for (OServerNetworkListener l : networkListeners) {
OLogManager.instance().info(this, "- %s", l);
try {
l.shutdown();
} catch (Throwable e) {
OLogManager.instance().error(this, "Error during shutdown of listener %s.", e, l);
}
}
}
} finally {
lock.unlock();
}
for (OServerLifecycleListener l : lifecycleListeners)
try {
l.onAfterDeactivate();
} catch (Exception e) {
OLogManager.instance().error(this, "Error during deactivation of server lifecycle listener %s", e, l);
}
OLogManager.instance().info(this, "OrientDB Server shutdown complete");
System.out.println();
}
public String getStoragePath(final String iName) {
if (iName == null)
throw new IllegalArgumentException("Storage path is null");
final String name = iName.indexOf(':') > -1 ? iName.substring(iName.indexOf(':') + 1) : iName;
final String dbName = Orient.isRegisterDatabaseByPath() ? getDatabaseDirectory() + name : name;
final String dbPath = Orient.isRegisterDatabaseByPath() ? dbName : getDatabaseDirectory() + name;
final OStorage stg = Orient.instance().getStorage(dbName);
if (stg != null)
// ALREADY OPEN
return stg.getURL();
// SEARCH IN CONFIGURED PATHS
String dbURL = configuration.getStoragePath(name);
if (dbURL == null) {
// SEARCH IN DEFAULT DATABASE DIRECTORY
if (new File(OIOUtils.getPathFromDatabaseName(dbPath) + "/default.odh").exists())
dbURL = "local:" + dbPath;
else if (new File(OIOUtils.getPathFromDatabaseName(dbPath) + "/default.pcl").exists())
dbURL = "plocal:" + dbPath;
else
throw new OConfigurationException("Database '" + name + "' is not configured on server");
}
return dbURL;
}
public Map<String, String> getAvailableStorageNames() {
// SEARCH IN CONFIGURED PATHS
final Map<String, String> storages = new HashMap<String, String>();
if (configuration.storages != null && configuration.storages.length > 0)
for (OServerStorageConfiguration s : configuration.storages)
storages.put(OIOUtils.getDatabaseNameFromPath(s.name), s.path);
// SEARCH IN DEFAULT DATABASE DIRECTORY
final String rootDirectory = getDatabaseDirectory();
scanDatabaseDirectory(rootDirectory, new File(rootDirectory), storages);
for (OStorage storage : Orient.instance().getStorages()) {
final String storageUrl = storage.getURL();
if (storage.exists() && !storages.containsValue(storageUrl))
storages.put(OIOUtils.getDatabaseNameFromPath(storage.getName()), storageUrl);
}
return storages;
}
public String getStorageURL(final String iName) {
// SEARCH IN CONFIGURED PATHS
if (configuration.storages != null && configuration.storages.length > 0)
for (OServerStorageConfiguration s : configuration.storages)
if (s.name.equals(iName))
return s.path;
// SEARCH IN DEFAULT DATABASE DIRECTORY
final Map<String, String> storages = new HashMap<String, String>();
final String rootDirectory = getDatabaseDirectory();
scanDatabaseDirectory(rootDirectory, new File(rootDirectory), storages);
return storages.get(iName);
}
public String getDatabaseDirectory() {
return databaseDirectory;
}
public ThreadGroup getServerThreadGroup() {
return threadGroup;
}
public OServerUserConfiguration serverLogin(final String iUser, final String iPassword, final String iResource) {
if (!authenticate(iUser, iPassword, iResource))
throw new OSecurityAccessException(
"Wrong user/password to [connect] to the remote OrientDB Server instance. Get the user/password from the config/orientdb-server-config.xml file");
return getUser(iUser);
}
/**
* Authenticate a server user.
*
* @param iUserName
* Username to authenticate
* @param iPassword
* Password in clear
* @return true if authentication is ok, otherwise false
*/
public boolean authenticate(final String iUserName, final String iPassword, final String iResourceToCheck) {
final OServerUserConfiguration user = getUser(iUserName);
if (user != null && (iPassword == null || user.password.equals(iPassword))) {
if (user.resources.equals("*"))
// ACCESS TO ALL
return true;
String[] resourceParts = user.resources.split(",");
for (String r : resourceParts)
if (r.equals(iResourceToCheck))
return true;
}
// WRONG PASSWORD OR NO AUTHORIZATION
return false;
}
public OServerUserConfiguration getUser(final String iUserName) {
return configuration.getUser(iUserName);
}
public boolean existsStoragePath(final String iURL) {
return configuration.getStoragePath(iURL) != null;
}
public OServerConfiguration getConfiguration() {
return configuration;
}
public void saveConfiguration() throws IOException {
if (configurationLoader != null)
configurationLoader.save(configuration);
}
public Map<String, Class<? extends ONetworkProtocol>> getNetworkProtocols() {
return networkProtocols;
}
public List<OServerNetworkListener> getNetworkListeners() {
return networkListeners;
}
@SuppressWarnings("unchecked")
public <RET extends OServerNetworkListener> RET getListenerByProtocol(final Class<? extends ONetworkProtocol> iProtocolClass) {
for (OServerNetworkListener l : networkListeners)
if (iProtocolClass.isAssignableFrom(l.getProtocolType()))
return (RET) l;
return null;
}
public Collection<OServerPluginInfo> getPlugins() {
return pluginManager.getPlugins();
}
public OContextConfiguration getContextConfiguration() {
return contextConfiguration;
}
@SuppressWarnings("unchecked")
public <RET extends OServerPlugin> RET getPluginByClass(final Class<RET> iPluginClass) {
try {
startupLatch.await();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
for (OServerPluginInfo h : getPlugins())
if (h.getInstance() != null && h.getInstance().getClass().equals(iPluginClass))
return (RET) h.getInstance();
return null;
}
@SuppressWarnings("unchecked")
public <RET extends OServerPlugin> RET getPlugin(final String iName) {
try {
startupLatch.await();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
final OServerPluginInfo p = pluginManager.getPluginByName(iName);
if (p != null)
return (RET) p.getInstance();
return null;
}
public Object getVariable(final String iName) {
return variables.get(iName);
}
public OServer setVariable(final String iName, final Object iValue) {
if (iValue == null)
variables.remove(iName);
else
variables.put(iName, iValue);
return this;
}
protected void loadConfiguration(final OServerConfiguration iConfiguration) {
configuration = iConfiguration;
// FILL THE CONTEXT CONFIGURATION WITH SERVER'S PARAMETERS
contextConfiguration = new OContextConfiguration();
if (iConfiguration.properties != null)
for (OServerEntryConfiguration prop : iConfiguration.properties)
contextConfiguration.setValue(prop.name, prop.value);
hookManager = new OConfigurableHooksManager(iConfiguration);
}
protected OServerConfiguration loadConfigurationFromFile(final File iFile) {
try {
configurationLoader = new OServerConfigurationLoaderXml(OServerConfiguration.class, iFile);
return configurationLoader.load();
} catch (IOException e) {
OLogManager.instance().error(this, "Error on reading server configuration from file: " + iFile, e,
OConfigurationException.class);
}
return null;
}
protected void loadUsers() throws IOException {
if (configuration.users != null && configuration.users.length > 0) {
for (OServerUserConfiguration u : configuration.users) {
if (u.name.equals(OServerConfiguration.SRV_ROOT_ADMIN))
// FOUND
return;
}
}
createAdminAndDbListerUsers();
}
/**
* Load configured storages.
*/
protected void loadStorages() {
if (configuration.storages == null)
return;
String type;
for (OServerStorageConfiguration stg : configuration.storages)
if (stg.loadOnStartup) {
// @COMPATIBILITY
if (stg.userName == null)
stg.userName = OUser.ADMIN;
if (stg.userPassword == null)
stg.userPassword = OUser.ADMIN;
int idx = stg.path.indexOf(':');
if (idx == -1) {
OLogManager.instance().error(this, "-> Invalid path '" + stg.path + "' for database '" + stg.name + "'");
return;
}
type = stg.path.substring(0, idx);
ODatabaseDocument db = null;
try {
db = new ODatabaseDocumentTx(stg.path);
if (db.exists())
db.open(stg.userName, stg.userPassword);
else {
db.create();
if (stg.userName.equals(OUser.ADMIN)) {
if (!stg.userPassword.equals(OUser.ADMIN))
// CHANGE ADMIN PASSWORD
db.getMetadata().getSecurity().getUser(OUser.ADMIN).setPassword(stg.userPassword);
} else {
// CREATE A NEW USER AS ADMIN AND REMOVE THE DEFAULT ONE
db.getMetadata().getSecurity().createUser(stg.userName, stg.userPassword, new String[] { ORole.ADMIN });
db.getMetadata().getSecurity().dropUser(OUser.ADMIN);
db.close();
db.open(stg.userName, stg.userPassword);
}
}
OLogManager.instance().info(this, "-> Loaded " + type + " database '" + stg.name + "'");
} catch (Exception e) {
OLogManager.instance().error(this, "-> Cannot load " + type + " database '" + stg.name + "': " + e);
} finally {
if (db != null)
db.close();
}
}
}
public void addUser(final String iName, String iPassword, final String iPermissions) throws IOException {
if (iName == null || iName.length() == 0)
throw new IllegalArgumentException("User name null or empty");
if (iPermissions == null || iPermissions.length() == 0)
throw new IllegalArgumentException("User permissions null or empty");
if (configuration.users == null)
configuration.users = new OServerUserConfiguration[1];
else
configuration.users = Arrays.copyOf(configuration.users, configuration.users.length + 1);
if (iPassword == null)
// AUTO GENERATE PASSWORD
iPassword = OSecurityManager.instance().digest2String(String.valueOf(random.nextLong()), false);
configuration.users[configuration.users.length - 1] = new OServerUserConfiguration(iName, iPassword, iPermissions);
saveConfiguration();
}
public OServer registerLifecycleListener(final OServerLifecycleListener iListener) {
lifecycleListeners.add(iListener);
return this;
}
public OServer unregisterLifecycleListener(final OServerLifecycleListener iListener) {
lifecycleListeners.remove(iListener);
return this;
}
protected void createAdminAndDbListerUsers() throws IOException {
addUser(OServerConfiguration.SRV_ROOT_ADMIN, null, "*");
addUser(OServerConfiguration.SRV_ROOT_GUEST, OServerConfiguration.SRV_ROOT_GUEST, "connect,server.listDatabases,server.dblist");
saveConfiguration();
}
protected void registerPlugins() throws InstantiationException, IllegalAccessException, ClassNotFoundException {
pluginManager = new OServerPluginManager();
pluginManager.config(this);
pluginManager.startup();
// PLUGINS CONFIGURED IN XML
if (configuration.handlers != null) {
// ACTIVATE PLUGINS
OServerPlugin handler;
for (OServerHandlerConfiguration h : configuration.handlers) {
handler = (OServerPlugin) Class.forName(h.clazz).newInstance();
if (handler instanceof ODistributedServerManager)
distributedManager = (ODistributedServerManager) handler;
pluginManager.registerPlugin(new OServerPluginInfo(handler.getName(), null, null, null, handler, null, 0, null));
handler.config(this, h.parameters);
handler.startup();
}
}
}
protected void defaultSettings() {
OGlobalConfiguration.TX_USE_LOG.setValue(true);
OGlobalConfiguration.TX_COMMIT_SYNCH.setValue(true);
}
protected void scanDatabaseDirectory(final String rootDirectory, final File directory, final Map<String, String> storages) {
if (directory.exists() && directory.isDirectory()) {
for (File db : directory.listFiles()) {
if (db.isDirectory()) {
final File localFile = new File(db.getAbsolutePath() + "/default.odh");
final File plocalFile = new File(db.getAbsolutePath() + "/default.pcl");
if (localFile.exists()) {
final String dbPath = db.getPath().replace('\\', '/');
// FOUND DB FOLDER
storages.put(OIOUtils.getDatabaseNameFromPath(dbPath.substring(rootDirectory.length())), "local:" + dbPath);
} else if (plocalFile.exists()) {
final String dbPath = db.getPath().replace('\\', '/');
storages.put(OIOUtils.getDatabaseNameFromPath(dbPath.substring(rootDirectory.length())), "plocal:" + dbPath);
} else
// TRY TO GO IN DEEP RECURSIVELY
scanDatabaseDirectory(rootDirectory, db, storages);
}
}
}
}
public ODatabaseComplex<?> openDatabase(final String iDbType, final String iDbUrl, final String iUser, final String iPassword) {
final String path = getStoragePath(iDbUrl);
final ODatabaseComplex<?> database = Orient.instance().getDatabaseFactory().createDatabase(iDbType, path);
if (database.isClosed())
if (database.getStorage() instanceof OStorageMemory)
database.create();
else {
try {
database.open(iUser, iPassword);
} catch (OSecurityException e) {
// TRY WITH SERVER'S USER
try {
serverLogin(iUser, iPassword, "database.passthrough");
} catch (OSecurityException ex) {
throw e;
}
// SERVER AUTHENTICATED, BYPASS SECURITY
database.setProperty(ODatabase.OPTIONS.SECURITY.toString(), Boolean.FALSE);
database.open(iUser, iPassword);
}
}
return database;
}
public ODistributedServerManager getDistributedManager() {
return distributedManager;
}
public ODatabaseDocumentPool getDatabasePool() {
return dbPool;
}
public static OServer getInstance(final String iServerId) {
return distributedServers.get(iServerId);
}
public static void registerServerInstance(final String iServerId, final OServer iServer) {
distributedServers.put(iServerId, iServer);
}
}
| 1no label
|
server_src_main_java_com_orientechnologies_orient_server_OServer.java
|
97 |
final Thread thread = new Thread() {
public void run() {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
e.printStackTrace();
}
hz1.getLifecycleService().terminate();
}
};
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_ClientIssueTest.java
|
91 |
public class OScannerCommandStream implements OCommandStream {
private Scanner scanner;
public OScannerCommandStream(String commands) {
scanner = new Scanner(commands);
init();
}
public OScannerCommandStream(File file) throws FileNotFoundException {
scanner = new Scanner(file);
init();
}
private void init() {
scanner.useDelimiter(";(?=([^\"]*\"[^\"]*\")*[^\"]*$)(?=([^']*'[^']*')*[^']*$)|\n");
}
@Override
public boolean hasNext() {
return scanner.hasNext();
}
@Override
public String nextCommand() {
return scanner.next().trim();
}
@Override
public void close() {
scanner.close();
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_console_OScannerCommandStream.java
|
2,049 |
public class EntryOperation extends LockAwareOperation implements BackupAwareOperation {
private static final EntryEventType __NO_NEED_TO_FIRE_EVENT = null;
private EntryProcessor entryProcessor;
private EntryEventType eventType;
private Object response;
protected Object oldValue;
public EntryOperation(String name, Data dataKey, EntryProcessor entryProcessor) {
super(name, dataKey);
this.entryProcessor = entryProcessor;
}
public EntryOperation() {
}
public void innerBeforeRun() {
final ManagedContext managedContext = getNodeEngine().getSerializationService().getManagedContext();
managedContext.initialize(entryProcessor);
}
public void run() {
final long start = System.currentTimeMillis();
oldValue = recordStore.getMapEntry(dataKey).getValue();
final LocalMapStatsImpl mapStats = mapService.getLocalMapStatsImpl(name);
final Object valueBeforeProcess = mapService.toObject(oldValue);
final MapEntrySimple entry = new MapEntrySimple(mapService.toObject(dataKey), valueBeforeProcess);
response = mapService.toData(entryProcessor.process(entry));
final Object valueAfterProcess = entry.getValue();
// no matching data by key.
if (oldValue == null && valueAfterProcess == null) {
eventType = __NO_NEED_TO_FIRE_EVENT;
} else if (valueAfterProcess == null) {
recordStore.remove(dataKey);
mapStats.incrementRemoves(getLatencyFrom(start));
eventType = EntryEventType.REMOVED;
} else {
if (oldValue == null) {
mapStats.incrementPuts(getLatencyFrom(start));
eventType = EntryEventType.ADDED;
}
// take this case as a read so no need to fire an event.
else if (!entry.isModified()) {
mapStats.incrementGets(getLatencyFrom(start));
eventType = __NO_NEED_TO_FIRE_EVENT;
} else {
mapStats.incrementPuts(getLatencyFrom(start));
eventType = EntryEventType.UPDATED;
}
if (eventType != __NO_NEED_TO_FIRE_EVENT) {
recordStore.put(new AbstractMap.SimpleImmutableEntry<Data, Object>(dataKey, entry.getValue()));
dataValue = mapService.toData(entry.getValue());
}
}
}
public void afterRun() throws Exception {
super.afterRun();
if (eventType == __NO_NEED_TO_FIRE_EVENT) {
return;
}
mapService.publishEvent(getCallerAddress(), name, eventType, dataKey, mapService.toData(oldValue), dataValue);
invalidateNearCaches();
if (mapContainer.getWanReplicationPublisher() != null && mapContainer.getWanMergePolicy() != null) {
if (EntryEventType.REMOVED.equals(eventType)) {
mapService.publishWanReplicationRemove(name, dataKey, Clock.currentTimeMillis());
} else {
Record record = recordStore.getRecord(dataKey);
final SimpleEntryView entryView = mapService.createSimpleEntryView(dataKey,mapService.toData(dataValue),record);
mapService.publishWanReplicationUpdate(name, entryView);
}
}
}
@Override
public void onWaitExpire() {
getResponseHandler().sendResponse(null);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
entryProcessor = in.readObject();
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeObject(entryProcessor);
}
@Override
public Object getResponse() {
return response;
}
@Override
public String toString() {
return "EntryOperation{}";
}
public Operation getBackupOperation() {
EntryBackupProcessor backupProcessor = entryProcessor.getBackupProcessor();
return backupProcessor != null ? new EntryBackupOperation(name, dataKey, backupProcessor) : null;
}
public boolean shouldBackup() {
return entryProcessor.getBackupProcessor() != null;
}
public int getAsyncBackupCount() {
return mapContainer.getAsyncBackupCount();
}
public int getSyncBackupCount() {
return mapContainer.getBackupCount();
}
private long getLatencyFrom(long begin) {
return Clock.currentTimeMillis() - begin;
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_operation_EntryOperation.java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.