Unnamed: 0 (int64, 0-6.45k) | func (string, lengths 29-253k) | target (class label, 2 classes: 0 = true, 1 = no label) | project (string, lengths 36-167) |
---|---|---|---|
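Before the rows themselves, here is a minimal sketch of how an export with this schema can be inspected. It assumes the table is available as a CSV file; the file name `defect_dataset.csv` and the choice of pandas are illustrative, not part of the original export. If the export is instead a Hugging Face dataset, `datasets.load_dataset` on the repository would yield the same four columns.

```python
import pandas as pd

# "defect_dataset.csv" is a hypothetical path for this export; replace it
# with the real file. Column names follow the header row above.
df = pd.read_csv("defect_dataset.csv")

# Distribution over the two label classes (0 = true, 1 = no label).
print(df["target"].value_counts())

# Length of each code snippet in "func"; the header reports a 29-253k range.
print(df["func"].str.len().describe())

# Which source files (the "project" column) contribute rows.
print(df["project"].value_counts().head())
```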
1,274 | return nodesService.execute(new TransportClientNodesService.NodeCallback<ActionFuture<Response>>() {
@Override
public ActionFuture<Response> doWithNode(DiscoveryNode node) throws ElasticsearchException {
return proxy.execute(node, request);
}
}); | 1 (no label)
| src_main_java_org_elasticsearch_client_transport_support_InternalTransportIndicesAdminClient.java |
1,611 | public class ODeleteRecordTask extends OAbstractRecordReplicatedTask {
private static final long serialVersionUID = 1L;
public ODeleteRecordTask() {
}
public ODeleteRecordTask(final ORecordId iRid, final ORecordVersion iVersion) {
super(iRid, iVersion);
}
@Override
public Object execute(final OServer iServer, ODistributedServerManager iManager, final ODatabaseDocumentTx database)
throws Exception {
ODistributedServerLog.debug(this, iManager.getLocalNodeName(), null, DIRECTION.IN, "delete record %s/%s v.%s",
database.getName(), rid.toString(), version.toString());
final ORecordInternal<?> record = database.load(rid);
final OBuffer buffer;
if (record != null) {
buffer = new OBuffer(record.toStream());
record.delete();
} else
buffer = new OBuffer();
return buffer;
}
@Override
public QUORUM_TYPE getQuorumType() {
return QUORUM_TYPE.WRITE;
}
@Override
public OFixDeleteRecordTask getFixTask(ODistributedRequest iRequest, ODistributedResponse iBadResponse,
final ODistributedResponse iGoodResponse) {
return new OFixDeleteRecordTask(rid, version);
}
@Override
public void writeExternal(final ObjectOutput out) throws IOException {
out.writeUTF(rid.toString());
if (version == null)
version = OVersionFactory.instance().createUntrackedVersion();
version.getSerializer().writeTo(out, version);
}
@Override
public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
rid = new ORecordId(in.readUTF());
if (version == null)
version = OVersionFactory.instance().createUntrackedVersion();
version.getSerializer().readFrom(in, version);
}
@Override
public String getName() {
return "record_delete";
}
} | 0 (true)
| server_src_main_java_com_orientechnologies_orient_server_distributed_task_ODeleteRecordTask.java |
9 | display.timerExec(100, new Runnable() {
@Override
public void run() {
fCompleted= true;
}
}); | 0 (true)
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_browser_BrowserInformationControl.java |
736 | public class IndexDeleteByQueryResponse extends ActionResponse {
private String index;
private int successfulShards;
private int failedShards;
private ShardOperationFailedException[] failures;
IndexDeleteByQueryResponse(String index, int successfulShards, int failedShards, List<ShardOperationFailedException> failures) {
this.index = index;
this.successfulShards = successfulShards;
this.failedShards = failedShards;
if (failures == null || failures.isEmpty()) {
this.failures = new DefaultShardOperationFailedException[0];
} else {
this.failures = failures.toArray(new ShardOperationFailedException[failures.size()]);
}
}
IndexDeleteByQueryResponse() {
}
/**
* The index the delete by query operation was executed against.
*/
public String getIndex() {
return this.index;
}
/**
* The total number of shards the delete by query was executed on.
*/
public int getTotalShards() {
return failedShards + successfulShards;
}
/**
* The successful number of shards the delete by query was executed on.
*/
public int getSuccessfulShards() {
return successfulShards;
}
/**
* The failed number of shards the delete by query was executed on.
*/
public int getFailedShards() {
return failedShards;
}
public ShardOperationFailedException[] getFailures() {
return failures;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
index = in.readString();
successfulShards = in.readVInt();
failedShards = in.readVInt();
int size = in.readVInt();
failures = new ShardOperationFailedException[size];
for (int i = 0; i < size; i++) {
failures[i] = DefaultShardOperationFailedException.readShardOperationFailed(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(index);
out.writeVInt(successfulShards);
out.writeVInt(failedShards);
out.writeVInt(failures.length);
for (ShardOperationFailedException failure : failures) {
failure.writeTo(out);
}
}
} | 0 (true)
| src_main_java_org_elasticsearch_action_deletebyquery_IndexDeleteByQueryResponse.java |
244 | public interface BroadleafCurrency extends Serializable {
public String getCurrencyCode();
public void setCurrencyCode(String code);
public String getFriendlyName();
public void setFriendlyName(String friendlyName);
public boolean getDefaultFlag();
public void setDefaultFlag(boolean defaultFlag);
} | 0 (true)
| common_src_main_java_org_broadleafcommerce_common_currency_domain_BroadleafCurrency.java |
149 | public abstract class KeyBasedClientRequest extends PartitionClientRequest {
protected abstract Object getKey();
protected final int getPartition() {
Object key = getKey();
InternalPartitionService partitionService = clientEngine.getPartitionService();
if (key instanceof String) {
return partitionService.getPartitionId(getPartitionKey((String) key));
}
return partitionService.getPartitionId(key);
}
} | 0 (true)
| hazelcast_src_main_java_com_hazelcast_client_KeyBasedClientRequest.java |
1,210 | public class PaymentResponseImpl implements PaymentResponse {
protected Map<PaymentInfo, PaymentResponseItem> responses = new HashMap<PaymentInfo, PaymentResponseItem>();
public void addPaymentResponseItem(PaymentInfo paymentInfo, PaymentResponseItem paymentResponseItem) {
responses.put(paymentInfo, paymentResponseItem);
}
public PaymentResponseItem getPaymentResponseItem(PaymentInfo paymentInfo) {
return responses.get(paymentInfo);
}
public Map<PaymentInfo, PaymentResponseItem> getResponseItems() {
return responses;
}
} | 0 (true)
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_payment_service_module_PaymentResponseImpl.java |
400 | public class ORecordTrackedIterator implements Iterator<OIdentifiable> {
final private ORecord<?> sourceRecord;
final private Iterator<?> underlying;
public ORecordTrackedIterator(final ORecord<?> iSourceRecord, final Iterator<?> iIterator) {
this.sourceRecord = iSourceRecord;
this.underlying = iIterator;
}
public OIdentifiable next() {
return (OIdentifiable) underlying.next();
}
public boolean hasNext() {
return underlying.hasNext();
}
public void remove() {
underlying.remove();
if (sourceRecord != null)
sourceRecord.setDirty();
}
} | 0 (true)
| core_src_main_java_com_orientechnologies_orient_core_db_record_ORecordTrackedIterator.java |
2,981 | public interface FilterCache extends IndexComponent, CloseableComponent {
static class EntriesStats {
public final long sizeInBytes;
public final long count;
public EntriesStats(long sizeInBytes, long count) {
this.sizeInBytes = sizeInBytes;
this.count = count;
}
}
// we need to "inject" the index service to not create cyclic dep
void setIndexService(IndexService indexService);
String type();
Filter cache(Filter filterToCache);
void clear(Object reader);
void clear(String reason);
void clear(String reason, String[] keys);
} | 0 (true)
| src_main_java_org_elasticsearch_index_cache_filter_FilterCache.java |
1,402 | @XmlRootElement(name = "orderItemAttribute")
@XmlAccessorType(value = XmlAccessType.FIELD)
public class OrderItemPriceDetailWrapper extends BaseWrapper implements
APIWrapper<OrderItemPriceDetail> {
@XmlElement
protected Long id;
@XmlElement
protected Money totalAdjustmentValue;
@XmlElement
protected Money totalAdjustedPrice;
@XmlElement
protected Integer quantity;
@XmlElement(name = "adjustment")
@XmlElementWrapper(name = "adjustments")
protected List<AdjustmentWrapper> orderItemPriceDetailAdjustments = new LinkedList<AdjustmentWrapper>();
@Override
public void wrapDetails(OrderItemPriceDetail model, HttpServletRequest request) {
this.id = model.getId();
this.quantity = model.getQuantity();
this.totalAdjustmentValue = model.getTotalAdjustmentValue();
this.totalAdjustedPrice = model.getTotalAdjustedPrice();
if (!model.getOrderItemPriceDetailAdjustments().isEmpty()) {
this.orderItemPriceDetailAdjustments = new ArrayList<AdjustmentWrapper>();
for (OrderItemPriceDetailAdjustment orderItemPriceDetail : model.getOrderItemPriceDetailAdjustments()) {
AdjustmentWrapper orderItemPriceDetailAdjustmentWrapper =
(AdjustmentWrapper) context.getBean(AdjustmentWrapper.class.getName());
orderItemPriceDetailAdjustmentWrapper.wrapSummary(orderItemPriceDetail, request);
this.orderItemPriceDetailAdjustments.add(orderItemPriceDetailAdjustmentWrapper);
}
}
}
@Override
public void wrapSummary(OrderItemPriceDetail model, HttpServletRequest request) {
wrapDetails(model, request);
}
} | 0 (true)
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_api_wrapper_OrderItemPriceDetailWrapper.java |
806 | @SuppressWarnings("unchecked")
public class OClassImpl extends ODocumentWrapperNoClass implements OClass {
private static final long serialVersionUID = 1L;
protected OSchemaShared owner;
protected String name;
protected Class<?> javaClass;
protected final Map<String, OProperty> properties = new LinkedHashMap<String, OProperty>();
protected int[] clusterIds;
protected int defaultClusterId = -1;
protected OClassImpl superClass;
protected int[] polymorphicClusterIds;
protected List<OClass> baseClasses;
protected float overSize = 0f;
protected String shortName;
protected boolean strictMode = false; // @SINCE v1.0rc8
protected boolean abstractClass = false; // @SINCE v1.2.0
protected Map<String, String> customFields;
private static final Iterator<OClass> EMPTY_CLASSES = new ArrayList<OClass>().iterator();
/**
* Constructor used in unmarshalling.
*/
public OClassImpl() {
}
/**
* Constructor used in unmarshalling.
*/
protected OClassImpl(final OSchemaShared iOwner) {
document = new ODocument();
owner = iOwner;
}
/**
* Constructor used in unmarshalling.
*/
protected OClassImpl(final OSchemaShared iOwner, final ODocument iDocument) {
document = iDocument;
owner = iOwner;
}
protected OClassImpl(final OSchemaShared iOwner, final String iName, final int[] iClusterIds) {
this(iOwner);
name = iName;
setClusterIds(iClusterIds);
setPolymorphicClusterIds(iClusterIds);
defaultClusterId = iClusterIds[0];
if (defaultClusterId == -1)
abstractClass = true;
}
public <T> T newInstance() throws InstantiationException, IllegalAccessException {
if (javaClass == null)
throw new IllegalArgumentException("Cannot create an instance of class '" + name + "' since no Java class was specified");
return (T) javaClass.newInstance();
}
@Override
public <RET extends ODocumentWrapper> RET reload() {
return (RET) owner.reload();
}
public String getCustom(final String iName) {
if (customFields == null)
return null;
return customFields.get(iName);
}
public void setCustomInternal(final String iName, final String iValue) {
if (customFields == null)
customFields = new HashMap<String, String>();
if (iValue == null || "null".equalsIgnoreCase(iValue))
customFields.remove(iName);
else
customFields.put(iName, iValue);
}
public OClassImpl setCustom(final String iName, final String iValue) {
getDatabase().checkSecurity(ODatabaseSecurityResources.SCHEMA, ORole.PERMISSION_UPDATE);
final String cmd = String.format("alter class %s custom %s=%s", getName(), iName, iValue);
getDatabase().command(new OCommandSQL(cmd)).execute();
setCustomInternal(iName, iValue);
return this;
}
public Map<String, String> getCustomInternal() {
if (customFields != null)
return Collections.unmodifiableMap(customFields);
return null;
}
public void removeCustom(final String iName) {
setCustom(iName, null);
}
public void clearCustom() {
getDatabase().checkSecurity(ODatabaseSecurityResources.SCHEMA, ORole.PERMISSION_UPDATE);
final String cmd = String.format("alter class %s custom clear", getName());
getDatabase().command(new OCommandSQL(cmd)).execute();
clearCustomInternal();
}
public void clearCustomInternal() {
customFields = null;
}
public Set<String> getCustomKeys() {
if (customFields != null)
return customFields.keySet();
return new HashSet<String>();
}
@SuppressWarnings("resource")
public void validateInstances() {
ODatabaseComplex<?> current = getDatabase().getDatabaseOwner();
while (current != null && current.getUnderlying() instanceof ODatabaseComplex<?> && !(current instanceof ODatabaseDocumentTx))
current = current.getUnderlying();
if (current != null)
for (ODocument d : ((ODatabaseDocumentTx) current).browseClass(name, true)) {
d.validate();
}
}
public OClass getSuperClass() {
return superClass;
}
/**
* Set the super class.
*
* @param iSuperClass
* Super class as OClass instance
* @return the object itself.
*/
public OClass setSuperClass(final OClass iSuperClass) {
getDatabase().checkSecurity(ODatabaseSecurityResources.SCHEMA, ORole.PERMISSION_UPDATE);
final String cmd = String.format("alter class %s superclass %s", name, iSuperClass != null ? iSuperClass.getName() : null);
getDatabase().command(new OCommandSQL(cmd)).execute();
setSuperClassInternal(iSuperClass);
return this;
}
public void setSuperClassInternal(final OClass iSuperClass) {
final OClassImpl cls = (OClassImpl) iSuperClass;
if (cls != null)
cls.addBaseClasses(this);
else if (superClass != null)
// REMOVE THE PREVIOUS ONE
superClass.removeBaseClassInternal(this);
this.superClass = cls;
}
public String getName() {
return name;
}
public OClass setName(final String iName) {
getDatabase().checkSecurity(ODatabaseSecurityResources.SCHEMA, ORole.PERMISSION_UPDATE);
final String cmd = String.format("alter class %s name %s", name, iName);
getDatabase().command(new OCommandSQL(cmd)).execute();
name = iName;
return this;
}
public void setNameInternal(final String iName) {
getDatabase().checkSecurity(ODatabaseSecurityResources.SCHEMA, ORole.PERMISSION_UPDATE);
owner.changeClassName(name, iName);
name = iName;
}
public long getSize() {
long size = 0;
for (int clusterId : clusterIds)
size += getDatabase().getClusterRecordSizeById(clusterId);
return size;
}
public String getShortName() {
return shortName;
}
public OClass setShortName(String iShortName) {
if (iShortName != null) {
iShortName = iShortName.trim();
if (iShortName.isEmpty())
iShortName = null;
}
getDatabase().checkSecurity(ODatabaseSecurityResources.SCHEMA, ORole.PERMISSION_UPDATE);
final String cmd = String.format("alter class %s shortname %s", name, iShortName);
getDatabase().command(new OCommandSQL(cmd)).execute();
setShortNameInternal(iShortName);
return this;
}
public void setShortNameInternal(final String iShortName) {
getDatabase().checkSecurity(ODatabaseSecurityResources.SCHEMA, ORole.PERMISSION_UPDATE);
if (this.shortName != null)
// UNREGISTER ANY PREVIOUS SHORT NAME
owner.classes.remove(this.shortName);
this.shortName = iShortName;
// REGISTER IT
if (null != iShortName)
owner.classes.put(iShortName.toLowerCase(), this);
}
public String getStreamableName() {
return shortName != null ? shortName : name;
}
public Collection<OProperty> declaredProperties() {
return Collections.unmodifiableCollection(properties.values());
}
public Collection<OProperty> properties() {
getDatabase().checkSecurity(ODatabaseSecurityResources.SCHEMA, ORole.PERMISSION_READ);
Collection<OProperty> props = null;
OClassImpl currentClass = this;
do {
if (currentClass.properties != null) {
if (props == null)
props = new ArrayList<OProperty>();
props.addAll(currentClass.properties.values());
}
currentClass = (OClassImpl) currentClass.getSuperClass();
} while (currentClass != null);
return (Collection<OProperty>) (props != null ? props : Collections.emptyList());
}
public Collection<OProperty> getIndexedProperties() {
getDatabase().checkSecurity(ODatabaseSecurityResources.SCHEMA, ORole.PERMISSION_READ);
Collection<OProperty> indexedProps = null;
OClassImpl currentClass = this;
do {
if (currentClass.properties != null) {
for (OProperty p : currentClass.properties.values())
if (areIndexed(p.getName())) {
if (indexedProps == null)
indexedProps = new ArrayList<OProperty>();
indexedProps.add(p);
}
}
currentClass = (OClassImpl) currentClass.getSuperClass();
} while (currentClass != null);
return (Collection<OProperty>) (indexedProps != null ? indexedProps : Collections.emptyList());
}
public OProperty getProperty(final String iPropertyName) {
OClassImpl currentClass = this;
OProperty p = null;
do {
if (currentClass.properties != null)
p = currentClass.properties.get(iPropertyName.toLowerCase());
if (p != null)
return p;
currentClass = (OClassImpl) currentClass.getSuperClass();
} while (currentClass != null);
return p;
}
public OProperty createProperty(final String iPropertyName, final OType iType) {
return addProperty(iPropertyName, iType, null, null);
}
public OProperty createProperty(final String iPropertyName, final OType iType, final OClass iLinkedClass) {
if (iLinkedClass == null)
throw new OSchemaException("Missing linked class");
return addProperty(iPropertyName, iType, null, iLinkedClass);
}
public OProperty createProperty(final String iPropertyName, final OType iType, final OType iLinkedType) {
return addProperty(iPropertyName, iType, iLinkedType, null);
}
public boolean existsProperty(final String iPropertyName) {
return properties.containsKey(iPropertyName.toLowerCase());
}
public void dropProperty(final String iPropertyName) {
if (getDatabase().getTransaction().isActive())
throw new IllegalStateException("Cannot drop a property inside a transaction");
getDatabase().checkSecurity(ODatabaseSecurityResources.SCHEMA, ORole.PERMISSION_DELETE);
final String lowerName = iPropertyName.toLowerCase();
if (!properties.containsKey(lowerName))
throw new OSchemaException("Property '" + iPropertyName + "' not found in class " + name + "'");
final StringBuilder cmd = new StringBuilder("drop property ");
// CLASS.PROPERTY NAME
cmd.append(name);
cmd.append('.');
cmd.append(iPropertyName);
getDatabase().command(new OCommandSQL(cmd.toString())).execute();
if (existsProperty(iPropertyName))
properties.remove(lowerName);
}
public void dropPropertyInternal(final String iPropertyName) {
if (getDatabase().getTransaction().isActive())
throw new IllegalStateException("Cannot drop a property inside a transaction");
getDatabase().checkSecurity(ODatabaseSecurityResources.SCHEMA, ORole.PERMISSION_DELETE);
final OProperty prop = properties.remove(iPropertyName.toLowerCase());
if (prop == null)
throw new OSchemaException("Property '" + iPropertyName + "' not found in class " + name + "'");
}
protected OProperty addProperty(final String iPropertyName, final OType iType, final OType iLinkedType, final OClass iLinkedClass) {
if (getDatabase().getTransaction().isActive())
throw new IllegalStateException("Cannot create a new property inside a transaction");
getDatabase().checkSecurity(ODatabaseSecurityResources.SCHEMA, ORole.PERMISSION_UPDATE);
final String lowerName = iPropertyName.toLowerCase();
if (properties.containsKey(lowerName))
throw new OSchemaException("Class " + name + " already has property '" + iPropertyName + "'");
if (iType == null)
throw new OSchemaException("Property type not defined.");
final StringBuilder cmd = new StringBuilder("create property ");
// CLASS.PROPERTY NAME
cmd.append(name);
cmd.append('.');
cmd.append(iPropertyName);
// TYPE
cmd.append(' ');
cmd.append(iType.name);
if (iLinkedType != null) {
// TYPE
cmd.append(' ');
cmd.append(iLinkedType.name);
} else if (iLinkedClass != null) {
// TYPE
cmd.append(' ');
cmd.append(iLinkedClass.getName());
}
getDatabase().command(new OCommandSQL(cmd.toString())).execute();
if (existsProperty(iPropertyName))
return properties.get(lowerName);
else
// ADD IT LOCALLY AVOIDING TO RELOAD THE ENTIRE SCHEMA
return addPropertyInternal(iPropertyName, iType, iLinkedType, iLinkedClass);
}
@Override
public void fromStream() {
name = document.field("name");
if (document.containsField("shortName"))
shortName = document.field("shortName");
else
shortName = null;
defaultClusterId = (Integer) document.field("defaultClusterId");
if (document.containsField("strictMode"))
strictMode = (Boolean) document.field("strictMode");
else
strictMode = false;
if (document.containsField("abstract"))
abstractClass = (Boolean) document.field("abstract");
else
abstractClass = false;
if (document.field("overSize") != null)
overSize = (Float) document.field("overSize");
else
overSize = 0f;
final Object cc = document.field("clusterIds");
if (cc instanceof Collection<?>) {
final Collection<Integer> coll = document.field("clusterIds");
clusterIds = new int[coll.size()];
int i = 0;
for (final Integer item : coll)
clusterIds[i++] = item.intValue();
} else
clusterIds = (int[]) cc;
Arrays.sort(clusterIds);
setPolymorphicClusterIds(clusterIds);
// READ PROPERTIES
OPropertyImpl prop;
Collection<ODocument> storedProperties = document.field("properties");
if (storedProperties != null)
for (ODocument p : storedProperties) {
prop = new OPropertyImpl(this, p);
prop.fromStream();
properties.put(prop.getName().toLowerCase(), prop);
}
customFields = document.field("customFields", OType.EMBEDDEDMAP);
}
@Override
@OBeforeSerialization
public ODocument toStream() {
document.setInternalStatus(ORecordElement.STATUS.UNMARSHALLING);
try {
document.field("name", name);
document.field("shortName", shortName);
document.field("defaultClusterId", defaultClusterId);
document.field("clusterIds", clusterIds);
document.field("overSize", overSize);
document.field("strictMode", strictMode);
document.field("abstract", abstractClass);
if (properties != null) {
final Set<ODocument> props = new LinkedHashSet<ODocument>();
for (final OProperty p : properties.values()) {
props.add(((OPropertyImpl) p).toStream());
}
document.field("properties", props, OType.EMBEDDEDSET);
}
document.field("superClass", superClass != null ? superClass.getName() : null);
document.field("customFields", customFields != null && customFields.size() > 0 ? customFields : null, OType.EMBEDDEDMAP);
} finally {
document.setInternalStatus(ORecordElement.STATUS.LOADED);
}
return document;
}
public Class<?> getJavaClass() {
return javaClass;
}
public int getDefaultClusterId() {
return defaultClusterId;
}
public void setDefaultClusterId(final int iDefaultClusterId) {
this.defaultClusterId = iDefaultClusterId;
setDirty();
}
public int[] getClusterIds() {
return clusterIds;
}
public int[] getPolymorphicClusterIds() {
return polymorphicClusterIds;
}
public OClass addClusterId(final int iId) {
getDatabase().checkSecurity(ODatabaseSecurityResources.SCHEMA, ORole.PERMISSION_UPDATE);
final String cmd = String.format("alter class %s addcluster %d", name, iId);
getDatabase().command(new OCommandSQL(cmd)).execute();
addClusterIdInternal(iId);
return this;
}
private void addClusterIdToIndexes(int iId) {
String clusterName = getDatabase().getClusterNameById(iId);
for (OIndex<?> index : getIndexes()) {
if (index.getInternal() != null) {
index.getInternal().addCluster(clusterName);
}
}
}
public OClass addClusterIdInternal(final int iId) {
for (int currId : clusterIds)
if (currId == iId)
// ALREADY ADDED
return this;
clusterIds = OArrays.copyOf(clusterIds, clusterIds.length + 1);
clusterIds[clusterIds.length - 1] = iId;
Arrays.sort(clusterIds);
polymorphicClusterIds = OArrays.copyOf(polymorphicClusterIds, polymorphicClusterIds.length + 1);
polymorphicClusterIds[polymorphicClusterIds.length - 1] = iId;
Arrays.sort(polymorphicClusterIds);
if (defaultClusterId == -1)
defaultClusterId = iId;
setDirty();
addClusterIdToIndexes(iId);
return this;
}
public OClass removeClusterId(final int iId) {
getDatabase().checkSecurity(ODatabaseSecurityResources.SCHEMA, ORole.PERMISSION_UPDATE);
final String cmd = String.format("alter class %s removecluster %d", name, iId);
getDatabase().command(new OCommandSQL(cmd)).execute();
removeClusterIdInternal(iId);
return this;
}
public OClass removeClusterIdInternal(final int iId) {
boolean found = false;
for (int clusterId : clusterIds) {
if (clusterId == iId) {
found = true;
break;
}
}
if (found) {
final int[] newClusterIds = new int[clusterIds.length - 1];
for (int i = 0, k = 0; i < clusterIds.length; ++i) {
if (clusterIds[i] == iId)
// JUMP IT
continue;
newClusterIds[k] = clusterIds[i];
k++;
}
clusterIds = newClusterIds;
}
if (defaultClusterId == iId)
defaultClusterId = -1;
return this;
}
public OClass setDirty() {
document.setDirty();
if (owner != null)
owner.setDirty();
return this;
}
public Iterator<OClass> getBaseClasses() {
if (baseClasses == null || baseClasses.size() == 0)
return EMPTY_CLASSES;
return baseClasses.iterator();
}
/**
* Adds a base class to the current one. It adds also the base class cluster ids to the polymorphic cluster ids array.
*
* @param iBaseClass
* The base class to add.
*/
private OClass addBaseClasses(final OClass iBaseClass) {
if (baseClasses == null)
baseClasses = new ArrayList<OClass>();
if (baseClasses.contains(iBaseClass))
return this;
baseClasses.add(iBaseClass);
// ADD CLUSTER IDS OF BASE CLASS TO THIS CLASS AND ALL SUPER-CLASSES
OClassImpl currentClass = this;
while (currentClass != null) {
currentClass.addPolymorphicClusterIds((OClassImpl) iBaseClass);
currentClass = (OClassImpl) currentClass.getSuperClass();
}
return this;
}
public OClass removeBaseClassInternal(final OClass baseClass) {
if (baseClasses == null)
return this;
if (baseClasses.remove(baseClass)) {
OClassImpl currentClass = this;
while (currentClass != null) {
currentClass.removePolymorphicClusterIds((OClassImpl) baseClass);
currentClass = (OClassImpl) currentClass.getSuperClass();
}
}
return this;
}
private void removePolymorphicClusterIds(final OClassImpl iBaseClass) {
for (final int clusterId : iBaseClass.polymorphicClusterIds) {
final int index = Arrays.binarySearch(polymorphicClusterIds, clusterId);
if (index == -1)
continue;
if (index < polymorphicClusterIds.length - 1)
System
.arraycopy(polymorphicClusterIds, index + 1, polymorphicClusterIds, index, polymorphicClusterIds.length - (index + 1));
polymorphicClusterIds = Arrays.copyOf(polymorphicClusterIds, polymorphicClusterIds.length - 1);
}
}
public float getOverSize() {
if (overSize > 0)
// CUSTOM OVERSIZE SETTED
return overSize;
if (superClass != null)
// RETURN THE OVERSIZE OF THE SUPER CLASS
return superClass.getOverSize();
// NO OVERSIZE
return 0;
}
public OClass setOverSize(final float overSize) {
getDatabase().checkSecurity(ODatabaseSecurityResources.SCHEMA, ORole.PERMISSION_UPDATE);
final String cmd = String.format("alter class %s oversize %f", name, overSize);
getDatabase().command(new OCommandSQL(cmd)).execute();
setOverSizeInternal(overSize);
return this;
}
public void setOverSizeInternal(final float overSize) {
getDatabase().checkSecurity(ODatabaseSecurityResources.SCHEMA, ORole.PERMISSION_UPDATE);
this.overSize = overSize;
}
public float getOverSizeInternal() {
return overSize;
}
public boolean isAbstract() {
return abstractClass;
}
public OClass setAbstract(boolean iAbstract) {
getDatabase().checkSecurity(ODatabaseSecurityResources.SCHEMA, ORole.PERMISSION_UPDATE);
final String cmd = String.format("alter class %s abstract %s", name, iAbstract);
getDatabase().command(new OCommandSQL(cmd)).execute();
setAbstractInternal(iAbstract);
return this;
}
public void setAbstractInternal(final boolean iAbstract) {
getDatabase().checkSecurity(ODatabaseSecurityResources.SCHEMA, ORole.PERMISSION_UPDATE);
if (iAbstract) {
// SWITCH TO ABSTRACT
if (defaultClusterId > -1) {
// CHECK
final ODatabaseRecord db = ODatabaseRecordThreadLocal.INSTANCE.get();
if (count() > 0)
throw new IllegalStateException("Cannot set the class as abstract because contains records.");
if (name.toLowerCase().equals(db.getClusterNameById(defaultClusterId))) {
// DROP THE DEFAULT CLUSTER CALLED WITH THE SAME NAME ONLY IF EMPTY
if (ODatabaseRecordThreadLocal.INSTANCE.get().getClusterRecordSizeById(defaultClusterId) == 0)
ODatabaseRecordThreadLocal.INSTANCE.get().dropCluster(defaultClusterId, true);
}
}
} else {
// SWITCH TO NOT ABSTRACT
this.defaultClusterId = getDatabase().getDefaultClusterId();
this.clusterIds[0] = this.defaultClusterId;
}
this.abstractClass = iAbstract;
}
public boolean isStrictMode() {
return strictMode;
}
public OClass setStrictMode(final boolean iStrict) {
getDatabase().checkSecurity(ODatabaseSecurityResources.SCHEMA, ORole.PERMISSION_UPDATE);
final String cmd = String.format("alter class %s strictmode %s", name, iStrict);
getDatabase().command(new OCommandSQL(cmd)).execute();
setStrictModeInternal(iStrict);
return this;
}
public void setStrictModeInternal(final boolean iStrict) {
getDatabase().checkSecurity(ODatabaseSecurityResources.SCHEMA, ORole.PERMISSION_UPDATE);
this.strictMode = iStrict;
}
@Override
public String toString() {
return name;
}
@Override
public int hashCode() {
final int prime = 31;
int result = super.hashCode();
result = prime * result + ((owner == null) ? 0 : owner.hashCode());
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
final OClassImpl other = (OClassImpl) obj;
if (name == null) {
if (other.name != null)
return false;
} else if (!name.equals(other.name))
return false;
return true;
}
public int compareTo(final OClass o) {
return name.compareTo(o.getName());
}
public long count() {
return count(true);
}
public long count(final boolean iPolymorphic) {
if (iPolymorphic)
return getDatabase().countClusterElements(readableClusters(getDatabase(), polymorphicClusterIds));
return getDatabase().countClusterElements(readableClusters(getDatabase(), clusterIds));
}
public static int[] readableClusters(final ODatabaseRecord iDatabase, final int[] iClusterIds) {
List<Integer> listOfReadableIds = new ArrayList<Integer>();
boolean all = true;
for (int clusterId : iClusterIds) {
try {
String clusterName = iDatabase.getClusterNameById(clusterId);
iDatabase.checkSecurity(ODatabaseSecurityResources.CLUSTER, ORole.PERMISSION_READ, clusterName);
listOfReadableIds.add(clusterId);
} catch (OSecurityAccessException securityException) {
all = false;
// if the cluster is inaccessible it's simply not processed in the list.add
}
}
if (all)
// JUST RETURN INPUT ARRAY (FASTER)
return iClusterIds;
int[] readableClusterIds = new int[listOfReadableIds.size()];
int index = 0;
for (int clusterId : listOfReadableIds) {
readableClusterIds[index++] = clusterId;
}
return readableClusterIds;
}
private ODatabaseRecord getDatabase() {
return ODatabaseRecordThreadLocal.INSTANCE.get();
}
/**
* Truncates all the clusters the class uses.
*
* @throws IOException
*/
public void truncate() throws IOException {
getDatabase().checkSecurity(ODatabaseSecurityResources.CLASS, ORole.PERMISSION_UPDATE);
if (isSubClassOf(OSecurityShared.RESTRICTED_CLASSNAME))
throw new OSecurityException("Class " + getName()
+ " cannot be truncated because has record level security enabled (extends " + OSecurityShared.RESTRICTED_CLASSNAME + ")");
getDatabase().getStorage().callInLock(new Callable<Object>() {
public Object call() throws Exception {
for (int id : clusterIds) {
final OStorage storage = getDatabase().getStorage();
storage.getClusterById(id).truncate();
storage.getLevel2Cache().freeCluster(id);
}
for (OIndex<?> index : getClassIndexes()) {
index.clear();
}
return null;
}
}, true);
}
/**
* Returns true if the current instance extends the passed schema class (iClass).
*
* @param iClassName
* @return
* @see #isSuperClassOf(OClass)
*/
public boolean isSubClassOf(final String iClassName) {
if (iClassName == null)
return false;
if (iClassName.equals(name) || iClassName.equals(shortName))
// SPEEDUP CHECK IF CLASS NAME ARE THE SAME
return true;
if (superClass == null)
return false;
return isSubClassOf(owner.getClass(iClassName));
}
/**
* Returns true if the current instance extends the passed schema class (iClass).
*
* @param iClass
* @return
* @see #isSuperClassOf(OClass)
*/
public boolean isSubClassOf(final OClass iClass) {
if (iClass == null)
return false;
OClass cls = this;
while (cls != null) {
if (cls.equals(iClass))
return true;
cls = cls.getSuperClass();
}
return false;
}
/**
* Returns true if the passed schema class (iClass) extends the current instance.
*
* @param iClass
* @return Returns true if the passed schema class extends the current instance
* @see #isSubClassOf(OClass)
*/
public boolean isSuperClassOf(final OClass iClass) {
if (iClass == null)
return false;
return iClass.isSubClassOf(this);
}
public Object get(final ATTRIBUTES iAttribute) {
if (iAttribute == null)
throw new IllegalArgumentException("attribute is null");
switch (iAttribute) {
case NAME:
return getName();
case SHORTNAME:
return getShortName();
case SUPERCLASS:
return getSuperClass();
case OVERSIZE:
return getOverSize();
case STRICTMODE:
return isStrictMode();
case ABSTRACT:
return isAbstract();
case CUSTOM:
return getCustomInternal();
}
throw new IllegalArgumentException("Cannot find attribute '" + iAttribute + "'");
}
public void setInternalAndSave(final ATTRIBUTES attribute, final Object iValue) {
if (attribute == null)
throw new IllegalArgumentException("attribute is null");
final String stringValue = iValue != null ? iValue.toString() : null;
final boolean isNull = stringValue == null || stringValue.equalsIgnoreCase("NULL");
switch (attribute) {
case NAME:
setNameInternal(stringValue);
break;
case SHORTNAME:
setShortNameInternal(isNull ? null : stringValue);
break;
case SUPERCLASS:
setSuperClassInternal(isNull ? null : getDatabase().getMetadata().getSchema().getClass(stringValue));
break;
case OVERSIZE:
setOverSizeInternal(Float.parseFloat(stringValue.replace(',', '.')));
break;
case STRICTMODE:
setStrictModeInternal(Boolean.parseBoolean(stringValue));
break;
case ABSTRACT:
setAbstractInternal(Boolean.parseBoolean(stringValue));
break;
case ADDCLUSTER: {
int clId = getClusterId(stringValue);
if (clId == -1)
throw new IllegalArgumentException("Cluster id '" + stringValue + "' cannot be added");
addClusterIdInternal(clId);
break;
}
case REMOVECLUSTER: {
int clId = getClusterId(stringValue);
if (clId == -1)
throw new IllegalArgumentException("Cluster id '" + stringValue + "' cannot be removed");
removeClusterIdInternal(clId);
break;
}
case CUSTOM:
if (iValue.toString().indexOf("=") == -1) {
if (iValue.toString().equalsIgnoreCase("clear")) {
clearCustomInternal();
} else
throw new IllegalArgumentException("Syntax error: expected <name> = <value> or clear, instead found: " + iValue);
} else {
final List<String> words = OStringSerializerHelper.smartSplit(iValue.toString(), '=');
setCustomInternal(words.get(0).trim(), words.get(1).trim());
}
break;
}
saveInternal();
}
protected int getClusterId(final String stringValue) {
int clId;
try {
clId = Integer.parseInt(stringValue);
} catch (NumberFormatException e) {
clId = getDatabase().getClusterIdByName(stringValue);
}
return clId;
}
public OClass set(final ATTRIBUTES attribute, final Object iValue) {
if (attribute == null)
throw new IllegalArgumentException("attribute is null");
final String stringValue = iValue != null ? iValue.toString() : null;
switch (attribute) {
case NAME:
setName(stringValue);
break;
case SHORTNAME:
setShortName(stringValue);
break;
case SUPERCLASS:
setSuperClass(getDatabase().getMetadata().getSchema().getClass(stringValue));
break;
case OVERSIZE:
setOverSize(Float.parseFloat(stringValue));
break;
case STRICTMODE:
setStrictMode(Boolean.parseBoolean(stringValue));
break;
case ABSTRACT:
setAbstract(Boolean.parseBoolean(stringValue));
break;
case ADDCLUSTER: {
int clId = getClusterId(stringValue);
if (clId == -1)
throw new IllegalArgumentException("Cluster id '" + stringValue + "' cannot be added");
addClusterId(clId);
break;
}
case REMOVECLUSTER:
int clId = getClusterId(stringValue);
if (clId == -1)
throw new IllegalArgumentException("Cluster id '" + stringValue + "' cannot be added");
removeClusterId(clId);
break;
case CUSTOM:
if (iValue.toString().indexOf("=") == -1) {
if (iValue.toString().equalsIgnoreCase("clear")) {
clearCustom();
} else
throw new IllegalArgumentException("Syntax error: expected <name> = <value> or clear, instead found: " + iValue);
} else {
final List<String> words = OStringSerializerHelper.smartSplit(iValue.toString(), '=');
setCustom(words.get(0).trim(), words.get(1).trim());
}
break;
}
return this;
}
/**
* Add different cluster id to the "polymorphic cluster ids" array.
*/
private void addPolymorphicClusterIds(final OClassImpl iBaseClass) {
boolean found;
for (int i : iBaseClass.polymorphicClusterIds) {
found = false;
for (int k : polymorphicClusterIds) {
if (i == k) {
found = true;
break;
}
}
if (!found) {
// ADD IT
polymorphicClusterIds = OArrays.copyOf(polymorphicClusterIds, polymorphicClusterIds.length + 1);
polymorphicClusterIds[polymorphicClusterIds.length - 1] = i;
Arrays.sort(polymorphicClusterIds);
}
}
}
public OPropertyImpl addPropertyInternal(final String iName, final OType iType, final OType iLinkedType, final OClass iLinkedClass) {
if (iName == null || iName.length() == 0)
throw new OSchemaException("Found property name null");
final Character wrongCharacter = OSchemaShared.checkNameIfValid(iName);
if (wrongCharacter != null)
throw new OSchemaException("Invalid property name found. Character '" + wrongCharacter + "' cannot be used in property name.");
final String lowerName = iName.toLowerCase();
if (properties.containsKey(lowerName))
throw new OSchemaException("Class " + name + " already has property '" + iName + "'");
final OPropertyImpl prop = new OPropertyImpl(this, iName, iType);
properties.put(lowerName, prop);
if (iLinkedType != null)
prop.setLinkedTypeInternal(iLinkedType);
else if (iLinkedClass != null)
prop.setLinkedClassInternal(iLinkedClass);
return prop;
}
public void saveInternal() {
owner.saveInternal();
}
public OIndex<?> createIndex(final String iName, final INDEX_TYPE iType, final String... fields) {
return createIndex(iName, iType.name(), fields);
}
public OIndex<?> createIndex(final String iName, final String iType, final String... fields) {
return createIndex(iName, iType, null, fields);
}
public OIndex<?> createIndex(final String iName, final INDEX_TYPE iType, final OProgressListener iProgressListener,
final String... fields) {
return createIndex(iName, iType.name(), iProgressListener, fields);
}
public OIndex<?> createIndex(final String iName, String iType, final OProgressListener iProgressListener, final String... fields) {
if (iType == null)
throw new IllegalArgumentException("Index type is null");
iType = iType.toUpperCase();
try {
final INDEX_TYPE recognizedIdxType = INDEX_TYPE.valueOf(iType);
if (!recognizedIdxType.isAutomaticIndexable())
throw new IllegalArgumentException("Index type '" + iType + "' cannot be used as automatic index against properties");
} catch (IllegalArgumentException e) {
// IGNORE IT
}
if (fields.length == 0) {
throw new OIndexException("List of fields to index cannot be empty.");
}
final Set<String> existingFieldNames = new HashSet<String>();
OClassImpl currentClass = this;
do {
existingFieldNames.addAll(currentClass.properties.keySet());
currentClass = (OClassImpl) currentClass.getSuperClass();
} while (currentClass != null);
for (final String fieldToIndex : fields) {
final String fieldName = OIndexDefinitionFactory.extractFieldName(fieldToIndex);
if (!existingFieldNames.contains(fieldName.toLowerCase()))
throw new OIndexException("Index with name : '" + iName + "' cannot be created on class : '" + name + "' because field: '"
+ fieldName + "' is absent in class definition.");
}
final OIndexDefinition indexDefinition = OIndexDefinitionFactory.createIndexDefinition(this, Arrays.asList(fields),
extractFieldTypes(fields));
if (fields.length == 1) {
// TRY TO DETERMINE THE COLLATE IF ANY
final OProperty p = getProperty(fields[0]);
if (p != null) {
indexDefinition.setCollate(p.getCollate());
}
}
return getDatabase().getMetadata().getIndexManager()
.createIndex(iName, iType, indexDefinition, polymorphicClusterIds, iProgressListener);
}
private List<OType> extractFieldTypes(String[] fieldNames) {
final List<OType> types = new ArrayList<OType>(fieldNames.length);
for (String fieldName : fieldNames) {
types.add(getProperty(OIndexDefinitionFactory.extractFieldName(fieldName).toLowerCase()).getType());
}
return types;
}
public boolean areIndexed(final String... fields) {
return areIndexed(Arrays.asList(fields));
}
public boolean areIndexed(final Collection<String> fields) {
final OIndexManager indexManager = getDatabase().getMetadata().getIndexManager();
final boolean currentClassResult = indexManager.areIndexed(name, fields);
if (superClass != null)
return currentClassResult || superClass.areIndexed(fields);
return currentClassResult;
}
public Set<OIndex<?>> getInvolvedIndexes(final String... fields) {
return getInvolvedIndexes(Arrays.asList(fields));
}
public Set<OIndex<?>> getInvolvedIndexes(final Collection<String> fields) {
final Set<OIndex<?>> result = new HashSet<OIndex<?>>(getClassInvolvedIndexes(fields));
if (superClass != null)
result.addAll(superClass.getInvolvedIndexes(fields));
return result;
}
public Set<OIndex<?>> getClassInvolvedIndexes(final Collection<String> fields) {
final OIndexManager indexManager = getDatabase().getMetadata().getIndexManager();
return indexManager.getClassInvolvedIndexes(name, fields);
}
public Set<OIndex<?>> getClassInvolvedIndexes(final String... fields) {
return getClassInvolvedIndexes(Arrays.asList(fields));
}
public OIndex<?> getClassIndex(final String iName) {
final OIndexManager indexManager = getDatabase().getMetadata().getIndexManager();
return indexManager.getClassIndex(name, iName);
}
public Set<OIndex<?>> getClassIndexes() {
final OIndexManager indexManager = getDatabase().getMetadata().getIndexManager();
return indexManager.getClassIndexes(name);
}
public Set<OIndex<?>> getIndexes() {
final Set<OIndex<?>> indexes = getClassIndexes();
if (superClass == null)
return indexes;
final Set<OIndex<?>> result = new HashSet<OIndex<?>>(indexes);
result.addAll(superClass.getIndexes());
return result;
}
private void setPolymorphicClusterIds(final int[] iClusterIds) {
polymorphicClusterIds = iClusterIds;
Arrays.sort(polymorphicClusterIds);
}
private OClass setClusterIds(final int[] iClusterIds) {
clusterIds = iClusterIds;
Arrays.sort(clusterIds);
return this;
}
} | 1 (no label)
| core_src_main_java_com_orientechnologies_orient_core_metadata_schema_OClassImpl.java |
957 | public abstract class ClusterInfoRequest<T extends ClusterInfoRequest> extends MasterNodeReadOperationRequest<T> {
private String[] indices = Strings.EMPTY_ARRAY;
private String[] types = Strings.EMPTY_ARRAY;
private IndicesOptions indicesOptions = IndicesOptions.strict();
@SuppressWarnings("unchecked")
public T indices(String... indices) {
this.indices = indices;
return (T) this;
}
@SuppressWarnings("unchecked")
public T types(String... types) {
this.types = types;
return (T) this;
}
@SuppressWarnings("unchecked")
public T indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return (T) this;
}
public String[] indices() {
return indices;
}
public String[] types() {
return types;
}
public IndicesOptions indicesOptions() {
return indicesOptions;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
indices = in.readStringArray();
types = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
readLocal(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArray(indices);
out.writeStringArray(types);
indicesOptions.writeIndicesOptions(out);
writeLocal(out);
}
} | 0 (true)
| src_main_java_org_elasticsearch_action_support_master_info_ClusterInfoRequest.java |
1,622 | @BindingAnnotation
@Target({FIELD, PARAMETER})
@Retention(RUNTIME)
@Documented
public @interface ClusterDynamicSettings {
} | 0 (true)
| src_main_java_org_elasticsearch_cluster_settings_ClusterDynamicSettings.java |
1,316 | class ExecutionCallbackAdapterFactory {
//Updates the ExecutionCallbackAdapterFactory.done field. An AtomicBoolean is simpler, but creates another unwanted
//object. Using this approach, you don't create that object.
private static final AtomicReferenceFieldUpdater<ExecutionCallbackAdapterFactory, Boolean> DONE_FIELD_UPDATER =
AtomicReferenceFieldUpdater.newUpdater(ExecutionCallbackAdapterFactory.class, Boolean.class, "done");
private final MultiExecutionCallback multiExecutionCallback;
private final ConcurrentMap<Member, ValueWrapper> responses;
private final Collection<Member> members;
private final ILogger logger;
@SuppressWarnings("CanBeFinal")
private volatile Boolean done = Boolean.FALSE;
ExecutionCallbackAdapterFactory(NodeEngine nodeEngine, Collection<Member> members,
MultiExecutionCallback multiExecutionCallback) {
this.multiExecutionCallback = multiExecutionCallback;
this.responses = new ConcurrentHashMap<Member, ValueWrapper>(members.size());
this.members = new HashSet<Member>(members);
this.logger = nodeEngine.getLogger(ExecutionCallbackAdapterFactory.class);
}
private void onResponse(Member member, Object response) {
assertNotDone();
assertIsMember(member);
placeResponse(member, response);
triggerOnResponse(member, response);
triggerOnComplete();
}
private void triggerOnComplete() {
if (members.size() == responses.size() && setDone()) {
Map<Member, Object> realResponses = new HashMap<Member, Object>(members.size());
for (Map.Entry<Member, ValueWrapper> entry : responses.entrySet()) {
Member key = entry.getKey();
Object value = entry.getValue().value;
realResponses.put(key, value);
}
multiExecutionCallback.onComplete(realResponses);
}
}
private boolean setDone() {
return DONE_FIELD_UPDATER.compareAndSet(this, Boolean.FALSE, Boolean.TRUE);
}
private void triggerOnResponse(Member member, Object response) {
try {
multiExecutionCallback.onResponse(member, response);
} catch (Throwable e) {
logger.warning(e.getMessage(), e);
}
}
private void placeResponse(Member member, Object response) {
ValueWrapper current = responses.put(member, new ValueWrapper(response));
if (current != null) {
logger.warning("Replacing current callback value[" + current.value
+ " with value[" + response + "].");
}
}
private void assertIsMember(Member member) {
if (!members.contains(member)) {
throw new IllegalArgumentException(member + " is not known by this callback!");
}
}
private void assertNotDone() {
if (done) {
throw new IllegalStateException("This callback is invalid!");
}
}
<V> ExecutionCallback<V> callbackFor(Member member) {
return new InnerExecutionCallback<V>(member);
}
private static final class ValueWrapper {
final Object value;
private ValueWrapper(Object value) {
this.value = value;
}
}
private final class InnerExecutionCallback<V> implements ExecutionCallback<V> {
private final Member member;
private InnerExecutionCallback(Member member) {
this.member = member;
}
@Override
public void onResponse(V response) {
ExecutionCallbackAdapterFactory.this.onResponse(member, response);
}
@Override
public void onFailure(Throwable t) {
ExecutionCallbackAdapterFactory.this.onResponse(member, t);
}
}
} | 0 (true)
| hazelcast_src_main_java_com_hazelcast_executor_ExecutionCallbackAdapterFactory.java |
186 | @RunWith(HazelcastSerialClassRunner.class)
@Category(QuickTest.class)
public class ClientSetTest {
static final String name = "test";
static HazelcastInstance hz;
static HazelcastInstance server;
static ISet set;
@BeforeClass
public static void init(){
Config config = new Config();
server = Hazelcast.newHazelcastInstance(config);
hz = HazelcastClient.newHazelcastClient(null);
set = hz.getSet(name);
}
@AfterClass
public static void destroy() {
hz.shutdown();
Hazelcast.shutdownAll();
}
@Before
@After
public void clear() throws IOException {
set.clear();
}
@Test
public void testAddAll() {
List l = new ArrayList();
l.add("item1");
l.add("item2");
assertTrue(set.addAll(l));
assertEquals(2, set.size());
assertFalse(set.addAll(l));
assertEquals(2, set.size());
}
@Test
public void testAddRemove() {
assertTrue(set.add("item1"));
assertTrue(set.add("item2"));
assertTrue(set.add("item3"));
assertEquals(3, set.size());
assertFalse(set.add("item3"));
assertEquals(3, set.size());
assertFalse(set.remove("item4"));
assertTrue(set.remove("item3"));
}
@Test
public void testIterator(){
assertTrue(set.add("item1"));
assertTrue(set.add("item2"));
assertTrue(set.add("item3"));
assertTrue(set.add("item4"));
Iterator iter = set.iterator();
assertTrue(((String)iter.next()).startsWith("item"));
assertTrue(((String)iter.next()).startsWith("item"));
assertTrue(((String)iter.next()).startsWith("item"));
assertTrue(((String)iter.next()).startsWith("item"));
assertFalse(iter.hasNext());
}
@Test
public void testContains(){
assertTrue(set.add("item1"));
assertTrue(set.add("item2"));
assertTrue(set.add("item3"));
assertTrue(set.add("item4"));
assertFalse(set.contains("item5"));
assertTrue(set.contains("item2"));
List l = new ArrayList();
l.add("item6");
l.add("item3");
assertFalse(set.containsAll(l));
assertTrue(set.add("item6"));
assertTrue(set.containsAll(l));
}
@Test
public void removeRetainAll(){
assertTrue(set.add("item1"));
assertTrue(set.add("item2"));
assertTrue(set.add("item3"));
assertTrue(set.add("item4"));
List l = new ArrayList();
l.add("item4");
l.add("item3");
assertTrue(set.removeAll(l));
assertEquals(2, set.size());
assertFalse(set.removeAll(l));
assertEquals(2, set.size());
l.clear();
l.add("item1");
l.add("item2");
assertFalse(set.retainAll(l));
assertEquals(2, set.size());
l.clear();
assertTrue(set.retainAll(l));
assertEquals(0, set.size());
}
@Test
public void testListener() throws Exception {
// final ISet tempSet = server.getSet(name);
final ISet tempSet = set;
final CountDownLatch latch = new CountDownLatch(6);
ItemListener listener = new ItemListener() {
public void itemAdded(ItemEvent itemEvent) {
latch.countDown();
}
public void itemRemoved(ItemEvent item) {
}
};
String registrationId = tempSet.addItemListener(listener, true);
new Thread(){
public void run() {
for (int i=0; i<5; i++){
tempSet.add("item" + i);
}
tempSet.add("done");
}
}.start();
assertTrue(latch.await(20, TimeUnit.SECONDS));
}
} | 0 (true)
| hazelcast-client_src_test_java_com_hazelcast_client_collections_ClientSetTest.java |
147 | public abstract class InvocationClientRequest extends ClientRequest {
@Override
final void process() throws Exception {
invoke();
}
protected abstract void invoke();
protected final InvocationBuilder createInvocationBuilder(String serviceName, Operation op, int partitionId) {
return clientEngine.createInvocationBuilder(serviceName, op, partitionId);
}
protected final InvocationBuilder createInvocationBuilder(String serviceName, Operation op, Address target) {
return clientEngine.createInvocationBuilder(serviceName, op, target);
}
} | 0 (true)
| hazelcast_src_main_java_com_hazelcast_client_InvocationClientRequest.java |
5 | @Component("blChildCategoriesCustomPersistenceHandler")
public class ChildCategoriesCustomPersistenceHandler extends CustomPersistenceHandlerAdapter {
@Override
public Boolean canHandleAdd(PersistencePackage persistencePackage) {
return (!ArrayUtils.isEmpty(persistencePackage.getCustomCriteria()) && persistencePackage.getCustomCriteria()[0].equals("blcAllParentCategories"));
}
@Override
public Entity add(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException {
AdornedTargetList adornedTargetList = (AdornedTargetList) persistencePackage.getPersistencePerspective().getPersistencePerspectiveItems().get(PersistencePerspectiveItemType.ADORNEDTARGETLIST);
String targetPath = adornedTargetList.getTargetObjectPath() + "." + adornedTargetList.getTargetIdProperty();
String linkedPath = adornedTargetList.getLinkedObjectPath() + "." + adornedTargetList.getLinkedIdProperty();
Long parentId = Long.parseLong(persistencePackage.getEntity().findProperty(linkedPath).getValue());
Long childId = Long.parseLong(persistencePackage.getEntity().findProperty(targetPath).getValue());
Category parent = (Category) dynamicEntityDao.retrieve(CategoryImpl.class, parentId);
Category child = (Category) dynamicEntityDao.retrieve(CategoryImpl.class, childId);
CategoryXref categoryXref = new CategoryXrefImpl();
categoryXref.setSubCategory(child);
categoryXref.setCategory(parent);
if (parent.getAllChildCategoryXrefs().contains(categoryXref)) {
throw new ServiceException("Add unsuccessful. Cannot add a duplicate child category.");
}
checkParents(child, parent);
return helper.getCompatibleModule(OperationType.ADORNEDTARGETLIST).add(persistencePackage);
}
protected void checkParents(Category child, Category parent) throws ServiceException {
if (child.getId().equals(parent.getId())) {
throw new ServiceException("Add unsuccessful. Cannot add a category to itself.");
}
for (CategoryXref category : parent.getAllParentCategoryXrefs()) {
if (!CollectionUtils.isEmpty(category.getCategory().getAllParentCategoryXrefs())) {
checkParents(child, category.getCategory());
}
}
}
} | 0 (true)
| admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_server_service_handler_ChildCategoriesCustomPersistenceHandler.java |
380 | @Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_LOCALE")
@Cache(usage= CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blCMSElements")
@AdminPresentationClass(friendlyName = "LocaleImpl_baseLocale")
public class LocaleImpl implements Locale {
private static final long serialVersionUID = 1L;
@Id
@Column (name = "LOCALE_CODE")
@AdminPresentation(friendlyName = "LocaleImpl_Locale_Code", order = 1,
group = "LocaleImpl_Details",
prominent = true, gridOrder = 2)
protected String localeCode;
@Column (name = "FRIENDLY_NAME")
@AdminPresentation(friendlyName = "LocaleImpl_Name", order = 2,
group = "LocaleImpl_Details",
prominent = true, gridOrder = 1)
protected String friendlyName;
@Column (name = "DEFAULT_FLAG")
@AdminPresentation(friendlyName = "LocaleImpl_Is_Default", order = 3,
group = "LocaleImpl_Details",
prominent = true, gridOrder = 3)
protected Boolean defaultFlag = false;
@ManyToOne(targetEntity = BroadleafCurrencyImpl.class)
@JoinColumn(name = "CURRENCY_CODE")
@AdminPresentation(friendlyName = "LocaleImpl_Currency", order = 4,
group = "LocaleImpl_Details",
prominent = true)
protected BroadleafCurrency defaultCurrency;
@Column (name = "USE_IN_SEARCH_INDEX")
@AdminPresentation(friendlyName = "LocaleImpl_Use_In_Search_Index", order = 5,
group = "LocaleImpl_Details",
prominent = true, gridOrder = 3)
protected Boolean useInSearchIndex = false;
@Override
public String getLocaleCode() {
return localeCode;
}
@Override
public void setLocaleCode(String localeCode) {
this.localeCode = localeCode;
}
@Override
public String getFriendlyName() {
return friendlyName;
}
@Override
public void setFriendlyName(String friendlyName) {
this.friendlyName = friendlyName;
}
@Override
public void setDefaultFlag(Boolean defaultFlag) {
this.defaultFlag = defaultFlag;
}
@Override
public Boolean getDefaultFlag() {
if (defaultFlag == null) {
return Boolean.FALSE;
} else {
return defaultFlag;
}
}
@Override
public BroadleafCurrency getDefaultCurrency() {
return defaultCurrency;
}
@Override
public void setDefaultCurrency(BroadleafCurrency defaultCurrency) {
this.defaultCurrency = defaultCurrency;
}
@Override
public Boolean getUseInSearchIndex() {
return useInSearchIndex == null ? false : useInSearchIndex;
}
@Override
public void setUseInSearchIndex(Boolean useInSearchIndex) {
this.useInSearchIndex = useInSearchIndex;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Locale)) {
return false;
}
LocaleImpl locale = (LocaleImpl) o;
if (localeCode != null ? !localeCode.equals(locale.localeCode) : locale.localeCode != null) {
return false;
}
if (friendlyName != null ? !friendlyName.equals(locale.friendlyName) : locale.friendlyName != null) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = localeCode != null ? localeCode.hashCode() : 0;
result = 31 * result + (friendlyName != null ? friendlyName.hashCode() : 0);
return result;
}
} | 1 (no label)
| common_src_main_java_org_broadleafcommerce_common_locale_domain_LocaleImpl.java |
545 | public enum TYPE {
ANY, BEFORE_CREATE, BEFORE_READ, BEFORE_UPDATE, BEFORE_DELETE, AFTER_CREATE, AFTER_READ, AFTER_UPDATE, AFTER_DELETE, CREATE_FAILED, READ_FAILED, UPDATE_FAILED, DELETE_FAILED, CREATE_REPLICATED, READ_REPLICATED, UPDATE_REPLICATED, DELETE_REPLICATED, BEFORE_REPLICA_ADD, AFTER_REPLICA_ADD, BEFORE_REPLICA_UPDATE, AFTER_REPLICA_UPDATE, BEFORE_REPLICA_DELETE, AFTER_REPLICA_DELETE, REPLICA_ADD_FAILED, REPLICA_UPDATE_FAILED, REPLICA_DELETE_FAILED
} | 0 (true)
| core_src_main_java_com_orientechnologies_orient_core_hook_ORecordHook.java |
216 | private class RefactorQuickAccessAction extends QuickMenuAction {
public RefactorQuickAccessAction() {
super(REFACTOR_MENU_ID);
}
protected void fillMenu(IMenuManager menu) {
IContributionItem[] cis = new RefactorMenuItems().getContributionItems();
for (IContributionItem ci: cis) {
menu.add(ci);
}
}
} | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_editor_CeylonEditor.java |
163 | public interface URLHandlerDao {
public URLHandler findURLHandlerByURI(String uri);
/**
* Gets all the URL handlers configured in the system
* @return the list of configured URL handlers
*/
public List<URLHandler> findAllURLHandlers();
public URLHandler saveURLHandler(URLHandler handler);
} | 0true
| admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_url_dao_URLHandlerDao.java |
412 | @Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.FIELD})
public @interface AdminPresentationAdornedTargetCollection {
/**
* <p>Optional - field name will be used if not specified</p>
*
* <p>The friendly name to present to a user for this field in a GUI. If supporting i18N,
* the friendly name may be a key to retrieve a localized friendly name using
* the GWT support for i18N.</p>
*
* @return the friendly name
*/
String friendlyName() default "";
/**
* <p>Optional - only required if you wish to apply security to this field</p>
*
* <p>If a security level is specified, it is registered with the SecurityManager.
* The SecurityManager checks the permission of the current user to
* determine if this field should be disabled based on the specified level.</p>
*
* @return the security level
*/
String securityLevel() default "";
/**
* <p>Optional - fields are not excluded by default</p>
*
* <p>Specify if this field should be excluded from inclusion in the
* admin presentation layer</p>
*
* @return whether or not the field should be excluded
*/
boolean excluded() default false;
/**
* <p>Optional - only required if you want to hide the field based on this property's value</p>
*
* <p>If the property is defined and found to be set to false in the AppConfigurationService, then this field will be excluded from the
* admin presentation layer</p>
*
* @return name of the property
*/
String showIfProperty() default "";
/**
* Optional - only required if you want to make the field immutable
*
* Explicitly specify whether or not this field is mutable.
*
* @return whether or not this field is read only
*/
boolean readOnly() default false;
/**
* <p>Optional - only required if you want to make the field ignore caching</p>
*
* <p>Explicitly specify whether or not this field will use server-side
* caching during inspection</p>
*
* @return whether or not this field uses caching
*/
boolean useServerSideInspectionCache() default true;
/**
* <p>Optional - only required in the absence of a "mappedBy" property
* on the JPA annotation</p>
*
* <p>This is the field in the adorned target entity that refers
* back to the parent entity</p>
*
* @return the field that refers back to the parent entity
*/
String parentObjectProperty() default "";
/**
* <p>Optional - only required if the primary key property of the
* parent entity is called something other than "id"</p>
*
* <p>This is the field in the parent entity that represents
* its primary key</p>
*
* @return primary key field of the parent entity
*/
String parentObjectIdProperty() default "id";
/**
* <p>This is the field in the adorned target entity that refers
* to the target entity</p>
*
* @return target entity field of the adorned target
*/
String targetObjectProperty() default "";
/**
* <p>Optional - only required if the adorned target has fields
* (other than the sort property) that should be populated
* by the user</p>
*
* <p>List of fields to include in the add/update form
* for the adorned target entity.</p>
*
* @return user populated fields on the adorned target
*/
String[] maintainedAdornedTargetFields() default {};
/**
* <p>Optional - only required when it is desirable to override
* the property prominence settings from the adorned target and the
* target object</p>
*
* <p>List of fields visible in the adorned target grid UI in the
* admin tool. Fields are referenced relative to the adorned target
* entity, or the target entity. For example, in CrossSaleProductImpl,
* to show the product name and promotionMessage fields, the
* gridVisibleFields value would be: {"defaultSku.name", "promotionMessage"}</p>
*
* @return List of fields visible in the adorned target grid UI in the admin tool
*/
String[] gridVisibleFields() default {};
/**
* <p>Optional - only required if the primary key property of the
* target entity is called something other than "id"</p>
*
* <p>This is the field in the target entity that represents
* its primary key</p>
*
* <p>Note that this should just be the property name, not the path to the property.
* For example, if the target object is CountryImpl, then the value for the
* targetObjectIdProperty should just be "abbreviation".</p>
*
* @return primary key field of the target entity
*/
String targetObjectIdProperty() default "id";
/**
* <p>Optional - only required if there is an entity that is responsible
* for modeling the join table for this adorned collection.</p>
*
* <p>For example, consider the scenario that a product has many possible
* parent categories. Also consider that you might want to sort the parent
* categories in a specific way. The join entity in this case would hold a
* link to both a category and a product as well as a sequence field. This
* property provides the ability to specify that mapping.</p>
*
* @return the join entity class (if any)
*/
String joinEntityClass() default "";
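// Illustrative sketch only (hypothetical values, not taken from this source): a product's
// parent-category collection could point joinEntityClass at the xref entity that models the join table:
// @AdminPresentationAdornedTargetCollection(friendlyName = "allParentCategories",
//         targetObjectProperty = "category", sortProperty = "displayOrder",
//         joinEntityClass = "org.broadleafcommerce.core.catalog.domain.CategoryProductXrefImpl")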
/**
* <p>Optional - only required if the adorned target has
* a field used for sorting</p>
*
* <p>This is the field by which the adorned targets are sorted</p>
*
* @return the sort field in the adorned target entity
*/
String sortProperty() default "";
/**
* <p>Optional - only required if the sort order should be
* descending</p>
*
* <p>This is the sort direction for the adorned targets</p>
*
* @return the sort direction
*/
boolean sortAscending() default true;
/**
* <p>Optional - only required if the system should not query
* the user for the adorned property values.</p>
*
* <p>Defines whether or not the system should prompt the user
* for the adorned property values (if any) after searching
* for the target entity. This is an advanced feature and is
* rarely used.</p>
*
* @return whether to ignore the adorned properties
*/
boolean ignoreAdornedProperties() default false;
/**
* <p>Optional - only required if you want to specify ordering for this field</p>
*
* <p>The order in which this field will appear in a GUI relative to other collections from the same class</p>
*
* @return the display order
*/
int order() default 99999;
/**
* Optional - only required if you want the field to appear under a different tab
*
* Specify a GUI tab for this field
*
* @return the tab for this field
*/
String tab() default "General";
/**
* Optional - only required if you want to order the appearance of the tabs in the UI
*
* Specify an order for this tab. Tabs will be sorted in the resulting form in
* ascending order based on this parameter.
*
* The default tab will render with an order of 100.
*
* @return the order for this tab
*/
int tabOrder() default 100;
/**
* <p>Optional - only required if you need to specially handle crud operations for this
* specific collection on the server</p>
*
* <p>Custom string values that will be passed to the server during CRUD operations on this
* collection. These criteria values can be detected in a custom persistence handler
* (@CustomPersistenceHandler) in order to engage special handling through custom server
* side code for this collection.</p>
*
* @return the custom string array to pass to the server during CRUD operations
*/
String[] customCriteria() default {};
/**
* <p>Optional - only required if a special operation type is required for a CRUD operation. This
* setting is not normally changed and is an advanced setting</p>
*
* <p>The operation type for a CRUD operation</p>
*
* @return the operation type
*/
AdminPresentationOperationTypes operationTypes() default @AdminPresentationOperationTypes(addType = OperationType.ADORNEDTARGETLIST, fetchType = OperationType.ADORNEDTARGETLIST, inspectType = OperationType.BASIC, removeType = OperationType.ADORNEDTARGETLIST, updateType = OperationType.ADORNEDTARGETLIST);
/**
* Optional - if you have FieldType set to SupportedFieldType.MONEY,
* then you can specify a money currency property field.
*
* @return the currency property field
*/
String currencyCodeField() default "";
} | 0true
| common_src_main_java_org_broadleafcommerce_common_presentation_AdminPresentationAdornedTargetCollection.java |
2,174 | final class Item {
public final DocIdSetIterator iter;
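// docID this sub-iterator is currently positioned on; -1 until it has been advanced.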
public int doc;
public Item(DocIdSetIterator iter) {
this.iter = iter;
this.doc = -1;
}
} | 0true
| src_main_java_org_elasticsearch_common_lucene_docset_OrDocIdSet.java |
3,617 | public static class ValueAndBoost {
private final String value;
private final float boost;
public ValueAndBoost(String value, float boost) {
this.value = value;
this.boost = boost;
}
/**
* Value of string field.
* @return value of string field
*/
public String value() {
return value;
}
/**
* Boost either parsed from the document or defaulted.
* @return boost either parsed from the document or defaulted
*/
public float boost() {
return boost;
}
} | 0true
| src_main_java_org_elasticsearch_index_mapper_core_StringFieldMapper.java |
1,389 | @XmlRootElement(name = "category")
@XmlAccessorType(value = XmlAccessType.FIELD)
public class CategoryWrapper extends BaseWrapper implements APIWrapper<Category> {
@XmlElement
protected Long id;
@XmlElement
protected String name;
@XmlElement
protected String description;
@XmlElement
protected Boolean active;
@XmlElement
protected String url;
@XmlElement
protected String urlKey;
@XmlElement
@XmlJavaTypeAdapter(ISO8601DateAdapter.class)
protected Date activeStartDate;
@XmlElement
@XmlJavaTypeAdapter(ISO8601DateAdapter.class)
protected Date activeEndDate;
@XmlElement(name = "category")
@XmlElementWrapper(name = "subcategories")
protected List<CategoryWrapper> subcategories;
@XmlElement(name = "product")
@XmlElementWrapper(name = "products")
protected List<ProductWrapper> products;
@XmlElement(name = "categoryAttribute")
@XmlElementWrapper(name = "categoryAttributes")
protected List<CategoryAttributeWrapper> categoryAttributes;
@Override
public void wrapDetails(Category category, HttpServletRequest request) {
this.id = category.getId();
this.name = category.getName();
this.description = category.getDescription();
this.active = category.isActive();
this.activeStartDate = category.getActiveStartDate();
this.activeEndDate = category.getActiveEndDate();
this.url = category.getUrl();
this.urlKey = category.getUrlKey();
if (category.getCategoryAttributes() != null && !category.getCategoryAttributes().isEmpty()) {
categoryAttributes = new ArrayList<CategoryAttributeWrapper>();
for (CategoryAttribute attribute : category.getCategoryAttributes()) {
CategoryAttributeWrapper wrapper = (CategoryAttributeWrapper) context.getBean(CategoryAttributeWrapper.class.getName());
wrapper.wrapSummary(attribute, request);
categoryAttributes.add(wrapper);
}
}
Integer productLimit = (Integer) request.getAttribute("productLimit");
Integer productOffset = (Integer) request.getAttribute("productOffset");
Integer subcategoryLimit = (Integer) request.getAttribute("subcategoryLimit");
Integer subcategoryOffset = (Integer) request.getAttribute("subcategoryOffset");
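// A limit supplied without an explicit offset defaults to the first page.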
if (productLimit != null && productOffset == null) {
productOffset = 1;
}
if (subcategoryLimit != null && subcategoryOffset == null) {
subcategoryOffset = 1;
}
if (productLimit != null && productOffset != null) {
SearchService searchService = (SearchService) context.getBean("blSearchService");
ProductSearchCriteria searchCriteria = new ProductSearchCriteria();
searchCriteria.setPage(productOffset);
searchCriteria.setPageSize(productLimit);
searchCriteria.setFilterCriteria(new HashMap<String, String[]>());
try {
ProductSearchResult result = searchService.findExplicitProductsByCategory(category, searchCriteria);
List<Product> productList = result.getProducts();
if (productList != null && !productList.isEmpty()) {
if (products == null) {
products = new ArrayList<ProductWrapper>();
}
for (Product p : productList) {
ProductWrapper productSummaryWrapper = (ProductWrapper) context.getBean(ProductWrapper.class.getName());
productSummaryWrapper.wrapSummary(p, request);
products.add(productSummaryWrapper);
}
}
} catch (ServiceException e) {
throw new WebApplicationException(Response.status(Response.Status.INTERNAL_SERVER_ERROR)
.type(MediaType.TEXT_PLAIN).entity("An unexpected error occurred " + e.getMessage()).build());
}
}
if (subcategoryLimit != null && subcategoryOffset != null) {
subcategories = buildSubcategoryTree(subcategories, category, request);
}
}
@Override
public void wrapSummary(Category category, HttpServletRequest request) {
this.id = category.getId();
this.name = category.getName();
this.description = category.getDescription();
this.active = category.isActive();
}
protected List<CategoryWrapper> buildSubcategoryTree(List<CategoryWrapper> wrappers, Category root, HttpServletRequest request) {
CatalogService catalogService = (CatalogService) context.getBean("blCatalogService");
Integer subcategoryLimit = (Integer) request.getAttribute("subcategoryLimit");
Integer subcategoryOffset = (Integer) request.getAttribute("subcategoryOffset");
List<Category> subcategories = catalogService.findActiveSubCategoriesByCategory(root, subcategoryLimit, subcategoryOffset);
if (subcategories != null && !subcategories.isEmpty()) {
if (wrappers == null) {
wrappers = new ArrayList<CategoryWrapper>();
}
for (Category c : subcategories) {
CategoryWrapper subcategoryWrapper = (CategoryWrapper) context.getBean(CategoryWrapper.class.getName());
subcategoryWrapper.wrapSummary(c, request);
wrappers.add(subcategoryWrapper);
}
}
return wrappers;
}
} | 0true
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_api_wrapper_CategoryWrapper.java |
3,404 | public class UidAndSourceFieldsVisitor extends FieldsVisitor {
@Override
public Status needsField(FieldInfo fieldInfo) throws IOException {
if (SourceFieldMapper.NAME.equals(fieldInfo.name)) {
return Status.YES;
} else if (UidFieldMapper.NAME.equals(fieldInfo.name)) {
return Status.YES;
}
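// Once both the uid and the source have been collected there is nothing left to visit.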
return uid != null && source != null ? Status.STOP : Status.NO;
}
} | 0true
| src_main_java_org_elasticsearch_index_fieldvisitor_UidAndSourceFieldsVisitor.java |
4,474 | public class RecoverySettings extends AbstractComponent {
public static final String INDICES_RECOVERY_FILE_CHUNK_SIZE = "indices.recovery.file_chunk_size";
public static final String INDICES_RECOVERY_TRANSLOG_OPS = "indices.recovery.translog_ops";
public static final String INDICES_RECOVERY_TRANSLOG_SIZE = "indices.recovery.translog_size";
public static final String INDICES_RECOVERY_COMPRESS = "indices.recovery.compress";
public static final String INDICES_RECOVERY_CONCURRENT_STREAMS = "indices.recovery.concurrent_streams";
public static final String INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS = "indices.recovery.concurrent_small_file_streams";
public static final String INDICES_RECOVERY_MAX_BYTES_PER_SEC = "indices.recovery.max_bytes_per_sec";
public static final long SMALL_FILE_CUTOFF_BYTES = ByteSizeValue.parseBytesSizeValue("5mb").bytes();
/**
* Use {@link #INDICES_RECOVERY_MAX_BYTES_PER_SEC} instead
*/
@Deprecated
public static final String INDICES_RECOVERY_MAX_SIZE_PER_SEC = "indices.recovery.max_size_per_sec";
private volatile ByteSizeValue fileChunkSize;
private volatile boolean compress;
private volatile int translogOps;
private volatile ByteSizeValue translogSize;
private volatile int concurrentStreams;
private volatile int concurrentSmallFileStreams;
private final ThreadPoolExecutor concurrentStreamPool;
private final ThreadPoolExecutor concurrentSmallFileStreamPool;
private volatile ByteSizeValue maxBytesPerSec;
private volatile SimpleRateLimiter rateLimiter;
@Inject
public RecoverySettings(Settings settings, NodeSettingsService nodeSettingsService) {
super(settings);
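// Each setting falls back from the component-level key to the legacy index.shard.recovery.* key, then to a built-in default.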
this.fileChunkSize = componentSettings.getAsBytesSize("file_chunk_size", settings.getAsBytesSize("index.shard.recovery.file_chunk_size", new ByteSizeValue(512, ByteSizeUnit.KB)));
this.translogOps = componentSettings.getAsInt("translog_ops", settings.getAsInt("index.shard.recovery.translog_ops", 1000));
this.translogSize = componentSettings.getAsBytesSize("translog_size", settings.getAsBytesSize("index.shard.recovery.translog_size", new ByteSizeValue(512, ByteSizeUnit.KB)));
this.compress = componentSettings.getAsBoolean("compress", true);
this.concurrentStreams = componentSettings.getAsInt("concurrent_streams", settings.getAsInt("index.shard.recovery.concurrent_streams", 3));
this.concurrentStreamPool = EsExecutors.newScaling(0, concurrentStreams, 60, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[recovery_stream]"));
this.concurrentSmallFileStreams = componentSettings.getAsInt("concurrent_small_file_streams", settings.getAsInt("index.shard.recovery.concurrent_small_file_streams", 2));
this.concurrentSmallFileStreamPool = EsExecutors.newScaling(0, concurrentSmallFileStreams, 60, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[small_file_recovery_stream]"));
this.maxBytesPerSec = componentSettings.getAsBytesSize("max_bytes_per_sec", componentSettings.getAsBytesSize("max_size_per_sec", new ByteSizeValue(20, ByteSizeUnit.MB)));
if (maxBytesPerSec.bytes() <= 0) {
rateLimiter = null;
} else {
rateLimiter = new SimpleRateLimiter(maxBytesPerSec.mbFrac());
}
logger.debug("using max_bytes_per_sec[{}], concurrent_streams [{}], file_chunk_size [{}], translog_size [{}], translog_ops [{}], and compress [{}]",
maxBytesPerSec, concurrentStreams, fileChunkSize, translogSize, translogOps, compress);
nodeSettingsService.addListener(new ApplySettings());
}
public void close() {
concurrentStreamPool.shutdown();
concurrentSmallFileStreamPool.shutdown();
try {
concurrentStreamPool.awaitTermination(1, TimeUnit.SECONDS);
concurrentSmallFileStreamPool.awaitTermination(1, TimeUnit.SECONDS);
} catch (InterruptedException e) {
// that's fine...
}
concurrentStreamPool.shutdownNow();
concurrentSmallFileStreamPool.shutdownNow();
}
public ByteSizeValue fileChunkSize() {
return fileChunkSize;
}
public boolean compress() {
return compress;
}
public int translogOps() {
return translogOps;
}
public ByteSizeValue translogSize() {
return translogSize;
}
public int concurrentStreams() {
return concurrentStreams;
}
public ThreadPoolExecutor concurrentStreamPool() {
return concurrentStreamPool;
}
public ThreadPoolExecutor concurrentSmallFileStreamPool() {
return concurrentSmallFileStreamPool;
}
public RateLimiter rateLimiter() {
return rateLimiter;
}
class ApplySettings implements NodeSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
ByteSizeValue maxSizePerSec = settings.getAsBytesSize(INDICES_RECOVERY_MAX_BYTES_PER_SEC, settings.getAsBytesSize(INDICES_RECOVERY_MAX_SIZE_PER_SEC, RecoverySettings.this.maxBytesPerSec));
if (!Objects.equal(maxSizePerSec, RecoverySettings.this.maxBytesPerSec)) {
logger.info("updating [{}] from [{}] to [{}]", INDICES_RECOVERY_MAX_BYTES_PER_SEC, RecoverySettings.this.maxBytesPerSec, maxSizePerSec);
RecoverySettings.this.maxBytesPerSec = maxSizePerSec;
if (maxSizePerSec.bytes() <= 0) {
rateLimiter = null;
} else if (rateLimiter != null) {
rateLimiter.setMbPerSec(maxSizePerSec.mbFrac());
} else {
rateLimiter = new SimpleRateLimiter(maxSizePerSec.mbFrac());
}
}
ByteSizeValue fileChunkSize = settings.getAsBytesSize(INDICES_RECOVERY_FILE_CHUNK_SIZE, RecoverySettings.this.fileChunkSize);
if (!fileChunkSize.equals(RecoverySettings.this.fileChunkSize)) {
logger.info("updating [indices.recovery.file_chunk_size] from [{}] to [{}]", RecoverySettings.this.fileChunkSize, fileChunkSize);
RecoverySettings.this.fileChunkSize = fileChunkSize;
}
int translogOps = settings.getAsInt(INDICES_RECOVERY_TRANSLOG_OPS, RecoverySettings.this.translogOps);
if (translogOps != RecoverySettings.this.translogOps) {
logger.info("updating [indices.recovery.translog_ops] from [{}] to [{}]", RecoverySettings.this.translogOps, translogOps);
RecoverySettings.this.translogOps = translogOps;
}
ByteSizeValue translogSize = settings.getAsBytesSize(INDICES_RECOVERY_TRANSLOG_SIZE, RecoverySettings.this.translogSize);
if (!translogSize.equals(RecoverySettings.this.translogSize)) {
logger.info("updating [indices.recovery.translog_size] from [{}] to [{}]", RecoverySettings.this.translogSize, translogSize);
RecoverySettings.this.translogSize = translogSize;
}
boolean compress = settings.getAsBoolean(INDICES_RECOVERY_COMPRESS, RecoverySettings.this.compress);
if (compress != RecoverySettings.this.compress) {
logger.info("updating [indices.recovery.compress] from [{}] to [{}]", RecoverySettings.this.compress, compress);
RecoverySettings.this.compress = compress;
}
int concurrentStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_STREAMS, RecoverySettings.this.concurrentStreams);
if (concurrentStreams != RecoverySettings.this.concurrentStreams) {
logger.info("updating [indices.recovery.concurrent_streams] from [{}] to [{}]", RecoverySettings.this.concurrentStreams, concurrentStreams);
RecoverySettings.this.concurrentStreams = concurrentStreams;
RecoverySettings.this.concurrentStreamPool.setMaximumPoolSize(concurrentStreams);
}
int concurrentSmallFileStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, RecoverySettings.this.concurrentSmallFileStreams);
if (concurrentSmallFileStreams != RecoverySettings.this.concurrentSmallFileStreams) {
logger.info("updating [indices.recovery.concurrent_small_file_streams] from [{}] to [{}]", RecoverySettings.this.concurrentSmallFileStreams, concurrentSmallFileStreams);
RecoverySettings.this.concurrentSmallFileStreams = concurrentSmallFileStreams;
RecoverySettings.this.concurrentSmallFileStreamPool.setMaximumPoolSize(concurrentSmallFileStreams);
}
}
}
} | 1no label
| src_main_java_org_elasticsearch_indices_recovery_RecoverySettings.java |
858 | public class TransportSearchDfsQueryAndFetchAction extends TransportSearchTypeAction {
@Inject
public TransportSearchDfsQueryAndFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) {
super(settings, threadPool, clusterService, searchService, searchPhaseController);
}
@Override
protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
new AsyncAction(searchRequest, listener).start();
}
private class AsyncAction extends BaseAsyncAction<DfsSearchResult> {
private final AtomicArray<QueryFetchSearchResult> queryFetchResults;
private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
super(request, listener);
queryFetchResults = new AtomicArray<QueryFetchSearchResult>(firstResults.length());
}
@Override
protected String firstPhaseName() {
return "dfs";
}
@Override
protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchRequest request, SearchServiceListener<DfsSearchResult> listener) {
searchService.sendExecuteDfs(node, request, listener);
}
@Override
protected void moveToSecondPhase() {
final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults);
final AtomicInteger counter = new AtomicInteger(firstResults.asList().size());
int localOperations = 0;
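// Remote shards are dispatched immediately; local shards are only counted here and executed below per the requested operation threading.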
for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
DfsSearchResult dfsResult = entry.value;
DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
if (node.id().equals(nodes.localNodeId())) {
localOperations++;
} else {
QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest);
}
}
if (localOperations > 0) {
if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) {
threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
DfsSearchResult dfsResult = entry.value;
DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
if (node.id().equals(nodes.localNodeId())) {
QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest);
}
}
}
});
} else {
boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD;
for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
final DfsSearchResult dfsResult = entry.value;
final DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
if (node.id().equals(nodes.localNodeId())) {
final QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
try {
if (localAsync) {
threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest);
}
});
} else {
executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest);
}
} catch (Throwable t) {
onSecondPhaseFailure(t, querySearchRequest, entry.index, dfsResult, counter);
}
}
}
}
}
}
void executeSecondPhase(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter, DiscoveryNode node, final QuerySearchRequest querySearchRequest) {
searchService.sendExecuteFetch(node, querySearchRequest, new SearchServiceListener<QueryFetchSearchResult>() {
@Override
public void onResult(QueryFetchSearchResult result) {
result.shardTarget(dfsResult.shardTarget());
queryFetchResults.set(shardIndex, result);
if (counter.decrementAndGet() == 0) {
finishHim();
}
}
@Override
public void onFailure(Throwable t) {
onSecondPhaseFailure(t, querySearchRequest, shardIndex, dfsResult, counter);
}
});
}
void onSecondPhaseFailure(Throwable t, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult, AtomicInteger counter) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] Failed to execute query phase", t, querySearchRequest.id());
}
this.addShardFailure(shardIndex, dfsResult.shardTarget(), t);
successulOps.decrementAndGet();
if (counter.decrementAndGet() == 0) {
finishHim();
}
}
void finishHim() {
try {
innerFinishHim();
} catch (Throwable e) {
ReduceSearchPhaseException failure = new ReduceSearchPhaseException("query_fetch", "", e, buildShardFailures());
if (logger.isDebugEnabled()) {
logger.debug("failed to reduce search", failure);
}
listener.onFailure(failure);
} finally {
//
}
}
void innerFinishHim() throws Exception {
sortedShardList = searchPhaseController.sortDocs(queryFetchResults);
final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults, queryFetchResults);
String scrollId = null;
if (request.scroll() != null) {
scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
}
listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get(), buildTookInMillis(), buildShardFailures()));
}
}
} | 0true
| src_main_java_org_elasticsearch_action_search_type_TransportSearchDfsQueryAndFetchAction.java |
727 | public class ShardDeleteRequest extends ShardReplicationOperationRequest<ShardDeleteRequest> {
private int shardId;
private String type;
private String id;
private boolean refresh = false;
private long version;
ShardDeleteRequest(IndexDeleteRequest request, int shardId) {
super(request);
this.index = request.index();
this.shardId = shardId;
this.type = request.type();
this.id = request.id();
replicationType(request.replicationType());
consistencyLevel(request.consistencyLevel());
timeout = request.timeout();
this.refresh = request.refresh();
this.version = request.version();
}
ShardDeleteRequest() {
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = super.validate();
if (type == null) {
validationException = addValidationError("type is missing", validationException);
}
if (id == null) {
validationException = addValidationError("id is missing", validationException);
}
return validationException;
}
public int shardId() {
return this.shardId;
}
public String type() {
return this.type;
}
public String id() {
return this.id;
}
public boolean refresh() {
return this.refresh;
}
public void version(long version) {
this.version = version;
}
public long version() {
return this.version;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardId = in.readVInt();
type = in.readString();
id = in.readString();
refresh = in.readBoolean();
version = in.readLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(shardId);
out.writeString(type);
out.writeString(id);
out.writeBoolean(refresh);
out.writeLong(version);
}
} | 0true
| src_main_java_org_elasticsearch_action_delete_index_ShardDeleteRequest.java |
816 | public class AlterOperation extends AbstractAlterOperation {
public AlterOperation() {
}
public AlterOperation(String name, IFunction<Long, Long> function) {
super(name, function);
}
@Override
public int getId() {
return AtomicLongDataSerializerHook.ALTER;
}
@Override
public void run() throws Exception {
LongWrapper reference = getNumber();
long input = reference.get();
long output = function.apply(input);
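// Only write (and back up) the value when the function actually changed it.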
shouldBackup = input != output;
if (shouldBackup) {
backup = output;
reference.set(backup);
}
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_concurrent_atomiclong_operations_AlterOperation.java |
2,045 | public class ContainsValueOperation extends AbstractMapOperation implements PartitionAwareOperation {
private boolean contains = false;
private Data testValue;
public ContainsValueOperation(String name, Data testValue) {
super(name);
this.testValue = testValue;
}
public ContainsValueOperation() {
}
public void run() {
MapService mapService = (MapService) getService();
RecordStore recordStore = mapService.getRecordStore(getPartitionId(), name);
contains = recordStore.containsValue(testValue);
if (mapContainer.getMapConfig().isStatisticsEnabled()) {
mapService.getLocalMapStatsImpl(name).incrementOtherOperations();
}
}
@Override
public Object getResponse() {
return contains;
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
IOUtil.writeNullableData(out, testValue);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
testValue = IOUtil.readNullableData(in);
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_map_operation_ContainsValueOperation.java |
3,757 | final NumericDocValues versionValues = new NumericDocValues() {
@Override
public long get(int index) {
return versions.get(index);
}
}; | 0true
| src_main_java_org_elasticsearch_index_merge_policy_IndexUpgraderMergePolicy.java |
360 | public static class ExceptionThrowingMapper
implements Mapper<Integer, Integer, String, Integer> {
@Override
public void map(Integer key, Integer value, Context<String, Integer> context) {
throw new NullPointerException("BUMM!");
}
} | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_mapreduce_ClientMapReduceTest.java |
270 | public class ElasticsearchNullPointerException extends ElasticsearchException {
public ElasticsearchNullPointerException() {
super(null);
}
public ElasticsearchNullPointerException(String msg) {
super(msg);
}
public ElasticsearchNullPointerException(String msg, Throwable cause) {
super(msg, cause);
}
} | 0true
| src_main_java_org_elasticsearch_ElasticsearchNullPointerException.java |
1,766 | public class GeoUtils {
/** Earth ellipsoid major axis defined by WGS 84 in meters */
public static final double EARTH_SEMI_MAJOR_AXIS = 6378137.0; // meters (WGS 84)
/** Earth ellipsoid minor axis defined by WGS 84 in meters */
public static final double EARTH_SEMI_MINOR_AXIS = 6356752.314245; // meters (WGS 84)
/** Earth mean radius defined by WGS 84 in meters */
public static final double EARTH_MEAN_RADIUS = 6371008.7714D; // meters (WGS 84)
/** Earth axis ratio defined by WGS 84 (0.996647189335) */
public static final double EARTH_AXIS_RATIO = EARTH_SEMI_MINOR_AXIS / EARTH_SEMI_MAJOR_AXIS;
/** Earth ellipsoid equator length in meters */
public static final double EARTH_EQUATOR = 2*Math.PI * EARTH_SEMI_MAJOR_AXIS;
/** Earth ellipsoid polar distance in meters */
public static final double EARTH_POLAR_DISTANCE = Math.PI * EARTH_SEMI_MINOR_AXIS;
/**
* Calculate the width (in meters) of geohash cells at a specific level
* @param level geohash level must be greater or equal to zero
* @return the width of cells at level in meters
*/
public static double geoHashCellWidth(int level) {
assert level>=0;
// Geohash cells are split into 32 cells at each level. The grid
// alternates at each level between an 8x4 and a 4x8 grid
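// e.g. geoHashCellWidth(1) == EARTH_EQUATOR / 8, geoHashCellWidth(2) == EARTH_EQUATOR / 32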
return EARTH_EQUATOR / (1L<<((((level+1)/2)*3) + ((level/2)*2)));
}
/**
* Calculate the width (in meters) of quadtree cells at a specific level
* @param level quadtree level must be greater or equal to zero
* @return the width of cells at level in meters
*/
public static double quadTreeCellWidth(int level) {
assert level >=0;
return EARTH_EQUATOR / (1L<<level);
}
/**
* Calculate the height (in meters) of geohash cells at a specific level
* @param level geohash level must be greater or equal to zero
* @return the height of cells at level in meters
*/
public static double geoHashCellHeight(int level) {
assert level>=0;
// Geohash cells are split into 32 cells at each level. The grid
// alternates at each level between an 8x4 and a 4x8 grid
return EARTH_POLAR_DISTANCE / (1L<<((((level+1)/2)*2) + ((level/2)*3)));
}
/**
* Calculate the height (in meters) of quadtree cells at a specific level
* @param level quadtree level must be greater or equal to zero
* @return the height of cells at level in meters
*/
public static double quadTreeCellHeight(int level) {
assert level>=0;
return EARTH_POLAR_DISTANCE / (1L<<level);
}
/**
* Calculate the size (in meters) of geohash cells at a specific level
* @param level geohash level must be greater or equal to zero
* @return the size of cells at level in meters
*/
public static double geoHashCellSize(int level) {
assert level>=0;
final double w = geoHashCellWidth(level);
final double h = geoHashCellHeight(level);
return Math.sqrt(w*w + h*h);
}
/**
* Calculate the size (in meters) of quadtree cells at a specific level
* @param level quadtree level must be greater or equal to zero
* @return the size of cells at level in meters
*/
public static double quadTreeCellSize(int level) {
assert level>=0;
return Math.sqrt(EARTH_POLAR_DISTANCE*EARTH_POLAR_DISTANCE + EARTH_EQUATOR*EARTH_EQUATOR) / (1L<<level);
}
/**
* Calculate the number of levels needed for a specific precision. Quadtree
* cells will not exceed the specified size (diagonal) of the precision.
* @param meters Maximum size of cells in meters (must be greater than zero)
* @return levels need to achieve precision
*/
public static int quadTreeLevelsForPrecision(double meters) {
assert meters >= 0;
if(meters == 0) {
return QuadPrefixTree.MAX_LEVELS_POSSIBLE;
} else {
final double ratio = 1+(EARTH_POLAR_DISTANCE / EARTH_EQUATOR); // cell ratio
final double width = Math.sqrt((meters*meters)/(ratio*ratio)); // convert to cell width
final long part = Math.round(Math.ceil(EARTH_EQUATOR / width));
final int level = Long.SIZE - Long.numberOfLeadingZeros(part)-1; // (log_2)
return (part <= (1L << level)) ? level : (level + 1); // adjust level
}
}
/**
* Calculate the number of levels needed for a specific precision. QuadTree
* cells will not exceed the specified size (diagonal) of the precision.
* @param distance Maximum size of cells as unit string (must be greater than or equal to zero)
* @return levels need to achieve precision
*/
public static int quadTreeLevelsForPrecision(String distance) {
return quadTreeLevelsForPrecision(DistanceUnit.METERS.parse(distance, DistanceUnit.DEFAULT));
}
/**
* Calculate the number of levels needed for a specific precision. GeoHash
* cells will not exceed the specified size (diagonal) of the precision.
* @param meters Maximum size of cells in meters (must be greater than or equal to zero)
* @return levels need to achieve precision
*/
public static int geoHashLevelsForPrecision(double meters) {
assert meters >= 0;
if(meters == 0) {
return GeohashPrefixTree.getMaxLevelsPossible();
} else {
final double ratio = 1+(EARTH_POLAR_DISTANCE / EARTH_EQUATOR); // cell ratio
final double width = Math.sqrt((meters*meters)/(ratio*ratio)); // convert to cell width
final double part = Math.ceil(EARTH_EQUATOR / width);
if(part == 1)
return 1;
final int bits = (int)Math.round(Math.ceil(Math.log(part) / Math.log(2)));
final int full = bits / 5; // number of 5 bit subdivisions
final int left = bits - full*5; // bit representing the last level
final int even = full + (left>0?1:0); // number of even levels
final int odd = full + (left>3?1:0); // number of odd levels
return even+odd;
}
}
/**
* Calculate the number of levels needed for a specific precision. GeoHash
* cells will not exceed the specified size (diagonal) of the precision.
* @param distance Maximum size of cells as unit string (must be greater than or equal to zero)
* @return levels need to achieve precision
*/
public static int geoHashLevelsForPrecision(String distance) {
return geoHashLevelsForPrecision(DistanceUnit.METERS.parse(distance, DistanceUnit.DEFAULT));
}
/**
* Normalize longitude to lie within the -180 (exclusive) to 180 (inclusive) range.
*
* @param lon Longitude to normalize
* @return The normalized longitude.
*/
public static double normalizeLon(double lon) {
return centeredModulus(lon, 360);
}
/**
* Normalize latitude to lie within the -90 to 90 (both inclusive) range.
* <p/>
* Note: You should not normalize longitude and latitude separately,
* because when normalizing latitude it may be necessary to
* add a shift of 180° in the longitude.
* For this purpose, you should call the
* {@link #normalizePoint(GeoPoint)} function.
*
* @param lat Latitude to normalize
* @return The normalized latitude.
* @see #normalizePoint(GeoPoint)
*/
public static double normalizeLat(double lat) {
lat = centeredModulus(lat, 360);
if (lat < -90) {
lat = -180 - lat;
} else if (lat > 90) {
lat = 180 - lat;
}
return lat;
}
/**
* Normalize the geo {@code Point} for its coordinates to lie within their
* respective normalized ranges.
* <p/>
* Note: A shift of 180° is applied in the longitude if necessary,
* in order to normalize properly the latitude.
*
* @param point The point to normalize in-place.
*/
public static void normalizePoint(GeoPoint point) {
normalizePoint(point, true, true);
}
/**
* Normalize the geo {@code Point} for the given coordinates to lie within
* their respective normalized ranges.
* <p/>
* You can control which coordinate gets normalized with the two flags.
* <p/>
* Note: A shift of 180° is applied in the longitude if necessary,
* in order to normalize properly the latitude.
* If normalizing latitude but not longitude, it is assumed that
* the longitude is in the form x+k*360, with x in ]-180;180],
* and k is meaningful to the application.
* Therefore x will be adjusted while keeping k preserved.
*
* @param point The point to normalize in-place.
* @param normLat Whether to normalize latitude or leave it as is.
* @param normLon Whether to normalize longitude.
*/
public static void normalizePoint(GeoPoint point, boolean normLat, boolean normLon) {
double lat = point.lat();
double lon = point.lon();
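// Only normalize a coordinate that is actually out of range, so in-range points pass through unchanged.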
normLat = normLat && (lat>90 || lat <= -90);
normLon = normLon && (lon>180 || lon <= -180);
if (normLat) {
lat = centeredModulus(lat, 360);
boolean shift = true;
if (lat < -90) {
lat = -180 - lat;
} else if (lat > 90) {
lat = 180 - lat;
} else {
// No need to shift the longitude, and the latitude is normalized
shift = false;
}
if (shift) {
if (normLon) {
lon += 180;
} else {
// Longitude won't be normalized,
// keep it in the form x+k*360 (with x in ]-180;180])
// by only changing x, assuming k is meaningful for the user application.
lon += normalizeLon(lon) > 0 ? -180 : 180;
}
}
}
if (normLon) {
lon = centeredModulus(lon, 360);
}
point.reset(lat, lon);
}
private static double centeredModulus(double dividend, double divisor) {
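// Maps dividend into the half-open interval (-divisor/2, divisor/2].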
double rtn = dividend % divisor;
if (rtn <= 0) {
rtn += divisor;
}
if (rtn > divisor / 2) {
rtn -= divisor;
}
return rtn;
}
} | 1no label
| src_main_java_org_elasticsearch_common_geo_GeoUtils.java |
44 | public class TouchCommand extends AbstractTextCommand {
String key;
int expiration;
boolean noreply;
ByteBuffer response;
public TouchCommand(TextCommandType type, String key, int expiration, boolean noReply) {
super(type);
this.key = key;
this.expiration = expiration;
this.noreply = noReply;
}
public boolean writeTo(ByteBuffer destination) {
if (response == null) {
response = ByteBuffer.wrap(STORED);
}
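// Copy as much of the response as fits; return true only once the whole response has been written.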
while (destination.hasRemaining() && response.hasRemaining()) {
destination.put(response.get());
}
return !response.hasRemaining();
}
public boolean readFrom(ByteBuffer source) {
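// The touch command carries no data block after its header line, so there is nothing further to consume here.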
return true;
}
public boolean shouldReply() {
return !noreply;
}
public String getKey() {
return key;
}
public int getExpiration() {
return expiration;
}
public void setResponse(byte[] value) {
this.response = ByteBuffer.wrap(value);
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_ascii_memcache_TouchCommand.java |
3,095 | static class Refresh {
private final String source;
private boolean force = false;
public Refresh(String source) {
this.source = source;
}
/**
* Forces the refresh to run, overriding the check for whether any dirty operations happened. Defaults
* to true (note, still lightweight if no refresh is needed).
*/
public Refresh force(boolean force) {
this.force = force;
return this;
}
public boolean force() {
return this.force;
}
public String source() {
return this.source;
}
@Override
public String toString() {
return "force[" + force + "], source [" + source + "]";
}
} | 0true
| src_main_java_org_elasticsearch_index_engine_Engine.java |
3,086 | static class Get {
private final boolean realtime;
private final Term uid;
private boolean loadSource = true;
private long version;
private VersionType versionType;
public Get(boolean realtime, Term uid) {
this.realtime = realtime;
this.uid = uid;
}
public boolean realtime() {
return this.realtime;
}
public Term uid() {
return uid;
}
public boolean loadSource() {
return this.loadSource;
}
public Get loadSource(boolean loadSource) {
this.loadSource = loadSource;
return this;
}
public long version() {
return version;
}
public Get version(long version) {
this.version = version;
return this;
}
public VersionType versionType() {
return versionType;
}
public Get versionType(VersionType versionType) {
this.versionType = versionType;
return this;
}
} | 0true
| src_main_java_org_elasticsearch_index_engine_Engine.java |
2,474 | static class Job extends PrioritizedRunnable {
private final int result;
private final List<Integer> results;
private final CountDownLatch latch;
Job(int result, Priority priority, List<Integer> results, CountDownLatch latch) {
super(priority);
this.result = result;
this.results = results;
this.latch = latch;
}
@Override
public void run() {
results.add(result);
latch.countDown();
}
} | 0true
| src_test_java_org_elasticsearch_common_util_concurrent_PrioritizedExecutorsTests.java |
15 | exe.submit(new Runnable() {
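// Note: this field initializer runs when the Runnable is constructed (at submit time), not when run() executes.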
private final int number = atomicInt.incrementAndGet();
@Override
public void run() {
try {
Thread.sleep(150);
} catch (InterruptedException e) {
e.printStackTrace();
}
System.out.println(number);
}
}); | 0true
| titan-test_src_main_java_com_thinkaurelius_titan_TestBed.java |
1,558 | @XmlRootElement(name = "entry")
@XmlType(propOrder = { "value", "name" })
public class OServerEntryConfiguration {
@XmlAttribute
public String name;
@XmlAttribute
public String value;
public OServerEntryConfiguration() {
}
public OServerEntryConfiguration(final String iName, final String iValue) {
name = iName;
value = iValue;
}
} | 0true
| server_src_main_java_com_orientechnologies_orient_server_config_OServerEntryConfiguration.java |
171 | private class TxManagerDataSourceRegistrationListener implements DataSourceRegistrationListener
{
@Override
public void registeredDataSource( XaDataSource ds )
{
branches.put( new RecoveredBranchInfo( ds.getBranchId() ), true );
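// Recovery may only begin once every known branch has a registered data source.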
boolean everythingRegistered = true;
for ( boolean dsRegistered : branches.values() )
{
everythingRegistered &= dsRegistered;
}
if ( everythingRegistered )
{
doRecovery();
}
}
@Override
public void unregisteredDataSource( XaDataSource ds )
{
branches.put( new RecoveredBranchInfo( ds.getBranchId() ), false );
boolean everythingUnregistered = true;
for ( boolean dsRegistered : branches.values() )
{
everythingUnregistered &= !dsRegistered;
}
if ( everythingUnregistered )
{
closeLog();
}
}
} | 0true
| community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_TxManager.java |
26 | public class CacheLayer implements StoreReadLayer
{
private static final Function<? super SchemaRule, IndexDescriptor> TO_INDEX_RULE =
new Function<SchemaRule, IndexDescriptor>()
{
@Override
public IndexDescriptor apply( SchemaRule from )
{
IndexRule rule = (IndexRule) from;
// We know that we only have int range of property key ids.
return new IndexDescriptor( rule.getLabel(), rule.getPropertyKey() );
}
};
private final CacheLoader<Iterator<DefinedProperty>> nodePropertyLoader = new CacheLoader<Iterator<DefinedProperty>>()
{
@Override
public Iterator<DefinedProperty> load( long id ) throws EntityNotFoundException
{
return diskLayer.nodeGetAllProperties( id );
}
};
private final CacheLoader<Iterator<DefinedProperty>> relationshipPropertyLoader = new CacheLoader<Iterator<DefinedProperty>>()
{
@Override
public Iterator<DefinedProperty> load( long id ) throws EntityNotFoundException
{
return diskLayer.relationshipGetAllProperties( id );
}
};
private final CacheLoader<Iterator<DefinedProperty>> graphPropertyLoader = new CacheLoader<Iterator<DefinedProperty>>()
{
@Override
public Iterator<DefinedProperty> load( long id ) throws EntityNotFoundException
{
return diskLayer.graphGetAllProperties();
}
};
private final CacheLoader<int[]> nodeLabelLoader = new CacheLoader<int[]>()
{
@Override
public int[] load( long id ) throws EntityNotFoundException
{
return primitiveIntIteratorToIntArray( diskLayer.nodeGetLabels( id ) );
}
};
private final PersistenceCache persistenceCache;
private final SchemaCache schemaCache;
private final DiskLayer diskLayer;
private final IndexingService indexingService;
public CacheLayer(
DiskLayer diskLayer,
PersistenceCache persistenceCache,
IndexingService indexingService,
SchemaCache schemaCache )
{
this.diskLayer = diskLayer;
this.persistenceCache = persistenceCache;
this.indexingService = indexingService;
this.schemaCache = schemaCache;
}
@Override
public boolean nodeHasLabel( KernelStatement state, long nodeId, int labelId ) throws EntityNotFoundException
{
return persistenceCache.nodeHasLabel( state, nodeId, labelId, nodeLabelLoader );
}
@Override
public PrimitiveIntIterator nodeGetLabels( KernelStatement state, long nodeId ) throws EntityNotFoundException
{
return new PrimitiveIntIteratorForArray( persistenceCache.nodeGetLabels( state, nodeId, nodeLabelLoader ) );
}
@Override
public Iterator<IndexDescriptor> indexesGetForLabel( KernelStatement state, int labelId )
{
return toIndexDescriptors( schemaCache.schemaRulesForLabel( labelId ), SchemaRule.Kind.INDEX_RULE );
}
@Override
public Iterator<IndexDescriptor> indexesGetAll( KernelStatement state )
{
return toIndexDescriptors( schemaCache.schemaRules(), SchemaRule.Kind.INDEX_RULE );
}
@Override
public Iterator<IndexDescriptor> uniqueIndexesGetForLabel( KernelStatement state, int labelId )
{
return toIndexDescriptors( schemaCache.schemaRulesForLabel( labelId ),
SchemaRule.Kind.CONSTRAINT_INDEX_RULE );
}
@Override
public Iterator<IndexDescriptor> uniqueIndexesGetAll( KernelStatement state )
{
return toIndexDescriptors( schemaCache.schemaRules(), SchemaRule.Kind.CONSTRAINT_INDEX_RULE );
}
private static Iterator<IndexDescriptor> toIndexDescriptors( Iterable<SchemaRule> rules,
final SchemaRule.Kind kind )
{
Iterator<SchemaRule> filteredRules = filter( new Predicate<SchemaRule>()
{
@Override
public boolean accept( SchemaRule item )
{
return item.getKind() == kind;
}
}, rules.iterator() );
return map( TO_INDEX_RULE, filteredRules );
}
@Override
public Long indexGetOwningUniquenessConstraintId( KernelStatement state, IndexDescriptor index )
throws SchemaRuleNotFoundException
{
IndexRule rule = indexRule( index, SchemaStorage.IndexRuleKind.ALL );
if ( rule != null )
{
return rule.getOwningConstraint();
}
return diskLayer.indexGetOwningUniquenessConstraintId( index );
}
@Override
public long indexGetCommittedId( KernelStatement state, IndexDescriptor index, SchemaStorage.IndexRuleKind kind )
throws SchemaRuleNotFoundException
{
IndexRule rule = indexRule( index, kind );
if ( rule != null )
{
return rule.getId();
}
return diskLayer.indexGetCommittedId( index );
}
@Override
public IndexRule indexRule( IndexDescriptor index, SchemaStorage.IndexRuleKind kind )
{
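// Scan the cached schema rules for this label and return the first index rule matching the requested kind and property key.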
for ( SchemaRule rule : schemaCache.schemaRulesForLabel( index.getLabelId() ) )
{
if ( rule instanceof IndexRule )
{
IndexRule indexRule = (IndexRule) rule;
if ( kind.isOfKind( indexRule ) && indexRule.getPropertyKey() == index.getPropertyKeyId() )
{
return indexRule;
}
}
}
return null;
}
@Override
public PrimitiveLongIterator nodeGetPropertyKeys( KernelStatement state, long nodeId ) throws EntityNotFoundException
{
return persistenceCache.nodeGetPropertyKeys( nodeId, nodePropertyLoader );
}
@Override
public Property nodeGetProperty( KernelStatement state, long nodeId, int propertyKeyId ) throws EntityNotFoundException
{
return persistenceCache.nodeGetProperty( nodeId, propertyKeyId, nodePropertyLoader );
}
@Override
public Iterator<DefinedProperty> nodeGetAllProperties( KernelStatement state, long nodeId ) throws EntityNotFoundException
{
return persistenceCache.nodeGetProperties( nodeId, nodePropertyLoader );
}
@Override
public PrimitiveLongIterator relationshipGetPropertyKeys( KernelStatement state, long relationshipId )
throws EntityNotFoundException
{
return new PropertyKeyIdIterator( relationshipGetAllProperties( state, relationshipId ) );
}
@Override
public Property relationshipGetProperty( KernelStatement state, long relationshipId, int propertyKeyId )
throws EntityNotFoundException
{
return persistenceCache.relationshipGetProperty( relationshipId, propertyKeyId,
relationshipPropertyLoader );
}
@Override
public Iterator<DefinedProperty> relationshipGetAllProperties( KernelStatement state, long relationshipId )
throws EntityNotFoundException
{
return persistenceCache.relationshipGetProperties( relationshipId, relationshipPropertyLoader );
}
@Override
public PrimitiveLongIterator graphGetPropertyKeys( KernelStatement state )
{
return persistenceCache.graphGetPropertyKeys( graphPropertyLoader );
}
@Override
public Property graphGetProperty( KernelStatement state, int propertyKeyId )
{
return persistenceCache.graphGetProperty( graphPropertyLoader, propertyKeyId );
}
@Override
public Iterator<DefinedProperty> graphGetAllProperties( KernelStatement state )
{
return persistenceCache.graphGetProperties( graphPropertyLoader );
}
@Override
public Iterator<UniquenessConstraint> constraintsGetForLabelAndPropertyKey(
KernelStatement state, int labelId, int propertyKeyId )
{
return schemaCache.constraintsForLabelAndProperty( labelId, propertyKeyId );
}
@Override
public Iterator<UniquenessConstraint> constraintsGetForLabel( KernelStatement state, int labelId )
{
return schemaCache.constraintsForLabel( labelId );
}
@Override
public Iterator<UniquenessConstraint> constraintsGetAll( KernelStatement state )
{
return schemaCache.constraints();
}
@Override
public PrimitiveLongIterator nodeGetUniqueFromIndexLookup(
KernelStatement state,
IndexDescriptor index,
Object value )
throws IndexNotFoundKernelException, IndexBrokenKernelException
{
return diskLayer.nodeGetUniqueFromIndexLookup( state, schemaCache.indexId( index ), value );
}
@Override
public PrimitiveLongIterator nodesGetForLabel( KernelStatement state, int labelId )
{
return diskLayer.nodesGetForLabel( state, labelId );
}
@Override
public PrimitiveLongIterator nodesGetFromIndexLookup( KernelStatement state, IndexDescriptor index, Object value )
throws IndexNotFoundKernelException
{
return diskLayer.nodesGetFromIndexLookup( state, schemaCache.indexId( index ), value );
}
@Override
public IndexDescriptor indexesGetForLabelAndPropertyKey( KernelStatement state, int labelId, int propertyKey )
throws SchemaRuleNotFoundException
{
return schemaCache.indexDescriptor( labelId, propertyKey );
}
@Override
public InternalIndexState indexGetState( KernelStatement state, IndexDescriptor descriptor )
throws IndexNotFoundKernelException
{
return indexingService.getProxyForRule( schemaCache.indexId( descriptor ) ).getState();
}
@Override
public String indexGetFailure( Statement state, IndexDescriptor descriptor ) throws IndexNotFoundKernelException
{
return diskLayer.indexGetFailure( descriptor );
}
@Override
public int labelGetForName( String labelName )
{
return diskLayer.labelGetForName( labelName );
}
@Override
public String labelGetName( int labelId ) throws LabelNotFoundKernelException
{
return diskLayer.labelGetName( labelId );
}
@Override
public int propertyKeyGetForName( String propertyKeyName )
{
return diskLayer.propertyKeyGetForName( propertyKeyName );
}
@Override
public int propertyKeyGetOrCreateForName( String propertyKeyName )
{
return diskLayer.propertyKeyGetOrCreateForName( propertyKeyName );
}
@Override
public String propertyKeyGetName( int propertyKeyId ) throws PropertyKeyIdNotFoundKernelException
{
return diskLayer.propertyKeyGetName( propertyKeyId );
}
@Override
public Iterator<Token> propertyKeyGetAllTokens()
{
return diskLayer.propertyKeyGetAllTokens().iterator();
}
@Override
public Iterator<Token> labelsGetAllTokens()
{
return diskLayer.labelGetAllTokens().iterator();
}
@Override
public int relationshipTypeGetForName( String relationshipTypeName )
{
return diskLayer.relationshipTypeGetForName( relationshipTypeName );
}
@Override
public String relationshipTypeGetName( int relationshipTypeId ) throws RelationshipTypeIdNotFoundKernelException
{
return diskLayer.relationshipTypeGetName( relationshipTypeId );
}
@Override
public int labelGetOrCreateForName( String labelName ) throws TooManyLabelsException
{
return diskLayer.labelGetOrCreateForName( labelName );
}
@Override
public int relationshipTypeGetOrCreateForName( String relationshipTypeName )
{
return diskLayer.relationshipTypeGetOrCreateForName( relationshipTypeName );
}
} | 1no label
| community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_CacheLayer.java |
1,181 | public interface PaymentResponseItem extends Serializable {
public String getAuthorizationCode();
public void setAuthorizationCode(String authorizationCode);
public String getMiddlewareResponseCode();
public void setMiddlewareResponseCode(String middlewareResponseCode);
public String getMiddlewareResponseText();
public void setMiddlewareResponseText(String middlewareResponseText);
public String getProcessorResponseCode();
public void setProcessorResponseCode(String processorResponseCode);
public String getProcessorResponseText();
public void setProcessorResponseText(String processorResponseText);
public String getReferenceNumber();
public void setReferenceNumber(String referenceNumber);
/**
* @deprecated
* @see #getTransactionAmount()
*/
public Money getAmountPaid();
/**
* @deprecated Use setTransactionAmount() instead.
* @see #setTransactionAmount(org.broadleafcommerce.common.money.Money)
*/
public void setAmountPaid(Money amount);
/**
* The amount that the system processed. For example, when submitting an order, this would be order.getTotal().
* If refunding $10, this would be 10.
*
* @return
*/
public Money getTransactionAmount();
/**
* Sets the transaction amount.
*
* @param amount
*/
public void setTransactionAmount(Money amount);
public Boolean getTransactionSuccess();
public void setTransactionSuccess(Boolean transactionSuccess);
public Date getTransactionTimestamp();
public void setTransactionTimestamp(Date transactionTimestamp);
public String getImplementorResponseCode();
public void setImplementorResponseCode(String implementorResponseCode);
public String getImplementorResponseText();
public void setImplementorResponseText(String implementorResponseText);
public String getTransactionId();
public void setTransactionId(String transactionId);
public String getAvsCode();
public void setAvsCode(String avsCode);
public String getCvvCode();
public void setCvvCode(String cvvCode);
// TODO: Rename to getRemainingTransactionAmount
public Money getRemainingBalance();
public void setRemainingBalance(Money remainingBalance);
public TransactionType getTransactionType();
public void setTransactionType(TransactionType transactionType);
public Map<String, String> getAdditionalFields();
public void setAdditionalFields(Map<String, String> additionalFields);
public Long getPaymentInfoId();
public void setPaymentInfoId(Long paymentInfoId);
public String getUserName();
public void setUserName(String userName);
public Customer getCustomer();
public void setCustomer(Customer customer);
public String getPaymentInfoReferenceNumber();
public void setPaymentInfoReferenceNumber(String paymentInfoReferenceNumber);
public void setCurrency(BroadleafCurrency currency);
public BroadleafCurrency getCurrency();
public void setPaymentInfo(PaymentInfo paymentInfo);
public PaymentInfo getPaymentInfo();
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_payment_domain_PaymentResponseItem.java |
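A minimal usage sketch for the interface above: populating the core transaction fields after a gateway call. The PaymentResponseItemImpl class and the literal values are assumptions, not taken from this source.
// Sketch only; PaymentResponseItemImpl and the values below are illustrative.
PaymentResponseItem item = new PaymentResponseItemImpl();
item.setTransactionType(TransactionType.AUTHORIZE);
item.setTransactionAmount(new Money("10.00"));
item.setTransactionSuccess(Boolean.TRUE);
item.setTransactionTimestamp(new Date());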
1,438 | public static class Factory implements MetaData.Custom.Factory<RepositoriesMetaData> {
/**
* {@inheritDoc}
*/
@Override
public String type() {
return TYPE;
}
/**
* {@inheritDoc}
*/
@Override
public RepositoriesMetaData readFrom(StreamInput in) throws IOException {
RepositoryMetaData[] repository = new RepositoryMetaData[in.readVInt()];
for (int i = 0; i < repository.length; i++) {
repository[i] = RepositoryMetaData.readFrom(in);
}
return new RepositoriesMetaData(repository);
}
/**
* {@inheritDoc}
*/
@Override
public void writeTo(RepositoriesMetaData repositories, StreamOutput out) throws IOException {
out.writeVInt(repositories.repositories().size());
for (RepositoryMetaData repository : repositories.repositories()) {
repository.writeTo(out);
}
}
/**
* {@inheritDoc}
*/
@Override
public RepositoriesMetaData fromXContent(XContentParser parser) throws IOException {
XContentParser.Token token;
List<RepositoryMetaData> repository = new ArrayList<RepositoryMetaData>();
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
String name = parser.currentName();
if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
throw new ElasticsearchParseException("failed to parse repository [" + name + "], expected object");
}
String type = null;
Settings settings = ImmutableSettings.EMPTY;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
String currentFieldName = parser.currentName();
if ("type".equals(currentFieldName)) {
if (parser.nextToken() != XContentParser.Token.VALUE_STRING) {
throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown type");
}
type = parser.text();
} else if ("settings".equals(currentFieldName)) {
if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
throw new ElasticsearchParseException("failed to parse repository [" + name + "], incompatible params");
}
settings = ImmutableSettings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).build();
} else {
throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown field [" + currentFieldName + "]");
}
} else {
throw new ElasticsearchParseException("failed to parse repository [" + name + "]");
}
}
if (type == null) {
throw new ElasticsearchParseException("failed to parse repository [" + name + "], missing repository type");
}
repository.add(new RepositoryMetaData(name, type, settings));
} else {
throw new ElasticsearchParseException("failed to parse repositories");
}
}
return new RepositoriesMetaData(repository.toArray(new RepositoryMetaData[repository.size()]));
}
/**
* {@inheritDoc}
*/
@Override
public void toXContent(RepositoriesMetaData customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
for (RepositoryMetaData repository : customIndexMetaData.repositories()) {
toXContent(repository, builder, params);
}
}
/**
* Serializes information about a single repository
*
* @param repository repository metadata
* @param builder XContent builder
* @param params serialization parameters
* @throws IOException
*/
public void toXContent(RepositoryMetaData repository, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject(repository.name(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("type", repository.type());
builder.startObject("settings");
for (Map.Entry<String, String> settingEntry : repository.settings().getAsMap().entrySet()) {
builder.field(settingEntry.getKey(), settingEntry.getValue());
}
builder.endObject();
builder.endObject();
}
/**
* {@inheritDoc}
*/
@Override
public boolean isPersistent() {
return true;
}
} | 0true
| src_main_java_org_elasticsearch_cluster_metadata_RepositoriesMetaData.java |
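A sketch, under stated assumptions, of emitting one entry with the factory above; the repository name, type, and location setting are illustrative, and exception handling is omitted.
// Illustrative round trip through toXContent(); name/type/settings are made up.
XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
RepositoryMetaData repo = new RepositoryMetaData("my-backup", "fs",
        ImmutableSettings.settingsBuilder().put("location", "/mnt/backups").build());
new RepositoriesMetaData.Factory().toXContent(repo, builder, ToXContent.EMPTY_PARAMS);
builder.endObject();
// Produces: "my-backup" : { "type" : "fs", "settings" : { "location" : "/mnt/backups" } }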
283 | public class OCommandScriptException extends OException {
private String text;
private int position;
private static final long serialVersionUID = -7430575036316163711L;
public OCommandScriptException(String iMessage) {
super(iMessage, null);
}
public OCommandScriptException(String iMessage, Throwable cause) {
super(iMessage, cause);
}
public OCommandScriptException(String iMessage, String iText, int iPosition, Throwable cause) {
super(iMessage, cause);
text = iText;
position = iPosition < 0 ? 0 : iPosition;
}
public OCommandScriptException(String iMessage, String iText, int iPosition) {
super(iMessage);
text = iText;
position = iPosition < 0 ? 0 : iPosition;
}
@Override
public String getMessage() {
if (text == null)
return super.getMessage();
final StringBuilder buffer = new StringBuilder();
buffer.append("Error on parsing script at position #");
buffer.append(position);
buffer.append(": " + super.getMessage());
buffer.append("\nScript: ");
buffer.append(text);
buffer.append("\n------");
for (int i = 0; i < position - 1; ++i)
buffer.append("-");
buffer.append("^");
return buffer.toString();
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_command_script_OCommandScriptException.java |
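A sketch of the pointer-style message produced by getMessage() above; the script text and position are made up for illustration.
OCommandScriptException e =
    new OCommandScriptException("unexpected token", "select form X", 7);
System.out.println(e.getMessage());
// Prints, per the buffer logic above ("------" plus position-1 dashes, then "^"):
// Error on parsing script at position #7: unexpected token
// Script: select form X
// ------------^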
50 | @Component("blTimeDTOCustomPersistenceHandler")
public class TimeDTOCustomPersistenceHandler extends CustomPersistenceHandlerAdapter {
private static final Log LOG = LogFactory.getLog(TimeDTOCustomPersistenceHandler.class);
@Override
public Boolean canHandleFetch(PersistencePackage persistencePackage) {
return canHandleInspect(persistencePackage);
}
@Override
public Boolean canHandleAdd(PersistencePackage persistencePackage) {
return canHandleInspect(persistencePackage);
}
@Override
public Boolean canHandleRemove(PersistencePackage persistencePackage) {
return canHandleInspect(persistencePackage);
}
@Override
public Boolean canHandleUpdate(PersistencePackage persistencePackage) {
return canHandleInspect(persistencePackage);
}
@Override
public Boolean canHandleInspect(PersistencePackage persistencePackage) {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
return TimeDTO.class.getName().equals(ceilingEntityFullyQualifiedClassname);
}
@Override
public DynamicResultSet inspect(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, InspectHelper helper) throws ServiceException {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
try {
Map<MergedPropertyType, Map<String, FieldMetadata>> allMergedProperties = new HashMap<MergedPropertyType, Map<String, FieldMetadata>>();
Map<String, FieldMetadata> mergedProperties = dynamicEntityDao.getSimpleMergedProperties(ceilingEntityFullyQualifiedClassname, persistencePackage.getPersistencePerspective());
allMergedProperties.put(MergedPropertyType.PRIMARY, mergedProperties);
ClassMetadata mergedMetadata = helper.getMergedClassMetadata(new Class<?>[]{Class.forName(ceilingEntityFullyQualifiedClassname)}, allMergedProperties);
DynamicResultSet results = new DynamicResultSet(mergedMetadata);
return results;
} catch (Exception e) {
ServiceException ex = new ServiceException("Unable to retrieve inspection results for " + persistencePackage.getCeilingEntityFullyQualifiedClassname(), e);
throw ex;
}
}
} | 0true
| admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_admin_server_handler_TimeDTOCustomPersistenceHandler.java |
1,748 | public static class Issue1764Data implements DataSerializable {
public static AtomicInteger serializationCount = new AtomicInteger();
public static AtomicInteger deserializationCount = new AtomicInteger();
private String attr1;
private String attr2;
public Issue1764Data() {
//For deserialization...
}
public Issue1764Data(String attr1, String attr2) {
this.attr1 = attr1;
this.attr2 = attr2;
}
public String getAttr1() {
return attr1;
}
public void setAttr1(String attr1) {
this.attr1 = attr1;
}
public String getAttr2() {
return attr2;
}
public void setAttr2(String attr2) {
this.attr2 = attr2;
}
@Override
public String toString() {
return "[" + attr1 + " " + attr2 + "]";
}
public void writeData(ObjectDataOutput out) throws IOException {
serializationCount.incrementAndGet();
out.writeObject(attr1);
out.writeObject(attr2);
}
public void readData(ObjectDataInput in) throws IOException {
attr1 = in.readObject();
attr2 = in.readObject();
deserializationCount.incrementAndGet();
}
} | 0true
| hazelcast_src_test_java_com_hazelcast_map_EntryProcessorTest.java |
143 | @Test
public class FloatSerializerTest {
private static final int FIELD_SIZE = 4;
private static final Float OBJECT = 3.14f;
private OFloatSerializer floatSerializer;
byte[] stream = new byte[FIELD_SIZE];
@BeforeClass
public void beforeClass() {
floatSerializer = new OFloatSerializer();
}
public void testFieldSize() {
Assert.assertEquals(floatSerializer.getObjectSize(null), FIELD_SIZE);
}
public void testSerialize() {
floatSerializer.serialize(OBJECT, stream, 0);
Assert.assertEquals(floatSerializer.deserialize(stream, 0), OBJECT);
}
public void testSerializeNative() {
floatSerializer.serializeNative(OBJECT, stream, 0);
Assert.assertEquals(floatSerializer.deserializeNative(stream, 0), OBJECT);
}
public void testNativeDirectMemoryCompatibility() {
floatSerializer.serializeNative(OBJECT, stream, 0);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(stream);
try {
Assert.assertEquals(floatSerializer.deserializeFromDirectMemory(pointer, 0), OBJECT);
} finally {
pointer.free();
}
}
} | 0true
| commons_src_test_java_com_orientechnologies_common_serialization_types_FloatSerializerTest.java |
658 | constructors[LIST_ADD_ALL] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
public IdentifiedDataSerializable createNew(Integer arg) {
return new ListAddAllOperation();
}
}; | 0true
| hazelcast_src_main_java_com_hazelcast_collection_CollectionDataSerializerHook.java |
230 | @PreInitializeConfigOptions
public abstract class AbstractCassandraStoreManager extends DistributedStoreManager implements KeyColumnValueStoreManager {
public enum Partitioner {
RANDOM, BYTEORDER;
public static Partitioner getPartitioner(IPartitioner<?> partitioner) {
return getPartitioner(partitioner.getClass().getSimpleName());
}
public static Partitioner getPartitioner(String className) {
if (className.endsWith("RandomPartitioner") || className.endsWith("Murmur3Partitioner"))
return Partitioner.RANDOM;
else if (className.endsWith("ByteOrderedPartitioner")) return Partitioner.BYTEORDER;
else throw new IllegalArgumentException("Unsupported partitioner: " + className);
}
}
//################### CASSANDRA SPECIFIC CONFIGURATION OPTIONS ######################
public static final ConfigNamespace CASSANDRA_NS =
new ConfigNamespace(GraphDatabaseConfiguration.STORAGE_NS, "cassandra", "Cassandra storage backend options");
public static final ConfigOption<String> CASSANDRA_KEYSPACE =
new ConfigOption<String>(CASSANDRA_NS, "keyspace",
"The name of Titan's keyspace. It will be created if it does not exist.",
ConfigOption.Type.LOCAL, "titan");
// Consistency Levels and Atomic Batch
public static final ConfigOption<String> CASSANDRA_READ_CONSISTENCY =
new ConfigOption<String>(CASSANDRA_NS, "read-consistency-level",
"The consistency level of read operations against Cassandra",
ConfigOption.Type.MASKABLE, "QUORUM");
public static final ConfigOption<String> CASSANDRA_WRITE_CONSISTENCY =
new ConfigOption<String>(CASSANDRA_NS, "write-consistency-level",
"The consistency level of write operations against Cassandra",
ConfigOption.Type.MASKABLE, "QUORUM");
public static final ConfigOption<Boolean> ATOMIC_BATCH_MUTATE =
new ConfigOption<Boolean>(CASSANDRA_NS, "atomic-batch-mutate",
"True to use Cassandra atomic batch mutation, false to use non-atomic batches",
ConfigOption.Type.MASKABLE, true);
// Replication
public static final ConfigOption<Integer> REPLICATION_FACTOR =
new ConfigOption<Integer>(CASSANDRA_NS, "replication-factor",
"The number of data replicas (including the original copy) that should be kept. " +
"This is only meaningful for storage backends that natively support data replication.",
ConfigOption.Type.GLOBAL_OFFLINE, 1);
public static final ConfigOption<String> REPLICATION_STRATEGY =
new ConfigOption<String>(CASSANDRA_NS, "replication-strategy-class",
"The replication strategy to use for Titan keyspace",
ConfigOption.Type.FIXED, "org.apache.cassandra.locator.SimpleStrategy");
public static final ConfigOption<String[]> REPLICATION_OPTIONS =
new ConfigOption<String[]>(CASSANDRA_NS, "replication-strategy-options",
"Replication strategy options, e.g. factor or replicas per datacenter. This list is interpreted as a " +
"map. It must have an even number of elements in [key,val,key,val,...] form. A replication_factor set " +
"here takes precedence over one set with " + ConfigElement.getPath(REPLICATION_FACTOR),
ConfigOption.Type.FIXED, String[].class);
// Compression
public static final ConfigOption<Boolean> CF_COMPRESSION =
new ConfigOption<Boolean>(CASSANDRA_NS, "compression",
"Whether the storage backend should use compression when storing the data", ConfigOption.Type.FIXED, true);
public static final ConfigOption<String> CF_COMPRESSION_TYPE =
new ConfigOption<String>(CASSANDRA_NS, "compression-type",
"The sstable_compression value Titan uses when creating column families. " +
"This accepts any value allowed by Cassandra's sstable_compression option. " +
"Leave this unset to disable sstable_compression on Titan-created CFs.",
ConfigOption.Type.MASKABLE, "LZ4Compressor");
public static final ConfigOption<Integer> CF_COMPRESSION_BLOCK_SIZE =
new ConfigOption<Integer>(CASSANDRA_NS, "compression-block-size",
"The size of the compression blocks in kilobytes", ConfigOption.Type.FIXED, 64);
// SSL
public static final ConfigNamespace SSL_NS =
new ConfigNamespace(CASSANDRA_NS, "ssl", "Configuration options for SSL");
public static final ConfigNamespace SSL_TRUSTSTORE_NS =
new ConfigNamespace(SSL_NS, "truststore", "Configuration options for SSL Truststore.");
public static final ConfigOption<Boolean> SSL_ENABLED =
new ConfigOption<Boolean>(SSL_NS, "enabled",
"Controls use of the SSL connection to Cassandra", ConfigOption.Type.LOCAL, false);
public static final ConfigOption<String> SSL_TRUSTSTORE_LOCATION =
new ConfigOption<String>(SSL_TRUSTSTORE_NS, "location",
"Marks the location of the SSL Truststore.", ConfigOption.Type.LOCAL, "");
public static final ConfigOption<String> SSL_TRUSTSTORE_PASSWORD =
new ConfigOption<String>(SSL_TRUSTSTORE_NS, "password",
"The password to access SSL Truststore.", ConfigOption.Type.LOCAL, "");
/**
* The default Thrift port used by Cassandra. Set
* {@link GraphDatabaseConfiguration#STORAGE_PORT} to override.
* <p>
* Value = {@value}
*/
public static final int PORT_DEFAULT = 9160;
public static final String SYSTEM_KS = "system";
protected final String keySpaceName;
protected final Map<String, String> strategyOptions;
protected final boolean compressionEnabled;
protected final int compressionChunkSizeKB;
protected final String compressionClass;
protected final boolean atomicBatch;
private volatile StoreFeatures features = null;
private Partitioner partitioner = null;
private static final Logger log =
LoggerFactory.getLogger(AbstractCassandraStoreManager.class);
public AbstractCassandraStoreManager(Configuration config) {
super(config, PORT_DEFAULT);
this.keySpaceName = config.get(CASSANDRA_KEYSPACE);
this.compressionEnabled = config.get(CF_COMPRESSION);
this.compressionChunkSizeKB = config.get(CF_COMPRESSION_BLOCK_SIZE);
this.compressionClass = config.get(CF_COMPRESSION_TYPE);
this.atomicBatch = config.get(ATOMIC_BATCH_MUTATE);
// SSL truststore location sanity check
if (config.get(SSL_ENABLED) && config.get(SSL_TRUSTSTORE_LOCATION).isEmpty())
            throw new IllegalArgumentException(SSL_TRUSTSTORE_LOCATION.getName() + " cannot be empty when SSL is enabled.");
if (config.has(REPLICATION_OPTIONS)) {
String[] options = config.get(REPLICATION_OPTIONS);
if (options.length % 2 != 0)
                throw new IllegalArgumentException(REPLICATION_OPTIONS.getName() + " should have an even number of elements.");
Map<String, String> converted = new HashMap<String, String>(options.length / 2);
for (int i = 0; i < options.length; i += 2) {
converted.put(options[i], options[i + 1]);
}
this.strategyOptions = ImmutableMap.copyOf(converted);
} else {
this.strategyOptions = ImmutableMap.of("replication_factor", String.valueOf(config.get(REPLICATION_FACTOR)));
}
}
public final Partitioner getPartitioner() {
if (partitioner == null) {
try {
partitioner = Partitioner.getPartitioner(getCassandraPartitioner());
} catch (BackendException e) {
throw new TitanException("Could not connect to Cassandra to read partitioner information. Please check the connection", e);
}
}
assert partitioner != null;
return partitioner;
}
public abstract IPartitioner<? extends Token<?>> getCassandraPartitioner() throws BackendException;
@Override
public StoreTransaction beginTransaction(final BaseTransactionConfig config) {
return new CassandraTransaction(config);
}
@Override
public String toString() {
return "[" + keySpaceName + "@" + super.toString() + "]";
}
@Override
public StoreFeatures getFeatures() {
if (features == null) {
Configuration global = GraphDatabaseConfiguration.buildConfiguration()
.set(CASSANDRA_READ_CONSISTENCY, "QUORUM")
.set(CASSANDRA_WRITE_CONSISTENCY, "QUORUM")
.set(METRICS_PREFIX, GraphDatabaseConfiguration.METRICS_SYSTEM_PREFIX_DEFAULT);
Configuration local = GraphDatabaseConfiguration.buildConfiguration()
.set(CASSANDRA_READ_CONSISTENCY, "LOCAL_QUORUM")
.set(CASSANDRA_WRITE_CONSISTENCY, "LOCAL_QUORUM")
.set(METRICS_PREFIX, GraphDatabaseConfiguration.METRICS_SYSTEM_PREFIX_DEFAULT);
StandardStoreFeatures.Builder fb = new StandardStoreFeatures.Builder();
fb.batchMutation(true).distributed(true);
fb.timestamps(true).cellTTL(true);
fb.keyConsistent(global, local);
boolean keyOrdered;
switch (getPartitioner()) {
case RANDOM:
keyOrdered = false;
fb.keyOrdered(keyOrdered).orderedScan(false).unorderedScan(true);
break;
case BYTEORDER:
keyOrdered = true;
fb.keyOrdered(keyOrdered).orderedScan(true).unorderedScan(false);
break;
default:
throw new IllegalArgumentException("Unrecognized partitioner: " + getPartitioner());
}
switch (getDeployment()) {
case REMOTE:
fb.multiQuery(true);
break;
case LOCAL:
fb.multiQuery(true).localKeyPartition(keyOrdered);
break;
case EMBEDDED:
fb.multiQuery(false).localKeyPartition(keyOrdered);
break;
default:
throw new IllegalArgumentException("Unrecognized deployment mode: " + getDeployment());
}
features = fb.build();
}
return features;
}
/**
* Returns a map of compression options for the column family {@code cf}.
* The contents of the returned map must be identical to the contents of the
* map returned by
* {@link org.apache.cassandra.thrift.CfDef#getCompression_options()}, even
* for implementations of this method that don't use Thrift.
*
* @param cf the name of the column family for which to return compression
* options
* @return map of compression option names to compression option values
* @throws com.thinkaurelius.titan.diskstorage.BackendException if reading from Cassandra fails
*/
public abstract Map<String, String> getCompressionOptions(String cf) throws BackendException;
public String getName() {
return getClass().getSimpleName() + keySpaceName;
}
} | 0true
| titan-cassandra_src_main_java_com_thinkaurelius_titan_diskstorage_cassandra_AbstractCassandraStoreManager.java |
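A short sketch of the flat [key,val,key,val,...] shape that REPLICATION_OPTIONS above expects; the datacenter names and replica counts are illustrative.
// Parsed by the constructor above into the strategy-options map {dc1=3, dc2=2};
// an odd-length array would trigger the IllegalArgumentException shown there.
String[] replicationOptions = { "dc1", "3", "dc2", "2" };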
3,221 | return new LongValues(values.isMultiValued()) {
@Override
public int setDocument(int docId) {
return values.setDocument(docId);
}
@Override
public long nextValue() {
return (long) values.nextValue();
}
}; | 0true
| src_main_java_org_elasticsearch_index_fielddata_LongValues.java |
708 | public static class Name {
public static final String Marketing = "ProductImpl_Marketing_Tab";
public static final String Media = "SkuImpl_Media_Tab";
public static final String ProductOptions = "ProductImpl_Product_Options_Tab";
public static final String Inventory = "ProductImpl_Inventory_Tab";
public static final String Shipping = "ProductImpl_Shipping_Tab";
public static final String Advanced = "ProductImpl_Advanced_Tab";
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_catalog_domain_ProductImpl.java |
2,127 | public class LogConfigurator {
private static boolean loaded;
private static ImmutableMap<String, String> replacements = new MapBuilder<String, String>()
.put("console", "org.elasticsearch.common.logging.log4j.ConsoleAppender")
.put("async", "org.apache.log4j.AsyncAppender")
.put("dailyRollingFile", "org.apache.log4j.DailyRollingFileAppender")
.put("externallyRolledFile", "org.apache.log4j.ExternallyRolledFileAppender")
.put("file", "org.apache.log4j.FileAppender")
.put("jdbc", "org.apache.log4j.jdbc.JDBCAppender")
.put("jms", "org.apache.log4j.net.JMSAppender")
.put("lf5", "org.apache.log4j.lf5.LF5Appender")
.put("ntevent", "org.apache.log4j.nt.NTEventLogAppender")
.put("null", "org.apache.log4j.NullAppender")
.put("rollingFile", "org.apache.log4j.RollingFileAppender")
.put("smtp", "org.apache.log4j.net.SMTPAppender")
.put("socket", "org.apache.log4j.net.SocketAppender")
.put("socketHub", "org.apache.log4j.net.SocketHubAppender")
.put("syslog", "org.apache.log4j.net.SyslogAppender")
.put("telnet", "org.apache.log4j.net.TelnetAppender")
// layouts
.put("simple", "org.apache.log4j.SimpleLayout")
.put("html", "org.apache.log4j.HTMLLayout")
.put("pattern", "org.apache.log4j.PatternLayout")
.put("consolePattern", "org.apache.log4j.PatternLayout")
.put("ttcc", "org.apache.log4j.TTCCLayout")
.put("xml", "org.apache.log4j.XMLLayout")
.immutableMap();
public static void configure(Settings settings) {
if (loaded) {
return;
}
loaded = true;
Environment environment = new Environment(settings);
ImmutableSettings.Builder settingsBuilder = settingsBuilder().put(settings);
try {
settingsBuilder.loadFromUrl(environment.resolveConfig("logging.yml"));
} catch (FailedToResolveConfigException e) {
// ignore
} catch (NoClassDefFoundError e) {
// ignore, no yaml
}
try {
settingsBuilder.loadFromUrl(environment.resolveConfig("logging.json"));
} catch (FailedToResolveConfigException e) {
// ignore
}
try {
settingsBuilder.loadFromUrl(environment.resolveConfig("logging.properties"));
} catch (FailedToResolveConfigException e) {
// ignore
}
settingsBuilder
.putProperties("elasticsearch.", System.getProperties())
.putProperties("es.", System.getProperties())
.replacePropertyPlaceholders();
Properties props = new Properties();
for (Map.Entry<String, String> entry : settingsBuilder.build().getAsMap().entrySet()) {
String key = "log4j." + entry.getKey();
String value = entry.getValue();
if (replacements.containsKey(value)) {
value = replacements.get(value);
}
if (key.endsWith(".value")) {
props.setProperty(key.substring(0, key.length() - ".value".length()), value);
} else if (key.endsWith(".type")) {
props.setProperty(key.substring(0, key.length() - ".type".length()), value);
} else {
props.setProperty(key, value);
}
}
PropertyConfigurator.configure(props);
}
} | 0true
| src_main_java_org_elasticsearch_common_logging_log4j_LogConfigurator.java |
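A sketch of the rewrite the loop above performs for a single settings entry; the appender name "console" is an assumption.
// For an entry "appender.console.type = console":
Properties props = new Properties();
String key = "log4j." + "appender.console.type";  // "log4j." prefix added by the loop
String value = "org.elasticsearch.common.logging.log4j.ConsoleAppender"; // "console" mapped via replacements
props.setProperty(key.substring(0, key.length() - ".type".length()), value);
// props now holds: log4j.appender.console = org.elasticsearch.common.logging.log4j.ConsoleAppender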
1,564 | @XmlRootElement(name = "parameter")
@XmlType(propOrder = { "value", "name" })
public class OServerParameterConfiguration {
@XmlAttribute
public String name;
@XmlAttribute
public String value;
public OServerParameterConfiguration() {
}
public OServerParameterConfiguration(final String iName, final String iValue) {
name = iName;
value = iValue;
}
@Override
public String toString() {
return name + "=" + value;
}
} | 0true
| server_src_main_java_com_orientechnologies_orient_server_config_OServerParameterConfiguration.java |
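Given the @XmlAttribute mappings above, a parameter marshals to a single XML element; the name and value here are illustrative.
OServerParameterConfiguration param =
    new OServerParameterConfiguration("log.console.level", "info");
// Marshalled form: <parameter name="log.console.level" value="info"/>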
7 | static final class ApplyToEither<T,U> extends Completion {
final CompletableFuture<? extends T> src;
final CompletableFuture<? extends T> snd;
final Fun<? super T,? extends U> fn;
final CompletableFuture<U> dst;
final Executor executor;
ApplyToEither(CompletableFuture<? extends T> src,
CompletableFuture<? extends T> snd,
Fun<? super T,? extends U> fn,
CompletableFuture<U> dst,
Executor executor) {
this.src = src; this.snd = snd;
this.fn = fn; this.dst = dst;
this.executor = executor;
}
public final void run() {
final CompletableFuture<? extends T> a;
final CompletableFuture<? extends T> b;
final Fun<? super T,? extends U> fn;
final CompletableFuture<U> dst;
Object r; T t; Throwable ex;
if ((dst = this.dst) != null &&
(fn = this.fn) != null &&
(((a = this.src) != null && (r = a.result) != null) ||
((b = this.snd) != null && (r = b.result) != null)) &&
compareAndSet(0, 1)) {
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
Executor e = executor;
U u = null;
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncApply<T,U>(t, fn, dst));
else
u = fn.apply(t);
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(u, ex);
}
}
private static final long serialVersionUID = 5232453952276885070L;
} | 0true
| src_main_java_jsr166e_CompletableFuture.java |
163 | return executeRead(new Callable<KeyIterator>() {
@Override
public KeyIterator call() throws Exception {
return (storeFeatures.isKeyOrdered())
? edgeStore.getKeys(new KeyRangeQuery(EDGESTORE_MIN_KEY, EDGESTORE_MAX_KEY, sliceQuery), storeTx)
: edgeStore.getKeys(sliceQuery, storeTx);
}
@Override
public String toString() {
return "EdgeStoreKeys";
}
}); | 0true
| titan-core_src_main_java_com_thinkaurelius_titan_diskstorage_BackendTransaction.java |
1,994 | @Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_PHONE")
@Cache(usage=CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blOrderElements")
public class PhoneImpl implements Phone {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "PhoneId")
@GenericGenerator(
name="PhoneId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="PhoneImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.profile.core.domain.PhoneImpl")
}
)
@Column(name = "PHONE_ID")
protected Long id;
@Column(name = "PHONE_NUMBER", nullable=false)
@AdminPresentation(friendlyName = "PhoneImpl_Phone_Number", order=1, group = "PhoneImpl_Phone")
protected String phoneNumber;
@Column(name = "IS_DEFAULT")
@AdminPresentation(friendlyName = "PhoneImpl_Default_Phone", order=2, group = "PhoneImpl_Phone")
protected boolean isDefault = false;
@Column(name = "IS_ACTIVE")
@AdminPresentation(friendlyName = "PhoneImpl_Active_Phone", order=3, group = "PhoneImpl_Phone")
protected boolean isActive = true;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public String getPhoneNumber() {
return phoneNumber;
}
@Override
public void setPhoneNumber(String phoneNumber) {
this.phoneNumber = phoneNumber;
}
@Override
public boolean isDefault() {
return isDefault;
}
@Override
public void setDefault(boolean isDefault) {
this.isDefault = isDefault;
}
@Override
public boolean isActive() {
return isActive;
}
@Override
public void setActive(boolean isActive) {
this.isActive = isActive;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + (isActive ? 1231 : 1237);
result = prime * result + (isDefault ? 1231 : 1237);
result = prime * result + ((phoneNumber == null) ? 0 : phoneNumber.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
PhoneImpl other = (PhoneImpl) obj;
if (id != null && other.id != null) {
return id.equals(other.id);
}
if (isActive != other.isActive)
return false;
if (isDefault != other.isDefault)
return false;
if (phoneNumber == null) {
if (other.phoneNumber != null)
return false;
} else if (!phoneNumber.equals(other.phoneNumber))
return false;
return true;
}
} | 1no label
| core_broadleaf-profile_src_main_java_org_broadleafcommerce_profile_core_domain_PhoneImpl.java |
1,995 | assertTrueEventually(new AssertTask() {
@Override
public void run() throws Exception {
assertEquals(0, map.size());
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_map_mapstore_MapStoreTest.java |
137 | public final class ClientTypes {
public static final String JAVA = "JVM";
public static final String CSHARP = "CSP";
public static final String CPP = "CPP";
public static final String PYTHON = "PHY";
public static final String RUBY = "RBY";
private ClientTypes() {
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_client_ClientTypes.java |
1,653 | public class PersistencePackageRequest {
protected Type type;
protected String ceilingEntityClassname;
protected String configKey;
protected AdornedTargetList adornedList;
protected MapStructure mapStructure;
protected Entity entity;
protected ForeignKey foreignKey;
protected Integer startIndex;
protected Integer maxIndex;
protected Map<String, PersistencePackageRequest> subRequests = new LinkedHashMap<String, PersistencePackageRequest>();
protected boolean validateUnsubmittedProperties = true;
protected OperationTypes operationTypesOverride = null;
// These properties are accessed via getters and setters that operate on arrays.
// We back them with a list so that we can have the convenience .add methods
protected List<ForeignKey> additionalForeignKeys = new ArrayList<ForeignKey>();
protected List<String> customCriteria = new ArrayList<String>();
protected List<FilterAndSortCriteria> filterAndSortCriteria = new ArrayList<FilterAndSortCriteria>();
public enum Type {
STANDARD,
ADORNED,
MAP
}
/* ******************* */
/* STATIC INITIALIZERS */
/* ******************* */
public static PersistencePackageRequest standard() {
return new PersistencePackageRequest(Type.STANDARD);
}
public static PersistencePackageRequest adorned() {
return new PersistencePackageRequest(Type.ADORNED);
}
public static PersistencePackageRequest map() {
return new PersistencePackageRequest(Type.MAP);
}
/**
     * Creates a semi-populated PersistencePackageRequest based on the specified FieldMetadata. This initializer
* will copy over persistence perspective items from the metadata as well as set the appropriate OperationTypes
* as specified in the annotation/xml configuration for the field.
*
* @param md
* @return the newly created PersistencePackageRequest
*/
public static PersistencePackageRequest fromMetadata(FieldMetadata md) {
final PersistencePackageRequest request = new PersistencePackageRequest();
md.accept(new MetadataVisitor() {
@Override
public void visit(BasicFieldMetadata fmd) {
request.setType(Type.STANDARD);
request.setCeilingEntityClassname(fmd.getForeignKeyClass());
}
@Override
public void visit(BasicCollectionMetadata fmd) {
ForeignKey foreignKey = (ForeignKey) fmd.getPersistencePerspective()
.getPersistencePerspectiveItems().get(PersistencePerspectiveItemType.FOREIGNKEY);
request.setType(Type.STANDARD);
request.setCeilingEntityClassname(fmd.getCollectionCeilingEntity());
request.setOperationTypesOverride(fmd.getPersistencePerspective().getOperationTypes());
request.setForeignKey(foreignKey);
}
@Override
public void visit(AdornedTargetCollectionMetadata fmd) {
AdornedTargetList adornedList = (AdornedTargetList) fmd.getPersistencePerspective()
.getPersistencePerspectiveItems().get(PersistencePerspectiveItemType.ADORNEDTARGETLIST);
request.setType(Type.ADORNED);
request.setCeilingEntityClassname(fmd.getCollectionCeilingEntity());
request.setOperationTypesOverride(fmd.getPersistencePerspective().getOperationTypes());
request.setAdornedList(adornedList);
}
@Override
public void visit(MapMetadata fmd) {
MapStructure mapStructure = (MapStructure) fmd.getPersistencePerspective()
.getPersistencePerspectiveItems().get(PersistencePerspectiveItemType.MAPSTRUCTURE);
ForeignKey foreignKey = (ForeignKey) fmd.getPersistencePerspective().
getPersistencePerspectiveItems().get(PersistencePerspectiveItemType.FOREIGNKEY);
request.setType(Type.MAP);
request.setCeilingEntityClassname(foreignKey.getForeignKeyClass());
request.setOperationTypesOverride(fmd.getPersistencePerspective().getOperationTypes());
request.setMapStructure(mapStructure);
request.setForeignKey(foreignKey);
}
});
if (md instanceof CollectionMetadata) {
request.setCustomCriteria(((CollectionMetadata) md).getCustomCriteria());
}
return request;
}
/* ************ */
/* CONSTRUCTORS */
/* ************ */
public PersistencePackageRequest() {
}
public PersistencePackageRequest(Type type) {
this.type = type;
}
/* ************ */
/* WITH METHODS */
/* ************ */
public PersistencePackageRequest withType(Type type) {
setType(type);
return this;
}
public PersistencePackageRequest withCeilingEntityClassname(String className) {
setCeilingEntityClassname(className);
return this;
}
public PersistencePackageRequest withForeignKey(ForeignKey foreignKey) {
setForeignKey(foreignKey);
return this;
}
public PersistencePackageRequest withConfigKey(String configKey) {
setConfigKey(configKey);
return this;
}
public PersistencePackageRequest withFilterAndSortCriteria(FilterAndSortCriteria[] filterAndSortCriteria) {
if (ArrayUtils.isNotEmpty(filterAndSortCriteria)) {
setFilterAndSortCriteria(filterAndSortCriteria);
}
return this;
}
public PersistencePackageRequest withAdornedList(AdornedTargetList adornedList) {
setAdornedList(adornedList);
return this;
}
public PersistencePackageRequest withMapStructure(MapStructure mapStructure) {
setMapStructure(mapStructure);
return this;
}
public PersistencePackageRequest withCustomCriteria(String[] customCriteria) {
if (ArrayUtils.isNotEmpty(customCriteria)) {
setCustomCriteria(customCriteria);
}
return this;
}
public PersistencePackageRequest withEntity(Entity entity) {
setEntity(entity);
return this;
}
public PersistencePackageRequest withStartIndex(Integer startIndex) {
setStartIndex(startIndex);
return this;
}
public PersistencePackageRequest withMaxIndex(Integer maxIndex) {
setMaxIndex(maxIndex);
return this;
}
/* *********** */
/* ADD METHODS */
/* *********** */
public PersistencePackageRequest addAdditionalForeignKey(ForeignKey foreignKey) {
additionalForeignKeys.add(foreignKey);
return this;
}
public PersistencePackageRequest addSubRequest(String infoPropertyName, PersistencePackageRequest subRequest) {
subRequests.put(infoPropertyName, subRequest);
return this;
}
public PersistencePackageRequest addCustomCriteria(String customCriteria) {
if (StringUtils.isNotBlank(customCriteria)) {
this.customCriteria.add(customCriteria);
}
return this;
}
public PersistencePackageRequest addFilterAndSortCriteria(FilterAndSortCriteria filterAndSortCriteria) {
this.filterAndSortCriteria.add(filterAndSortCriteria);
return this;
}
public PersistencePackageRequest addFilterAndSortCriteria(FilterAndSortCriteria[] filterAndSortCriteria) {
if (filterAndSortCriteria != null) {
this.filterAndSortCriteria.addAll(Arrays.asList(filterAndSortCriteria));
}
return this;
}
public PersistencePackageRequest addFilterAndSortCriteria(List<FilterAndSortCriteria> filterAndSortCriteria) {
this.filterAndSortCriteria.addAll(filterAndSortCriteria);
return this;
}
/* ************************ */
/* CUSTOM GETTERS / SETTERS */
/* ************************ */
public String[] getCustomCriteria() {
        return this.customCriteria.toArray(new String[this.customCriteria.size()]);
}
public ForeignKey[] getAdditionalForeignKeys() {
ForeignKey[] arr = new ForeignKey[this.additionalForeignKeys.size()];
arr = this.additionalForeignKeys.toArray(arr);
return arr;
}
public void setAdditionalForeignKeys(ForeignKey[] additionalForeignKeys) {
this.additionalForeignKeys = Arrays.asList(additionalForeignKeys);
}
public void setCustomCriteria(String[] customCriteria) {
this.customCriteria = Arrays.asList(customCriteria);
}
public FilterAndSortCriteria[] getFilterAndSortCriteria() {
FilterAndSortCriteria[] arr = new FilterAndSortCriteria[this.filterAndSortCriteria.size()];
arr = this.filterAndSortCriteria.toArray(arr);
return arr;
}
public void setFilterAndSortCriteria(FilterAndSortCriteria[] filterAndSortCriteria) {
this.filterAndSortCriteria.addAll(Arrays.asList(filterAndSortCriteria));
}
/* ************************** */
/* STANDARD GETTERS / SETTERS */
/* ************************** */
public ForeignKey getForeignKey() {
return foreignKey;
}
public void setForeignKey(ForeignKey foreignKey) {
this.foreignKey = foreignKey;
}
public Type getType() {
return type;
}
public void setType(Type type) {
this.type = type;
}
public String getCeilingEntityClassname() {
return ceilingEntityClassname;
}
public void setCeilingEntityClassname(String ceilingEntityClassname) {
this.ceilingEntityClassname = ceilingEntityClassname;
}
public String getConfigKey() {
return configKey;
}
public void setConfigKey(String configKey) {
this.configKey = configKey;
}
public AdornedTargetList getAdornedList() {
return adornedList;
}
public void setAdornedList(AdornedTargetList adornedList) {
this.adornedList = adornedList;
}
public MapStructure getMapStructure() {
return mapStructure;
}
public void setMapStructure(MapStructure mapStructure) {
this.mapStructure = mapStructure;
}
public Entity getEntity() {
return entity;
}
public void setEntity(Entity entity) {
this.entity = entity;
}
public OperationTypes getOperationTypesOverride() {
return operationTypesOverride;
}
public void setOperationTypesOverride(OperationTypes operationTypesOverride) {
this.operationTypesOverride = operationTypesOverride;
}
public Integer getStartIndex() {
return startIndex;
}
public void setStartIndex(Integer startIndex) {
this.startIndex = startIndex;
}
public Integer getMaxIndex() {
return maxIndex;
}
public void setMaxIndex(Integer maxIndex) {
this.maxIndex = maxIndex;
}
public Map<String, PersistencePackageRequest> getSubRequests() {
return subRequests;
}
public void setSubRequests(Map<String, PersistencePackageRequest> subRequests) {
this.subRequests = subRequests;
}
public boolean isValidateUnsubmittedProperties() {
return validateUnsubmittedProperties;
}
public void setValidateUnsubmittedProperties(boolean validateUnsubmittedProperties) {
this.validateUnsubmittedProperties = validateUnsubmittedProperties;
}
} | 1no label
| admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_domain_PersistencePackageRequest.java |
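A minimal sketch of the fluent API above; the ceiling entity classname and the custom criteria string are illustrative assumptions.
PersistencePackageRequest request = PersistencePackageRequest.standard()
        .withCeilingEntityClassname("org.broadleafcommerce.core.catalog.domain.Product")
        .withStartIndex(0)
        .withMaxIndex(50)
        .addCustomCriteria("illustrativeCriteria"); // blank criteria are ignored by addCustomCriteria()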
255 | {
@Override
public LabelScanReader newReader()
{
return LabelScanReader.EMPTY;
}
@Override
public LabelScanWriter newWriter()
{
return LabelScanWriter.EMPTY;
}
@Override
public void stop()
{ // Do nothing
}
@Override
public void start()
{ // Do nothing
}
@Override
public void shutdown()
{ // Do nothing
}
@Override
public void recover( Iterator<NodeLabelUpdate> updates )
{ // Do nothing
}
@Override
public AllEntriesLabelScanReader newAllEntriesReader()
{
return null;
}
@Override
public ResourceIterator<File> snapshotStoreFiles()
{
return emptyIterator();
}
@Override
public void init()
{ // Do nothing
}
@Override
public void force()
{ // Do nothing
}
}; | 0true
| community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_WriteTransactionTest.java |
175 | public class ORef<T> {
public T value;
public ORef() {
}
public ORef(final T object) {
this.value = object;
}
public ORef<T> clear() {
value = null;
return this;
}
@Override
public String toString() {
return value != null ? value.toString() : "ORef<null>";
}
} | 0true
| commons_src_main_java_com_orientechnologies_common_types_ORef.java |
1,968 | class NullOutputException extends NullPointerException {
public NullOutputException(String s) {
super(s);
}
} | 0true
| src_main_java_org_elasticsearch_common_inject_internal_NullOutputException.java |
407 | EventHandler<PortableItemEvent> eventHandler = new EventHandler<PortableItemEvent>() {
public void handle(PortableItemEvent portableItemEvent) {
E item = includeValue ? (E) getContext().getSerializationService().toObject(portableItemEvent.getItem()) : null;
Member member = getContext().getClusterService().getMember(portableItemEvent.getUuid());
ItemEvent<E> itemEvent = new ItemEvent<E>(getName(), portableItemEvent.getEventType(), item, member);
if (portableItemEvent.getEventType() == ItemEventType.ADDED) {
listener.itemAdded(itemEvent);
} else {
listener.itemRemoved(itemEvent);
}
}
@Override
public void onListenerRegister() {
}
}; | 1no label
| hazelcast-client_src_main_java_com_hazelcast_client_proxy_AbstractClientCollectionProxy.java |
576 | public interface CacheRequest {
public List<CacheItemRequest> getCacheItemRequests();
} | 0true
| common_src_main_java_org_broadleafcommerce_common_vendor_service_cache_CacheRequest.java |
979 | public class LockOperation extends BaseLockOperation implements WaitSupport, BackupAwareOperation {
public LockOperation() {
}
public LockOperation(ObjectNamespace namespace, Data key, long threadId, long timeout) {
super(namespace, key, threadId, timeout);
}
public LockOperation(ObjectNamespace namespace, Data key, long threadId, long ttl, long timeout) {
super(namespace, key, threadId, ttl, timeout);
}
@Override
public void run() throws Exception {
response = getLockStore().lock(key, getCallerUuid(), threadId, ttl);
}
@Override
public Operation getBackupOperation() {
return new LockBackupOperation(namespace, key, threadId, getCallerUuid());
}
@Override
public boolean shouldBackup() {
return Boolean.TRUE.equals(response);
}
@Override
public final WaitNotifyKey getWaitKey() {
return new LockWaitNotifyKey(namespace, key);
}
@Override
public final boolean shouldWait() {
return getWaitTimeout() != 0 && !getLockStore().canAcquireLock(key, getCallerUuid(), threadId);
}
@Override
public int getId() {
return LockDataSerializerHook.LOCK;
}
@Override
public final void onWaitExpire() {
Object response;
long timeout = getWaitTimeout();
if (timeout < 0 || timeout == Long.MAX_VALUE) {
response = new OperationTimeoutException();
} else {
response = Boolean.FALSE;
}
getResponseHandler().sendResponse(response);
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_concurrent_lock_operations_LockOperation.java |
2,337 | public class JsonSettingsLoaderTests extends ElasticsearchTestCase {
@Test
public void testSimpleJsonSettings() throws Exception {
Settings settings = settingsBuilder()
.loadFromClasspath("org/elasticsearch/common/settings/loader/test-settings.json")
.build();
assertThat(settings.get("test1.value1"), equalTo("value1"));
assertThat(settings.get("test1.test2.value2"), equalTo("value2"));
assertThat(settings.getAsInt("test1.test2.value3", -1), equalTo(2));
// check array
assertThat(settings.get("test1.test3.0"), equalTo("test3-1"));
assertThat(settings.get("test1.test3.1"), equalTo("test3-2"));
assertThat(settings.getAsArray("test1.test3").length, equalTo(2));
assertThat(settings.getAsArray("test1.test3")[0], equalTo("test3-1"));
assertThat(settings.getAsArray("test1.test3")[1], equalTo("test3-2"));
}
} | 0true
| src_test_java_org_elasticsearch_common_settings_loader_JsonSettingsLoaderTests.java |
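The assertions above imply a test-settings.json of roughly the following shape (reconstructed from the test, not copied from the resource file):
// {
//   "test1" : {
//     "value1" : "value1",
//     "test2"  : { "value2" : "value2", "value3" : 2 },
//     "test3"  : [ "test3-1", "test3-2" ]
//   }
// }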
1,881 | public interface Provider<T> {
/**
* Provides an instance of {@code T}. Must never return {@code null}.
*
* @throws OutOfScopeException when an attempt is made to access a scoped object while the scope
* in question is not currently active
* @throws ProvisionException if an instance cannot be provided. Such exceptions include messages
* and throwables to describe why provision failed.
*/
T get();
} | 0true
| src_main_java_org_elasticsearch_common_inject_Provider.java |
5,808 | public class FastVectorHighlighter implements Highlighter {
private static final SimpleBoundaryScanner DEFAULT_BOUNDARY_SCANNER = new SimpleBoundaryScanner();
private static final String CACHE_KEY = "highlight-fsv";
private final Boolean termVectorMultiValue;
@Inject
public FastVectorHighlighter(Settings settings) {
this.termVectorMultiValue = settings.getAsBoolean("search.highlight.term_vector_multi_value", true);
}
@Override
public String[] names() {
return new String[]{"fvh", "fast-vector-highlighter"};
}
@Override
public HighlightField highlight(HighlighterContext highlighterContext) {
SearchContextHighlight.Field field = highlighterContext.field;
SearchContext context = highlighterContext.context;
FetchSubPhase.HitContext hitContext = highlighterContext.hitContext;
FieldMapper<?> mapper = highlighterContext.mapper;
if (!(mapper.fieldType().storeTermVectors() && mapper.fieldType().storeTermVectorOffsets() && mapper.fieldType().storeTermVectorPositions())) {
throw new ElasticsearchIllegalArgumentException("the field [" + field.field() + "] should be indexed with term vector with position offsets to be used with fast vector highlighter");
}
Encoder encoder = field.encoder().equals("html") ? HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT;
if (!hitContext.cache().containsKey(CACHE_KEY)) {
hitContext.cache().put(CACHE_KEY, new HighlighterEntry());
}
HighlighterEntry cache = (HighlighterEntry) hitContext.cache().get(CACHE_KEY);
try {
FieldQuery fieldQuery;
if (field.requireFieldMatch()) {
if (cache.fieldMatchFieldQuery == null) {
                    // we use the top level reader to rewrite the query against all readers, while caching it across hits (and across readers...)
cache.fieldMatchFieldQuery = new CustomFieldQuery(highlighterContext.query.originalQuery(), hitContext.topLevelReader(), true, field.requireFieldMatch());
}
fieldQuery = cache.fieldMatchFieldQuery;
} else {
if (cache.noFieldMatchFieldQuery == null) {
                    // we use the top level reader to rewrite the query against all readers, while caching it across hits (and across readers...)
cache.noFieldMatchFieldQuery = new CustomFieldQuery(highlighterContext.query.originalQuery(), hitContext.topLevelReader(), true, field.requireFieldMatch());
}
fieldQuery = cache.noFieldMatchFieldQuery;
}
MapperHighlightEntry entry = cache.mappers.get(mapper);
if (entry == null) {
FragListBuilder fragListBuilder;
BaseFragmentsBuilder fragmentsBuilder;
BoundaryScanner boundaryScanner = DEFAULT_BOUNDARY_SCANNER;
if (field.boundaryMaxScan() != SimpleBoundaryScanner.DEFAULT_MAX_SCAN || field.boundaryChars() != SimpleBoundaryScanner.DEFAULT_BOUNDARY_CHARS) {
boundaryScanner = new SimpleBoundaryScanner(field.boundaryMaxScan(), field.boundaryChars());
}
if (field.numberOfFragments() == 0) {
fragListBuilder = new SingleFragListBuilder();
if (!field.forceSource() && mapper.fieldType().stored()) {
fragmentsBuilder = new SimpleFragmentsBuilder(mapper, field.preTags(), field.postTags(), boundaryScanner);
} else {
fragmentsBuilder = new SourceSimpleFragmentsBuilder(mapper, context, field.preTags(), field.postTags(), boundaryScanner);
}
} else {
fragListBuilder = field.fragmentOffset() == -1 ? new SimpleFragListBuilder() : new SimpleFragListBuilder(field.fragmentOffset());
if (field.scoreOrdered()) {
if (!field.forceSource() && mapper.fieldType().stored()) {
fragmentsBuilder = new ScoreOrderFragmentsBuilder(field.preTags(), field.postTags(), boundaryScanner);
} else {
fragmentsBuilder = new SourceScoreOrderFragmentsBuilder(mapper, context, field.preTags(), field.postTags(), boundaryScanner);
}
} else {
if (!field.forceSource() && mapper.fieldType().stored()) {
fragmentsBuilder = new SimpleFragmentsBuilder(mapper, field.preTags(), field.postTags(), boundaryScanner);
} else {
fragmentsBuilder = new SourceSimpleFragmentsBuilder(mapper, context, field.preTags(), field.postTags(), boundaryScanner);
}
}
}
fragmentsBuilder.setDiscreteMultiValueHighlighting(termVectorMultiValue);
entry = new MapperHighlightEntry();
entry.fragListBuilder = fragListBuilder;
entry.fragmentsBuilder = fragmentsBuilder;
if (cache.fvh == null) {
                    // parameters to FVH are not required since:
// first two booleans are not relevant since they are set on the CustomFieldQuery (phrase and fieldMatch)
// fragment builders are used explicitly
cache.fvh = new org.apache.lucene.search.vectorhighlight.FastVectorHighlighter();
}
CustomFieldQuery.highlightFilters.set(field.highlightFilter());
cache.mappers.put(mapper, entry);
}
cache.fvh.setPhraseLimit(field.phraseLimit());
String[] fragments;
            // a HACK to make the highlighter do highlighting, even though it's using the single frag list builder
int numberOfFragments = field.numberOfFragments() == 0 ? Integer.MAX_VALUE : field.numberOfFragments();
int fragmentCharSize = field.numberOfFragments() == 0 ? Integer.MAX_VALUE : field.fragmentCharSize();
// we highlight against the low level reader and docId, because if we load source, we want to reuse it if possible
// Only send matched fields if they were requested to save time.
if (field.matchedFields() != null && !field.matchedFields().isEmpty()) {
fragments = cache.fvh.getBestFragments(fieldQuery, hitContext.reader(), hitContext.docId(), mapper.names().indexName(), field.matchedFields(), fragmentCharSize,
numberOfFragments, entry.fragListBuilder, entry.fragmentsBuilder, field.preTags(), field.postTags(), encoder);
} else {
fragments = cache.fvh.getBestFragments(fieldQuery, hitContext.reader(), hitContext.docId(), mapper.names().indexName(), fragmentCharSize,
numberOfFragments, entry.fragListBuilder, entry.fragmentsBuilder, field.preTags(), field.postTags(), encoder);
}
if (fragments != null && fragments.length > 0) {
return new HighlightField(field.field(), StringText.convertFromStringArray(fragments));
}
int noMatchSize = highlighterContext.field.noMatchSize();
if (noMatchSize > 0) {
// Essentially we just request that a fragment is built from 0 to noMatchSize using the normal fragmentsBuilder
FieldFragList fieldFragList = new SimpleFieldFragList(-1 /*ignored*/);
fieldFragList.add(0, noMatchSize, Collections.<WeightedPhraseInfo>emptyList());
fragments = entry.fragmentsBuilder.createFragments(hitContext.reader(), hitContext.docId(), mapper.names().indexName(),
fieldFragList, 1, field.preTags(), field.postTags(), encoder);
if (fragments != null && fragments.length > 0) {
return new HighlightField(field.field(), StringText.convertFromStringArray(fragments));
}
}
return null;
} catch (Exception e) {
throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
}
}
private class MapperHighlightEntry {
public FragListBuilder fragListBuilder;
public FragmentsBuilder fragmentsBuilder;
public org.apache.lucene.search.highlight.Highlighter highlighter;
}
private class HighlighterEntry {
public org.apache.lucene.search.vectorhighlight.FastVectorHighlighter fvh;
public FieldQuery noFieldMatchFieldQuery;
public FieldQuery fieldMatchFieldQuery;
public Map<FieldMapper, MapperHighlightEntry> mappers = Maps.newHashMap();
}
} | 1no label
| src_main_java_org_elasticsearch_search_highlight_FastVectorHighlighter.java |
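The guard above requires term vectors with positions and offsets, so a field intended for this highlighter would be mapped along these lines; the field name is an assumption.
// Sketch of a compatible field mapping (field name "content" is illustrative):
XContentBuilder mapping = XContentFactory.jsonBuilder()
        .startObject().startObject("properties").startObject("content")
            .field("type", "string")
            .field("term_vector", "with_positions_offsets")
        .endObject().endObject().endObject();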
1,357 | @Service("blZipCodeService")
public class ZipCodeServiceImpl implements ZipCodeService {
@Resource(name="blZipCodeDao")
private ZipCodeDao zipCodeDao;
public ZipCode findZipCodeByZipCode(Integer zipCode) {
return zipCodeDao.findZipCodeByZipCode(zipCode);
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_store_service_ZipCodeServiceImpl.java |
1,000 | public class OStreamSerializerHelper {
public static final String SEPARATOR = "|";
private static final char SHORT_FORM_PREFIX = '!';
public static StringBuilder writeRecordType(final Class<?> cls, final StringBuilder iBuffer) {
    // SEARCH THE SERIALIZER REGISTRY: IF THE IMPLEMENTATION WAS REGISTERED, USE THE SHORT FORM (OPTIMIZING SIZE AND
    // WRITE TIME)
Character c = OClassDictionary.instance().getCodeByClass(cls);
if (c != null) {
// FOUND: WRITE THE SHORT FORM
iBuffer.append(SHORT_FORM_PREFIX);
iBuffer.append(c);
} else {
// NOT FOUND: PROBABLY A CUSTOM IMPL: WRITE THE FULL CLASS NAME
iBuffer.append(cls.getName());
iBuffer.append(SEPARATOR);
}
return iBuffer;
}
public static Class<?> readRecordType(final String iBuffer, final StringBuilder iContent) throws ClassNotFoundException {
Class<?> cls;
final int pos;
if (iBuffer.charAt(0) == SHORT_FORM_PREFIX) {
// SHORT FORM
cls = OClassDictionary.instance().getClassByCode(iBuffer.charAt(1));
pos = 1;
} else {
// LONG FORM
pos = iBuffer.indexOf(SEPARATOR);
if (pos < 0)
OLogManager.instance().error(null, "Class signature not found in the buffer: " + iBuffer, OSerializationException.class);
final String className = iBuffer.substring(0, pos);
cls = Class.forName(className);
}
// SET THE CONTENT
iContent.append(iBuffer.substring(pos + 1));
return cls;
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_serialization_serializer_stream_OStreamSerializerHelper.java |
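Illustrative buffers for the two header forms the helper above writes; whether a class gets the short form depends on OClassDictionary registration, and the class code shown is hypothetical.
// short form: "!d<content>"               -- '!' prefix plus a one-character registry code
// long form:  "com.acme.Custom|<content>" -- full class name followed by the '|' separator
StringBuilder header = OStreamSerializerHelper.writeRecordType(String.class, new StringBuilder());
// header now begins with "!<code>" if String is registered, else "java.lang.String|"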
254 | public interface EmailTarget extends Serializable {
public String getEmailAddress();
public void setEmailAddress(String emailAddress);
public String[] getCCAddresses();
public void setCCAddresses(String[] ccAddresses);
public String[] getBCCAddresses();
public void setBCCAddresses(String[] BCCAddresses);
} | 0true
| common_src_main_java_org_broadleafcommerce_common_email_domain_EmailTarget.java |
1,071 | public class ORuntimeResult {
private final Object fieldValue;
private final Map<String, Object> projections;
private final ODocument value;
private OCommandContext context;
public ORuntimeResult(final Object iFieldValue, final Map<String, Object> iProjections, final int iProgressive,
final OCommandContext iContext) {
fieldValue = iFieldValue;
projections = iProjections;
context = iContext;
value = createProjectionDocument(iProgressive);
}
public void applyRecord(final OIdentifiable iRecord) {
applyRecord(value, projections, context, iRecord);
}
/**
   * Sets a single value. This is useful for query optimizations such as index-based lookups.
*
* @param iName
* Field name
* @param iValue
* Field value
*/
public void applyValue(final String iName, final Object iValue) {
value.field(iName, iValue);
}
public ODocument getResult() {
return getResult(value, projections);
}
public static ODocument createProjectionDocument(final int iProgressive) {
final ODocument doc = new ODocument().setOrdered(true);
// ASSIGN A TEMPORARY RID TO ALLOW PAGINATION IF ANY
((ORecordId) doc.getIdentity()).clusterId = -2;
((ORecordId) doc.getIdentity()).clusterPosition = OClusterPositionFactory.INSTANCE.valueOf(iProgressive);
return doc;
}
public static ODocument applyRecord(final ODocument iValue, final Map<String, Object> iProjections,
final OCommandContext iContext, final OIdentifiable iRecord) {
// APPLY PROJECTIONS
final ODocument inputDocument = (ODocument) (iRecord != null ? iRecord.getRecord() : null);
if (iProjections.isEmpty())
// SELECT * CASE
inputDocument.copyTo(iValue);
else {
for (Entry<String, Object> projection : iProjections.entrySet()) {
final Object v = projection.getValue();
if (v == null)
continue;
final Object projectionValue;
if (v.equals("*")) {
// COPY ALL
inputDocument.copyTo(iValue);
projectionValue = null;
} else if (v instanceof OSQLFilterItemVariable) {
// RETURN A VARIABLE FROM THE CONTEXT
projectionValue = ((OSQLFilterItemVariable) v).getValue(inputDocument, iContext);
} else if (v instanceof OSQLFilterItemField)
projectionValue = ((OSQLFilterItemField) v).getValue(inputDocument, iContext);
else if (v instanceof OSQLFunctionRuntime) {
final OSQLFunctionRuntime f = (OSQLFunctionRuntime) v;
projectionValue = f.execute(inputDocument, iValue, iContext);
} else
projectionValue = v;
if (projectionValue != null)
iValue.field(projection.getKey(), projectionValue);
}
}
return iValue;
}
public static ODocument getResult(final ODocument iValue, final Map<String, Object> iProjections) {
if (iValue != null) {
boolean canExcludeResult = false;
for (Entry<String, Object> projection : iProjections.entrySet()) {
if (!iValue.containsField(projection.getKey())) {
          // ONLY IF IT DOES NOT ALREADY CONTAIN A VALUE; OTHERWISE IT HAS BEEN SET MANUALLY (INDEX?)
final Object v = projection.getValue();
if (v instanceof OSQLFunctionRuntime) {
final OSQLFunctionRuntime f = (OSQLFunctionRuntime) v;
canExcludeResult = f.filterResult();
Object fieldValue = f.getResult();
if (fieldValue != null)
iValue.field(projection.getKey(), fieldValue);
}
}
}
if (canExcludeResult && iValue.isEmpty())
// RESULT EXCLUDED FOR EMPTY RECORD
return null;
// AVOID SAVING OF TEMP RECORD
iValue.unsetDirty();
}
return iValue;
}
public static ODocument getProjectionResult(final int iId, final Map<String, Object> iProjections,
final OCommandContext iContext, final OIdentifiable iRecord) {
return ORuntimeResult.getResult(
ORuntimeResult.applyRecord(ORuntimeResult.createProjectionDocument(iId), iProjections, iContext, iRecord), iProjections);
}
public Object getFieldValue() {
return fieldValue;
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_sql_ORuntimeResult.java |
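A short usage sketch of the static helpers above, assuming an open ODatabaseDocumentTx db and a valid rid; the "*" projection value drives the COPY ALL branch of applyRecord, so no parsed OSQL filter items are needed:

// OBasicCommandContext is the stock OCommandContext implementation.
OCommandContext context = new OBasicCommandContext();
Map<String, Object> projections = new LinkedHashMap<String, Object>();
projections.put("*", "*"); // value "*" triggers the COPY ALL branch in applyRecord
ODocument projected = ORuntimeResult.getProjectionResult(0, projections, context, db.load(rid));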
380 | public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteRequest> {
AllocationCommands commands = new AllocationCommands();
boolean dryRun;
public ClusterRerouteRequest() {
}
/**
     * Adds allocation commands to be applied to the cluster. Note, the list can be empty, in which
     * case a plain reroute will simply be run.
*/
public ClusterRerouteRequest add(AllocationCommand... commands) {
this.commands.add(commands);
return this;
}
/**
     * Sets a dry run flag (defaults to <tt>false</tt>) allowing the commands to be run without
     * actually applying them to the cluster state, while still getting the resulting cluster state back.
*/
public ClusterRerouteRequest dryRun(boolean dryRun) {
this.dryRun = dryRun;
return this;
}
/**
     * Returns the current dry run flag, which allows running the commands without actually applying them,
     * just to get the resulting cluster state back.
*/
public boolean dryRun() {
return this.dryRun;
}
/**
* Sets the source for the request.
*/
public ClusterRerouteRequest source(BytesReference source) throws Exception {
XContentParser parser = XContentHelper.createParser(source);
try {
XContentParser.Token token;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if ("commands".equals(currentFieldName)) {
this.commands = AllocationCommands.fromXContent(parser);
} else {
throw new ElasticsearchParseException("failed to parse reroute request, got start array with wrong field name [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if ("dry_run".equals(currentFieldName) || "dryRun".equals(currentFieldName)) {
dryRun = parser.booleanValue();
} else {
throw new ElasticsearchParseException("failed to parse reroute request, got value with wrong field name [" + currentFieldName + "]");
}
}
}
} finally {
parser.close();
}
return this;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
commands = AllocationCommands.readFrom(in);
dryRun = in.readBoolean();
readTimeout(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
AllocationCommands.writeTo(commands, out);
out.writeBoolean(dryRun);
writeTimeout(out);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_cluster_reroute_ClusterRerouteRequest.java |
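A sketch of driving the parser above with a JSON body; the field names "dry_run" and "commands" are taken directly from source() (call from a method that declares throws Exception):

ClusterRerouteRequest request = new ClusterRerouteRequest()
        .source(new BytesArray("{\"dry_run\": true, \"commands\": []}")); // BytesArray implements BytesReference
assert request.dryRun();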
652 | public class ProductDataProvider {
/**
     * A basic product actually consists of a Product and its default Sku
*/
@DataProvider(name="basicProduct")
public static Object[][] provideBasicProduct() {
Product ci = new ProductImpl();
Sku defaultSku = new SkuImpl();
defaultSku.setName("setOfAggieDominoes");
defaultSku.setDescription("a fine set of bones for 42");
ci.setDefaultSku(defaultSku);
return new Object[][]{{ci}};
}
@DataProvider(name="setupProducts")
public static Object[][] createProducts() {
Product p1 = getProduct(null);
Product p2 = getProduct(null);
Product p3 = getProduct(null);
Product p4 = getProduct(null);
Product p5 = getProduct(null);
Product p6 = getProduct(null);
Product p7 = getProduct(null);
Object[][] objs = new Object[7][1];
objs[0] = new Object[]{p1};
objs[1] = new Object[]{p2};
objs[2] = new Object[]{p3};
objs[3] = new Object[]{p4};
objs[4] = new Object[]{p5};
objs[5] = new Object[]{p6};
objs[6] = new Object[]{p7};
return objs;
}
private static Product getProduct(Long id) {
Calendar activeStartCal = Calendar.getInstance();
activeStartCal.add(Calendar.DAY_OF_YEAR, -2);
Product product = new ProductImpl();
Sku defaultSku = new SkuImpl();
defaultSku.setRetailPrice(new Money(BigDecimal.valueOf(15.0)));
defaultSku.setSalePrice(new Money(BigDecimal.valueOf(10.0)));
defaultSku.setActiveStartDate(activeStartCal.getTime());
product.setDefaultSku(defaultSku);
if (id == null) {
defaultSku.setName("productNameTest");
return product;
}
product.setId(id);
defaultSku.setName(id.toString());
defaultSku.setId(id);
return product;
}
} | 0true
| integration_src_test_java_org_broadleafcommerce_core_catalog_ProductDataProvider.java |
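A sketch of a TestNG test consuming the "basicProduct" provider above; Product.getDefaultSku() is assumed as the counterpart of the setter used in the provider:

import org.testng.Assert;
import org.testng.annotations.Test;

public class ProductDataProviderTest {
    @Test(dataProvider = "basicProduct", dataProviderClass = ProductDataProvider.class)
    public void defaultSkuIsPopulated(Product product) {
        Assert.assertEquals(product.getDefaultSku().getName(), "setOfAggieDominoes");
    }
}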
895 | threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
executePhase(shardIndex, node, target.v2());
}
}); | 0true
| src_main_java_org_elasticsearch_action_search_type_TransportSearchScrollScanAction.java |
384 | public class OMultiValueChangeEvent<K, V> {
/**
   * Types of operation that can be performed on a collection.
*/
public static enum OChangeType {
ADD, UPDATE, REMOVE
}
/**
   * The operation performed on the collection.
*/
private final OChangeType changeType;
/**
   * Key indicating the position of the item inside the collection.
*/
private final K key;
/**
* New item value.
*/
private V value;
/**
* Previous item value.
*/
private V oldValue;
public OMultiValueChangeEvent(final OChangeType changeType, final K key, final V value) {
this.changeType = changeType;
this.key = key;
this.value = value;
}
public OMultiValueChangeEvent(final OChangeType changeType, final K key, final V value, final V oldValue) {
this.changeType = changeType;
this.key = key;
this.value = value;
this.oldValue = oldValue;
}
public K getKey() {
return key;
}
public V getValue() {
return value;
}
public OChangeType getChangeType() {
return changeType;
}
public V getOldValue() {
return oldValue;
}
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final OMultiValueChangeEvent<?, ?> that = (OMultiValueChangeEvent<?, ?>) o;
if (changeType != that.changeType) {
return false;
}
if (!key.equals(that.key)) {
return false;
}
if (oldValue != null ? !oldValue.equals(that.oldValue) : that.oldValue != null) {
return false;
}
if (value != null ? !value.equals(that.value) : that.value != null) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = changeType.hashCode();
result = 31 * result + key.hashCode();
result = 31 * result + (value != null ? value.hashCode() : 0);
result = 31 * result + (oldValue != null ? oldValue.hashCode() : 0);
return result;
}
@Override
public String toString() {
return "OMultiValueChangeEvent{" + "changeType=" + changeType + ", key=" + key + ", value=" + value + ", oldValue=" + oldValue
+ '}';
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_db_record_OMultiValueChangeEvent.java |
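A minimal usage sketch: recording an UPDATE at list position 2 while keeping the previous value for a potential rollback:

OMultiValueChangeEvent<Integer, String> event =
        new OMultiValueChangeEvent<Integer, String>(
                OMultiValueChangeEvent.OChangeType.UPDATE, 2, "newValue", "oldValue");
// event.toString() -> OMultiValueChangeEvent{changeType=UPDATE, key=2, value=newValue, oldValue=oldValue}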
620 | public class PostJoinOperation extends AbstractOperation implements UrgentSystemOperation, JoinOperation {
private Operation[] operations;
public PostJoinOperation() {
}
public PostJoinOperation(final Operation... ops) {
for (Operation op : ops) {
if (op == null) {
throw new NullPointerException();
}
if (op instanceof PartitionAwareOperation) {
throw new IllegalArgumentException("Post join operation can not be a PartitionAwareOperation!");
}
}
// we may need to do array copy!
operations = ops;
}
@Override
public void beforeRun() throws Exception {
if (operations != null && operations.length > 0) {
final NodeEngine nodeEngine = getNodeEngine();
final int len = operations.length;
for (int i = 0; i < len; i++) {
final Operation op = operations[i];
op.setNodeEngine(nodeEngine)
.setResponseHandler(ResponseHandlerFactory.createEmptyResponseHandler());
OperationAccessor.setCallerAddress(op, getCallerAddress());
OperationAccessor.setConnection(op, getConnection());
operations[i] = op;
}
}
}
@Override
public void run() throws Exception {
if (operations != null && operations.length > 0) {
final OperationService operationService = getNodeEngine().getOperationService();
for (final Operation op : operations) {
operationService.runOperationOnCallingThread(op);
}
}
}
@Override
public boolean returnsResponse() {
return true;
}
@Override
public boolean validatesTarget() {
return false;
}
@Override
protected void writeInternal(final ObjectDataOutput out) throws IOException {
final int len = operations != null ? operations.length : 0;
out.writeInt(len);
if (len > 0) {
for (Operation op : operations) {
out.writeObject(op);
}
}
}
@Override
protected void readInternal(final ObjectDataInput in) throws IOException {
final int len = in.readInt();
operations = new Operation[len];
for (int i = 0; i < len; i++) {
operations[i] = in.readObject();
}
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("PostJoinOperation{");
sb.append("operations=").append(Arrays.toString(operations));
sb.append('}');
return sb.toString();
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_cluster_PostJoinOperation.java |
263 | @Service("blEmailService")
public class EmailServiceImpl implements EmailService {
@Resource(name = "blEmailTrackingManager")
protected EmailTrackingManager emailTrackingManager;
@Resource(name = "blServerInfo")
protected ServerInfo serverInfo;
protected EmailServiceProducer emailServiceProducer;
@Resource(name = "blMessageCreator")
protected MessageCreator messageCreator;
@Resource(name = "blEmailReportingDao")
protected EmailReportingDao emailReportingDao;
public boolean sendTemplateEmail(EmailTarget emailTarget, EmailInfo emailInfo, HashMap<String, Object> props) {
if (props == null) {
props = new HashMap<String, Object>();
}
if (emailInfo == null) {
emailInfo = new EmailInfo();
}
props.put(EmailPropertyType.INFO.getType(), emailInfo);
props.put(EmailPropertyType.USER.getType(), emailTarget);
Long emailId = emailTrackingManager.createTrackedEmail(emailTarget.getEmailAddress(), emailInfo.getEmailType(), null);
props.put("emailTrackingId", emailId);
return sendBasicEmail(emailInfo, emailTarget, props);
}
public boolean sendTemplateEmail(String emailAddress, EmailInfo emailInfo, HashMap<String, Object> props) {
if (!(emailInfo instanceof NullEmailInfo)) {
EmailTarget emailTarget = emailReportingDao.createTarget();
emailTarget.setEmailAddress(emailAddress);
return sendTemplateEmail(emailTarget, emailInfo, props);
} else {
return true;
}
}
public boolean sendBasicEmail(EmailInfo emailInfo, EmailTarget emailTarget, HashMap<String, Object> props) {
if (props == null) {
props = new HashMap<String, Object>();
}
if (emailInfo == null) {
emailInfo = new EmailInfo();
}
props.put(EmailPropertyType.INFO.getType(), emailInfo);
props.put(EmailPropertyType.USER.getType(), emailTarget);
if (Boolean.parseBoolean(emailInfo.getSendEmailReliableAsync())) {
if (emailServiceProducer == null) {
throw new EmailException("The property sendEmailReliableAsync on EmailInfo is true, but the EmailService does not have an instance of JMSEmailServiceProducer set.");
}
emailServiceProducer.send(props);
} else {
messageCreator.sendMessage(props);
}
return true;
}
/**
* @return the emailTrackingManager
*/
public EmailTrackingManager getEmailTrackingManager() {
return emailTrackingManager;
}
/**
* @param emailTrackingManager the emailTrackingManager to set
*/
public void setEmailTrackingManager(EmailTrackingManager emailTrackingManager) {
this.emailTrackingManager = emailTrackingManager;
}
/**
* @return the serverInfo
*/
public ServerInfo getServerInfo() {
return serverInfo;
}
/**
* @param serverInfo the serverInfo to set
*/
public void setServerInfo(ServerInfo serverInfo) {
this.serverInfo = serverInfo;
}
/**
* @return the emailServiceProducer
*/
public EmailServiceProducer getEmailServiceProducer() {
return emailServiceProducer;
}
/**
* @param emailServiceProducer the emailServiceProducer to set
*/
public void setEmailServiceProducer(EmailServiceProducer emailServiceProducer) {
this.emailServiceProducer = emailServiceProducer;
}
/**
* @return the messageCreator
*/
public MessageCreator getMessageCreator() {
return messageCreator;
}
/**
* @param messageCreator the messageCreator to set
*/
public void setMessageCreator(MessageCreator messageCreator) {
this.messageCreator = messageCreator;
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_email_service_EmailServiceImpl.java |
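A caller-side sketch; emailService stands for the injected "blEmailService" bean, and the template property key is a hypothetical model entry:

EmailInfo info = new EmailInfo();
HashMap<String, Object> props = new HashMap<String, Object>();
props.put("orderNumber", "12345"); // hypothetical template model entry
emailService.sendTemplateEmail("customer@example.com", info, props);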
3,092 | public class QueueService implements ManagedService, MigrationAwareService, TransactionalService,
RemoteService, EventPublishingService<QueueEvent, ItemListener> {
public static final String SERVICE_NAME = "hz:impl:queueService";
private final EntryTaskScheduler queueEvictionScheduler;
private final NodeEngine nodeEngine;
private final ConcurrentMap<String, QueueContainer> containerMap
= new ConcurrentHashMap<String, QueueContainer>();
private final ConcurrentMap<String, LocalQueueStatsImpl> statsMap
= new ConcurrentHashMap<String, LocalQueueStatsImpl>(1000);
private final ConstructorFunction<String, LocalQueueStatsImpl> localQueueStatsConstructorFunction
= new ConstructorFunction<String, LocalQueueStatsImpl>() {
@Override
public LocalQueueStatsImpl createNew(String key) {
return new LocalQueueStatsImpl();
}
};
public QueueService(NodeEngine nodeEngine) {
this.nodeEngine = nodeEngine;
ScheduledExecutorService defaultScheduledExecutor
= nodeEngine.getExecutionService().getDefaultScheduledExecutor();
QueueEvictionProcessor entryProcessor = new QueueEvictionProcessor(nodeEngine, this);
this.queueEvictionScheduler = EntryTaskSchedulerFactory.newScheduler(
defaultScheduledExecutor, entryProcessor, ScheduleType.POSTPONE);
}
public void scheduleEviction(String name, long delay) {
queueEvictionScheduler.schedule(delay, name, null);
}
public void cancelEviction(String name) {
queueEvictionScheduler.cancel(name);
}
@Override
public void init(NodeEngine nodeEngine, Properties properties) {
}
@Override
public void reset() {
containerMap.clear();
}
@Override
public void shutdown(boolean terminate) {
reset();
}
public QueueContainer getOrCreateContainer(final String name, boolean fromBackup) throws Exception {
QueueContainer container = containerMap.get(name);
if (container == null) {
container = new QueueContainer(name, nodeEngine.getConfig().findQueueConfig(name), nodeEngine, this);
QueueContainer existing = containerMap.putIfAbsent(name, container);
if (existing != null) {
container = existing;
} else {
container.init(fromBackup);
}
}
return container;
}
public void addContainer(String name, QueueContainer container) {
containerMap.put(name, container);
}
    // needed for testing
public boolean containsQueue(String name) {
return containerMap.containsKey(name);
}
@Override
public void beforeMigration(PartitionMigrationEvent partitionMigrationEvent) {
}
@Override
public Operation prepareReplicationOperation(PartitionReplicationEvent event) {
Map<String, QueueContainer> migrationData = new HashMap<String, QueueContainer>();
InternalPartitionService partitionService = nodeEngine.getPartitionService();
for (Entry<String, QueueContainer> entry : containerMap.entrySet()) {
String name = entry.getKey();
int partitionId = partitionService.getPartitionId(StringPartitioningStrategy.getPartitionKey(name));
QueueContainer container = entry.getValue();
if (partitionId == event.getPartitionId()
&& container.getConfig().getTotalBackupCount() >= event.getReplicaIndex()) {
migrationData.put(name, container);
}
}
if (migrationData.isEmpty()) {
return null;
} else {
return new QueueReplicationOperation(migrationData, event.getPartitionId(), event.getReplicaIndex());
}
}
@Override
public void commitMigration(PartitionMigrationEvent event) {
if (event.getMigrationEndpoint() == MigrationEndpoint.SOURCE) {
clearMigrationData(event.getPartitionId());
}
}
@Override
public void rollbackMigration(PartitionMigrationEvent event) {
if (event.getMigrationEndpoint() == MigrationEndpoint.DESTINATION) {
clearMigrationData(event.getPartitionId());
}
}
private void clearMigrationData(int partitionId) {
Iterator<Entry<String, QueueContainer>> iterator = containerMap.entrySet().iterator();
InternalPartitionService partitionService = nodeEngine.getPartitionService();
while (iterator.hasNext()) {
final Entry<String, QueueContainer> entry = iterator.next();
final String name = entry.getKey();
final QueueContainer container = entry.getValue();
int containerPartitionId = partitionService.getPartitionId(StringPartitioningStrategy.getPartitionKey(name));
if (containerPartitionId == partitionId) {
container.destroy();
iterator.remove();
}
}
}
@Override
public void clearPartitionReplica(int partitionId) {
clearMigrationData(partitionId);
}
@Override
public void dispatchEvent(QueueEvent event, ItemListener listener) {
Object item = nodeEngine.toObject(event.data);
ItemEvent itemEvent = new ItemEvent(event.name, event.eventType, item,
nodeEngine.getClusterService().getMember(event.caller));
if (event.eventType.equals(ItemEventType.ADDED)) {
listener.itemAdded(itemEvent);
} else {
listener.itemRemoved(itemEvent);
}
getLocalQueueStatsImpl(event.name).incrementReceivedEvents();
}
@Override
public QueueProxyImpl createDistributedObject(String objectId) {
return new QueueProxyImpl(objectId, this, nodeEngine);
}
@Override
public void destroyDistributedObject(String name) {
containerMap.remove(name);
nodeEngine.getEventService().deregisterAllListeners(SERVICE_NAME, name);
}
public String addItemListener(String name, ItemListener listener, boolean includeValue) {
EventService eventService = nodeEngine.getEventService();
QueueEventFilter filter = new QueueEventFilter(includeValue);
EventRegistration registration = eventService.registerListener(
QueueService.SERVICE_NAME, name, filter, listener);
return registration.getId();
}
public boolean removeItemListener(String name, String registrationId) {
EventService eventService = nodeEngine.getEventService();
return eventService.deregisterListener(SERVICE_NAME, name, registrationId);
}
public NodeEngine getNodeEngine() {
return nodeEngine;
}
public LocalQueueStats createLocalQueueStats(String name, int partitionId) {
LocalQueueStatsImpl stats = getLocalQueueStatsImpl(name);
stats.setOwnedItemCount(0);
stats.setBackupItemCount(0);
QueueContainer container = containerMap.get(name);
if (container == null) {
return stats;
}
Address thisAddress = nodeEngine.getClusterService().getThisAddress();
InternalPartition partition = nodeEngine.getPartitionService().getPartition(partitionId);
Address owner = partition.getOwnerOrNull();
if (thisAddress.equals(owner)) {
stats.setOwnedItemCount(container.size());
} else if (owner != null) {
stats.setBackupItemCount(container.backupSize());
}
container.setStats(stats);
return stats;
}
public LocalQueueStatsImpl getLocalQueueStatsImpl(String name) {
return ConcurrencyUtil.getOrPutIfAbsent(statsMap, name, localQueueStatsConstructorFunction);
}
public TransactionalQueueProxy createTransactionalObject(String name, TransactionSupport transaction) {
return new TransactionalQueueProxy(nodeEngine, this, name, transaction);
}
public void rollbackTransaction(String transactionId) {
final Set<String> queueNames = containerMap.keySet();
InternalPartitionService partitionService = nodeEngine.getPartitionService();
OperationService operationService = nodeEngine.getOperationService();
for (String name : queueNames) {
int partitionId = partitionService.getPartitionId(StringPartitioningStrategy.getPartitionKey(name));
Operation operation = new QueueTransactionRollbackOperation(name, transactionId)
.setPartitionId(partitionId)
.setService(this)
.setNodeEngine(nodeEngine);
operationService.executeOperation(operation);
}
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_queue_QueueService.java |
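A sketch using the public IQueue API, which routes through addItemListener(...)/dispatchEvent(...) above; includeValue=true matches the QueueEventFilter flag:

HazelcastInstance hz = Hazelcast.newHazelcastInstance();
IQueue<String> queue = hz.getQueue("orders");
String registrationId = queue.addItemListener(new ItemListener<String>() {
    public void itemAdded(ItemEvent<String> event) { System.out.println("added: " + event.getItem()); }
    public void itemRemoved(ItemEvent<String> event) { System.out.println("removed: " + event.getItem()); }
}, true); // includeValue
queue.offer("order-1");
queue.removeItemListener(registrationId);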
1,527 | final class ConnectionFactoryMetaData implements ResourceAdapterMetaData {
/**
     * JCA-connector-specific Java package used for all information retrieval
*/
private static final Package HZ_PACKAGE = ConnectionFactoryImpl.class.getPackage();
/**
     * @return the implementation title from Hazelcast
*/
public String getAdapterName() {
return HZ_PACKAGE.getImplementationTitle();
}
/**
     * @return the specification title from Hazelcast
*/
public String getAdapterShortDescription() {
return HZ_PACKAGE.getSpecificationTitle();
}
/**
* @return Hazelcast's implementation vendor
*/
public String getAdapterVendorName() {
return HZ_PACKAGE.getImplementationVendor();
}
/**
* @return Hazelcast's implementation version
*/
public String getAdapterVersion() {
return HZ_PACKAGE.getImplementationVersion();
}
/**
     * There is no real specification, so this always returns an empty String array...
*/
public String[] getInteractionSpecsSupported() {
return new String[]{};
}
/**
* @return Hazelcast's specification version
*/
public String getSpecVersion() {
return HZ_PACKAGE.getSpecificationVersion();
}
/* (non-Javadoc)
* @see javax.resource.cci.ResourceAdapterMetaData#supportsExecuteWithInputAndOutputRecord()
*/
public boolean supportsExecuteWithInputAndOutputRecord() {
return false;
}
/* (non-Javadoc)
* @see javax.resource.cci.ResourceAdapterMetaData#supportsExecuteWithInputRecordOnly()
*/
public boolean supportsExecuteWithInputRecordOnly() {
return false;
}
/* (non-Javadoc)
* @see javax.resource.cci.ResourceAdapterMetaData#supportsLocalTransactionDemarcation()
*/
public boolean supportsLocalTransactionDemarcation() {
return false;
}
} | 0true
| hazelcast-ra_hazelcast-jca_src_main_java_com_hazelcast_jca_ConnectionFactoryMetaData.java |
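A sketch of reading this metadata through the standard CCI entry point; the JNDI name is illustrative and ResourceException handling is elided:

ConnectionFactory cf = (ConnectionFactory) new InitialContext().lookup("java:comp/env/HazelcastCF");
ResourceAdapterMetaData metaData = cf.getMetaData();
System.out.println(metaData.getAdapterName() + " " + metaData.getAdapterVersion());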
1,575 | class ApplySettings implements NodeSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
boolean disableNewAllocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, DisableAllocationDecider.this.disableNewAllocation);
if (disableNewAllocation != DisableAllocationDecider.this.disableNewAllocation) {
logger.info("updating [cluster.routing.allocation.disable_new_allocation] from [{}] to [{}]", DisableAllocationDecider.this.disableNewAllocation, disableNewAllocation);
DisableAllocationDecider.this.disableNewAllocation = disableNewAllocation;
}
boolean disableAllocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, DisableAllocationDecider.this.disableAllocation);
if (disableAllocation != DisableAllocationDecider.this.disableAllocation) {
logger.info("updating [cluster.routing.allocation.disable_allocation] from [{}] to [{}]", DisableAllocationDecider.this.disableAllocation, disableAllocation);
DisableAllocationDecider.this.disableAllocation = disableAllocation;
}
boolean disableReplicaAllocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISABLE_REPLICA_ALLOCATION, DisableAllocationDecider.this.disableReplicaAllocation);
if (disableReplicaAllocation != DisableAllocationDecider.this.disableReplicaAllocation) {
logger.info("updating [cluster.routing.allocation.disable_replica_allocation] from [{}] to [{}]", DisableAllocationDecider.this.disableReplicaAllocation, disableReplicaAllocation);
DisableAllocationDecider.this.disableReplicaAllocation = disableReplicaAllocation;
}
}
} | 0true
| src_main_java_org_elasticsearch_cluster_routing_allocation_decider_DisableAllocationDecider.java |
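A sketch of flipping one of the watched settings at runtime, assuming a connected Client; an update like this is what ultimately triggers onRefreshSettings above:

client.admin().cluster().prepareUpdateSettings()
        .setTransientSettings(ImmutableSettings.settingsBuilder()
                .put("cluster.routing.allocation.disable_allocation", true)
                .build())
        .get();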
163 | public abstract class CountedCompleter<T> extends ForkJoinTask<T> {
private static final long serialVersionUID = 5232453752276485070L;
/** This task's completer, or null if none */
final CountedCompleter<?> completer;
/** The number of pending tasks until completion */
volatile int pending;
/**
* Creates a new CountedCompleter with the given completer
* and initial pending count.
*
* @param completer this task's completer, or {@code null} if none
* @param initialPendingCount the initial pending count
*/
protected CountedCompleter(CountedCompleter<?> completer,
int initialPendingCount) {
this.completer = completer;
this.pending = initialPendingCount;
}
/**
* Creates a new CountedCompleter with the given completer
* and an initial pending count of zero.
*
* @param completer this task's completer, or {@code null} if none
*/
protected CountedCompleter(CountedCompleter<?> completer) {
this.completer = completer;
}
/**
* Creates a new CountedCompleter with no completer
* and an initial pending count of zero.
*/
protected CountedCompleter() {
this.completer = null;
}
/**
* The main computation performed by this task.
*/
public abstract void compute();
/**
* Performs an action when method {@link #tryComplete} is invoked
* and the pending count is zero, or when the unconditional
* method {@link #complete} is invoked. By default, this method
* does nothing. You can distinguish cases by checking the
* identity of the given caller argument. If not equal to {@code
* this}, then it is typically a subtask that may contain results
* (and/or links to other results) to combine.
*
* @param caller the task invoking this method (which may
* be this task itself)
*/
public void onCompletion(CountedCompleter<?> caller) {
}
/**
* Performs an action when method {@link #completeExceptionally}
* is invoked or method {@link #compute} throws an exception, and
* this task has not otherwise already completed normally. On
* entry to this method, this task {@link
* ForkJoinTask#isCompletedAbnormally}. The return value of this
* method controls further propagation: If {@code true} and this
* task has a completer, then this completer is also completed
* exceptionally. The default implementation of this method does
* nothing except return {@code true}.
*
* @param ex the exception
* @param caller the task invoking this method (which may
* be this task itself)
* @return true if this exception should be propagated to this
* task's completer, if one exists
*/
public boolean onExceptionalCompletion(Throwable ex, CountedCompleter<?> caller) {
return true;
}
/**
* Returns the completer established in this task's constructor,
* or {@code null} if none.
*
* @return the completer
*/
public final CountedCompleter<?> getCompleter() {
return completer;
}
/**
* Returns the current pending count.
*
* @return the current pending count
*/
public final int getPendingCount() {
return pending;
}
/**
* Sets the pending count to the given value.
*
* @param count the count
*/
public final void setPendingCount(int count) {
pending = count;
}
/**
* Adds (atomically) the given value to the pending count.
*
* @param delta the value to add
*/
public final void addToPendingCount(int delta) {
int c; // note: can replace with intrinsic in jdk8
do {} while (!U.compareAndSwapInt(this, PENDING, c = pending, c+delta));
}
/**
* Sets (atomically) the pending count to the given count only if
* it currently holds the given expected value.
*
* @param expected the expected value
* @param count the new value
* @return true if successful
*/
public final boolean compareAndSetPendingCount(int expected, int count) {
return U.compareAndSwapInt(this, PENDING, expected, count);
}
/**
* If the pending count is nonzero, (atomically) decrements it.
*
* @return the initial (undecremented) pending count holding on entry
* to this method
*/
public final int decrementPendingCountUnlessZero() {
int c;
do {} while ((c = pending) != 0 &&
!U.compareAndSwapInt(this, PENDING, c, c - 1));
return c;
}
/**
* Returns the root of the current computation; i.e., this
* task if it has no completer, else its completer's root.
*
* @return the root of the current computation
*/
public final CountedCompleter<?> getRoot() {
CountedCompleter<?> a = this, p;
while ((p = a.completer) != null)
a = p;
return a;
}
/**
* If the pending count is nonzero, decrements the count;
* otherwise invokes {@link #onCompletion} and then similarly
* tries to complete this task's completer, if one exists,
* else marks this task as complete.
*/
public final void tryComplete() {
CountedCompleter<?> a = this, s = a;
for (int c;;) {
if ((c = a.pending) == 0) {
a.onCompletion(s);
if ((a = (s = a).completer) == null) {
s.quietlyComplete();
return;
}
}
else if (U.compareAndSwapInt(a, PENDING, c, c - 1))
return;
}
}
/**
* Equivalent to {@link #tryComplete} but does not invoke {@link
* #onCompletion} along the completion path: If the pending count
* is nonzero, decrements the count; otherwise, similarly tries to
* complete this task's completer, if one exists, else marks this
* task as complete. This method may be useful in cases where
* {@code onCompletion} should not, or need not, be invoked for
* each completer in a computation.
*/
public final void propagateCompletion() {
CountedCompleter<?> a = this, s = a;
for (int c;;) {
if ((c = a.pending) == 0) {
if ((a = (s = a).completer) == null) {
s.quietlyComplete();
return;
}
}
else if (U.compareAndSwapInt(a, PENDING, c, c - 1))
return;
}
}
/**
* Regardless of pending count, invokes {@link #onCompletion},
* marks this task as complete and further triggers {@link
* #tryComplete} on this task's completer, if one exists. The
* given rawResult is used as an argument to {@link #setRawResult}
* before invoking {@link #onCompletion} or marking this task as
* complete; its value is meaningful only for classes overriding
* {@code setRawResult}.
*
* <p>This method may be useful when forcing completion as soon as
* any one (versus all) of several subtask results are obtained.
* However, in the common (and recommended) case in which {@code
* setRawResult} is not overridden, this effect can be obtained
* more simply using {@code quietlyCompleteRoot();}.
*
* @param rawResult the raw result
*/
public void complete(T rawResult) {
CountedCompleter<?> p;
setRawResult(rawResult);
onCompletion(this);
quietlyComplete();
if ((p = completer) != null)
p.tryComplete();
}
/**
* If this task's pending count is zero, returns this task;
* otherwise decrements its pending count and returns {@code
* null}. This method is designed to be used with {@link
* #nextComplete} in completion traversal loops.
*
* @return this task, if pending count was zero, else {@code null}
*/
public final CountedCompleter<?> firstComplete() {
for (int c;;) {
if ((c = pending) == 0)
return this;
else if (U.compareAndSwapInt(this, PENDING, c, c - 1))
return null;
}
}
/**
* If this task does not have a completer, invokes {@link
* ForkJoinTask#quietlyComplete} and returns {@code null}. Or, if
* this task's pending count is non-zero, decrements its pending
* count and returns {@code null}. Otherwise, returns the
* completer. This method can be used as part of a completion
* traversal loop for homogeneous task hierarchies:
*
* <pre> {@code
* for (CountedCompleter<?> c = firstComplete();
* c != null;
* c = c.nextComplete()) {
* // ... process c ...
* }}</pre>
*
* @return the completer, or {@code null} if none
*/
public final CountedCompleter<?> nextComplete() {
CountedCompleter<?> p;
if ((p = completer) != null)
return p.firstComplete();
else {
quietlyComplete();
return null;
}
}
/**
* Equivalent to {@code getRoot().quietlyComplete()}.
*/
public final void quietlyCompleteRoot() {
for (CountedCompleter<?> a = this, p;;) {
if ((p = a.completer) == null) {
a.quietlyComplete();
return;
}
a = p;
}
}
/**
* Supports ForkJoinTask exception propagation.
*/
void internalPropagateException(Throwable ex) {
CountedCompleter<?> a = this, s = a;
while (a.onExceptionalCompletion(ex, s) &&
(a = (s = a).completer) != null && a.status >= 0)
a.recordExceptionalCompletion(ex);
}
/**
* Implements execution conventions for CountedCompleters.
*/
protected final boolean exec() {
compute();
return false;
}
/**
* Returns the result of the computation. By default,
* returns {@code null}, which is appropriate for {@code Void}
* actions, but in other cases should be overridden, almost
* always to return a field or function of a field that
* holds the result upon completion.
*
* @return the result of the computation
*/
public T getRawResult() { return null; }
/**
* A method that result-bearing CountedCompleters may optionally
* use to help maintain result data. By default, does nothing.
* Overrides are not recommended. However, if this method is
* overridden to update existing objects or fields, then it must
* in general be defined to be thread-safe.
*/
protected void setRawResult(T t) { }
// Unsafe mechanics
private static final sun.misc.Unsafe U;
private static final long PENDING;
static {
try {
U = getUnsafe();
PENDING = U.objectFieldOffset
(CountedCompleter.class.getDeclaredField("pending"));
} catch (Exception e) {
throw new Error(e);
}
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
private static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException tryReflectionInstead) {}
try {
return java.security.AccessController.doPrivileged
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
} | 0true
| src_main_java_jsr166y_CountedCompleter.java |
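A compact usage sketch in the style of the class's own documentation: a parallel for-each that sets the pending count to the number of forked children and lets each leaf's tryComplete() walk the completion chain back to the root:

class ForEach<E> extends CountedCompleter<Void> {
    final E[] array; final int lo, hi;
    ForEach(CountedCompleter<?> parent, E[] array, int lo, int hi) {
        super(parent); this.array = array; this.lo = lo; this.hi = hi;
    }
    public void compute() {
        if (hi - lo >= 2) {
            int mid = (lo + hi) >>> 1;
            setPendingCount(2);                          // two forked children must complete
            new ForEach<E>(this, array, mid, hi).fork();
            new ForEach<E>(this, array, lo, mid).fork();
        } else {
            if (hi > lo) System.out.println(array[lo]);  // the per-element action
            tryComplete();                               // count down toward the root
        }
    }
}
// Usage: new ForEach<String>(null, data, 0, data.length).invoke();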
687 | public class OLocalHashTable<K, V> extends OSharedResourceAdaptive {
private static final double MERGE_THRESHOLD = 0.2;
private static final long HASH_CODE_MIN_VALUE = 0;
private static final long HASH_CODE_MAX_VALUE = 0xFFFFFFFFFFFFFFFFL;
private long[][] hashTree;
private OHashTreeNodeMetadata[] nodesMetadata;
private int hashTreeSize;
private long size;
private int hashTreeTombstone = -1;
private long bucketTombstonePointer = -1;
private final String metadataConfigurationFileExtension;
private final String treeStateFileExtension;
private final String bucketFileExtension;
public static final int HASH_CODE_SIZE = 64;
public static final int MAX_LEVEL_DEPTH = 8;
public static final int MAX_LEVEL_SIZE = 1 << MAX_LEVEL_DEPTH;
public static final int LEVEL_MASK = Integer.MAX_VALUE >>> (31 - MAX_LEVEL_DEPTH);
private OStorageLocalAbstract storage;
private String name;
private OHashIndexBufferStore metadataStore;
private OHashIndexTreeStateStore treeStateStore;
private ODiskCache diskCache;
private final OHashFunction<K> keyHashFunction;
private OBinarySerializer<K> keySerializer;
private OBinarySerializer<V> valueSerializer;
private OType[] keyTypes;
private OHashIndexFileLevelMetadata[] filesMetadata = new OHashIndexFileLevelMetadata[HASH_CODE_SIZE];
private final long[] fileLevelIds = new long[HASH_CODE_SIZE];
private final KeyHashCodeComparator<K> comparator;
public OLocalHashTable(String metadataConfigurationFileExtension, String treeStateFileExtension, String bucketFileExtension,
OHashFunction<K> keyHashFunction) {
super(OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean());
this.metadataConfigurationFileExtension = metadataConfigurationFileExtension;
this.treeStateFileExtension = treeStateFileExtension;
this.bucketFileExtension = bucketFileExtension;
this.keyHashFunction = keyHashFunction;
this.comparator = new KeyHashCodeComparator<K>(this.keyHashFunction);
}
private void initStores(String metadataConfigurationFileExtension, String treeStateFileExtension) throws IOException {
final OStorageFileConfiguration metadataConfiguration = new OStorageFileConfiguration(null,
OStorageVariableParser.DB_PATH_VARIABLE + '/' + name + metadataConfigurationFileExtension, OFileFactory.CLASSIC, "0", "50%");
final OStorageFileConfiguration treeStateConfiguration = new OStorageFileConfiguration(null,
OStorageVariableParser.DB_PATH_VARIABLE + '/' + name + treeStateFileExtension, OFileFactory.CLASSIC, "0", "50%");
metadataStore = new OHashIndexBufferStore(storage, metadataConfiguration);
treeStateStore = new OHashIndexTreeStateStore(storage, treeStateConfiguration);
}
public void create(String name, OBinarySerializer<K> keySerializer, OBinarySerializer<V> valueSerializer, OType[] keyTypes,
OStorageLocalAbstract storageLocal) {
acquireExclusiveLock();
try {
this.storage = storageLocal;
this.keyTypes = keyTypes;
this.diskCache = storage.getDiskCache();
if (this.diskCache == null)
throw new IllegalStateException("Disk cache was not initialized on storage level");
this.name = name;
this.keySerializer = keySerializer;
this.valueSerializer = valueSerializer;
initStores(metadataConfigurationFileExtension, treeStateFileExtension);
metadataStore.create(-1);
treeStateStore.create(-1);
metadataStore.setRecordsCount(size);
treeStateStore.setHashTreeSize(hashTreeSize);
treeStateStore.setHashTreeTombstone(hashTreeTombstone);
treeStateStore.setBucketTombstonePointer(bucketTombstonePointer);
filesMetadata[0] = createFileMetadata(0);
initHashTreeState();
} catch (IOException e) {
throw new OIndexException("Error during local hash table creation.", e);
} finally {
releaseExclusiveLock();
}
}
public OBinarySerializer<K> getKeySerializer() {
return keySerializer;
}
public void setKeySerializer(OBinarySerializer<K> keySerializer) {
this.keySerializer = keySerializer;
}
public OBinarySerializer<V> getValueSerializer() {
return valueSerializer;
}
public void setValueSerializer(OBinarySerializer<V> valueSerializer) {
this.valueSerializer = valueSerializer;
}
private OHashIndexFileLevelMetadata createFileMetadata(int i) throws IOException {
String fileName = name + i + bucketFileExtension;
fileLevelIds[i] = diskCache.openFile(fileName);
return new OHashIndexFileLevelMetadata(fileName, 0, -1);
}
public V get(K key) {
if (key == null)
return null;
acquireSharedLock();
try {
key = keySerializer.preprocess(key, (Object[]) keyTypes);
final long hashCode = keyHashFunction.hashCode(key);
BucketPath bucketPath = getBucket(hashCode);
final long bucketPointer = hashTree[bucketPath.nodeIndex][bucketPath.itemIndex + bucketPath.hashMapOffset];
if (bucketPointer == 0)
return null;
long pageIndex = getPageIndex(bucketPointer);
int fileLevel = getFileLevel(bucketPointer);
OCacheEntry cacheEntry = loadPageEntry(pageIndex, fileLevel);
OCachePointer dataPointer = cacheEntry.getCachePointer();
try {
final OHashIndexBucket<K, V> bucket = new OHashIndexBucket<K, V>(dataPointer.getDataPointer(), keySerializer,
valueSerializer, keyTypes);
OHashIndexBucket.Entry<K, V> entry = bucket.find(key, hashCode);
if (entry == null)
return null;
return entry.value;
} finally {
diskCache.release(cacheEntry);
}
} catch (IOException e) {
throw new OIndexException("Exception during index value retrieval", e);
} finally {
releaseSharedLock();
}
}
public void put(K key, V value) {
acquireExclusiveLock();
try {
key = keySerializer.preprocess(key, (Object[]) keyTypes);
doPut(key, value);
} catch (OIndexMaximumLimitReachedException e) {
OLogManager.instance().warn(this, "Key " + key + " is too large to fit in index and will be skipped", e);
} catch (IOException e) {
throw new OIndexException("Error during index update", e);
} finally {
releaseExclusiveLock();
}
}
public V remove(K key) {
acquireExclusiveLock();
try {
key = keySerializer.preprocess(key, (Object[]) keyTypes);
final long hashCode = keyHashFunction.hashCode(key);
final BucketPath nodePath = getBucket(hashCode);
final long bucketPointer = hashTree[nodePath.nodeIndex][nodePath.itemIndex + nodePath.hashMapOffset];
final long pageIndex = getPageIndex(bucketPointer);
final int fileLevel = getFileLevel(bucketPointer);
final V removed;
final OCacheEntry cacheEntry = loadPageEntry(pageIndex, fileLevel);
final OCachePointer dataPointer = cacheEntry.getCachePointer();
dataPointer.acquireExclusiveLock();
try {
final OHashIndexBucket<K, V> bucket = new OHashIndexBucket<K, V>(dataPointer.getDataPointer(), keySerializer,
valueSerializer, keyTypes);
final int positionIndex = bucket.getIndex(hashCode, key);
if (positionIndex < 0)
return null;
removed = bucket.deleteEntry(positionIndex).value;
size--;
mergeBucketsAfterDeletion(nodePath, bucket);
cacheEntry.markDirty();
} finally {
dataPointer.releaseExclusiveLock();
diskCache.release(cacheEntry);
}
if (nodePath.parent != null) {
final int hashMapSize = 1 << nodePath.nodeLocalDepth;
final long[] node = hashTree[nodePath.nodeIndex];
final boolean allMapsContainSameBucket = checkAllMapsContainSameBucket(node, hashMapSize);
if (allMapsContainSameBucket)
mergeNodeToParent(node, nodePath);
}
return removed;
} catch (IOException e) {
throw new OIndexException("Error during index removal", e);
} finally {
releaseExclusiveLock();
}
}
public void clear() {
acquireExclusiveLock();
try {
for (int i = 0; i < filesMetadata.length; i++) {
if (filesMetadata[i] != null)
diskCache.truncateFile(fileLevelIds[i]);
}
bucketTombstonePointer = -1;
metadataStore.truncate();
treeStateStore.truncate();
initHashTreeState();
} catch (IOException e) {
throw new OIndexException("Error during hash table clear", e);
} finally {
releaseExclusiveLock();
}
}
public OHashIndexBucket.Entry<K, V>[] higherEntries(K key) {
return higherEntries(key, -1);
}
public OHashIndexBucket.Entry<K, V>[] higherEntries(K key, int limit) {
acquireSharedLock();
try {
key = keySerializer.preprocess(key, (Object[]) keyTypes);
final long hashCode = keyHashFunction.hashCode(key);
BucketPath bucketPath = getBucket(hashCode);
long bucketPointer = hashTree[bucketPath.nodeIndex][bucketPath.itemIndex + bucketPath.hashMapOffset];
int fileLevel = getFileLevel(bucketPointer);
long pageIndex = getPageIndex(bucketPointer);
OCacheEntry cacheEntry = loadPageEntry(pageIndex, fileLevel);
OCachePointer pagePointer = cacheEntry.getCachePointer();
try {
OHashIndexBucket<K, V> bucket = new OHashIndexBucket<K, V>(pagePointer.getDataPointer(), keySerializer, valueSerializer,
keyTypes);
while (bucket.size() == 0 || comparator.compare(bucket.getKey(bucket.size() - 1), key) <= 0) {
bucketPath = nextBucketToFind(bucketPath, bucket.getDepth());
if (bucketPath == null)
return new OHashIndexBucket.Entry[0];
diskCache.release(cacheEntry);
final long nextPointer = hashTree[bucketPath.nodeIndex][bucketPath.itemIndex + bucketPath.hashMapOffset];
fileLevel = getFileLevel(nextPointer);
pageIndex = getPageIndex(nextPointer);
cacheEntry = loadPageEntry(pageIndex, fileLevel);
pagePointer = cacheEntry.getCachePointer();
bucket = new OHashIndexBucket<K, V>(pagePointer.getDataPointer(), keySerializer, valueSerializer, keyTypes);
}
final int index = bucket.getIndex(hashCode, key);
final int startIndex;
if (index >= 0)
startIndex = index + 1;
else
startIndex = -index - 1;
final int endIndex;
if (limit <= 0)
endIndex = bucket.size();
else
endIndex = Math.min(bucket.size(), startIndex + limit);
return convertBucketToEntries(bucket, startIndex, endIndex);
} finally {
diskCache.release(cacheEntry);
}
} catch (IOException ioe) {
throw new OIndexException("Exception during data retrieval", ioe);
} finally {
releaseSharedLock();
}
}
private void saveState() throws IOException {
treeStateStore.setHashTreeSize(hashTreeSize);
treeStateStore.setBucketTombstonePointer(bucketTombstonePointer);
treeStateStore.setHashTreeTombstone(hashTreeTombstone);
treeStateStore.storeTreeState(hashTree, nodesMetadata);
metadataStore.setRecordsCount(size);
metadataStore.setKeySerializerId(keySerializer.getId());
metadataStore.setValueSerializerId(valueSerializer.getId());
metadataStore.storeMetadata(filesMetadata);
}
public void load(String name, OType[] keyTypes, OStorageLocalAbstract storageLocal) {
acquireExclusiveLock();
try {
this.storage = storageLocal;
this.keyTypes = keyTypes;
diskCache = storage.getDiskCache();
this.name = name;
initStores(metadataConfigurationFileExtension, treeStateFileExtension);
metadataStore.open();
treeStateStore.open();
size = metadataStore.getRecordsCount();
hashTreeSize = (int) treeStateStore.getHashTreeSize();
hashTreeTombstone = (int) treeStateStore.getHashTreeTombstone();
bucketTombstonePointer = treeStateStore.getBucketTombstonePointer();
final int arraySize;
int bitsCount = Integer.bitCount(hashTreeSize);
if (bitsCount == 1)
arraySize = hashTreeSize;
else
arraySize = Integer.highestOneBit(hashTreeSize) << 1;
OHashIndexTreeStateStore.TreeState treeState = treeStateStore.loadTreeState(arraySize);
hashTree = treeState.getHashTree();
nodesMetadata = treeState.getHashTreeNodeMetadata();
keySerializer = (OBinarySerializer<K>) OBinarySerializerFactory.INSTANCE.getObjectSerializer(metadataStore
.getKeySerializerId());
valueSerializer = (OBinarySerializer<V>) OBinarySerializerFactory.INSTANCE.getObjectSerializer(metadataStore
.getValuerSerializerId());
filesMetadata = metadataStore.loadMetadata();
for (int i = 0; i < filesMetadata.length; i++) {
OHashIndexFileLevelMetadata fileLevelMetadata = filesMetadata[i];
if (fileLevelMetadata != null)
fileLevelIds[i] = diskCache.openFile(fileLevelMetadata.getFileName());
}
} catch (IOException e) {
throw new OIndexException("Exception during hash table loading", e);
} finally {
releaseExclusiveLock();
}
}
public void deleteWithoutLoad(String name, OStorageLocalAbstract storageLocal) {
acquireExclusiveLock();
try {
final ODiskCache diskCache = storageLocal.getDiskCache();
initStores(metadataConfigurationFileExtension, treeStateFileExtension);
metadataStore.open();
treeStateStore.open();
filesMetadata = metadataStore.loadMetadata();
for (int i = 0; i < filesMetadata.length; i++) {
OHashIndexFileLevelMetadata fileLevelMetadata = filesMetadata[i];
if (fileLevelMetadata != null) {
fileLevelIds[i] = diskCache.openFile(fileLevelMetadata.getFileName());
diskCache.deleteFile(fileLevelIds[i]);
}
}
metadataStore.delete();
treeStateStore.delete();
} catch (IOException ioe) {
throw new OIndexException("Can not delete hash table with name " + name, ioe);
} finally {
releaseExclusiveLock();
}
}
private OHashIndexBucket.Entry<K, V>[] convertBucketToEntries(final OHashIndexBucket<K, V> bucket, int startIndex, int endIndex) {
final OHashIndexBucket.Entry<K, V>[] entries = new OHashIndexBucket.Entry[endIndex - startIndex];
final Iterator<OHashIndexBucket.Entry<K, V>> iterator = bucket.iterator(startIndex);
for (int i = 0, k = startIndex; k < endIndex; i++, k++)
entries[i] = iterator.next();
return entries;
}
private BucketPath nextBucketToFind(final BucketPath bucketPath, int bucketDepth) {
int offset = bucketPath.nodeGlobalDepth - bucketDepth;
BucketPath currentNode = bucketPath;
int nodeLocalDepth = nodesMetadata[bucketPath.nodeIndex].getNodeLocalDepth();
assert nodesMetadata[bucketPath.nodeIndex].getNodeLocalDepth() == bucketPath.nodeLocalDepth;
while (offset > 0) {
offset -= nodeLocalDepth;
if (offset > 0) {
currentNode = bucketPath.parent;
nodeLocalDepth = currentNode.nodeLocalDepth;
assert nodesMetadata[currentNode.nodeIndex].getNodeLocalDepth() == currentNode.nodeLocalDepth;
}
}
final int diff = bucketDepth - (currentNode.nodeGlobalDepth - nodeLocalDepth);
final int interval = (1 << (nodeLocalDepth - diff));
final int firstStartIndex = currentNode.itemIndex & ((LEVEL_MASK << (nodeLocalDepth - diff)) & LEVEL_MASK);
final BucketPath bucketPathToFind;
final int globalIndex = firstStartIndex + interval + currentNode.hashMapOffset;
if (globalIndex >= MAX_LEVEL_SIZE)
bucketPathToFind = nextLevelUp(currentNode);
else {
final int hashMapSize = 1 << currentNode.nodeLocalDepth;
final int hashMapOffset = globalIndex / hashMapSize * hashMapSize;
final int startIndex = globalIndex - hashMapOffset;
bucketPathToFind = new BucketPath(currentNode.parent, hashMapOffset, startIndex, currentNode.nodeIndex,
currentNode.nodeLocalDepth, currentNode.nodeGlobalDepth);
}
return nextNonEmptyNode(bucketPathToFind);
}
private BucketPath nextNonEmptyNode(BucketPath bucketPath) {
nextBucketLoop: while (bucketPath != null) {
final long[] node = hashTree[bucketPath.nodeIndex];
final int startIndex = bucketPath.itemIndex + bucketPath.hashMapOffset;
final int endIndex = MAX_LEVEL_SIZE;
for (int i = startIndex; i < endIndex; i++) {
final long position = node[i];
if (position > 0) {
final int hashMapSize = 1 << bucketPath.nodeLocalDepth;
final int hashMapOffset = (i / hashMapSize) * hashMapSize;
final int itemIndex = i - hashMapOffset;
return new BucketPath(bucketPath.parent, hashMapOffset, itemIndex, bucketPath.nodeIndex, bucketPath.nodeLocalDepth,
bucketPath.nodeGlobalDepth);
}
if (position < 0) {
final int childNodeIndex = (int) ((position & Long.MAX_VALUE) >> 8);
final int childItemOffset = (int) position & 0xFF;
final BucketPath parent = new BucketPath(bucketPath.parent, 0, i, bucketPath.nodeIndex, bucketPath.nodeLocalDepth,
bucketPath.nodeGlobalDepth);
final int childLocalDepth = nodesMetadata[childNodeIndex].getNodeLocalDepth();
bucketPath = new BucketPath(parent, childItemOffset, 0, childNodeIndex, childLocalDepth, bucketPath.nodeGlobalDepth
+ childLocalDepth);
continue nextBucketLoop;
}
}
bucketPath = nextLevelUp(bucketPath);
}
return null;
}
private BucketPath nextLevelUp(BucketPath bucketPath) {
if (bucketPath.parent == null)
return null;
final int nodeLocalDepth = bucketPath.nodeLocalDepth;
assert nodesMetadata[bucketPath.nodeIndex].getNodeLocalDepth() == bucketPath.nodeLocalDepth;
final int pointersSize = 1 << (MAX_LEVEL_DEPTH - nodeLocalDepth);
final BucketPath parent = bucketPath.parent;
if (parent.itemIndex < MAX_LEVEL_SIZE / 2) {
final int nextParentIndex = (parent.itemIndex / pointersSize + 1) * pointersSize;
return new BucketPath(parent.parent, 0, nextParentIndex, parent.nodeIndex, parent.nodeLocalDepth, parent.nodeGlobalDepth);
}
final int nextParentIndex = ((parent.itemIndex - MAX_LEVEL_SIZE / 2) / pointersSize + 1) * pointersSize + MAX_LEVEL_SIZE / 2;
if (nextParentIndex < MAX_LEVEL_SIZE)
return new BucketPath(parent.parent, 0, nextParentIndex, parent.nodeIndex, parent.nodeLocalDepth, parent.nodeGlobalDepth);
return nextLevelUp(new BucketPath(parent.parent, 0, MAX_LEVEL_SIZE - 1, parent.nodeIndex, parent.nodeLocalDepth,
parent.nodeGlobalDepth));
}
public OHashIndexBucket.Entry<K, V>[] ceilingEntries(K key) {
acquireSharedLock();
try {
key = keySerializer.preprocess(key, (Object[]) keyTypes);
final long hashCode = keyHashFunction.hashCode(key);
BucketPath bucketPath = getBucket(hashCode);
long bucketPointer = hashTree[bucketPath.nodeIndex][bucketPath.itemIndex + bucketPath.hashMapOffset];
int fileLevel = getFileLevel(bucketPointer);
long pageIndex = getPageIndex(bucketPointer);
OCacheEntry cacheEntry = loadPageEntry(pageIndex, fileLevel);
OCachePointer pagePointer = cacheEntry.getCachePointer();
try {
OHashIndexBucket<K, V> bucket = new OHashIndexBucket<K, V>(pagePointer.getDataPointer(), keySerializer, valueSerializer,
keyTypes);
while (bucket.size() == 0) {
bucketPath = nextBucketToFind(bucketPath, bucket.getDepth());
if (bucketPath == null)
return new OHashIndexBucket.Entry[0];
diskCache.release(cacheEntry);
final long nextPointer = hashTree[bucketPath.nodeIndex][bucketPath.itemIndex + bucketPath.hashMapOffset];
fileLevel = getFileLevel(nextPointer);
pageIndex = getPageIndex(nextPointer);
cacheEntry = loadPageEntry(pageIndex, fileLevel);
pagePointer = cacheEntry.getCachePointer();
bucket = new OHashIndexBucket<K, V>(pagePointer.getDataPointer(), keySerializer, valueSerializer, keyTypes);
}
final int index = bucket.getIndex(hashCode, key);
final int startIndex;
if (index >= 0)
startIndex = index;
else
startIndex = -index - 1;
final int endIndex = bucket.size();
return convertBucketToEntries(bucket, startIndex, endIndex);
} finally {
diskCache.release(cacheEntry);
}
} catch (IOException ioe) {
throw new OIndexException("Error during data retrieval", ioe);
} finally {
releaseSharedLock();
}
}
public OHashIndexBucket.Entry<K, V> firstEntry() {
acquireSharedLock();
try {
BucketPath bucketPath = getBucket(HASH_CODE_MIN_VALUE);
long bucketPointer = hashTree[bucketPath.nodeIndex][bucketPath.itemIndex];
int fileLevel = getFileLevel(bucketPointer);
long pageIndex = getPageIndex(bucketPointer);
OCacheEntry cacheEntry = loadPageEntry(pageIndex, fileLevel);
OCachePointer pagePointer = cacheEntry.getCachePointer();
try {
OHashIndexBucket<K, V> bucket = new OHashIndexBucket<K, V>(pagePointer.getDataPointer(), keySerializer, valueSerializer,
keyTypes);
while (bucket.size() == 0) {
bucketPath = nextBucketToFind(bucketPath, bucket.getDepth());
if (bucketPath == null)
return null;
diskCache.release(cacheEntry);
final long nextPointer = hashTree[bucketPath.nodeIndex][bucketPath.itemIndex + bucketPath.hashMapOffset];
fileLevel = getFileLevel(nextPointer);
pageIndex = getPageIndex(nextPointer);
cacheEntry = loadPageEntry(pageIndex, fileLevel);
pagePointer = cacheEntry.getCachePointer();
bucket = new OHashIndexBucket<K, V>(pagePointer.getDataPointer(), keySerializer, valueSerializer, keyTypes);
}
return bucket.getEntry(0);
} finally {
diskCache.release(cacheEntry);
}
} catch (IOException ioe) {
throw new OIndexException("Exception during data read", ioe);
} finally {
releaseSharedLock();
}
}
public OHashIndexBucket.Entry<K, V> lastEntry() {
acquireSharedLock();
try {
BucketPath bucketPath = getBucket(HASH_CODE_MAX_VALUE);
long bucketPointer = hashTree[bucketPath.nodeIndex][bucketPath.itemIndex + bucketPath.hashMapOffset];
int fileLevel = getFileLevel(bucketPointer);
long pageIndex = getPageIndex(bucketPointer);
OCacheEntry cacheEntry = loadPageEntry(pageIndex, fileLevel);
OCachePointer pagePointer = cacheEntry.getCachePointer();
try {
OHashIndexBucket<K, V> bucket = new OHashIndexBucket<K, V>(pagePointer.getDataPointer(), keySerializer, valueSerializer,
keyTypes);
while (bucket.size() == 0) {
final BucketPath prevBucketPath = prevBucketToFind(bucketPath, bucket.getDepth());
if (prevBucketPath == null)
return null;
diskCache.release(cacheEntry);
final long prevPointer = hashTree[prevBucketPath.nodeIndex][prevBucketPath.itemIndex + prevBucketPath.hashMapOffset];
fileLevel = getFileLevel(prevPointer);
pageIndex = getPageIndex(prevPointer);
cacheEntry = loadPageEntry(pageIndex, fileLevel);
pagePointer = cacheEntry.getCachePointer();
bucket = new OHashIndexBucket<K, V>(pagePointer.getDataPointer(), keySerializer, valueSerializer, keyTypes);
bucketPath = prevBucketPath;
}
return bucket.getEntry(bucket.size() - 1);
} finally {
diskCache.release(cacheEntry);
}
} catch (IOException ioe) {
throw new OIndexException("Exception during data read", ioe);
} finally {
releaseSharedLock();
}
}
public OHashIndexBucket.Entry<K, V>[] lowerEntries(K key) throws IOException {
acquireSharedLock();
try {
key = keySerializer.preprocess(key, (Object[]) keyTypes);
final long hashCode = keyHashFunction.hashCode(key);
BucketPath bucketPath = getBucket(hashCode);
long bucketPointer = hashTree[bucketPath.nodeIndex][bucketPath.itemIndex + bucketPath.hashMapOffset];
int fileLevel = getFileLevel(bucketPointer);
long pageIndex = getPageIndex(bucketPointer);
OCacheEntry cacheEntry = loadPageEntry(pageIndex, fileLevel);
OCachePointer pagePointer = cacheEntry.getCachePointer();
try {
OHashIndexBucket<K, V> bucket = new OHashIndexBucket<K, V>(pagePointer.getDataPointer(), keySerializer, valueSerializer,
keyTypes);
while (bucket.size() == 0 || comparator.compare(bucket.getKey(0), key) >= 0) {
final BucketPath prevBucketPath = prevBucketToFind(bucketPath, bucket.getDepth());
if (prevBucketPath == null)
return new OHashIndexBucket.Entry[0];
diskCache.release(cacheEntry);
final long prevPointer = hashTree[prevBucketPath.nodeIndex][prevBucketPath.itemIndex + prevBucketPath.hashMapOffset];
fileLevel = getFileLevel(prevPointer);
pageIndex = getPageIndex(prevPointer);
cacheEntry = loadPageEntry(pageIndex, fileLevel);
pagePointer = cacheEntry.getCachePointer();
bucket = new OHashIndexBucket<K, V>(pagePointer.getDataPointer(), keySerializer, valueSerializer, keyTypes);
bucketPath = prevBucketPath;
}
final int startIndex = 0;
final int index = bucket.getIndex(hashCode, key);
final int endIndex;
if (index >= 0)
endIndex = index;
else
endIndex = -index - 1;
return convertBucketToEntries(bucket, startIndex, endIndex);
} finally {
diskCache.release(cacheEntry);
}
} finally {
releaseSharedLock();
}
}
public OHashIndexBucket.Entry<K, V>[] floorEntries(K key) throws IOException {
acquireSharedLock();
try {
key = keySerializer.preprocess(key, (Object[]) keyTypes);
final long hashCode = keyHashFunction.hashCode(key);
BucketPath bucketPath = getBucket(hashCode);
long bucketPointer = hashTree[bucketPath.nodeIndex][bucketPath.itemIndex + bucketPath.hashMapOffset];
int fileLevel = getFileLevel(bucketPointer);
long pageIndex = getPageIndex(bucketPointer);
OCacheEntry cacheEntry = loadPageEntry(pageIndex, fileLevel);
OCachePointer pagePointer = cacheEntry.getCachePointer();
try {
OHashIndexBucket<K, V> bucket = new OHashIndexBucket<K, V>(pagePointer.getDataPointer(), keySerializer, valueSerializer,
keyTypes);
while (bucket.size() == 0) {
final BucketPath prevBucketPath = prevBucketToFind(bucketPath, bucket.getDepth());
if (prevBucketPath == null)
return new OHashIndexBucket.Entry[0];
diskCache.release(cacheEntry);
final long prevPointer = hashTree[prevBucketPath.nodeIndex][prevBucketPath.itemIndex + prevBucketPath.hashMapOffset];
fileLevel = getFileLevel(prevPointer);
pageIndex = getPageIndex(prevPointer);
cacheEntry = loadPageEntry(pageIndex, fileLevel);
pagePointer = cacheEntry.getCachePointer();
bucket = new OHashIndexBucket<K, V>(pagePointer.getDataPointer(), keySerializer, valueSerializer, keyTypes);
bucketPath = prevBucketPath;
}
final int startIndex = 0;
final int index = bucket.getIndex(hashCode, key);
final int endIndex;
if (index >= 0)
endIndex = index + 1;
else
endIndex = -index - 1;
return convertBucketToEntries(bucket, startIndex, endIndex);
} finally {
diskCache.release(cacheEntry);
}
} finally {
releaseSharedLock();
}
}
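/**
 * Locates the bucket that precedes, in hash order, the group of directory slots sharing the first
 * bucketDepth bits with the given path: climbs the node tree as far as the depth difference
 * requires, computes the slot just before that group, and delegates to prevNonEmptyNode.
 */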
private BucketPath prevBucketToFind(final BucketPath bucketPath, int bucketDepth) {
int offset = bucketPath.nodeGlobalDepth - bucketDepth;
BucketPath currentBucket = bucketPath;
int nodeLocalDepth = bucketPath.nodeLocalDepth;
while (offset > 0) {
offset -= nodeLocalDepth;
if (offset > 0) {
currentBucket = currentBucket.parent; // advance up the chain; re-reading bucketPath.parent would stall the climb after one level
nodeLocalDepth = currentBucket.nodeLocalDepth;
}
}
final int diff = bucketDepth - (currentBucket.nodeGlobalDepth - nodeLocalDepth);
final int firstStartIndex = currentBucket.itemIndex & ((LEVEL_MASK << (nodeLocalDepth - diff)) & LEVEL_MASK);
final int globalIndex = firstStartIndex + currentBucket.hashMapOffset - 1;
final BucketPath bucketPathToFind;
if (globalIndex < 0)
bucketPathToFind = prevLevelUp(bucketPath);
else {
final int hashMapSize = 1 << currentBucket.nodeLocalDepth;
final int hashMapOffset = globalIndex / hashMapSize * hashMapSize;
final int startIndex = globalIndex - hashMapOffset;
bucketPathToFind = new BucketPath(currentBucket.parent, hashMapOffset, startIndex, currentBucket.nodeIndex,
currentBucket.nodeLocalDepth, currentBucket.nodeGlobalDepth);
}
return prevNonEmptyNode(bucketPathToFind);
}
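/**
 * Scans backwards from the given position for a usable slot: a positive value is a direct bucket
 * pointer and is returned, a negative value is a child-node reference that is entered at its last
 * item, and an exhausted node continues one level up via prevLevelUp.
 */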
private BucketPath prevNonEmptyNode(BucketPath nodePath) {
prevBucketLoop: while (nodePath != null) {
final long[] node = hashTree[nodePath.nodeIndex];
final int startIndex = 0;
final int endIndex = nodePath.itemIndex + nodePath.hashMapOffset;
for (int i = endIndex; i >= startIndex; i--) {
final long position = node[i];
if (position > 0) {
final int hashMapSize = 1 << nodePath.nodeLocalDepth;
final int hashMapOffset = (i / hashMapSize) * hashMapSize;
final int itemIndex = i - hashMapOffset;
return new BucketPath(nodePath.parent, hashMapOffset, itemIndex, nodePath.nodeIndex, nodePath.nodeLocalDepth,
nodePath.nodeGlobalDepth);
}
if (position < 0) {
final int childNodeIndex = (int) ((position & Long.MAX_VALUE) >> 8);
final int childItemOffset = (int) position & 0xFF;
final int nodeLocalDepth = nodesMetadata[childNodeIndex].getNodeLocalDepth();
final int endChildIndex = (1 << nodeLocalDepth) - 1;
final BucketPath parent = new BucketPath(nodePath.parent, 0, i, nodePath.nodeIndex, nodePath.nodeLocalDepth,
nodePath.nodeGlobalDepth);
nodePath = new BucketPath(parent, childItemOffset, endChildIndex, childNodeIndex, nodeLocalDepth, parent.nodeGlobalDepth
+ nodeLocalDepth);
continue prevBucketLoop;
}
}
nodePath = prevLevelUp(nodePath);
}
return null;
}
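/**
 * Moves the search one node level up once the current node is exhausted, positioning the parent
 * path on the pointer group immediately before the one that referenced the current node.
 */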
private BucketPath prevLevelUp(BucketPath bucketPath) {
if (bucketPath.parent == null)
return null;
final int nodeLocalDepth = bucketPath.nodeLocalDepth;
final int pointersSize = 1 << (MAX_LEVEL_DEPTH - nodeLocalDepth);
final BucketPath parent = bucketPath.parent;
if (parent.itemIndex > MAX_LEVEL_SIZE / 2) {
final int prevParentIndex = ((parent.itemIndex - MAX_LEVEL_SIZE / 2) / pointersSize) * pointersSize + MAX_LEVEL_SIZE / 2 - 1;
return new BucketPath(parent.parent, 0, prevParentIndex, parent.nodeIndex, parent.nodeLocalDepth, parent.nodeGlobalDepth);
}
final int prevParentIndex = (parent.itemIndex / pointersSize) * pointersSize - 1;
if (prevParentIndex >= 0)
return new BucketPath(parent.parent, 0, prevParentIndex, parent.nodeIndex, parent.nodeLocalDepth, parent.nodeGlobalDepth);
return prevLevelUp(new BucketPath(parent.parent, 0, 0, parent.nodeIndex, parent.nodeLocalDepth, -1));
}
public long size() {
acquireSharedLock();
try {
return size;
} finally {
releaseSharedLock();
}
}
public void rename(String newName) {
acquireExclusiveLock();
try {
metadataStore.rename(name, newName);
treeStateStore.rename(name, newName);
for (long fileId : fileLevelIds)
if (fileId > 0)
diskCache.renameFile(fileId, newName, name);
} catch (IOException ioe) {
throw new OIndexException("Attempt of rename of hash table was failed", ioe);
} finally {
releaseExclusiveLock();
}
}
public void close() {
acquireExclusiveLock();
try {
flush();
metadataStore.close();
treeStateStore.close();
for (int i = 0; i < filesMetadata.length; i++)
if (filesMetadata[i] != null)
diskCache.closeFile(fileLevelIds[i]);
} catch (IOException e) {
throw new OIndexException("Error during hash table close", e);
} finally {
releaseExclusiveLock();
}
}
public void delete() {
acquireExclusiveLock();
try {
for (int i = 0; i < filesMetadata.length; i++) {
if (filesMetadata[i] != null)
diskCache.deleteFile(fileLevelIds[i]);
}
metadataStore.delete();
treeStateStore.delete();
} catch (IOException e) {
throw new OIndexException("Exception during index deletion", e);
} finally {
releaseExclusiveLock();
}
}
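/**
 * Collapses a child node whose hash maps all became identical back into its parent: each hash map
 * is replaced by the single pointer it contains, the child node is deleted, and the parent's
 * max-child-depth metadata is recomputed for the affected half.
 */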
private void mergeNodeToParent(long[] node, BucketPath nodePath) {
final int startIndex = findParentNodeStartIndex(nodePath);
final int localNodeDepth = nodePath.nodeLocalDepth;
final int hashMapSize = 1 << localNodeDepth;
final long[] parentNode = hashTree[nodePath.parent.nodeIndex];
for (int i = 0, k = startIndex; i < node.length; i += hashMapSize, k++) {
parentNode[k] = node[i];
}
deleteNode(nodePath.nodeIndex);
final OHashTreeNodeMetadata metadata = nodesMetadata[nodePath.parent.nodeIndex];
if (nodePath.parent.itemIndex < MAX_LEVEL_SIZE / 2) {
final int maxChildDepth = metadata.getMaxLeftChildDepth();
if (maxChildDepth == localNodeDepth)
metadata.setMaxLeftChildDepth(getMaxLevelDepth(parentNode, 0, parentNode.length / 2));
} else {
final int maxChildDepth = metadata.getMaxRightChildDepth();
if (maxChildDepth == localNodeDepth)
metadata.setMaxRightChildDepth(getMaxLevelDepth(parentNode, parentNode.length / 2, parentNode.length));
}
}
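/**
 * After a removal, tries to merge a sparsely filled bucket with its buddy (the bucket differing
 * only in the last bit of the shared prefix). When both fit, their content is rewritten into a
 * bucket on the previous file level, the directory slots of the pair are redirected to it, and
 * the freed bucket pair is chained into the old level's tombstone list for reuse.
 */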
private void mergeBucketsAfterDeletion(BucketPath nodePath, OHashIndexBucket<K, V> bucket) throws IOException {
final int bucketDepth = bucket.getDepth();
if (bucket.getContentSize() > OHashIndexBucket.MAX_BUCKET_SIZE_BYTES * MERGE_THRESHOLD)
return;
if (bucketDepth - MAX_LEVEL_DEPTH < 1)
return;
int offset = nodePath.nodeGlobalDepth - (bucketDepth - 1);
BucketPath currentNode = nodePath;
int nodeLocalDepth = nodePath.nodeLocalDepth;
while (offset > 0) {
offset -= nodeLocalDepth;
if (offset > 0) {
currentNode = currentNode.parent; // advance up the chain; nodePath.parent would stall after one level
nodeLocalDepth = currentNode.nodeLocalDepth;
}
}
final int diff = bucketDepth - 1 - (currentNode.nodeGlobalDepth - nodeLocalDepth);
final int interval = (1 << (nodeLocalDepth - diff - 1));
int firstStartIndex = currentNode.itemIndex & ((LEVEL_MASK << (nodeLocalDepth - diff)) & LEVEL_MASK);
int firstEndIndex = firstStartIndex + interval;
final int secondStartIndex = firstEndIndex;
final int secondEndIndex = secondStartIndex + interval;
final OHashIndexBucket<K, V> buddyBucket;
int buddyLevel;
long buddyIndex;
long buddyPointer;
final long[] node = hashTree[currentNode.nodeIndex];
if ((currentNode.itemIndex >>> (nodeLocalDepth - diff - 1) & 1) == 1) {
buddyPointer = node[firstStartIndex + currentNode.hashMapOffset];
while (buddyPointer < 0) {
final int nodeIndex = (int) ((buddyPointer & Long.MAX_VALUE) >> 8);
final int itemOffset = (int) buddyPointer & 0xFF;
buddyPointer = hashTree[nodeIndex][itemOffset];
}
assert buddyPointer > 0;
buddyLevel = getFileLevel(buddyPointer);
buddyIndex = getPageIndex(buddyPointer);
} else {
buddyPointer = node[secondStartIndex + currentNode.hashMapOffset];
while (buddyPointer < 0) {
final int nodeIndex = (int) ((buddyPointer & Long.MAX_VALUE) >> 8);
final int itemOffset = (int) buddyPointer & 0xFF;
buddyPointer = hashTree[nodeIndex][itemOffset];
}
assert buddyPointer > 0;
buddyLevel = getFileLevel(buddyPointer);
buddyIndex = getPageIndex(buddyPointer);
}
OCacheEntry buddyCacheEntry = loadPageEntry(buddyIndex, buddyLevel);
OCachePointer buddyPagePointer = buddyCacheEntry.getCachePointer();
buddyPagePointer.acquireExclusiveLock();
try {
buddyBucket = new OHashIndexBucket<K, V>(buddyPagePointer.getDataPointer(), keySerializer, valueSerializer, keyTypes);
if (buddyBucket.getDepth() != bucketDepth)
return;
if (bucket.mergedSize(buddyBucket) >= OHashIndexBucket.MAX_BUCKET_SIZE_BYTES)
return;
filesMetadata[buddyLevel].setBucketsCount(filesMetadata[buddyLevel].getBucketsCount() - 2);
int newBuddyLevel = buddyLevel - 1;
long newBuddyIndex = buddyBucket.getSplitHistory(newBuddyLevel);
// the merged bucket lives on the new level, so its metadata takes the increment
filesMetadata[newBuddyLevel].setBucketsCount(filesMetadata[newBuddyLevel].getBucketsCount() + 1);
final OCacheEntry newBuddyCacheEntry = loadPageEntry(newBuddyIndex, newBuddyLevel);
final OCachePointer newBuddyPagePointer = newBuddyCacheEntry.getCachePointer();
newBuddyPagePointer.acquireExclusiveLock();
try {
final OHashIndexBucket<K, V> newBuddyBucket = new OHashIndexBucket<K, V>(bucketDepth - 1,
newBuddyPagePointer.getDataPointer(), keySerializer, valueSerializer, keyTypes);
for (OHashIndexBucket.Entry<K, V> entry : buddyBucket)
newBuddyBucket.appendEntry(entry.hashCode, entry.key, entry.value);
for (OHashIndexBucket.Entry<K, V> entry : bucket)
newBuddyBucket.addEntry(entry.hashCode, entry.key, entry.value);
} finally {
newBuddyCacheEntry.markDirty();
newBuddyPagePointer.releaseExclusiveLock();
diskCache.release(newBuddyCacheEntry);
}
final long bucketPointer = hashTree[nodePath.nodeIndex][nodePath.itemIndex + nodePath.hashMapOffset];
final long bucketIndex = getPageIndex(bucketPointer);
final long newBuddyPointer = createBucketPointer(newBuddyIndex, newBuddyLevel); // must reference the merged bucket, not the old buddy
for (int i = firstStartIndex; i < secondEndIndex; i++)
updateBucket(currentNode.nodeIndex, i, currentNode.hashMapOffset, newBuddyPointer);
final OHashIndexFileLevelMetadata oldBuddyFileMetadata = filesMetadata[buddyLevel];
if (oldBuddyFileMetadata.getBucketsCount() > 0) {
final long newTombstoneIndex;
if (bucketIndex < buddyIndex) {
bucket.setNextRemovedBucketPair(oldBuddyFileMetadata.getTombstoneIndex());
newTombstoneIndex = bucketIndex;
} else {
buddyBucket.setNextRemovedBucketPair(oldBuddyFileMetadata.getTombstoneIndex());
buddyCacheEntry.markDirty();
newTombstoneIndex = buddyIndex;
}
oldBuddyFileMetadata.setTombstoneIndex(newTombstoneIndex);
} else
oldBuddyFileMetadata.setTombstoneIndex(-1);
} finally {
buddyPagePointer.releaseExclusiveLock();
diskCache.release(buddyCacheEntry);
}
}
public void flush() {
acquireExclusiveLock();
try {
saveState();
metadataStore.synch();
treeStateStore.synch();
for (int i = 0; i < filesMetadata.length; i++)
if (filesMetadata[i] != null)
diskCache.flushFile(fileLevelIds[i]);
} catch (IOException e) {
throw new OIndexException("Error during hash table flush", e);
} finally {
releaseExclusiveLock();
}
}
public boolean wasSoftlyClosed() {
acquireSharedLock();
try {
if (!metadataStore.wasSoftlyClosedAtPreviousTime())
return false;
if (!treeStateStore.wasSoftlyClosedAtPreviousTime())
return false;
for (int i = 0; i < filesMetadata.length; i++) {
if (filesMetadata[i] != null && !diskCache.wasSoftlyClosed(fileLevelIds[i]))
return false;
}
return true;
} catch (IOException ioe) {
throw new OIndexException("Error during integrity check", ioe);
} finally {
releaseSharedLock();
}
}
public void setSoftlyClosed(boolean softlyClosed) {
acquireSharedLock();
try {
metadataStore.setSoftlyClosed(softlyClosed);
treeStateStore.setSoftlyClosed(softlyClosed);
for (int i = 0; i < filesMetadata.length; i++) {
if (filesMetadata[i] != null)
diskCache.setSoftlyClosed(fileLevelIds[i], softlyClosed);
}
} catch (IOException ioe) {
throw new OIndexException("Error during integrity check", ioe);
} finally {
releaseSharedLock();
}
}
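/**
 * Inserts or updates a single entry. When the target bucket overflows it is split and the
 * directory is adjusted (slot rewrite, node split, or a new node level); the trailing recursive
 * doPut call then retries the insert against the updated tree.
 */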
private void doPut(K key, V value) throws IOException {
final long hashCode = keyHashFunction.hashCode(key);
final BucketPath bucketPath = getBucket(hashCode);
long[] node = hashTree[bucketPath.nodeIndex];
final long bucketPointer = node[bucketPath.itemIndex + bucketPath.hashMapOffset];
if (bucketPointer == 0)
throw new IllegalStateException("In this version of hash table buckets are added through split only.");
final long pageIndex = getPageIndex(bucketPointer);
final int fileLevel = getFileLevel(bucketPointer);
final OCacheEntry cacheEntry = loadPageEntry(pageIndex, fileLevel);
final OCachePointer pagePointer = cacheEntry.getCachePointer();
pagePointer.acquireExclusiveLock();
try {
final OHashIndexBucket<K, V> bucket = new OHashIndexBucket<K, V>(pagePointer.getDataPointer(), keySerializer,
valueSerializer, keyTypes);
final int index = bucket.getIndex(hashCode, key);
if (index > -1) {
final int updateResult = bucket.updateEntry(index, value);
if (updateResult == 0)
return;
if (updateResult == 1) {
cacheEntry.markDirty();
return;
}
assert updateResult == -1;
bucket.deleteEntry(index);
size--;
}
if (bucket.addEntry(hashCode, key, value)) {
cacheEntry.markDirty();
size++;
return;
}
final BucketSplitResult splitResult = splitBucket(bucket, fileLevel, pageIndex);
final long updatedBucketPointer = splitResult.updatedBucketPointer;
final long newBucketPointer = splitResult.newBucketPointer;
final int bucketDepth = splitResult.newDepth;
if (bucketDepth <= bucketPath.nodeGlobalDepth) {
updateNodeAfterBucketSplit(bucketPath, bucketDepth, newBucketPointer, updatedBucketPointer);
} else {
if (bucketPath.nodeLocalDepth < MAX_LEVEL_DEPTH) {
final NodeSplitResult nodeSplitResult = splitNode(bucketPath, node);
assert !(nodeSplitResult.allLeftHashMapsEqual && nodeSplitResult.allRightHashMapsEqual);
final long[] newNode = nodeSplitResult.newNode;
final int nodeLocalDepth = bucketPath.nodeLocalDepth + 1;
final int hashMapSize = 1 << nodeLocalDepth;
assert nodeSplitResult.allRightHashMapsEqual == checkAllMapsContainSameBucket(newNode, hashMapSize);
int newNodeIndex = -1;
if (!nodeSplitResult.allRightHashMapsEqual || bucketPath.itemIndex >= MAX_LEVEL_SIZE / 2)
newNodeIndex = addNewNode(newNode, nodeLocalDepth);
final int updatedItemIndex = bucketPath.itemIndex << 1;
final int updatedOffset = bucketPath.hashMapOffset << 1;
final int updatedGlobalDepth = bucketPath.nodeGlobalDepth + 1;
boolean allLeftHashMapsEqual = nodeSplitResult.allLeftHashMapsEqual;
boolean allRightHashMapsEqual = nodeSplitResult.allRightHashMapsEqual;
if (updatedOffset < MAX_LEVEL_SIZE) {
allLeftHashMapsEqual = false;
final BucketPath updatedBucketPath = new BucketPath(bucketPath.parent, updatedOffset, updatedItemIndex,
bucketPath.nodeIndex, nodeLocalDepth, updatedGlobalDepth);
updateNodeAfterBucketSplit(updatedBucketPath, bucketDepth, newBucketPointer, updatedBucketPointer);
} else {
allRightHashMapsEqual = false;
final BucketPath newBucketPath = new BucketPath(bucketPath.parent, updatedOffset - MAX_LEVEL_SIZE, updatedItemIndex,
newNodeIndex, nodeLocalDepth, updatedGlobalDepth);
updateNodeAfterBucketSplit(newBucketPath, bucketDepth, newBucketPointer, updatedBucketPointer);
}
final long[] updatedNode = hashTree[bucketPath.nodeIndex];
updateNodesAfterSplit(bucketPath, updatedNode, newNode, nodeLocalDepth, hashMapSize, allLeftHashMapsEqual,
allRightHashMapsEqual, newNodeIndex);
if (allLeftHashMapsEqual)
deleteNode(bucketPath.nodeIndex);
} else {
addNewLevelNode(bucketPath, node, newBucketPointer, updatedBucketPointer);
}
}
} finally {
pagePointer.releaseExclusiveLock();
diskCache.release(cacheEntry);
}
doPut(key, value);
}
private void updateNodesAfterSplit(BucketPath bucketPath, long[] node, long[] newNode, int nodeLocalDepth, int hashMapSize,
boolean allLeftHashMapEquals, boolean allRightHashMapsEquals, int newNodeIndex) {
final int startIndex = findParentNodeStartIndex(bucketPath);
final long[] parentNode = hashTree[bucketPath.parent.nodeIndex];
assert assertParentNodeStartIndex(bucketPath, parentNode, startIndex);
final int pointersSize = 1 << (MAX_LEVEL_DEPTH - nodeLocalDepth);
if (allLeftHashMapEquals) {
for (int i = 0; i < pointersSize; i++) {
final long position = node[i * hashMapSize];
parentNode[startIndex + i] = position;
}
} else {
for (int i = 0; i < pointersSize; i++)
parentNode[startIndex + i] = (bucketPath.nodeIndex << 8) | (i * hashMapSize) | Long.MIN_VALUE;
}
if (allRightHashMapsEquals) {
for (int i = 0; i < pointersSize; i++) {
final long position = newNode[i * hashMapSize];
parentNode[startIndex + pointersSize + i] = position;
}
} else {
for (int i = 0; i < pointersSize; i++)
parentNode[startIndex + pointersSize + i] = (newNodeIndex << 8) | (i * hashMapSize) | Long.MIN_VALUE;
}
updateMaxChildDepth(bucketPath.parent, bucketPath.nodeLocalDepth + 1);
}
private void updateMaxChildDepth(BucketPath parentPath, int childDepth) {
if (parentPath == null)
return;
final OHashTreeNodeMetadata metadata = nodesMetadata[parentPath.nodeIndex];
if (parentPath.itemIndex < MAX_LEVEL_SIZE / 2) {
final int maxChildDepth = metadata.getMaxLeftChildDepth();
if (childDepth > maxChildDepth)
metadata.setMaxLeftChildDepth(childDepth);
} else {
final int maxChildDepth = metadata.getMaxRightChildDepth();
if (childDepth > maxChildDepth) // equivalent guard (the old form fired on equality as a no-op), aligned with the left-child branch
metadata.setMaxRightChildDepth(childDepth);
}
}
private boolean assertParentNodeStartIndex(BucketPath bucketPath, long[] parentNode, int calculatedIndex) {
int startIndex = -1;
for (int i = 0; i < parentNode.length; i++)
if (parentNode[i] < 0 && (parentNode[i] & Long.MAX_VALUE) >>> 8 == bucketPath.nodeIndex) {
startIndex = i;
break;
}
return startIndex == calculatedIndex;
}
private int findParentNodeStartIndex(BucketPath bucketPath) {
final BucketPath parentBucketPath = bucketPath.parent;
final int pointersSize = 1 << (MAX_LEVEL_DEPTH - bucketPath.nodeLocalDepth);
if (parentBucketPath.itemIndex < MAX_LEVEL_SIZE / 2)
return (parentBucketPath.itemIndex / pointersSize) * pointersSize;
return ((parentBucketPath.itemIndex - MAX_LEVEL_SIZE / 2) / pointersSize) * pointersSize + MAX_LEVEL_SIZE / 2;
}
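/**
 * Handles a bucket split that exceeds the node's capacity: allocates a child node, copies the
 * affected group of sibling pointers into it, wires the two split halves into the slot that
 * overflowed, and replaces the group in the current node with references to the new child.
 */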
private void addNewLevelNode(BucketPath bucketPath, long[] node, long newBucketPointer, long updatedBucketPointer) {
final long[] newNode = new long[MAX_LEVEL_SIZE];
final int newNodeDepth;
final int newNodeStartIndex;
final int mapInterval;
if (bucketPath.itemIndex < node.length / 2) {
final int maxDepth = nodesMetadata[bucketPath.nodeIndex].getMaxLeftChildDepth();
assert getMaxLevelDepth(node, 0, node.length / 2) == maxDepth;
if (maxDepth > 0)
newNodeDepth = maxDepth;
else
newNodeDepth = 1;
mapInterval = 1 << (MAX_LEVEL_DEPTH - newNodeDepth);
newNodeStartIndex = (bucketPath.itemIndex / mapInterval) * mapInterval;
} else {
final int maxDepth = nodesMetadata[bucketPath.nodeIndex].getMaxRightChildDepth();
assert getMaxLevelDepth(node, node.length / 2, node.length) == maxDepth;
if (maxDepth > 0)
newNodeDepth = maxDepth;
else
newNodeDepth = 1;
mapInterval = 1 << (MAX_LEVEL_DEPTH - newNodeDepth);
newNodeStartIndex = ((bucketPath.itemIndex - node.length / 2) / mapInterval) * mapInterval + node.length / 2;
}
final int newNodeIndex = addNewNode(newNode, newNodeDepth);
final int mapSize = 1 << newNodeDepth;
for (int i = 0; i < mapInterval; i++) {
final int nodeOffset = i + newNodeStartIndex;
final long bucketPointer = node[nodeOffset];
if (nodeOffset != bucketPath.itemIndex) {
for (int n = i << newNodeDepth; n < (i + 1) << newNodeDepth; n++)
newNode[n] = bucketPointer;
} else {
for (int n = i << newNodeDepth; n < (2 * i + 1) << (newNodeDepth - 1); n++)
newNode[n] = updatedBucketPointer;
for (int n = (2 * i + 1) << (newNodeDepth - 1); n < (i + 1) << newNodeDepth; n++)
newNode[n] = newBucketPointer;
}
node[nodeOffset] = (newNodeIndex << 8) | (i * mapSize) | Long.MIN_VALUE;
}
updateMaxChildDepth(bucketPath, newNodeDepth);
}
private int getMaxLevelDepth(long node[], int start, int end) {
int currentIndex = -1;
int maxDepth = 0;
for (int i = start; i < end; i++) {
final long nodePosition = node[i];
if (nodePosition >= 0)
continue;
final int index = (int) ((nodePosition & Long.MAX_VALUE) >>> 8);
if (index == currentIndex)
continue;
currentIndex = index;
if (maxDepth < nodesMetadata[index].getNodeLocalDepth())
maxDepth = nodesMetadata[index].getNodeLocalDepth();
}
return maxDepth;
}
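/**
 * After a bucket split that still fits within the current global depth, rewrites the pointer
 * interval that referenced the old bucket: the first half now points at the updated bucket and
 * the second half at the new one.
 */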
private void updateNodeAfterBucketSplit(BucketPath bucketPath, int bucketDepth, long newBucketPointer, long updatedBucketPointer) {
int offset = bucketPath.nodeGlobalDepth - (bucketDepth - 1);
BucketPath currentNode = bucketPath;
int nodeLocalDepth = bucketPath.nodeLocalDepth;
while (offset > 0) {
offset -= nodeLocalDepth;
if (offset > 0) {
currentNode = currentNode.parent; // advance up the chain; re-reading bucketPath.parent would stall the climb
nodeLocalDepth = currentNode.nodeLocalDepth;
}
}
final int diff = bucketDepth - 1 - (currentNode.nodeGlobalDepth - nodeLocalDepth);
final int interval = (1 << (nodeLocalDepth - diff - 1));
final int firstStartIndex = currentNode.itemIndex & ((LEVEL_MASK << (nodeLocalDepth - diff)) & LEVEL_MASK);
final int firstEndIndex = firstStartIndex + interval;
final int secondStartIndex = firstEndIndex;
final int secondEndIndex = secondStartIndex + interval;
for (int i = firstStartIndex; i < firstEndIndex; i++)
updateBucket(currentNode.nodeIndex, i, currentNode.hashMapOffset, updatedBucketPointer);
for (int i = secondStartIndex; i < secondEndIndex; i++)
updateBucket(currentNode.nodeIndex, i, currentNode.hashMapOffset, newBucketPointer);
}
private int addNewNode(long[] newNode, int nodeLocalDepth) {
if (hashTreeTombstone >= 0) {
long[] tombstone = hashTree[hashTreeTombstone];
hashTree[hashTreeTombstone] = newNode;
nodesMetadata[hashTreeTombstone] = new OHashTreeNodeMetadata((byte) 0, (byte) 0, (byte) nodeLocalDepth);
final int nodeIndex = hashTreeTombstone;
if (tombstone != null)
hashTreeTombstone = (int) tombstone[0];
else
hashTreeTombstone = -1;
return nodeIndex;
}
if (hashTreeSize >= hashTree.length) {
long[][] newHashTree = new long[hashTree.length << 1][];
System.arraycopy(hashTree, 0, newHashTree, 0, hashTree.length);
hashTree = newHashTree;
newHashTree = null;
OHashTreeNodeMetadata[] newNodeMetadata = new OHashTreeNodeMetadata[nodesMetadata.length << 1];
System.arraycopy(nodesMetadata, 0, newNodeMetadata, 0, nodesMetadata.length);
nodesMetadata = newNodeMetadata;
newNodeMetadata = null;
}
hashTree[hashTreeSize] = newNode;
nodesMetadata[hashTreeSize] = new OHashTreeNodeMetadata((byte) 0, (byte) 0, (byte) nodeLocalDepth);
hashTreeSize++;
return hashTreeSize - 1;
}
private boolean checkAllMapsContainSameBucket(long[] newNode, int hashMapSize) {
int n = 0;
boolean allHashMapsEquals = true;
while (n < newNode.length) {
boolean allHashBucketEquals = true;
for (int i = 0; i < hashMapSize - 1; i++) {
if (newNode[i + n] != newNode[i + n + 1]) {
allHashBucketEquals = false;
break;
}
}
n += hashMapSize;
if (!allHashBucketEquals) {
allHashMapsEquals = false;
break;
}
}
assert assertAllNodesAreFilePointers(allHashMapsEquals, newNode, hashMapSize);
return allHashMapsEquals;
}
private boolean assertAllNodesAreFilePointers(boolean allHashMapsEquals, long[] newNode, int hashMapSize) {
if (allHashMapsEquals) {
int n = 0;
while (n < newNode.length) {
for (int i = 0; i < hashMapSize; i++) {
if (newNode[i] < 0) {
return false;
}
}
n += hashMapSize;
}
}
return true;
}
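/**
 * Doubles the local depth of a directory node. The lower half of the old node expands in place
 * into the updated node and the upper half into a freshly allocated node, each pointer being
 * duplicated. The result also reports whether all hash maps of either half stayed identical,
 * letting the caller avoid materializing a node that would carry no information.
 */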
private NodeSplitResult splitNode(BucketPath bucketPath, long[] node) {
final long[] newNode = new long[MAX_LEVEL_SIZE];
final int hashMapSize = 1 << (bucketPath.nodeLocalDepth + 1);
boolean hashMapItemsAreEqual = true;
boolean allLeftItemsAreEqual;
boolean allRightItemsAreEqual;
int mapCounter = 0;
long firstPosition = -1;
for (int i = MAX_LEVEL_SIZE / 2; i < MAX_LEVEL_SIZE; i++) {
final long position = node[i];
if (hashMapItemsAreEqual && mapCounter == 0)
firstPosition = position;
newNode[2 * (i - MAX_LEVEL_SIZE / 2)] = position;
newNode[2 * (i - MAX_LEVEL_SIZE / 2) + 1] = position;
if (hashMapItemsAreEqual) {
hashMapItemsAreEqual = firstPosition == position;
mapCounter += 2;
if (mapCounter >= hashMapSize)
mapCounter = 0;
}
}
mapCounter = 0;
allRightItemsAreEqual = hashMapItemsAreEqual;
hashMapItemsAreEqual = true;
final long[] updatedNode = new long[node.length];
for (int i = 0; i < MAX_LEVEL_SIZE / 2; i++) {
final long position = node[i];
if (hashMapItemsAreEqual && mapCounter == 0)
firstPosition = position;
updatedNode[2 * i] = position;
updatedNode[2 * i + 1] = position;
if (hashMapItemsAreEqual) {
hashMapItemsAreEqual = firstPosition == position;
mapCounter += 2;
if (mapCounter >= hashMapSize)
mapCounter = 0;
}
}
allLeftItemsAreEqual = hashMapItemsAreEqual;
nodesMetadata[bucketPath.nodeIndex].incrementLocalNodeDepth();
hashTree[bucketPath.nodeIndex] = updatedNode;
return new NodeSplitResult(newNode, allLeftItemsAreEqual, allRightItemsAreEqual);
}
private void deleteNode(int nodeIndex) {
if (nodeIndex == hashTreeSize - 1) {
hashTree[nodeIndex] = null;
nodesMetadata[nodeIndex] = null;
hashTreeSize--;
return;
}
if (hashTreeTombstone > -1) {
final long[] tombstone = new long[] { hashTreeTombstone };
hashTree[nodeIndex] = tombstone;
hashTreeTombstone = nodeIndex;
} else {
hashTree[nodeIndex] = null;
hashTreeTombstone = nodeIndex;
}
nodesMetadata[nodeIndex] = null;
}
private void splitBucketContent(OHashIndexBucket<K, V> bucket, OHashIndexBucket<K, V> updatedBucket,
OHashIndexBucket<K, V> newBucket, int newBucketDepth) {
assert checkBucketDepth(bucket);
for (OHashIndexBucket.Entry<K, V> entry : bucket) {
if (((keyHashFunction.hashCode(entry.key) >>> (HASH_CODE_SIZE - newBucketDepth)) & 1) == 0)
updatedBucket.appendEntry(entry.hashCode, entry.key, entry.value);
else
newBucket.appendEntry(entry.hashCode, entry.key, entry.value);
}
updatedBucket.setDepth(newBucketDepth);
newBucket.setDepth(newBucketDepth);
assert checkBucketDepth(updatedBucket);
assert checkBucketDepth(newBucket);
}
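/**
 * Splits an overflowing bucket into two buckets of depth + 1 on the next file level, reusing a
 * tombstoned bucket pair of that level when available. Both new buckets record the origin in
 * their split history so that a later merge can find its way back down.
 */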
private BucketSplitResult splitBucket(OHashIndexBucket<K, V> bucket, int fileLevel, long pageIndex) throws IOException {
int bucketDepth = bucket.getDepth();
int newBucketDepth = bucketDepth + 1;
int newFileLevel = newBucketDepth - MAX_LEVEL_DEPTH;
OHashIndexFileLevelMetadata newFileMetadata = filesMetadata[newFileLevel];
if (newFileMetadata == null) {
newFileMetadata = createFileMetadata(newFileLevel);
filesMetadata[newFileLevel] = newFileMetadata;
}
final long tombstoneIndex = newFileMetadata.getTombstoneIndex();
final long updatedBucketIndex;
if (tombstoneIndex >= 0) {
final OCacheEntry tombstoneCacheEntry = loadPageEntry(tombstoneIndex, newFileLevel);
final OCachePointer tombstonePagePointer = tombstoneCacheEntry.getCachePointer();
try {
final OHashIndexBucket<K, V> tombstone = new OHashIndexBucket<K, V>(tombstonePagePointer.getDataPointer(), keySerializer,
valueSerializer, keyTypes);
newFileMetadata.setTombstoneIndex(tombstone.getNextRemovedBucketPair());
updatedBucketIndex = tombstoneIndex;
} finally {
diskCache.release(tombstoneCacheEntry);
}
} else
updatedBucketIndex = diskCache.getFilledUpTo(fileLevelIds[newFileLevel]);
final long newBucketIndex = updatedBucketIndex + 1;
final OCacheEntry updateBucketCacheEntry = loadPageEntry(updatedBucketIndex, newFileLevel);
final OCachePointer updatedBucketDataPointer = updateBucketCacheEntry.getCachePointer();
updatedBucketDataPointer.acquireExclusiveLock();
try {
final OCacheEntry newBucketCacheEntry = loadPageEntry(newBucketIndex, newFileLevel);
final OCachePointer newBucketDataPointer = newBucketCacheEntry.getCachePointer();
newBucketDataPointer.acquireExclusiveLock();
try {
final OHashIndexBucket<K, V> updatedBucket = new OHashIndexBucket<K, V>(newBucketDepth,
updatedBucketDataPointer.getDataPointer(), keySerializer, valueSerializer, keyTypes);
final OHashIndexBucket<K, V> newBucket = new OHashIndexBucket<K, V>(newBucketDepth, newBucketDataPointer.getDataPointer(),
keySerializer, valueSerializer, keyTypes);
splitBucketContent(bucket, updatedBucket, newBucket, newBucketDepth);
assert bucket.getDepth() == bucketDepth;
final OHashIndexFileLevelMetadata bufferMetadata = filesMetadata[fileLevel];
bufferMetadata.setBucketsCount(bufferMetadata.getBucketsCount() - 1);
assert bufferMetadata.getBucketsCount() >= 0;
updatedBucket.setSplitHistory(fileLevel, pageIndex);
newBucket.setSplitHistory(fileLevel, pageIndex);
newFileMetadata.setBucketsCount(newFileMetadata.getBucketsCount() + 2);
final long updatedBucketPointer = createBucketPointer(updatedBucketIndex, newFileLevel);
final long newBucketPointer = createBucketPointer(newBucketIndex, newFileLevel);
return new BucketSplitResult(updatedBucketPointer, newBucketPointer, newBucketDepth);
} finally {
newBucketDataPointer.releaseExclusiveLock();
newBucketCacheEntry.markDirty();
diskCache.release(newBucketCacheEntry);
}
} finally {
updatedBucketDataPointer.releaseExclusiveLock();
updateBucketCacheEntry.markDirty();
diskCache.release(updateBucketCacheEntry);
}
}
private boolean checkBucketDepth(OHashIndexBucket<K, V> bucket) {
int bucketDepth = bucket.getDepth();
if (bucket.size() == 0)
return true;
final Iterator<OHashIndexBucket.Entry<K, V>> positionIterator = bucket.iterator();
long firstValue = keyHashFunction.hashCode(positionIterator.next().key) >>> (HASH_CODE_SIZE - bucketDepth);
while (positionIterator.hasNext()) {
final long value = keyHashFunction.hashCode(positionIterator.next().key) >>> (HASH_CODE_SIZE - bucketDepth);
if (value != firstValue)
return false;
}
return true;
}
private void updateBucket(int nodeIndex, int itemIndex, int offset, long newBucketPointer) {
final long node[] = hashTree[nodeIndex];
final long position = node[itemIndex + offset];
if (position >= 0)
node[itemIndex + offset] = newBucketPointer;
else {
final int childNodeIndex = (int) ((position & Long.MAX_VALUE) >>> 8);
final int childOffset = (int) (position & 0xFF);
final int childNodeDepth = nodesMetadata[childNodeIndex].getNodeLocalDepth();
final int interval = 1 << childNodeDepth;
for (int i = 0; i < interval; i++) {
updateBucket(childNodeIndex, i, childOffset, newBucketPointer);
}
}
}
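/**
 * Seeds an empty index: formats 2^MAX_LEVEL_DEPTH empty buckets on file level zero (the
 * OHashIndexBucket constructor writes the page layout; the local variable itself is unused) and
 * creates a single root directory node whose slots point at them one to one.
 */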
private void initHashTreeState() throws IOException {
for (long pageIndex = 0; pageIndex < MAX_LEVEL_SIZE; pageIndex++) {
final OCacheEntry cacheEntry = loadPageEntry(pageIndex, 0);
final OCachePointer pagePointer = cacheEntry.getCachePointer();
pagePointer.acquireExclusiveLock();
try {
final OHashIndexBucket<K, V> emptyBucket = new OHashIndexBucket<K, V>(MAX_LEVEL_DEPTH, pagePointer.getDataPointer(),
keySerializer, valueSerializer, keyTypes);
} finally {
pagePointer.releaseExclusiveLock();
cacheEntry.markDirty();
diskCache.release(cacheEntry);
}
}
final long[] rootTree = new long[MAX_LEVEL_SIZE];
for (int i = 0; i < MAX_LEVEL_SIZE; i++)
rootTree[i] = createBucketPointer(i, 0);
hashTree = new long[1][];
hashTree[0] = rootTree;
nodesMetadata = new OHashTreeNodeMetadata[1];
nodesMetadata[0] = new OHashTreeNodeMetadata((byte) 0, (byte) 0, (byte) MAX_LEVEL_DEPTH);
filesMetadata[0].setBucketsCount(MAX_LEVEL_SIZE);
size = 0;
hashTreeSize = 1;
}
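// Bucket pointers pack page index and file level into one long: the page index is stored shifted
// left by one byte and offset by one so that 0 can mean "no bucket", while the lowest byte holds
// the file level. For example, pageIndex 5 on fileLevel 2 encodes as ((5 + 1) << 8) | 2 = 0x602,
// and decodes back via (0x602 >>> 8) - 1 = 5 and 0x602 & 0xFF = 2.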
private long createBucketPointer(long pageIndex, int fileLevel) {
return ((pageIndex + 1) << 8) | fileLevel;
}
private long getPageIndex(long bucketPointer) {
return (bucketPointer >>> 8) - 1;
}
private int getFileLevel(long bucketPointer) {
return (int) (bucketPointer & 0xFF);
}
private OCacheEntry loadPageEntry(long pageIndex, int fileLevel) throws IOException {
return diskCache.load(fileLevelIds[fileLevel], pageIndex, false);
}
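/**
 * Resolves the directory path for a hash code: starting at the root node, consumes
 * nodeLocalDepth bits per level and follows negative slots (child-node references) until a slot
 * holding a direct bucket pointer is found.
 */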
private BucketPath getBucket(final long hashCode) {
int localNodeDepth = nodesMetadata[0].getNodeLocalDepth();
int nodeDepth = localNodeDepth;
BucketPath parentNode = null;
int nodeIndex = 0;
int offset = 0;
int index = (int) ((hashCode >>> (HASH_CODE_SIZE - nodeDepth)) & (LEVEL_MASK >>> (MAX_LEVEL_DEPTH - localNodeDepth)));
BucketPath currentNode = new BucketPath(parentNode, 0, index, 0, localNodeDepth, nodeDepth);
do {
final long position = hashTree[nodeIndex][index + offset];
if (position >= 0)
return currentNode;
nodeIndex = (int) ((position & Long.MAX_VALUE) >>> 8);
offset = (int) (position & 0xFF);
localNodeDepth = nodesMetadata[nodeIndex].getNodeLocalDepth();
nodeDepth += localNodeDepth;
index = (int) ((hashCode >>> (HASH_CODE_SIZE - nodeDepth)) & (LEVEL_MASK >>> (MAX_LEVEL_DEPTH - localNodeDepth)));
parentNode = currentNode;
currentNode = new BucketPath(parentNode, offset, index, nodeIndex, localNodeDepth, nodeDepth);
} while (nodeDepth <= HASH_CODE_SIZE);
throw new IllegalStateException("Extendible hashing tree in corrupted state.");
}
private static final class BucketPath {
private final BucketPath parent;
private final int hashMapOffset;
private final int itemIndex;
private final int nodeIndex;
private final int nodeGlobalDepth;
private final int nodeLocalDepth;
private BucketPath(BucketPath parent, int hashMapOffset, int itemIndex, int nodeIndex, int nodeLocalDepth, int nodeGlobalDepth) {
this.parent = parent;
this.hashMapOffset = hashMapOffset;
this.itemIndex = itemIndex;
this.nodeIndex = nodeIndex;
this.nodeGlobalDepth = nodeGlobalDepth;
this.nodeLocalDepth = nodeLocalDepth;
}
}
private static final class BucketSplitResult {
private final long updatedBucketPointer;
private final long newBucketPointer;
private final int newDepth;
private BucketSplitResult(long updatedBucketPointer, long newBucketPointer, int newDepth) {
this.updatedBucketPointer = updatedBucketPointer;
this.newBucketPointer = newBucketPointer;
this.newDepth = newDepth;
}
}
private static final class NodeSplitResult {
private final long[] newNode;
private final boolean allLeftHashMapsEqual;
private final boolean allRightHashMapsEqual;
private NodeSplitResult(long[] newNode, boolean allLeftHashMapsEqual, boolean allRightHashMapsEqual) {
this.newNode = newNode;
this.allLeftHashMapsEqual = allLeftHashMapsEqual;
this.allRightHashMapsEqual = allRightHashMapsEqual;
}
}
private static final class KeyHashCodeComparator<K> implements Comparator<K> {
private final Comparator<? super K> comparator = ODefaultComparator.INSTANCE;
private final OHashFunction<K> keyHashFunction;
public KeyHashCodeComparator(OHashFunction<K> keyHashFunction) {
this.keyHashFunction = keyHashFunction;
}
@Override
public int compare(K keyOne, K keyTwo) {
final long hashCodeOne = keyHashFunction.hashCode(keyOne);
final long hashCodeTwo = keyHashFunction.hashCode(keyTwo);
if (hashCodeOne > hashCodeTwo)
return 1;
if (hashCodeOne < hashCodeTwo)
return -1;
return comparator.compare(keyOne, keyTwo);
}
}
} | 1no label
| core_src_main_java_com_orientechnologies_orient_core_index_hashindex_local_OLocalHashTable.java |
580 | class ShardOptimizeRequest extends BroadcastShardOperationRequest {
private boolean waitForMerge = OptimizeRequest.Defaults.WAIT_FOR_MERGE;
private int maxNumSegments = OptimizeRequest.Defaults.MAX_NUM_SEGMENTS;
private boolean onlyExpungeDeletes = OptimizeRequest.Defaults.ONLY_EXPUNGE_DELETES;
private boolean flush = OptimizeRequest.Defaults.FLUSH;
ShardOptimizeRequest() {
}
public ShardOptimizeRequest(String index, int shardId, OptimizeRequest request) {
super(index, shardId, request);
waitForMerge = request.waitForMerge();
maxNumSegments = request.maxNumSegments();
onlyExpungeDeletes = request.onlyExpungeDeletes();
flush = request.flush();
}
boolean waitForMerge() {
return waitForMerge;
}
int maxNumSegments() {
return maxNumSegments;
}
public boolean onlyExpungeDeletes() {
return onlyExpungeDeletes;
}
public boolean flush() {
return flush;
}
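// readFrom and writeTo must stay symmetric: the fields travel in the fixed order waitForMerge,
// maxNumSegments, onlyExpungeDeletes, flush.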
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
waitForMerge = in.readBoolean();
maxNumSegments = in.readInt();
onlyExpungeDeletes = in.readBoolean();
flush = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(waitForMerge);
out.writeInt(maxNumSegments);
out.writeBoolean(onlyExpungeDeletes);
out.writeBoolean(flush);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_indices_optimize_ShardOptimizeRequest.java |
815 | cls = getDatabase().getStorage().callInLock(new Callable<OClass>() {
@Override
public OClass call() throws Exception {
OClass cls = classes.get(iClassName.toLowerCase());
if (cls == null) {
// CHECK IF CAN AUTO-CREATE IT
final ODatabase ownerDb = getDatabase().getDatabaseOwner();
if (ownerDb instanceof ODatabaseObject) {
final Class<?> javaClass = ((ODatabaseObject) ownerDb).getEntityManager().getEntityClass(iClassName);
if (javaClass != null) {
// AUTO REGISTER THE CLASS AT FIRST USE
cls = cascadeCreate(javaClass);
}
}
}
return cls;
}
}, true); | 0true
| core_src_main_java_com_orientechnologies_orient_core_metadata_schema_OSchemaShared.java |
1,009 | transportService.sendRequest(node, transportShardAction, new ShardSingleOperationRequest(request, shard.id()), new BaseTransportResponseHandler<Response>() {
@Override
public Response newInstance() {
return newResponse();
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
@Override
public void handleResponse(final Response response) {
listener.onResponse(response);
}
@Override
public void handleException(TransportException exp) {
onFailure(shard, exp);
}
}); | 0true
| src_main_java_org_elasticsearch_action_support_single_custom_TransportSingleCustomOperationAction.java |
3,400 | public class JustSourceFieldsVisitor extends FieldsVisitor {
@Override
public Status needsField(FieldInfo fieldInfo) throws IOException {
if (SourceFieldMapper.NAME.equals(fieldInfo.name)) {
return Status.YES;
}
return source != null ? Status.STOP : Status.NO;
}
} | 0true
| src_main_java_org_elasticsearch_index_fieldvisitor_JustSourceFieldsVisitor.java |
1,646 | ois = new java.io.ObjectInputStream(bais) {
@Override
public Class<?> resolveClass(java.io.ObjectStreamClass streamClass)
throws java.io.IOException, ClassNotFoundException {
Class<?> c = Class.forName(streamClass.getName(), false, loader);
if (c == null) {
return super.resolveClass(streamClass);
} else {
return c; // Class loader knows of this class.
} // end else: not null
} // end resolveClass
}; // end ois | 0true
| src_main_java_org_elasticsearch_common_Base64.java |
294 | new Thread() {
public void run() {
if (!l.tryLock()) {
latch.countDown();
}
}
}.start(); | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_lock_ClientLockTest.java |
1,840 | class InjectorImpl implements Injector, Lookups {
final State state;
final InjectorImpl parent;
boolean readOnly;
BindingsMultimap bindingsMultimap = new BindingsMultimap();
final Initializer initializer;
/**
* Just-in-time binding cache. Guarded by state.lock()
*/
Map<Key<?>, BindingImpl<?>> jitBindings = Maps.newHashMap();
Lookups lookups = new DeferredLookups(this);
// Restored declaration: the constructor below assigns this field on both branches, so the excerpt needs it to compile.
final ThreadLocal<Object[]> localContext;
InjectorImpl(@Nullable InjectorImpl parent, State state, Initializer initializer) {
this.parent = parent;
this.state = state;
this.initializer = initializer;
if (parent != null) {
localContext = parent.localContext;
} else {
localContext = new ThreadLocal<Object[]>() {
protected Object[] initialValue() {
return new Object[1];
}
};
}
}
/**
* Indexes bindings by type.
*/
void index() {
for (Binding<?> binding : state.getExplicitBindingsThisLevel().values()) {
index(binding);
}
}
<T> void index(Binding<T> binding) {
bindingsMultimap.put(binding.getKey().getTypeLiteral(), binding);
}
public <T> List<Binding<T>> findBindingsByType(TypeLiteral<T> type) {
return bindingsMultimap.getAll(type);
}
/**
* Returns the binding for {@code key}
*/
public <T> BindingImpl<T> getBinding(Key<T> key) {
Errors errors = new Errors(key);
try {
BindingImpl<T> result = getBindingOrThrow(key, errors);
errors.throwConfigurationExceptionIfErrorsExist();
return result;
} catch (ErrorsException e) {
throw new ConfigurationException(errors.merge(e.getErrors()).getMessages());
}
}
/**
* Gets a binding implementation. First, it checks to see if the parent has a binding. If the
* parent has a binding and the binding is scoped, it will use that binding. Otherwise, this
* checks for an explicit binding. If no explicit binding is found, it looks for a just-in-time
* binding.
*/
public <T> BindingImpl<T> getBindingOrThrow(Key<T> key, Errors errors)
throws ErrorsException {
// Check explicit bindings, i.e. bindings created by modules.
BindingImpl<T> binding = state.getExplicitBinding(key);
if (binding != null) {
return binding;
}
// Look for an on-demand binding.
return getJustInTimeBinding(key, errors);
}
public <T> Binding<T> getBinding(Class<T> type) {
return getBinding(Key.get(type));
}
public Injector getParent() {
return parent;
}
public Injector createChildInjector(Iterable<? extends Module> modules) {
return new InjectorBuilder()
.parentInjector(this)
.addModules(modules)
.build();
}
public Injector createChildInjector(Module... modules) {
return createChildInjector(ImmutableList.copyOf(modules));
}
/**
* Returns a just-in-time binding for {@code key}, creating it if necessary.
*
* @throws ErrorsException if the binding could not be created.
*/
private <T> BindingImpl<T> getJustInTimeBinding(Key<T> key, Errors errors)
throws ErrorsException {
synchronized (state.lock()) {
// first try to find a JIT binding that we've already created
for (InjectorImpl injector = this; injector != null; injector = injector.parent) {
@SuppressWarnings("unchecked") // we only store bindings that match their key
BindingImpl<T> binding = (BindingImpl<T>) injector.jitBindings.get(key);
if (binding != null) {
return binding;
}
}
return createJustInTimeBindingRecursive(key, errors);
}
}
/**
* Returns true if the key type is Provider (but not a subclass of Provider).
*/
static boolean isProvider(Key<?> key) {
return key.getTypeLiteral().getRawType().equals(Provider.class);
}
/**
* Returns true if the key type is MembersInjector (but not a subclass of MembersInjector).
*/
static boolean isMembersInjector(Key<?> key) {
return key.getTypeLiteral().getRawType().equals(MembersInjector.class)
&& !key.hasAnnotationType();
}
private <T> BindingImpl<MembersInjector<T>> createMembersInjectorBinding(
Key<MembersInjector<T>> key, Errors errors) throws ErrorsException {
Type membersInjectorType = key.getTypeLiteral().getType();
if (!(membersInjectorType instanceof ParameterizedType)) {
throw errors.cannotInjectRawMembersInjector().toException();
}
@SuppressWarnings("unchecked") // safe because T came from Key<MembersInjector<T>>
TypeLiteral<T> instanceType = (TypeLiteral<T>) TypeLiteral.get(
((ParameterizedType) membersInjectorType).getActualTypeArguments()[0]);
MembersInjector<T> membersInjector = membersInjectorStore.get(instanceType, errors);
InternalFactory<MembersInjector<T>> factory = new ConstantFactory<MembersInjector<T>>(
Initializables.of(membersInjector));
return new InstanceBindingImpl<MembersInjector<T>>(this, key, SourceProvider.UNKNOWN_SOURCE,
factory, ImmutableSet.<InjectionPoint>of(), membersInjector);
}
/**
* Creates a synthetic binding to {@code Provider<T>}, i.e. a binding to the provider from
* {@code Binding<T>}.
*/
private <T> BindingImpl<Provider<T>> createProviderBinding(Key<Provider<T>> key, Errors errors)
throws ErrorsException {
Type providerType = key.getTypeLiteral().getType();
// If the Provider has no type parameter (raw Provider)...
if (!(providerType instanceof ParameterizedType)) {
throw errors.cannotInjectRawProvider().toException();
}
Type entryType = ((ParameterizedType) providerType).getActualTypeArguments()[0];
@SuppressWarnings("unchecked") // safe because T came from Key<Provider<T>>
Key<T> providedKey = (Key<T>) key.ofType(entryType);
BindingImpl<T> delegate = getBindingOrThrow(providedKey, errors);
return new ProviderBindingImpl<T>(this, key, delegate);
}
static class ProviderBindingImpl<T> extends BindingImpl<Provider<T>>
implements ProviderBinding<Provider<T>> {
final BindingImpl<T> providedBinding;
ProviderBindingImpl(InjectorImpl injector, Key<Provider<T>> key, Binding<T> providedBinding) {
super(injector, key, providedBinding.getSource(), createInternalFactory(providedBinding),
Scoping.UNSCOPED);
this.providedBinding = (BindingImpl<T>) providedBinding;
}
static <T> InternalFactory<Provider<T>> createInternalFactory(Binding<T> providedBinding) {
final Provider<T> provider = providedBinding.getProvider();
return new InternalFactory<Provider<T>>() {
public Provider<T> get(Errors errors, InternalContext context, Dependency dependency) {
return provider;
}
};
}
public Key<? extends T> getProvidedKey() {
return providedBinding.getKey();
}
public <V> V acceptTargetVisitor(BindingTargetVisitor<? super Provider<T>, V> visitor) {
return visitor.visit(this);
}
public void applyTo(Binder binder) {
throw new UnsupportedOperationException("This element represents a synthetic binding.");
}
@Override
public String toString() {
return new ToStringBuilder(ProviderKeyBinding.class)
.add("key", getKey())
.add("providedKey", getProvidedKey())
.toString();
}
}
/**
* Converts a constant string binding to the required type.
*
* @return the binding if it could be resolved, or null if the binding doesn't exist
* @throws org.elasticsearch.common.inject.internal.ErrorsException
* if there was an error resolving the binding
*/
private <T> BindingImpl<T> convertConstantStringBinding(Key<T> key, Errors errors)
throws ErrorsException {
// Find a constant string binding.
Key<String> stringKey = key.ofType(String.class);
BindingImpl<String> stringBinding = state.getExplicitBinding(stringKey);
if (stringBinding == null || !stringBinding.isConstant()) {
return null;
}
String stringValue = stringBinding.getProvider().get();
Object source = stringBinding.getSource();
// Find a matching type converter.
TypeLiteral<T> type = key.getTypeLiteral();
MatcherAndConverter matchingConverter = state.getConverter(stringValue, type, errors, source);
if (matchingConverter == null) {
// No converter can handle the given type.
return null;
}
// Try to convert the string. A failed conversion results in an error.
try {
@SuppressWarnings("unchecked") // This cast is safe because we double check below.
T converted = (T) matchingConverter.getTypeConverter().convert(stringValue, type);
if (converted == null) {
throw errors.converterReturnedNull(stringValue, source, type, matchingConverter)
.toException();
}
if (!type.getRawType().isInstance(converted)) {
throw errors.conversionTypeError(stringValue, source, type, matchingConverter, converted)
.toException();
}
return new ConvertedConstantBindingImpl<T>(this, key, converted, stringBinding);
} catch (ErrorsException e) {
throw e;
} catch (RuntimeException e) {
throw errors.conversionError(stringValue, source, type, matchingConverter, e)
.toException();
}
}
private static class ConvertedConstantBindingImpl<T>
extends BindingImpl<T> implements ConvertedConstantBinding<T> {
final T value;
final Provider<T> provider;
final Binding<String> originalBinding;
ConvertedConstantBindingImpl(
Injector injector, Key<T> key, T value, Binding<String> originalBinding) {
super(injector, key, originalBinding.getSource(),
new ConstantFactory<T>(Initializables.of(value)), Scoping.UNSCOPED);
this.value = value;
provider = Providers.of(value);
this.originalBinding = originalBinding;
}
@Override
public Provider<T> getProvider() {
return provider;
}
public <V> V acceptTargetVisitor(BindingTargetVisitor<? super T, V> visitor) {
return visitor.visit(this);
}
public T getValue() {
return value;
}
public Key<String> getSourceKey() {
return originalBinding.getKey();
}
public Set<Dependency<?>> getDependencies() {
return ImmutableSet.<Dependency<?>>of(Dependency.get(getSourceKey()));
}
public void applyTo(Binder binder) {
throw new UnsupportedOperationException("This element represents a synthetic binding.");
}
@Override
public String toString() {
return new ToStringBuilder(ConvertedConstantBinding.class)
.add("key", getKey())
.add("sourceKey", getSourceKey())
.add("value", value)
.toString();
}
}
<T> void initializeBinding(BindingImpl<T> binding, Errors errors) throws ErrorsException {
// Put the partially constructed binding in the map a little early. This enables us to handle
// circular dependencies. Example: FooImpl -> BarImpl -> FooImpl.
// Note: We don't need to synchronize on state.lock() during injector creation.
// TODO: for the above example, remove the binding for BarImpl if the binding for FooImpl fails
if (binding instanceof ConstructorBindingImpl<?>) {
Key<T> key = binding.getKey();
jitBindings.put(key, binding);
boolean successful = false;
try {
((ConstructorBindingImpl) binding).initialize(this, errors);
successful = true;
} finally {
if (!successful) {
jitBindings.remove(key);
}
}
}
}
/**
* Creates a binding for an injectable type with the given scope. Looks for a scope on the type if
* none is specified.
*/
<T> BindingImpl<T> createUnitializedBinding(Key<T> key, Scoping scoping, Object source,
Errors errors) throws ErrorsException {
Class<?> rawType = key.getTypeLiteral().getRawType();
// Don't try to inject arrays, or enums.
if (rawType.isArray() || rawType.isEnum()) {
throw errors.missingImplementation(key).toException();
}
// Handle TypeLiteral<T> by binding the inner type
if (rawType == TypeLiteral.class) {
@SuppressWarnings("unchecked") // we have to fudge the inner type as Object
BindingImpl<T> binding = (BindingImpl<T>) createTypeLiteralBinding(
(Key<TypeLiteral<Object>>) key, errors);
return binding;
}
// Handle @ImplementedBy
ImplementedBy implementedBy = rawType.getAnnotation(ImplementedBy.class);
if (implementedBy != null) {
Annotations.checkForMisplacedScopeAnnotations(rawType, source, errors);
return createImplementedByBinding(key, scoping, implementedBy, errors);
}
// Handle @ProvidedBy.
ProvidedBy providedBy = rawType.getAnnotation(ProvidedBy.class);
if (providedBy != null) {
Annotations.checkForMisplacedScopeAnnotations(rawType, source, errors);
return createProvidedByBinding(key, scoping, providedBy, errors);
}
// We can't inject abstract classes.
// TODO: Method interceptors could actually enable us to implement
// abstract types. Should we remove this restriction?
if (Modifier.isAbstract(rawType.getModifiers())) {
throw errors.missingImplementation(key).toException();
}
// Error: Inner class.
if (Classes.isInnerClass(rawType)) {
throw errors.cannotInjectInnerClass(rawType).toException();
}
if (!scoping.isExplicitlyScoped()) {
Class<? extends Annotation> scopeAnnotation = findScopeAnnotation(errors, rawType);
if (scopeAnnotation != null) {
scoping = Scopes.makeInjectable(Scoping.forAnnotation(scopeAnnotation),
this, errors.withSource(rawType));
}
}
return ConstructorBindingImpl.create(this, key, source, scoping);
}
/**
* Converts a binding for a {@code Key<TypeLiteral<T>>} to the value {@code TypeLiteral<T>}. It's
* a bit awkward because we have to pull out the inner type in the type literal.
*/
private <T> BindingImpl<TypeLiteral<T>> createTypeLiteralBinding(
Key<TypeLiteral<T>> key, Errors errors) throws ErrorsException {
Type typeLiteralType = key.getTypeLiteral().getType();
if (!(typeLiteralType instanceof ParameterizedType)) {
throw errors.cannotInjectRawTypeLiteral().toException();
}
ParameterizedType parameterizedType = (ParameterizedType) typeLiteralType;
Type innerType = parameterizedType.getActualTypeArguments()[0];
// This is unfortunate. We don't support building TypeLiterals for type variables like 'T'. If
// this proves problematic, we can probably fix TypeLiteral to support type variables
if (!(innerType instanceof Class)
&& !(innerType instanceof GenericArrayType)
&& !(innerType instanceof ParameterizedType)) {
throw errors.cannotInjectTypeLiteralOf(innerType).toException();
}
@SuppressWarnings("unchecked") // by definition, innerType == T, so this is safe
TypeLiteral<T> value = (TypeLiteral<T>) TypeLiteral.get(innerType);
InternalFactory<TypeLiteral<T>> factory = new ConstantFactory<TypeLiteral<T>>(
Initializables.of(value));
return new InstanceBindingImpl<TypeLiteral<T>>(this, key, SourceProvider.UNKNOWN_SOURCE,
factory, ImmutableSet.<InjectionPoint>of(), value);
}
/**
* Creates a binding for a type annotated with @ProvidedBy.
*/
<T> BindingImpl<T> createProvidedByBinding(Key<T> key, Scoping scoping,
ProvidedBy providedBy, Errors errors) throws ErrorsException {
final Class<?> rawType = key.getTypeLiteral().getRawType();
final Class<? extends Provider<?>> providerType = providedBy.value();
// Make sure it's not the same type. TODO: Can we check for deeper loops?
if (providerType == rawType) {
throw errors.recursiveProviderType().toException();
}
// Assume the provider provides an appropriate type. We double check at runtime.
@SuppressWarnings("unchecked")
final Key<? extends Provider<T>> providerKey
= (Key<? extends Provider<T>>) Key.get(providerType);
final BindingImpl<? extends Provider<?>> providerBinding
= getBindingOrThrow(providerKey, errors);
InternalFactory<T> internalFactory = new InternalFactory<T>() {
public T get(Errors errors, InternalContext context, Dependency dependency)
throws ErrorsException {
errors = errors.withSource(providerKey);
Provider<?> provider = providerBinding.getInternalFactory().get(
errors, context, dependency);
try {
Object o = provider.get();
if (o != null && !rawType.isInstance(o)) {
throw errors.subtypeNotProvided(providerType, rawType).toException();
}
@SuppressWarnings("unchecked") // protected by isInstance() check above
T t = (T) o;
return t;
} catch (RuntimeException e) {
throw errors.errorInProvider(e).toException();
}
}
};
return new LinkedProviderBindingImpl<T>(
this,
key,
rawType /* source */,
Scopes.<T>scope(key, this, internalFactory, scoping),
scoping,
providerKey);
}
/**
* Creates a binding for a type annotated with @ImplementedBy.
*/
<T> BindingImpl<T> createImplementedByBinding(Key<T> key, Scoping scoping,
ImplementedBy implementedBy, Errors errors)
throws ErrorsException {
Class<?> rawType = key.getTypeLiteral().getRawType();
Class<?> implementationType = implementedBy.value();
// Make sure it's not the same type. TODO: Can we check for deeper cycles?
if (implementationType == rawType) {
throw errors.recursiveImplementationType().toException();
}
// Make sure implementationType extends type.
if (!rawType.isAssignableFrom(implementationType)) {
throw errors.notASubtype(implementationType, rawType).toException();
}
@SuppressWarnings("unchecked") // After the preceding check, this cast is safe.
Class<? extends T> subclass = (Class<? extends T>) implementationType;
// Look up the target binding.
final Key<? extends T> targetKey = Key.get(subclass);
final BindingImpl<? extends T> targetBinding = getBindingOrThrow(targetKey, errors);
InternalFactory<T> internalFactory = new InternalFactory<T>() {
public T get(Errors errors, InternalContext context, Dependency<?> dependency)
throws ErrorsException {
return targetBinding.getInternalFactory().get(
errors.withSource(targetKey), context, dependency);
}
};
return new LinkedBindingImpl<T>(
this,
key,
rawType /* source */,
Scopes.<T>scope(key, this, internalFactory, scoping),
scoping,
targetKey);
}
/**
* Attempts to create a just-in-time binding for {@code key} in the root injector, falling back to
* other ancestor injectors until this injector is tried.
*/
private <T> BindingImpl<T> createJustInTimeBindingRecursive(Key<T> key, Errors errors)
throws ErrorsException {
// ask the parent to create the JIT binding
if (parent != null && !parent.readOnly /* ES: don't check on parent if its read only, its already created all the bindings it can*/) {
try {
return parent.createJustInTimeBindingRecursive(key, new Errors());
} catch (ErrorsException ignored) {
}
}
if (state.isBlacklisted(key)) {
throw errors.childBindingAlreadySet(key).toException();
}
BindingImpl<T> binding = createJustInTimeBinding(key, errors);
state.parent().blacklist(key);
jitBindings.put(key, binding);
return binding;
}
/**
* Returns a new just-in-time binding created by resolving {@code key}. The strategies used to
* create just-in-time bindings are:
* <ol>
* <li>Internalizing Providers. If the requested binding is for {@code Provider<T>}, we delegate
* to the binding for {@code T}.
* <li>Converting constants.
* <li>ImplementedBy and ProvidedBy annotations. Only for unannotated keys.
* <li>The constructor of the raw type. Only for unannotated keys.
* </ol>
*
* @throws org.elasticsearch.common.inject.internal.ErrorsException
* if the binding cannot be created.
*/
<T> BindingImpl<T> createJustInTimeBinding(Key<T> key, Errors errors) throws ErrorsException {
if (state.isBlacklisted(key)) {
throw errors.childBindingAlreadySet(key).toException();
}
// Handle cases where T is a Provider<?>.
if (isProvider(key)) {
// These casts are safe. We know T extends Provider<X> and that given Key<Provider<X>>,
// createProviderBinding() will return BindingImpl<Provider<X>>.
@SuppressWarnings("unchecked")
BindingImpl binding = createProviderBinding((Key) key, errors);
return binding;
}
// Handle cases where T is a MembersInjector<?>
if (isMembersInjector(key)) {
// These casts are safe. T extends MembersInjector<X> and that given Key<MembersInjector<X>>,
// createMembersInjectorBinding() will return BindingImpl<MembersInjector<X>>.
@SuppressWarnings("unchecked")
BindingImpl binding = createMembersInjectorBinding((Key) key, errors);
return binding;
}
// Try to convert a constant string binding to the requested type.
BindingImpl<T> convertedBinding = convertConstantStringBinding(key, errors);
if (convertedBinding != null) {
return convertedBinding;
}
// If the key has an annotation...
if (key.hasAnnotationType()) {
// Look for a binding without annotation attributes or return null.
if (key.hasAttributes()) {
try {
Errors ignored = new Errors();
return getBindingOrThrow(key.withoutAttributes(), ignored);
} catch (ErrorsException ignored) {
// throw with a more appropriate message below
}
}
throw errors.missingImplementation(key).toException();
}
Object source = key.getTypeLiteral().getRawType();
BindingImpl<T> binding = createUnitializedBinding(key, Scoping.UNSCOPED, source, errors);
initializeBinding(binding, errors);
return binding;
}
<T> InternalFactory<? extends T> getInternalFactory(Key<T> key, Errors errors)
throws ErrorsException {
return getBindingOrThrow(key, errors).getInternalFactory();
}
// not test-covered
public Map<Key<?>, Binding<?>> getBindings() {
return state.getExplicitBindingsThisLevel();
}
private static class BindingsMultimap {
final Map<TypeLiteral<?>, List<Binding<?>>> multimap = Maps.newHashMap();
<T> void put(TypeLiteral<T> type, Binding<T> binding) {
List<Binding<?>> bindingsForType = multimap.get(type);
if (bindingsForType == null) {
bindingsForType = Lists.newArrayList();
multimap.put(type, bindingsForType);
}
bindingsForType.add(binding);
}
@SuppressWarnings("unchecked")
// safe because we only put matching entries into the map
<T> List<Binding<T>> getAll(TypeLiteral<T> type) {
List<Binding<?>> bindings = multimap.get(type);
return bindings != null
? Collections.<Binding<T>>unmodifiableList((List) multimap.get(type))
: ImmutableList.<Binding<T>>of();
}
}
/**
* Returns parameter injectors, or {@code null} if there are no parameters.
*/
SingleParameterInjector<?>[] getParametersInjectors(
List<Dependency<?>> parameters, Errors errors) throws ErrorsException {
if (parameters.isEmpty()) {
return null;
}
int numErrorsBefore = errors.size();
SingleParameterInjector<?>[] result = new SingleParameterInjector<?>[parameters.size()];
int i = 0;
for (Dependency<?> parameter : parameters) {
try {
result[i++] = createParameterInjector(parameter, errors.withSource(parameter));
} catch (ErrorsException rethrownBelow) {
// rethrown below
}
}
errors.throwIfNewErrors(numErrorsBefore);
return result;
}
<T> SingleParameterInjector<T> createParameterInjector(final Dependency<T> dependency,
final Errors errors) throws ErrorsException {
InternalFactory<? extends T> factory = getInternalFactory(dependency.getKey(), errors);
return new SingleParameterInjector<T>(dependency, factory);
}
/**
* Invokes a method.
*/
interface MethodInvoker {
Object invoke(Object target, Object... parameters)
throws IllegalAccessException, InvocationTargetException;
}
/**
* Cached constructor injectors for each type
*/
ConstructorInjectorStore constructors = new ConstructorInjectorStore(this);
/**
* Cached field and method injectors for each type.
*/
MembersInjectorStore membersInjectorStore;
@SuppressWarnings("unchecked") // the members injector type is consistent with instance's type
public void injectMembers(Object instance) {
MembersInjector membersInjector = getMembersInjector(instance.getClass());
membersInjector.injectMembers(instance);
}
public <T> MembersInjector<T> getMembersInjector(TypeLiteral<T> typeLiteral) {
Errors errors = new Errors(typeLiteral);
try {
return membersInjectorStore.get(typeLiteral, errors);
} catch (ErrorsException e) {
throw new ConfigurationException(errors.merge(e.getErrors()).getMessages());
}
}
public <T> MembersInjector<T> getMembersInjector(Class<T> type) {
return getMembersInjector(TypeLiteral.get(type));
}
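// Example usage (hypothetical type): injecting fields/methods of an already-constructed instance:
//   MembersInjector<Widget> mi = injector.getMembersInjector(Widget.class);
//   mi.injectMembers(existingWidget);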
public <T> Provider<T> getProvider(Class<T> type) {
return getProvider(Key.get(type));
}
<T> Provider<T> getProviderOrThrow(final Key<T> key, Errors errors) throws ErrorsException {
final InternalFactory<? extends T> factory = getInternalFactory(key, errors);
// ES: optimize for a common case of read only instance getting from the parent...
if (factory instanceof InternalFactory.Instance) {
return new Provider<T>() {
@Override
public T get() {
try {
return (T) ((InternalFactory.Instance) factory).get(null, null, null);
} catch (ErrorsException e) {
// ignore
}
// should never happen...
assert false;
return null;
}
};
}
final Dependency<T> dependency = Dependency.get(key);
return new Provider<T>() {
public T get() {
final Errors errors = new Errors(dependency);
try {
T t = callInContext(new ContextualCallable<T>() {
public T call(InternalContext context) throws ErrorsException {
context.setDependency(dependency);
try {
return factory.get(errors, context, dependency);
} finally {
context.setDependency(null);
}
}
});
errors.throwIfNewErrors(0);
return t;
} catch (ErrorsException e) {
throw new ProvisionException(errors.merge(e.getErrors()).getMessages());
}
}
@Override
public String toString() {
return factory.toString();
}
};
}
public <T> Provider<T> getProvider(final Key<T> key) {
Errors errors = new Errors(key);
try {
Provider<T> result = getProviderOrThrow(key, errors);
errors.throwIfNewErrors(0);
return result;
} catch (ErrorsException e) {
throw new ConfigurationException(errors.merge(e.getErrors()).getMessages());
}
}
public <T> T getInstance(Key<T> key) {
return getProvider(key).get();
}
public <T> T getInstance(Class<T> type) {
return getProvider(type).get();
}
final ThreadLocal<Object[]> localContext;
/**
* Looks up thread local context. Creates (and removes) a new context if necessary.
*/
<T> T callInContext(ContextualCallable<T> callable) throws ErrorsException {
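// The thread-local holds a single-element array used as a mutable holder: only the array slot
// is written per call, never the ThreadLocal mapping itself.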
Object[] reference = localContext.get();
if (reference[0] == null) {
reference[0] = new InternalContext();
try {
return callable.call((InternalContext) reference[0]);
} finally {
// Only clear the context if this call created it.
reference[0] = null;
}
} else {
// Someone else will clean up this context.
return callable.call((InternalContext) reference[0]);
}
}
public String toString() {
return new ToStringBuilder(Injector.class)
.add("bindings", state.getExplicitBindingsThisLevel().values())
.toString();
}
// ES_GUICE: clear caches
public void clearCache() {
state.clearBlacklisted();
constructors = new ConstructorInjectorStore(this);
membersInjectorStore = new MembersInjectorStore(this, state.getTypeListenerBindings());
jitBindings = Maps.newHashMap();
}
// ES_GUICE: make all registered bindings act as eager singletons
public void readOnlyAllSingletons() {
readOnly = true;
state.makeAllBindingsToEagerSingletons(this);
bindingsMultimap = new BindingsMultimap();
// reindex the bindings
index();
}
} | 0true
| src_main_java_org_elasticsearch_common_inject_InjectorImpl.java |
1,616 | private class TaskPollThread extends Thread {
private final Map<Integer, Class<? extends ConsoleRequest>> consoleRequests =
new HashMap<Integer, Class<? extends ConsoleRequest>>();
private final Random rand = new Random();
TaskPollThread() {
super(instance.node.threadGroup, instance.node.getThreadNamePrefix("MC.Task.Poller"));
register(new RuntimeStateRequest());
register(new ThreadDumpRequest());
register(new ExecuteScriptRequest());
register(new EvictLocalMapRequest());
register(new ConsoleCommandRequest());
register(new MapConfigRequest());
register(new MemberConfigRequest());
register(new ClusterPropsRequest());
register(new GetLogsRequest());
register(new RunGcRequest());
register(new GetMemberSystemPropertiesRequest());
register(new GetMapEntryRequest());
register(new VersionMismatchLogRequest());
register(new ShutdownMemberRequest());
register(new GetSystemWarningsRequest());
}
public void register(ConsoleRequest consoleRequest) {
consoleRequests.put(consoleRequest.getType(), consoleRequest.getClass());
}
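// Each ConsoleRequest self-reports an int type id; this registry lets the poller instantiate
// the matching request class for ids read off the wire (see newTask()).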
public void processTaskAndPostResponse(int taskId, ConsoleRequest task) {
try {
//todo: don't we need to close this connection?
HttpURLConnection connection = openPostResponseConnection();
OutputStream outputStream = connection.getOutputStream();
try {
identifier.write(outputStream);
ObjectDataOutputStream out = serializationService.createObjectDataOutputStream(outputStream);
out.writeInt(taskId);
out.writeInt(task.getType());
task.writeResponse(ManagementCenterService.this, out);
out.flush();
post(connection);
} finally {
closeResource(outputStream);
}
} catch (Exception e) {
logger.warning("Failed process task:" + task, e);
}
}
private HttpURLConnection openPostResponseConnection() throws IOException {
URL url = newPostResponseUrl();
if (logger.isFinestEnabled()) {
logger.finest("Opening sendResponse connection:" + url);
}
HttpURLConnection connection = (HttpURLConnection) url.openConnection();
connection.setDoOutput(true);
connection.setRequestMethod("POST");
connection.setConnectTimeout(2000);
connection.setReadTimeout(2000);
return connection;
}
private URL newPostResponseUrl() throws MalformedURLException {
return new URL(cleanupUrl(managementCenterUrl) + "putResponse.do");
}
@Override
public void run() {
try {
while (isRunning()) {
sleepOnVersionMismatch();
processTask();
sleep();
}
} catch (Throwable throwable) {
inspectOutputMemoryError(throwable);
logger.warning("Problem on Hazelcast Management Center Service while polling for a task.", throwable);
}
}
private void sleep() throws InterruptedException {
//todo: magic numbers are no good.
//todo: why the random part
//todo: we want configurable frequency for task polling
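// Sleeps 700-1000 ms between polls; the random component presumably staggers requests from different members.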
Thread.sleep(700 + rand.nextInt(300));
}
private void processTask() {
ObjectDataInputStream inputStream = null;
try {
//todo: don't we need to close the connection?
inputStream = openTaskInputStream();
int taskId = inputStream.readInt();
if (taskId <= 0) {
return;
}
ConsoleRequest task = newTask(inputStream);
processTaskAndPostResponse(taskId, task);
} catch (Exception e) {
//todo: internal errors while processing the task are swallowed here and only logged at the finest level
logger.finest(e);
} finally {
IOUtil.closeResource(inputStream);
}
}
private ObjectDataInputStream openTaskInputStream() throws IOException {
URLConnection connection = openGetTaskConnection();
InputStream inputStream = connection.getInputStream();
return serializationService.createObjectDataInputStream(inputStream);
}
private ConsoleRequest newTask(ObjectDataInputStream inputStream)
throws InstantiationException, IllegalAccessException, IOException {
int requestType = inputStream.readInt();
Class<? extends ConsoleRequest> requestClass = consoleRequests.get(requestType);
if (requestClass == null) {
throw new RuntimeException("Failed to find a request for requestType:" + requestType);
}
ConsoleRequest task = requestClass.newInstance();
task.readData(inputStream);
return task;
}
private URLConnection openGetTaskConnection() throws IOException {
URL url = newGetTaskUrl();
if (logger.isFinestEnabled()) {
logger.finest("Opening getTask connection:" + url);
}
URLConnection connection = url.openConnection();
//todo: why do we set this property if the connection is not going to be re-used?
connection.setRequestProperty("Connection", "keep-alive");
return connection;
}
private URL newGetTaskUrl() throws MalformedURLException {
GroupConfig groupConfig = instance.getConfig().getGroupConfig();
Address localAddress = ((MemberImpl) instance.node.getClusterService().getLocalMember()).getAddress();
String urlString = cleanupUrl(managementCenterUrl) + "getTask.do?member=" + localAddress.getHost()
+ ":" + localAddress.getPort() + "&cluster=" + groupConfig.getName();
if (clusterId != null) {
urlString += "&clusterid=" + clusterId;
}
if (securityToken != null) {
urlString += "&securitytoken=" + securityToken;
}
return new URL(urlString);
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_management_ManagementCenterService.java |
2,229 | class CustomBoostFactorWeight extends Weight {
final Weight subQueryWeight;
final Bits[] docSets;
public CustomBoostFactorWeight(Weight subQueryWeight, int filterFunctionLength) throws IOException {
this.subQueryWeight = subQueryWeight;
this.docSets = new Bits[filterFunctionLength];
}
public Query getQuery() {
return FiltersFunctionScoreQuery.this;
}
@Override
public float getValueForNormalization() throws IOException {
float sum = subQueryWeight.getValueForNormalization();
sum *= getBoost() * getBoost();
return sum;
}
@Override
public void normalize(float norm, float topLevelBoost) {
subQueryWeight.normalize(norm, topLevelBoost * getBoost());
}
@Override
public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
// we ignore scoreDocsInOrder parameter, because we need to score in
// order if documents are scored with a script. The
// ShardLookup depends on in order scoring.
Scorer subQueryScorer = subQueryWeight.scorer(context, true, false, acceptDocs);
if (subQueryScorer == null) {
return null;
}
for (int i = 0; i < filterFunctions.length; i++) {
FilterFunction filterFunction = filterFunctions[i];
filterFunction.function.setNextReader(context);
docSets[i] = DocIdSets.toSafeBits(context.reader(), filterFunction.filter.getDocIdSet(context, acceptDocs));
}
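// docSets[i] now answers "does filter i match this doc?" in constant time during scoring.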
return new CustomBoostFactorScorer(this, subQueryScorer, scoreMode, filterFunctions, maxBoost, docSets, combineFunction);
}
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
Explanation subQueryExpl = subQueryWeight.explain(context, doc);
if (!subQueryExpl.isMatch()) {
return subQueryExpl;
}
// First: Gather explanations for all filters
List<ComplexExplanation> filterExplanations = new ArrayList<ComplexExplanation>();
for (FilterFunction filterFunction : filterFunctions) {
Bits docSet = DocIdSets.toSafeBits(context.reader(),
filterFunction.filter.getDocIdSet(context, context.reader().getLiveDocs()));
if (docSet.get(doc)) {
filterFunction.function.setNextReader(context);
Explanation functionExplanation = filterFunction.function.explainScore(doc, subQueryExpl);
double factor = functionExplanation.getValue();
float sc = CombineFunction.toFloat(factor);
ComplexExplanation filterExplanation = new ComplexExplanation(true, sc, "function score, product of:");
filterExplanation.addDetail(new Explanation(1.0f, "match filter: " + filterFunction.filter.toString()));
filterExplanation.addDetail(functionExplanation);
filterExplanations.add(filterExplanation);
}
}
if (filterExplanations.size() == 0) {
float sc = getBoost() * subQueryExpl.getValue();
Explanation res = new ComplexExplanation(true, sc, "function score, no filter match, product of:");
res.addDetail(subQueryExpl);
res.addDetail(new Explanation(getBoost(), "queryBoost"));
return res;
}
// Second: Compute the factor that would have been computed by the
// filters
double factor = 1.0;
switch (scoreMode) {
case First:
factor = filterExplanations.get(0).getValue();
break;
case Max:
factor = Double.NEGATIVE_INFINITY;
for (int i = 0; i < filterExplanations.size(); i++) {
factor = Math.max(filterExplanations.get(i).getValue(), factor);
}
break;
case Min:
factor = Double.POSITIVE_INFINITY;
for (int i = 0; i < filterExplanations.size(); i++) {
factor = Math.min(filterExplanations.get(i).getValue(), factor);
}
break;
case Multiply:
for (int i = 0; i < filterExplanations.size(); i++) {
factor *= filterExplanations.get(i).getValue();
}
break;
default: // Avg / Total
double totalFactor = 0.0f;
int count = 0;
for (int i = 0; i < filterExplanations.size(); i++) {
totalFactor += filterExplanations.get(i).getValue();
count++;
}
if (count != 0) {
factor = totalFactor;
if (scoreMode == ScoreMode.Avg) {
factor /= count;
}
}
}
ComplexExplanation factorExplanation = new ComplexExplanation(true, CombineFunction.toFloat(factor),
"function score, score mode [" + scoreMode.toString().toLowerCase(Locale.ROOT) + "]");
for (int i = 0; i < filterExplanations.size(); i++) {
factorExplanation.addDetail(filterExplanations.get(i));
}
return combineFunction.explain(getBoost(), subQueryExpl, factorExplanation, maxBoost);
}
} | 1no label
| src_main_java_org_elasticsearch_common_lucene_search_function_FiltersFunctionScoreQuery.java |
1,530 | @Component("blUrlRewriteProcessor")
public class UrlRewriteProcessor extends AbstractAttributeModifierAttrProcessor {
@Resource(name = "blStaticAssetService")
protected StaticAssetService staticAssetService;
/**
* Sets the name of the attribute ("src") that this processor handles in Thymeleaf templates
*/
public UrlRewriteProcessor() {
super("src");
}
@Override
public int getPrecedence() {
return 1000;
}
/**
* @return true if the current request.scheme = HTTPS or if the request.isSecure value is true.
*/
protected boolean isRequestSecure(HttpServletRequest request) {
return ("HTTPS".equalsIgnoreCase(request.getScheme()) || request.isSecure());
}
@Override
protected Map<String, String> getModifiedAttributeValues(Arguments arguments, Element element, String attributeName) {
Map<String, String> attrs = new HashMap<String, String>();
HttpServletRequest request = BroadleafRequestContext.getBroadleafRequestContext().getRequest();
boolean secureRequest = isRequestSecure(request);
String assetPath = (String) StandardExpressionProcessor.processExpression(arguments, element.getAttributeValue(attributeName));
assetPath = staticAssetService.convertAssetPath(assetPath, request.getContextPath(), secureRequest);
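// convertAssetPath may, for example, prepend a configured asset/CDN prefix and adjust the scheme for
// secure requests (illustrative; the actual rewriting depends on the StaticAssetService configuration).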
attrs.put("src", assetPath);
return attrs;
}
@Override
protected ModificationType getModificationType(Arguments arguments, Element element, String attributeName, String newAttributeName) {
return ModificationType.SUBSTITUTION;
}
@Override
protected boolean removeAttributeIfEmpty(Arguments arguments, Element element, String attributeName, String newAttributeName) {
return true;
}
@Override
protected boolean recomputeProcessorsAfterExecution(Arguments arguments, Element element, String attributeName) {
return false;
}
} | 0true
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_processor_UrlRewriteProcessor.java |
3,358 | static class WithOrdinals extends GeoPointDoubleArrayAtomicFieldData {
private final BigDoubleArrayList lon, lat;
private final Ordinals ordinals;
public WithOrdinals(BigDoubleArrayList lon, BigDoubleArrayList lat, int numDocs, Ordinals ordinals) {
super(numDocs);
this.lon = lon;
this.lat = lat;
this.ordinals = ordinals;
}
@Override
public boolean isMultiValued() {
return ordinals.isMultiValued();
}
@Override
public boolean isValuesOrdered() {
return true;
}
@Override
public long getNumberUniqueValues() {
return ordinals.getNumOrds();
}
@Override
public long getMemorySizeInBytes() {
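// size is computed lazily; -1 marks "not yet computed". A concurrent recomputation is benign
// since the result is identical.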
if (size == -1) {
size = RamUsageEstimator.NUM_BYTES_INT/*size*/ + RamUsageEstimator.NUM_BYTES_INT/*numDocs*/ + lon.sizeInBytes() + lat.sizeInBytes();
}
return size;
}
@Override
public GeoPointValues getGeoPointValues() {
return new GeoPointValuesWithOrdinals(lon, lat, ordinals.ordinals());
}
public static class GeoPointValuesWithOrdinals extends GeoPointValues {
private final BigDoubleArrayList lon, lat;
private final Ordinals.Docs ordinals;
private final GeoPoint scratch = new GeoPoint();
GeoPointValuesWithOrdinals(BigDoubleArrayList lon, BigDoubleArrayList lat, Ordinals.Docs ordinals) {
super(ordinals.isMultiValued());
this.lon = lon;
this.lat = lat;
this.ordinals = ordinals;
}
@Override
public GeoPoint nextValue() {
final long ord = ordinals.nextOrd();
assert ord > 0;
return scratch.reset(lat.get(ord), lon.get(ord));
}
@Override
public int setDocument(int docId) {
this.docId = docId;
return ordinals.setDocument(docId);
}
}
} | 0true
| src_main_java_org_elasticsearch_index_fielddata_plain_GeoPointDoubleArrayAtomicFieldData.java |
3,522 | public static class Document implements Iterable<IndexableField> {
private final List<IndexableField> fields;
private ObjectObjectMap<Object, IndexableField> keyedFields;
public Document() {
fields = Lists.newArrayList();
}
@Override
public Iterator<IndexableField> iterator() {
return fields.iterator();
}
public List<IndexableField> getFields() {
return fields;
}
public void add(IndexableField field) {
fields.add(field);
}
/** Add fields so that they can later be fetched using {@link #getByKey(Object)}. */
public void addWithKey(Object key, IndexableField field) {
if (keyedFields == null) {
keyedFields = new ObjectObjectOpenHashMap<Object, IndexableField>();
} else if (keyedFields.containsKey(key)) {
throw new ElasticsearchIllegalStateException("Only one field can be stored per key");
}
keyedFields.put(key, field);
add(field);
}
/** Get back fields that have been previously added with {@link #addWithKey(Object, IndexableField)}. */
public IndexableField getByKey(Object key) {
return keyedFields == null ? null : keyedFields.get(key);
}
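// Example (hypothetical key):
//   doc.addWithKey("_version", versionField);
//   IndexableField f = doc.getByKey("_version");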
public IndexableField[] getFields(String name) {
List<IndexableField> f = new ArrayList<IndexableField>();
for (IndexableField field : fields) {
if (field.name().equals(name)) {
f.add(field);
}
}
return f.toArray(new IndexableField[f.size()]);
}
public IndexableField getField(String name) {
for (IndexableField field : fields) {
if (field.name().equals(name)) {
return field;
}
}
return null;
}
public String get(String name) {
for (IndexableField f : fields) {
if (f.name().equals(name) && f.stringValue() != null) {
return f.stringValue();
}
}
return null;
}
public BytesRef getBinaryValue(String name) {
for (IndexableField f : fields) {
if (f.name().equals(name) && f.binaryValue() != null) {
return f.binaryValue();
}
}
return null;
}
} | 0true
| src_main_java_org_elasticsearch_index_mapper_ParseContext.java |
312 | new Thread() {
public void run() {
try {
map.lock(key);
map.put(key, value);
putWhileLocked.countDown();
checkingKeySet.await();
map.unlock(key);
} catch (Exception e) {}
}
}.start(); | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapLockTest.java |
1,432 | executor.scheduleWithFixedDelay(new Runnable() {
public void run() {
cache.cleanup();
}
}, 60, 60, TimeUnit.SECONDS); | 0true
| hazelcast-hibernate_hazelcast-hibernate4_src_main_java_com_hazelcast_hibernate_local_CleanupService.java |
305 | public enum OGlobalConfiguration {
// ENVIRONMENT
ENVIRONMENT_DUMP_CFG_AT_STARTUP("environment.dumpCfgAtStartup", "Dumps the configuration at application startup", Boolean.class,
Boolean.FALSE),
ENVIRONMENT_CONCURRENT("environment.concurrent",
"Specifies if running in multi-thread environment. Setting this to false turns off the internal lock management",
Boolean.class, Boolean.TRUE),
// MEMORY
MEMORY_USE_UNSAFE("memory.useUnsafe", "Indicates whether Unsafe will be used if it is present", Boolean.class, true),
DIRECT_MEMORY_UNSAFE_MODE(
"memory.directMemory.unsafeMode",
"Indicates whether to do perform range check before each direct memory update, it is false by default, "
+ "but usually it can be safely put to true. It is needed to set to true only after dramatic changes in storage structures.",
Boolean.class, false),
JVM_GC_DELAY_FOR_OPTIMIZE("jvm.gc.delayForOptimize",
"Minimal amount of time (seconds) since last System.gc() when called after tree optimization", Long.class, 600),
// STORAGE
DISK_CACHE_SIZE("storage.diskCache.bufferSize", "Size of disk buffer in megabytes", Integer.class, 4 * 1024),
DISK_WRITE_CACHE_PART("storage.diskCache.writeCachePart", "Percent of disk cache which is use as write cache", Integer.class, 30),
DISK_WRITE_CACHE_PAGE_TTL("storage.diskCache.writeCachePageTTL",
"Max time till page will be flushed from write cache in seconds", Long.class, 24 * 60 * 60),
DISK_WRITE_CACHE_PAGE_FLUSH_INTERVAL("storage.diskCache.writeCachePageFlushInterval",
"Interval between flushing of pages from write cache in ms.", Integer.class, 100),
DISK_WRITE_CACHE_FLUSH_LOCK_TIMEOUT("storage.diskCache.writeCacheFlushLockTimeout",
"Maximum amount of time till write cache will be wait before page flush in ms.", Integer.class, -1),
STORAGE_COMPRESSION_METHOD("storage.compressionMethod", "Record compression method is used in storage."
+ " Possible values : gzip, nothing, snappy, snappy-native. Default is snappy.", String.class, "snappy"),
USE_WAL("storage.useWAL", "Whether WAL should be used in paginated storage", Boolean.class, true),
WAL_CACHE_SIZE("storage.wal.cacheSize",
"Maximum size of WAL cache (in amount of WAL pages, each page is 64k) <= 0 means that caching will be switched off.",
Integer.class, 3000),
WAL_MAX_SEGMENT_SIZE("storage.wal.maxSegmentSize", "Maximum size of single WAL segment in megabytes.", Integer.class, 256),
WAL_MAX_SIZE("storage.wal.maxSize", "Maximum size of WAL on disk in megabytes.", Integer.class, 4 * 1024),
WAL_COMMIT_TIMEOUT("storage.wal.commitTimeout", "Maximum interval between WAL commits (in ms.)", Integer.class, 1000),
WAL_SHUTDOWN_TIMEOUT("storage.wal.shutdownTimeout", "Maximum interval between the moment the background flush thread"
+ " receives the shutdown command and the moment it stops (in ms.)", Integer.class, 10000),
WAL_FUZZY_CHECKPOINT_INTERVAL("storage.wal.fuzzyCheckpointInterval", "Interval between fuzzy checkpoints (in seconds)",
Integer.class, 2592000),
WAL_FUZZY_CHECKPOINT_SHUTDOWN_TIMEOUT("storage.wal.fuzzyCheckpointShutdownWait",
"Interval which we should wait till shutdown (in seconds)", Integer.class, 60 * 10),
WAL_FULL_CHECKPOINT_SHUTDOWN_TIMEOUT("storage.wal.fullCheckpointShutdownTimeout",
"Timeout till DB will wait that full checkpoint is finished during DB close (in seconds))", Integer.class, 60 * 10),
WAL_LOCATION("storage.wal.path", "Path to the wal file on the disk, by default is placed in DB directory but"
+ " it is highly recomended to use separate disk to store log operations", String.class, null),
STORAGE_MAKE_FULL_CHECKPOINT_AFTER_CREATE("storage.makeFullCheckpointAfterCreate",
"Indicates whether a full checkpoint should be performed when the storage is created.", Boolean.class, true),
STORAGE_MAKE_FULL_CHECKPOINT_AFTER_CLUSTER_CREATE("storage.makeFullCheckpointAfterClusterCreate",
"Indicates whether a full checkpoint should be performed when a cluster is created.", Boolean.class, true),
DISK_CACHE_PAGE_SIZE("storage.diskCache.pageSize", "Size of page of disk buffer in kilobytes", Integer.class, 64),
PAGINATED_STORAGE_LOWEST_FREELIST_BOUNDARY("storage.lowestFreeListBound", "The minimal amount of free space (in kb)"
+ " in page which is tracked in paginated storage", Integer.class, 16),
USE_NODE_ID_CLUSTER_POSITION("storage.cluster.useNodeIdAsClusterPosition", "Indicates whether cluster position should be"
+ " treated as node id not as long value.", Boolean.class, Boolean.FALSE),
STORAGE_KEEP_OPEN(
"storage.keepOpen",
"Tells to the engine to not close the storage when a database is closed. Storages will be closed when the process shuts down",
Boolean.class, Boolean.TRUE),
STORAGE_LOCK_TIMEOUT("storage.lockTimeout", "Maximum timeout in milliseconds to lock the storage", Integer.class, 600000),
STORAGE_RECORD_LOCK_TIMEOUT("storage.record.lockTimeout", "Maximum timeout in milliseconds to lock a shared record",
Integer.class, 5000),
STORAGE_USE_TOMBSTONES("storage.useTombstones", "When a record is deleted, its cluster"
+ " position is not freed; a tombstone is placed instead", Boolean.class, false),
// RECORDS
RECORD_DOWNSIZING_ENABLED(
"record.downsizing.enabled",
"On updates if the record size is lower than before, reduces the space taken accordlying. If enabled this could increase defragmentation, but it reduces the used space",
Boolean.class, true),
// CACHE
CACHE_LEVEL1_ENABLED("cache.level1.enabled", "Use the level-1 cache", Boolean.class, true),
CACHE_LEVEL1_SIZE("cache.level1.size", "Size of the cache that keeps records in memory", Integer.class, 1000),
CACHE_LEVEL2_ENABLED("cache.level2.enabled", "Use the level-2 cache", Boolean.class, false),
CACHE_LEVEL2_SIZE("cache.level2.size", "Size of the cache that keeps records in memory", Integer.class, 0),
CACHE_LEVEL2_IMPL("cache.level2.impl", "Actual implementation of secondary cache", String.class, ODefaultCache.class
.getCanonicalName()),
CACHE_LEVEL2_STRATEGY("cache.level2.strategy",
"Strategy to use when a database requests a record: 0 = pop the record, 1 = copy the record", Integer.class, 0,
new OConfigurationChangeCallback() {
public void change(final Object iCurrentValue, final Object iNewValue) {
// UPDATE ALL THE OPENED STORAGES SETTING THE NEW STRATEGY
// for (OStorage s : com.orientechnologies.orient.core.Orient.instance().getStorages()) {
// s.getCache().setStrategy((Integer) iNewValue);
// }
}
}),
// OBJECT DATABASE
OBJECT_SAVE_ONLY_DIRTY("object.saveOnlyDirty", "Object Database only saves objects bound to dirty records", Boolean.class, false),
// DATABASE
DB_POOL_MIN("db.pool.min", "Default database pool minimum size", Integer.class, 1),
DB_POOL_MAX("db.pool.max", "Default database pool maximum size", Integer.class, 20),
DB_POOL_IDLE_TIMEOUT("db.pool.idleTimeout", "Timeout after which an idle pooled database instance is closed (0 = never)", Integer.class, 0),
DB_POOL_IDLE_CHECK_DELAY("db.pool.idleCheckDelay", "Delay between checks for idle pooled database instances (0 = disabled)", Integer.class, 0),
@Deprecated
DB_MVCC("db.mvcc", "Enables or disables MVCC (Multi-Version Concurrency Control) even outside transactions", Boolean.class, true),
DB_MVCC_THROWFAST(
"db.mvcc.throwfast",
"Use fast-thrown exceptions for MVCC OConcurrentModificationExceptions. No context information will be available, use where these exceptions are handled and the detail is not neccessary",
Boolean.class, false),
DB_VALIDATION("db.validation", "Enables or disables validation of records", Boolean.class, true),
DB_USE_DISTRIBUTED_VERSION("db.use.distributedVersion", "Use extended version that is safe in distributed environment",
Boolean.class, Boolean.FALSE),
// SETTINGS OF NON-TRANSACTIONAL MODE
NON_TX_RECORD_UPDATE_SYNCH("nonTX.recordUpdate.synch",
"Executes a synch against the file-system at every record operation. This slows down records updates "
+ "but guarantee reliability on unreliable drives", Boolean.class, Boolean.FALSE),
NON_TX_CLUSTERS_SYNC_IMMEDIATELY("nonTX.clusters.sync.immediately",
"List of clusters to sync immediately after update separated by commas. Can be useful for manual index", String.class,
OMetadataDefault.CLUSTER_MANUAL_INDEX_NAME),
// TRANSACTIONS
TX_USE_LOG("tx.useLog", "Transactions use log file to store temporary data to be rolled back in case of crash", Boolean.class,
true),
TX_AUTO_RETRY("tx.autoRetry",
"Maximum number of automatic retry if some resource has been locked in the middle of the transaction (Timeout exception)",
Integer.class, 1),
TX_LOG_TYPE("tx.log.fileType", "File type to handle transaction logs: mmap or classic", String.class, "classic"),
TX_LOG_SYNCH(
"tx.log.synch",
"Executes a synch against the file-system at every log entry. This slows down transactions but guarantee transaction reliability on unreliable drives",
Boolean.class, Boolean.FALSE),
TX_COMMIT_SYNCH("tx.commit.synch", "Synchronizes the storage after transaction commit", Boolean.class, false),
// INDEX
HASH_TABLE_SPLIT_BUCKETS_BUFFER_LENGTH("hashTable.slitBucketsBuffer.length", "Length of buffer (in pages) where buckets "
+ "that were splited but not flushed to the disk are kept. This buffer is used to minimize random IO overhead.",
Integer.class, 1500),
INDEX_AUTO_REBUILD_AFTER_NOTSOFTCLOSE("index.auto.rebuildAfterNotSoftClose",
"Auto rebuild all automatic indexes after upon database open when wasn't closed properly", Boolean.class, true),
INDEX_SYNCHRONOUS_AUTO_REBUILD("index.auto.synchronousAutoRebuild",
"Synchronous execution of auto rebuilding of indexes in case of db crash.", Boolean.class, Boolean.TRUE),
INDEX_AUTO_LAZY_UPDATES(
"index.auto.lazyUpdates",
"Configure the TreeMaps for automatic indexes as buffered or not. -1 means buffered until tx.commit() or db.close() are called",
Integer.class, 10000),
INDEX_MANUAL_LAZY_UPDATES("index.manual.lazyUpdates",
"Configure the TreeMaps for manual indexes as buffered or not. -1 means buffered until tx.commit() or db.close() are called",
Integer.class, 1),
INDEX_DURABLE_IN_NON_TX_MODE("index.durableInNonTxMode",
"Indicates whether index implementation for plocal storage will be durable in non-Tx mode, false by default", Boolean.class,
false),
INDEX_TX_MODE("index.txMode",
"Indicates index durability level in TX mode. Can be ROLLBACK_ONLY or FULL (ROLLBACK_ONLY by default)", String.class,
"ROLLBACK_ONLY"),
INDEX_USE_SBTREE_BY_DEFAULT("index.useSBTreeByDefault",
"Whether new SBTree index implementation should be used instead of old MVRB-Tree", Boolean.class, true),
INDEX_NOTUNIQUE_USE_SBTREE_CONTAINER_BY_DEFAULT("index.notunique.useSBTreeContainerByDefault",
"Prefer SBTree based algorithm instead MVRBTree for storing sets of RID", Boolean.class, true),
// TREEMAP
MVRBTREE_TIMEOUT("mvrbtree.timeout", "Maximum timeout to get lock against the OMVRB-Tree", Integer.class, 5000),
MVRBTREE_NODE_PAGE_SIZE("mvrbtree.nodePageSize",
"Page size of each node. 256 means that 256 entries can be stored inside each node", Integer.class, 256),
MVRBTREE_LOAD_FACTOR("mvrbtree.loadFactor", "HashMap load factor", Float.class, 0.7f),
MVRBTREE_OPTIMIZE_THRESHOLD(
"mvrbtree.optimizeThreshold",
"Auto optimize the TreeMap every X tree rotations. This forces the optimization of the tree after many changes to recompute entry points. -1 means never",
Integer.class, 100000),
MVRBTREE_ENTRYPOINTS("mvrbtree.entryPoints", "Number of entry points to start searching entries", Integer.class, 64),
MVRBTREE_OPTIMIZE_ENTRYPOINTS_FACTOR("mvrbtree.optimizeEntryPointsFactor",
"Multiplicand factor to apply to entry-points list (parameter mvrbtree.entrypoints) to determine optimization is needed",
Float.class, 1.0f),
MVRBTREE_ENTRY_KEYS_IN_MEMORY("mvrbtree.entryKeysInMemory", "Keep unserialized keys in memory", Boolean.class, Boolean.FALSE),
MVRBTREE_ENTRY_VALUES_IN_MEMORY("mvrbtree.entryValuesInMemory", "Keep unserialized values in memory", Boolean.class,
Boolean.FALSE),
// TREEMAP OF RIDS
MVRBTREE_RID_BINARY_THRESHOLD(
"mvrbtree.ridBinaryThreshold",
"Valid for set of rids. It's the threshold as number of entries to use the binary streaming instead of classic string streaming. -1 means never use binary streaming",
Integer.class, 8),
MVRBTREE_RID_NODE_PAGE_SIZE("mvrbtree.ridNodePageSize",
"Page size of each treeset node. 16 means that 16 entries can be stored inside each node", Integer.class, 64),
MVRBTREE_RID_NODE_SAVE_MEMORY("mvrbtree.ridNodeSaveMemory",
"Save memory usage by avoid keeping RIDs in memory but creating them at every access", Boolean.class, Boolean.FALSE),
// SBTREE
SBTREE_MAX_KEY_SIZE("sbtree.maxKeySize", "Maximum size of key which can be put in SBTree in bytes (10240 by default)",
Integer.class, 10240),
SBTREE_MAX_EMBEDDED_VALUE_SIZE("sbtree.maxEmbeddedValueSize",
"Maximum size of value which can be put in SBTree without creation link to standalone page in bytes (40960 by default)",
Integer.class, 40960),
SBTREEBONSAI_BUCKET_SIZE("sbtreebonsai.bucketSize",
"Size of bucket in OSBTreeBonsai in kB. Contract: bucketSize < storagePageSize, storagePageSize % bucketSize == 0.",
Integer.class, 2),
// COLLECTIONS
LAZYSET_WORK_ON_STREAM("lazyset.workOnStream", "On add, avoids unmarshalling the set", Boolean.class, true),
PREFER_SBTREE_SET("collections.preferSBTreeSet", "This config is experimental.", Boolean.class, false),
// FILE
FILE_LOCK("file.lock", "Locks files when used. Default is false", boolean.class, true),
FILE_DEFRAG_STRATEGY("file.defrag.strategy", "Strategy to recycle free space: 0 = synchronous defrag, 1 = asynchronous defrag",
Integer.class, 0),
FILE_DEFRAG_HOLE_MAX_DISTANCE(
"file.defrag.holeMaxDistance",
"Max distance in bytes between holes to cause their defrag. Set it to -1 to use dynamic size. Beware that if the db is huge moving blocks to defrag could be expensive",
Integer.class, 32768),
FILE_MMAP_USE_OLD_MANAGER("file.mmap.useOldManager",
"Manager that will be used to handle mmap files. true = USE OLD MANAGER, false = USE NEW MANAGER", boolean.class, false),
FILE_MMAP_AUTOFLUSH_TIMER("file.mmap.autoFlush.timer", "Auto flushes memory mapped blocks every X seconds. 0 = disabled",
int.class, 30),
FILE_MMAP_AUTOFLUSH_UNUSED_TIME("file.mmap.autoFlush.unusedTime",
"Remove memory mapped blocks with unused time major than this value. Time is in seconds", int.class, 30),
FILE_MMAP_LOCK_MEMORY("file.mmap.lockMemory",
"When using new map manager this parameter specify prevent memory swap or not. true = LOCK MEMORY, false = NOT LOCK MEMORY",
boolean.class, true),
FILE_MMAP_STRATEGY(
"file.mmap.strategy",
"Strategy to use with memory mapped files. 0 = USE MMAP ALWAYS, 1 = USE MMAP ON WRITES OR ON READ JUST WHEN THE BLOCK POOL IS FREE, 2 = USE MMAP ON WRITES OR ON READ JUST WHEN THE BLOCK IS ALREADY AVAILABLE, 3 = USE MMAP ONLY IF BLOCK IS ALREADY AVAILABLE, 4 = NEVER USE MMAP",
Integer.class, 0),
FILE_MMAP_BLOCK_SIZE("file.mmap.blockSize", "Size of the memory mapped block, default is 1Mb", Integer.class, 1048576,
new OConfigurationChangeCallback() {
public void change(final Object iCurrentValue, final Object iNewValue) {
OMMapManagerOld.setBlockSize(((Number) iNewValue).intValue());
}
}),
FILE_MMAP_BUFFER_SIZE("file.mmap.bufferSize", "Size of the buffer for direct access to the file through the channel",
Integer.class, 8192),
FILE_MMAP_MAX_MEMORY(
"file.mmap.maxMemory",
"Max memory allocatable by memory mapping manager. Note that on 32bit operating systems, the limit is 2Gb but will vary between operating systems",
Long.class, 134217728, new OConfigurationChangeCallback() {
public void change(final Object iCurrentValue, final Object iNewValue) {
OMMapManagerOld.setMaxMemory(OFileUtils.getSizeAsNumber(iNewValue));
}
}),
FILE_MMAP_OVERLAP_STRATEGY(
"file.mmap.overlapStrategy",
"Strategy to use when a request overlaps in-memory buffers: 0 = Use the channel access, 1 = force the in-memory buffer and use the channel access, 2 = always create an overlapped in-memory buffer (default)",
Integer.class, 2, new OConfigurationChangeCallback() {
public void change(final Object iCurrentValue, final Object iNewValue) {
OMMapManagerOld.setOverlapStrategy((Integer) iNewValue);
}
}),
FILE_MMAP_FORCE_DELAY("file.mmap.forceDelay",
"Delay time in ms to wait for another forced flush of the memory-mapped block to disk", Integer.class, 10),
FILE_MMAP_FORCE_RETRY("file.mmap.forceRetry", "Number of times the memory-mapped block will try to flush to disk", Integer.class,
50),
JNA_DISABLE_USE_SYSTEM_LIBRARY("jna.disable.system.library",
"This property disable to using JNA installed in your system. And use JNA bundled with database.", boolean.class, true),
// NETWORK
NETWORK_MAX_CONCURRENT_SESSIONS("network.maxConcurrentSessions", "Maximum number of concurrent sessions", Integer.class, 1000),
NETWORK_SOCKET_BUFFER_SIZE("network.socketBufferSize", "TCP/IP Socket buffer size", Integer.class, 32768),
NETWORK_LOCK_TIMEOUT("network.lockTimeout", "Timeout in ms to acquire a lock against a channel", Integer.class, 15000),
NETWORK_SOCKET_TIMEOUT("network.socketTimeout", "TCP/IP Socket timeout in ms", Integer.class, 15000),
NETWORK_SOCKET_RETRY("network.retry", "Number of times the client retries its connection to the server on failure",
Integer.class, 5),
NETWORK_SOCKET_RETRY_DELAY("network.retryDelay", "Number of ms the client waits before reconnecting to the server on failure",
Integer.class, 500),
NETWORK_BINARY_DNS_LOADBALANCING_ENABLED("network.binary.loadBalancing.enabled",
"Asks for DNS TXT record to determine if load balancing is supported", Boolean.class, Boolean.FALSE),
NETWORK_BINARY_DNS_LOADBALANCING_TIMEOUT("network.binary.loadBalancing.timeout",
"Maximum time (in ms) to wait for the answer from DNS about the TXT record for load balancing", Integer.class, 2000),
NETWORK_BINARY_MAX_CONTENT_LENGTH("network.binary.maxLength", "TCP/IP max content length in bytes of BINARY requests",
Integer.class, 32736),
NETWORK_BINARY_READ_RESPONSE_MAX_TIMES("network.binary.readResponse.maxTimes",
"Maximum times to wait until response will be read. Otherwise response will be dropped from chanel", Integer.class, 20),
NETWORK_BINARY_DEBUG("network.binary.debug", "Debug mode: print all data incoming on the binary channel", Boolean.class, false),
NETWORK_HTTP_MAX_CONTENT_LENGTH("network.http.maxLength", "TCP/IP max content length in bytes for HTTP requests", Integer.class,
1000000),
NETWORK_HTTP_CONTENT_CHARSET("network.http.charset", "Http response charset", String.class, "utf-8"),
NETWORK_HTTP_SESSION_EXPIRE_TIMEOUT("network.http.sessionExpireTimeout",
"Timeout after which an http session is considered tp have expired (seconds)", Integer.class, 300),
// PROFILER
PROFILER_ENABLED("profiler.enabled", "Enable the recording of statistics and counters", Boolean.class, false,
new OConfigurationChangeCallback() {
public void change(final Object iCurrentValue, final Object iNewValue) {
if ((Boolean) iNewValue)
Orient.instance().getProfiler().startRecording();
else
Orient.instance().getProfiler().stopRecording();
}
}),
PROFILER_CONFIG("profiler.config", "Configures the profiler as <seconds-for-snapshot>,<archive-snapshot-size>,<summary-size>",
String.class, null, new OConfigurationChangeCallback() {
public void change(final Object iCurrentValue, final Object iNewValue) {
Orient.instance().getProfiler().configure(iNewValue.toString());
}
}),
PROFILER_AUTODUMP_INTERVAL("profiler.autoDump.interval",
"Dumps the profiler values at regular intervals. Time is expressed in seconds", Integer.class, 0,
new OConfigurationChangeCallback() {
public void change(final Object iCurrentValue, final Object iNewValue) {
Orient.instance().getProfiler().setAutoDump((Integer) iNewValue);
}
}),
// LOG
LOG_CONSOLE_LEVEL("log.console.level", "Console logging level", String.class, "info", new OConfigurationChangeCallback() {
public void change(final Object iCurrentValue, final Object iNewValue) {
OLogManager.instance().setLevel((String) iNewValue, ConsoleHandler.class);
}
}),
LOG_FILE_LEVEL("log.file.level", "File logging level", String.class, "fine", new OConfigurationChangeCallback() {
public void change(final Object iCurrentValue, final Object iNewValue) {
OLogManager.instance().setLevel((String) iNewValue, FileHandler.class);
}
}),
// COMMAND
COMMAND_TIMEOUT("command.timeout", "Default timeout for commands expressed in milliseconds", Long.class, 0),
// CLIENT
CLIENT_CHANNEL_MIN_POOL("client.channel.minPool", "Minimum pool size", Integer.class, 1),
CLIENT_CHANNEL_MAX_POOL("client.channel.maxPool", "Maximum channel pool size", Integer.class, 20),
CLIENT_CONNECT_POOL_WAIT_TIMEOUT("client.connectionPool.waitTimeout",
"Maximum time which client should wait connection from the pool", Integer.class, 5000),
CLIENT_DB_RELEASE_WAIT_TIMEOUT("client.channel.dbReleaseWaitTimeout",
"Delay in ms. after which data modification command will be resent if DB was frozen", Integer.class, 10000),
// SERVER
SERVER_CHANNEL_CLEAN_DELAY("server.channel.cleanDelay", "Time in ms of delay to check pending closed connections", Integer.class,
5000),
SERVER_CACHE_FILE_STATIC("server.cache.staticFile", "Cache static resources loading", Boolean.class, false),
SERVER_CACHE_INCREASE_ON_DEMAND("server.cache.2q.increaseOnDemand", "Increase 2q cache on demand", Boolean.class, true),
SERVER_CACHE_INCREASE_STEP("server.cache.2q.increaseStep",
"Increase 2q cache step in percent. Will only work if server.cache.2q.increaseOnDemand is true", Float.class, 0.1f),
SERVER_LOG_DUMP_CLIENT_EXCEPTION_LEVEL(
"server.log.dumpClientExceptionLevel",
"Logs client exceptions. Use any level supported by Java java.util.logging.Level class: OFF, FINE, CONFIG, INFO, WARNING, SEVERE",
Level.class, Level.FINE),
SERVER_LOG_DUMP_CLIENT_EXCEPTION_FULLSTACKTRACE("server.log.dumpClientExceptionFullStackTrace",
"Dumps the full stack trace of the exception to sent to the client", Level.class, Boolean.TRUE),
// DISTRIBUTED
DISTRIBUTED_THREAD_QUEUE_SIZE("distributed.threadQueueSize", "Size of the queue for internal thread dispatching", Integer.class,
10000),
DISTRIBUTED_CRUD_TASK_SYNCH_TIMEOUT("distributed.crudTaskTimeout",
"Maximum timeout in milliseconds to wait for CRUD remote tasks", Integer.class, 3000l),
DISTRIBUTED_COMMAND_TASK_SYNCH_TIMEOUT("distributed.commandTaskTimeout",
"Maximum timeout in milliseconds to wait for Command remote tasks", Integer.class, 5000l),
DISTRIBUTED_QUEUE_TIMEOUT("distributed.queueTimeout", "Maximum timeout in milliseconds to wait for the response in replication",
Integer.class, 5000l),
DISTRIBUTED_ASYNCH_RESPONSES_TIMEOUT("distributed.asynchResponsesTimeout",
"Maximum timeout in milliseconds to collect all the asynchronous responses from replication", Integer.class, 15000l),
DISTRIBUTED_PURGE_RESPONSES_TIMER_DELAY("distributed.purgeResponsesTimerDelay",
"Maximum timeout in milliseconds to collect all the asynchronous responses from replication", Integer.class, 15000l);
private final String key;
private final Object defValue;
private final Class<?> type;
private Object value = null;
private String description;
private OConfigurationChangeCallback changeCallback = null;
// AT STARTUP AUTO-CONFIG
static {
readConfiguration();
autoConfig();
}
OGlobalConfiguration(final String iKey, final String iDescription, final Class<?> iType, final Object iDefValue,
final OConfigurationChangeCallback iChangeAction) {
this(iKey, iDescription, iType, iDefValue);
changeCallback = iChangeAction;
}
OGlobalConfiguration(final String iKey, final String iDescription, final Class<?> iType, final Object iDefValue) {
key = iKey;
description = iDescription;
defValue = iDefValue;
type = iType;
}
public void setValue(final Object iValue) {
Object oldValue = value;
if (iValue != null)
if (type == Boolean.class)
value = Boolean.parseBoolean(iValue.toString());
else if (type == Integer.class)
value = Integer.parseInt(iValue.toString());
else if (type == Float.class)
value = Float.parseFloat(iValue.toString());
else if (type == String.class)
value = iValue.toString();
else
value = iValue;
if (changeCallback != null)
changeCallback.change(oldValue, value);
}
public Object getValue() {
return value != null ? value : defValue;
}
public boolean getValueAsBoolean() {
final Object v = value != null ? value : defValue;
return v instanceof Boolean ? ((Boolean) v).booleanValue() : Boolean.parseBoolean(v.toString());
}
public String getValueAsString() {
return value != null ? value.toString() : defValue != null ? defValue.toString() : null;
}
public int getValueAsInteger() {
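// Non-integer values are parsed via OFileUtils.getSizeAsNumber(), which (judging from its use here
// and in getValueAsLong()) also accepts human-readable size strings such as "8mb".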
final Object v = value != null ? value : defValue;
return (int) (v instanceof Number ? ((Number) v).intValue() : OFileUtils.getSizeAsNumber(v.toString()));
}
public long getValueAsLong() {
final Object v = value != null ? value : defValue;
return v instanceof Number ? ((Number) v).longValue() : OFileUtils.getSizeAsNumber(v.toString());
}
public float getValueAsFloat() {
final Object v = value != null ? value : defValue;
return v instanceof Float ? ((Float) v).floatValue() : Float.parseFloat(v.toString());
}
public String getKey() {
return key;
}
public Class<?> getType() {
return type;
}
public String getDescription() {
return description;
}
public static void dumpConfiguration(final PrintStream out) {
out.print("OrientDB ");
out.print(OConstants.getVersion());
out.println(" configuration dump:");
String lastSection = "";
for (OGlobalConfiguration v : values()) {
final String section = v.key.substring(0, v.key.indexOf('.'));
if (!lastSection.equals(section)) {
out.print("- ");
out.println(section.toUpperCase());
lastSection = section;
}
out.print(" + ");
out.print(v.key);
out.print(" = ");
out.println(v.getValue());
}
}
/**
* Find the OGlobalConfiguration instance by the key. Key is case insensitive.
*
* @param iKey
* Key to find. It's case insensitive.
* @return OGlobalConfiguration instance if found, otherwise null
*/
public static OGlobalConfiguration findByKey(final String iKey) {
for (OGlobalConfiguration v : values()) {
if (v.getKey().equalsIgnoreCase(iKey))
return v;
}
return null;
}
/**
* Changes the configuration values in one shot by passing a Map of values. Map keys can be the configuration keys or the
* Java enum constant names.
*/
public static void setConfiguration(final Map<String, Object> iConfig) {
for (Entry<String, Object> config : iConfig.entrySet()) {
for (OGlobalConfiguration v : values()) {
if (v.getKey().equals(config.getKey())) {
v.setValue(config.getValue());
break;
} else if (v.name().equals(config.getKey())) {
v.setValue(config.getValue());
break;
}
}
}
}
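// Example (illustrative values):
//   Map<String, Object> cfg = new HashMap<String, Object>();
//   cfg.put("storage.diskCache.bufferSize", 8192); // by configuration key...
//   cfg.put("DISK_CACHE_SIZE", 8192);              // ...or by enum constant name
//   OGlobalConfiguration.setConfiguration(cfg);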
/**
* Assign configuration values by reading system properties.
*/
private static void readConfiguration() {
String prop;
for (OGlobalConfiguration config : values()) {
prop = System.getProperty(config.key);
if (prop != null)
config.setValue(prop);
}
}
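// E.g. launching the JVM with -Dstorage.diskCache.bufferSize=8192 overrides the default at startup.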
private static void autoConfig() {
if (System.getProperty("os.arch").indexOf("64") > -1) {
// 64 BIT
if (FILE_MMAP_MAX_MEMORY.getValueAsInteger() == 134217728) {
final OperatingSystemMXBean bean = java.lang.management.ManagementFactory.getOperatingSystemMXBean();
try {
final Class<?> cls = Class.forName("com.sun.management.OperatingSystemMXBean");
if (cls.isAssignableFrom(bean.getClass())) {
final Long maxOsMemory = (Long) cls.getMethod("getTotalPhysicalMemorySize", new Class[] {}).invoke(bean);
final long maxProcessMemory = Runtime.getRuntime().maxMemory();
long mmapBestMemory = (maxOsMemory.longValue() - maxProcessMemory) / 2;
FILE_MMAP_MAX_MEMORY.setValue(mmapBestMemory);
}
} catch (Exception e) {
// SUN JMX CLASS NOT AVAILABLE: CAN'T AUTO TUNE THE ENGINE
}
}
} else {
// 32 BIT, USE THE DEFAULT CONFIGURATION
}
System.setProperty(MEMORY_USE_UNSAFE.getKey(), MEMORY_USE_UNSAFE.getValueAsString());
System.setProperty(DIRECT_MEMORY_UNSAFE_MODE.getKey(), DIRECT_MEMORY_UNSAFE_MODE.getValueAsString());
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_config_OGlobalConfiguration.java |