Unnamed: 0 (int64, 0 to 6.45k) | func (stringlengths 37 to 161k) | target (class label, 2 classes) | project (stringlengths 33 to 167)
---|---|---|---|
1,591 |
public class ODistributedStorage implements OStorage, OFreezableStorage {
protected final OServer serverInstance;
protected final ODistributedServerManager dManager;
protected final OStorageEmbedded wrapped;
public ODistributedStorage(final OServer iServer, final OStorageEmbedded wrapped) {
this.serverInstance = iServer;
this.dManager = iServer.getDistributedManager();
this.wrapped = wrapped;
}
@Override
public boolean isDistributed() {
return true;
}
public Object command(final OCommandRequestText iCommand) {
if (OScenarioThreadLocal.INSTANCE.get() == RUN_MODE.RUNNING_DISTRIBUTED)
// ALREADY DISTRIBUTED
return wrapped.command(iCommand);
final ODistributedConfiguration dConfig = dManager.getDatabaseConfiguration(getName());
if (!dConfig.isReplicationActive(null))
// DON'T REPLICATE
return wrapped.command(iCommand);
final OCommandExecutor executor = OCommandManager.instance().getExecutor(iCommand);
executor.setProgressListener(iCommand.getProgressListener());
executor.parse(iCommand);
final OCommandExecutor exec = executor instanceof OCommandExecutorSQLDelegate ? ((OCommandExecutorSQLDelegate) executor)
.getDelegate() : executor;
boolean distribute = false;
if (OScenarioThreadLocal.INSTANCE.get() != RUN_MODE.RUNNING_DISTRIBUTED)
if (exec instanceof OCommandDistributedReplicateRequest)
distribute = ((OCommandDistributedReplicateRequest) exec).isReplicated();
if (!distribute)
// DON'T REPLICATE
return wrapped.executeCommand(iCommand, executor);
try {
// REPLICATE IT
// final OAbstractRemoteTask task = exec instanceof OCommandExecutorSQLResultsetAbstract ? new OMapReduceCommandTask(
// iCommand.getText()) : new OSQLCommandTask(iCommand.getText());
final OAbstractRemoteTask task = new OSQLCommandTask(iCommand.getText());
final Object result = dManager.sendRequest(getName(), null, task, EXECUTION_MODE.RESPONSE);
if (result instanceof ONeedRetryException)
throw (ONeedRetryException) result;
else if (result instanceof Throwable)
throw new ODistributedException("Error on execution distributed COMMAND", (Throwable) result);
return result;
} catch (ONeedRetryException e) {
// PASS THROUGH
throw e;
} catch (Exception e) {
handleDistributedException("Cannot route COMMAND operation to the distributed node", e);
// UNREACHABLE
return null;
}
}
public OStorageOperationResult<OPhysicalPosition> createRecord(final int iDataSegmentId, final ORecordId iRecordId,
final byte[] iContent, final ORecordVersion iRecordVersion, final byte iRecordType, final int iMode,
final ORecordCallback<OClusterPosition> iCallback) {
if (OScenarioThreadLocal.INSTANCE.get() == RUN_MODE.RUNNING_DISTRIBUTED)
// ALREADY DISTRIBUTED
return wrapped.createRecord(iDataSegmentId, iRecordId, iContent, iRecordVersion, iRecordType, iMode, iCallback);
Object result = null;
try {
// ASSIGN DESTINATION NODE
final String clusterName = getClusterNameByRID(iRecordId);
final ODistributedConfiguration dConfig = dManager.getDatabaseConfiguration(getName());
if (!dConfig.isReplicationActive(clusterName))
// DON'T REPLICATE
return wrapped.createRecord(iDataSegmentId, iRecordId, iContent, iRecordVersion, iRecordType, iMode, iCallback);
// REPLICATE IT
result = dManager.sendRequest(getName(), clusterName,
new OCreateRecordTask(iRecordId, iContent, iRecordVersion, iRecordType), EXECUTION_MODE.RESPONSE);
if (result instanceof ONeedRetryException)
throw (ONeedRetryException) result;
else if (result instanceof Throwable)
throw new ODistributedException("Error on execution distributed CREATE_RECORD", (Throwable) result);
iRecordId.clusterPosition = ((OPhysicalPosition) result).clusterPosition;
return new OStorageOperationResult<OPhysicalPosition>((OPhysicalPosition) result);
} catch (ONeedRetryException e) {
// PASS THROUGH
throw e;
} catch (Exception e) {
handleDistributedException("Cannot route CREATE_RECORD operation against %s to the distributed node", e, iRecordId);
// UNREACHABLE
return null;
}
}
public OStorageOperationResult<ORawBuffer> readRecord(final ORecordId iRecordId, final String iFetchPlan,
final boolean iIgnoreCache, final ORecordCallback<ORawBuffer> iCallback, boolean loadTombstones) {
if (OScenarioThreadLocal.INSTANCE.get() == RUN_MODE.RUNNING_DISTRIBUTED)
// ALREADY DISTRIBUTED
return wrapped.readRecord(iRecordId, iFetchPlan, iIgnoreCache, iCallback, loadTombstones);
try {
final String clusterName = getClusterNameByRID(iRecordId);
final ODistributedConfiguration dConfig = dManager.getDatabaseConfiguration(getName());
if (!dConfig.isReplicationActive(clusterName))
// DON'T REPLICATE
return wrapped.readRecord(iRecordId, iFetchPlan, iIgnoreCache, iCallback, loadTombstones);
final ODistributedPartitioningStrategy strategy = dManager.getPartitioningStrategy(dConfig.getPartitionStrategy(clusterName));
final ODistributedPartition partition = strategy.getPartition(dManager, getName(), clusterName);
if (partition.getNodes().contains(dManager.getLocalNodeName()))
// LOCAL NODE OWNS THE DATA: GET IT LOCALLY BECAUSE IT'S FASTER
return wrapped.readRecord(iRecordId, iFetchPlan, iIgnoreCache, iCallback, loadTombstones);
// DISTRIBUTE IT
final Object result = dManager.sendRequest(getName(), clusterName, new OReadRecordTask(iRecordId), EXECUTION_MODE.RESPONSE);
if (result instanceof ONeedRetryException)
throw (ONeedRetryException) result;
else if (result instanceof Throwable)
throw new ODistributedException("Error on execution distributed READ_RECORD", (Throwable) result);
return new OStorageOperationResult<ORawBuffer>((ORawBuffer) result);
} catch (ONeedRetryException e) {
// PASS THROUGH
throw e;
} catch (Exception e) {
handleDistributedException("Cannot route READ_RECORD operation against %s to the distributed node", e, iRecordId);
// UNREACHABLE
return null;
}
}
public OStorageOperationResult<ORecordVersion> updateRecord(final ORecordId iRecordId, final byte[] iContent,
final ORecordVersion iVersion, final byte iRecordType, final int iMode, final ORecordCallback<ORecordVersion> iCallback) {
if (OScenarioThreadLocal.INSTANCE.get() == RUN_MODE.RUNNING_DISTRIBUTED)
// ALREADY DISTRIBUTED
return wrapped.updateRecord(iRecordId, iContent, iVersion, iRecordType, iMode, iCallback);
try {
final String clusterName = getClusterNameByRID(iRecordId);
final ODistributedConfiguration dConfig = dManager.getDatabaseConfiguration(getName());
if (!dConfig.isReplicationActive(clusterName))
// DON'T REPLICATE
return wrapped.updateRecord(iRecordId, iContent, iVersion, iRecordType, iMode, iCallback);
// REPLICATE IT
final Object result = dManager.sendRequest(getName(), clusterName, new OUpdateRecordTask(iRecordId, iContent, iVersion,
iRecordType), EXECUTION_MODE.RESPONSE);
if (result instanceof ONeedRetryException)
throw (ONeedRetryException) result;
else if (result instanceof Throwable)
throw new ODistributedException("Error on execution distributed UPDATE_RECORD", (Throwable) result);
// UPDATE LOCALLY
return new OStorageOperationResult<ORecordVersion>((ORecordVersion) result);
} catch (ONeedRetryException e) {
// PASS THROUGH
throw e;
} catch (Exception e) {
handleDistributedException("Cannot route UPDATE_RECORD operation against %s to the distributed node", e, iRecordId);
// UNREACHABLE
return null;
}
}
public String getClusterNameByRID(final ORecordId iRid) {
final OCluster cluster = getClusterById(iRid.clusterId);
return cluster != null ? cluster.getName() : "*";
}
public OStorageOperationResult<Boolean> deleteRecord(final ORecordId iRecordId, final ORecordVersion iVersion, final int iMode,
final ORecordCallback<Boolean> iCallback) {
if (OScenarioThreadLocal.INSTANCE.get() == RUN_MODE.RUNNING_DISTRIBUTED)
// ALREADY DISTRIBUTED
return wrapped.deleteRecord(iRecordId, iVersion, iMode, iCallback);
try {
final String clusterName = getClusterNameByRID(iRecordId);
final ODistributedConfiguration dConfig = dManager.getDatabaseConfiguration(getName());
if (!dConfig.isReplicationActive(clusterName))
// DON'T REPLICATE
return wrapped.deleteRecord(iRecordId, iVersion, iMode, iCallback);
// REPLICATE IT
final Object result = dManager.sendRequest(getName(), clusterName, new ODeleteRecordTask(iRecordId, iVersion),
EXECUTION_MODE.RESPONSE);
if (result instanceof ONeedRetryException)
throw (ONeedRetryException) result;
else if (result instanceof Throwable)
throw new ODistributedException("Error on execution distributed DELETE_RECORD", (Throwable) result);
return new OStorageOperationResult<Boolean>(true);
} catch (ONeedRetryException e) {
// PASS THROUGH
throw e;
} catch (Exception e) {
handleDistributedException("Cannot route DELETE_RECORD operation against %s to the distributed node", e, iRecordId);
// UNREACHABLE
return null;
}
}
@Override
public boolean updateReplica(int dataSegmentId, ORecordId rid, byte[] content, ORecordVersion recordVersion, byte recordType)
throws IOException {
return wrapped.updateReplica(dataSegmentId, rid, content, recordVersion, recordType);
}
@Override
public ORecordMetadata getRecordMetadata(ORID rid) {
return wrapped.getRecordMetadata(rid);
}
@Override
public boolean cleanOutRecord(ORecordId recordId, ORecordVersion recordVersion, int iMode, ORecordCallback<Boolean> callback) {
return wrapped.cleanOutRecord(recordId, recordVersion, iMode, callback);
}
public boolean existsResource(final String iName) {
return wrapped.existsResource(iName);
}
@SuppressWarnings("unchecked")
public <T> T removeResource(final String iName) {
return (T) wrapped.removeResource(iName);
}
public <T> T getResource(final String iName, final Callable<T> iCallback) {
return (T) wrapped.getResource(iName, iCallback);
}
public void open(final String iUserName, final String iUserPassword, final Map<String, Object> iProperties) {
wrapped.open(iUserName, iUserPassword, iProperties);
}
public void create(final Map<String, Object> iProperties) {
wrapped.create(iProperties);
}
public boolean exists() {
return wrapped.exists();
}
public void reload() {
wrapped.reload();
}
public void delete() {
wrapped.delete();
}
public void close() {
wrapped.close();
}
public void close(final boolean iForce) {
wrapped.close(iForce);
}
public boolean isClosed() {
return wrapped.isClosed();
}
public OLevel2RecordCache getLevel2Cache() {
return wrapped.getLevel2Cache();
}
public void commit(final OTransaction iTx, final Runnable callback) {
if (OScenarioThreadLocal.INSTANCE.get() == RUN_MODE.RUNNING_DISTRIBUTED)
// ALREADY DISTRIBUTED
wrapped.commit(iTx, callback);
else {
try {
final ODistributedConfiguration dConfig = dManager.getDatabaseConfiguration(getName());
if (!dConfig.isReplicationActive(null))
// DON'T REPLICATE
wrapped.commit(iTx, callback);
else {
final OTxTask txTask = new OTxTask();
for (ORecordOperation op : iTx.getCurrentRecordEntries()) {
final OAbstractRecordReplicatedTask task;
final ORecordInternal<?> record = op.getRecord();
switch (op.type) {
case ORecordOperation.CREATED:
task = new OCreateRecordTask((ORecordId) op.record.getIdentity(), record.toStream(), record.getRecordVersion(),
record.getRecordType());
break;
case ORecordOperation.UPDATED:
task = new OUpdateRecordTask((ORecordId) op.record.getIdentity(), record.toStream(), record.getRecordVersion(),
record.getRecordType());
break;
case ORecordOperation.DELETED:
task = new ODeleteRecordTask((ORecordId) op.record.getIdentity(), record.getRecordVersion());
break;
default:
continue;
}
txTask.add(task);
}
// REPLICATE IT
dManager.sendRequest(getName(), null, txTask, EXECUTION_MODE.RESPONSE);
}
} catch (Exception e) {
handleDistributedException("Cannot route TX operation against distributed node", e);
}
}
}
public void rollback(final OTransaction iTx) {
wrapped.rollback(iTx);
}
public OStorageConfiguration getConfiguration() {
return wrapped.getConfiguration();
}
public int getClusters() {
return wrapped.getClusters();
}
public Set<String> getClusterNames() {
return wrapped.getClusterNames();
}
public OCluster getClusterById(int iId) {
return wrapped.getClusterById(iId);
}
public Collection<? extends OCluster> getClusterInstances() {
return wrapped.getClusterInstances();
}
public int addCluster(final String iClusterType, final String iClusterName, final String iLocation,
final String iDataSegmentName, boolean forceListBased, final Object... iParameters) {
return wrapped.addCluster(iClusterType, iClusterName, iLocation, iDataSegmentName, false, iParameters);
}
public int addCluster(String iClusterType, String iClusterName, int iRequestedId, String iLocation, String iDataSegmentName,
boolean forceListBased, Object... iParameters) {
return wrapped.addCluster(iClusterType, iClusterName, iRequestedId, iLocation, iDataSegmentName, forceListBased, iParameters);
}
public boolean dropCluster(final String iClusterName, final boolean iTruncate) {
return wrapped.dropCluster(iClusterName, iTruncate);
}
public boolean dropCluster(final int iId, final boolean iTruncate) {
return wrapped.dropCluster(iId, iTruncate);
}
public int addDataSegment(final String iDataSegmentName) {
return wrapped.addDataSegment(iDataSegmentName);
}
public int addDataSegment(final String iSegmentName, final String iDirectory) {
return wrapped.addDataSegment(iSegmentName, iDirectory);
}
public long count(final int iClusterId) {
return wrapped.count(iClusterId);
}
@Override
public long count(int iClusterId, boolean countTombstones) {
return wrapped.count(iClusterId, countTombstones);
}
public long count(final int[] iClusterIds) {
return wrapped.count(iClusterIds);
}
@Override
public long count(int[] iClusterIds, boolean countTombstones) {
return wrapped.count(iClusterIds, countTombstones);
}
public long getSize() {
return wrapped.getSize();
}
public long countRecords() {
return wrapped.countRecords();
}
public int getDefaultClusterId() {
return wrapped.getDefaultClusterId();
}
public void setDefaultClusterId(final int defaultClusterId) {
wrapped.setDefaultClusterId(defaultClusterId);
}
public int getClusterIdByName(String iClusterName) {
return wrapped.getClusterIdByName(iClusterName);
}
public String getClusterTypeByName(final String iClusterName) {
return wrapped.getClusterTypeByName(iClusterName);
}
public String getPhysicalClusterNameById(final int iClusterId) {
return wrapped.getPhysicalClusterNameById(iClusterId);
}
public boolean checkForRecordValidity(final OPhysicalPosition ppos) {
return wrapped.checkForRecordValidity(ppos);
}
public String getName() {
return wrapped.getName();
}
public String getURL() {
return wrapped.getURL();
}
public long getVersion() {
return wrapped.getVersion();
}
public void synch() {
wrapped.synch();
}
public int getUsers() {
return wrapped.getUsers();
}
public int addUser() {
return wrapped.addUser();
}
public int removeUser() {
return wrapped.removeUser();
}
public OClusterPosition[] getClusterDataRange(final int currentClusterId) {
return wrapped.getClusterDataRange(currentClusterId);
}
public <V> V callInLock(final Callable<V> iCallable, final boolean iExclusiveLock) {
return wrapped.callInLock(iCallable, iExclusiveLock);
}
@Override
public <V> V callInRecordLock(Callable<V> iCallable, ORID rid, boolean iExclusiveLock) {
return wrapped.callInRecordLock(iCallable, rid, iExclusiveLock);
}
public ODataSegment getDataSegmentById(final int iDataSegmentId) {
return wrapped.getDataSegmentById(iDataSegmentId);
}
public int getDataSegmentIdByName(final String iDataSegmentName) {
return wrapped.getDataSegmentIdByName(iDataSegmentName);
}
public boolean dropDataSegment(final String iName) {
return wrapped.dropDataSegment(iName);
}
public STATUS getStatus() {
return wrapped.getStatus();
}
@Override
public void checkForClusterPermissions(final String iClusterName) {
wrapped.checkForClusterPermissions(iClusterName);
}
@Override
public OPhysicalPosition[] higherPhysicalPositions(int currentClusterId, OPhysicalPosition entry) {
return wrapped.higherPhysicalPositions(currentClusterId, entry);
}
@Override
public OPhysicalPosition[] ceilingPhysicalPositions(int clusterId, OPhysicalPosition physicalPosition) {
return wrapped.ceilingPhysicalPositions(clusterId, physicalPosition);
}
@Override
public OPhysicalPosition[] floorPhysicalPositions(int clusterId, OPhysicalPosition physicalPosition) {
return wrapped.floorPhysicalPositions(clusterId, physicalPosition);
}
@Override
public OPhysicalPosition[] lowerPhysicalPositions(int currentClusterId, OPhysicalPosition entry) {
return wrapped.lowerPhysicalPositions(currentClusterId, entry);
}
@Override
public OSharedResourceAdaptiveExternal getLock() {
return wrapped.getLock();
}
public OStorage getUnderlying() {
return wrapped;
}
@Override
public String getType() {
return "distributed";
}
protected void handleDistributedException(final String iMessage, Exception e, Object... iParams) {
OLogManager.instance().error(this, iMessage, e, iParams);
final Throwable t = e.getCause();
if (t != null) {
if (t instanceof OException)
throw (OException) t;
else if (t.getCause() instanceof OException)
throw (OException) t.getCause();
}
throw new OStorageException(String.format(iMessage, iParams), e);
}
@Override
public void freeze(boolean throwException) {
getFreezableStorage().freeze(throwException);
}
@Override
public void release() {
getFreezableStorage().release();
}
@Override
public void backup(OutputStream out, Map<String, Object> options, Callable<Object> callable) throws IOException {
wrapped.backup(out, options, callable);
}
@Override
public void restore(InputStream in, Map<String, Object> options, Callable<Object> callable) throws IOException {
wrapped.restore(in, options, callable);
}
private OFreezableStorage getFreezableStorage() {
if (wrapped instanceof OFreezableStorage)
return ((OFreezableStorage) wrapped);
else
throw new UnsupportedOperationException("Storage engine " + wrapped.getType() + " does not support freeze operation");
}
}
| 1no label
|
server_src_main_java_com_orientechnologies_orient_server_distributed_ODistributedStorage.java
|
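For orientation, here is a small dependency-free sketch of the routing decision the storage wrapper above applies to every operation: run the call locally when the thread is already inside a distributed execution or replication is disabled, otherwise ship a task to the coordinator and surface any remote failure as an exception. The names below are illustrative, not OrientDB API.

import java.util.function.Supplier;

/** Illustrative routing skeleton mirroring the pattern in ODistributedStorage; not OrientDB API. */
class DistributedRoutingSketch {
    private final boolean alreadyDistributed;  // stands in for RUN_MODE.RUNNING_DISTRIBUTED
    private final boolean replicationActive;   // stands in for dConfig.isReplicationActive(...)

    DistributedRoutingSketch(boolean alreadyDistributed, boolean replicationActive) {
        this.alreadyDistributed = alreadyDistributed;
        this.replicationActive = replicationActive;
    }

    Object route(Supplier<Object> localCall, Supplier<Object> remoteTask) {
        // Already inside a distributed execution, or replication is off: run locally.
        if (alreadyDistributed || !replicationActive) {
            return localCall.get();
        }
        // Otherwise ship the task to the coordinator and surface remote failures.
        Object result = remoteTask.get();
        if (result instanceof Throwable) {
            throw new RuntimeException("Error on distributed execution", (Throwable) result);
        }
        return result;
    }
}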
4,522 |
private class ExpiredDocsCollector extends Collector {
private final MapperService mapperService;
private AtomicReaderContext context;
private List<DocToPurge> docsToPurge = new ArrayList<DocToPurge>();
public ExpiredDocsCollector(String index) {
mapperService = indicesService.indexService(index).mapperService();
}
public void setScorer(Scorer scorer) {
}
public boolean acceptsDocsOutOfOrder() {
return true;
}
public void collect(int doc) {
try {
UidAndRoutingFieldsVisitor fieldsVisitor = new UidAndRoutingFieldsVisitor();
context.reader().document(doc, fieldsVisitor);
Uid uid = fieldsVisitor.uid();
final long version = Versions.loadVersion(context.reader(), new Term(UidFieldMapper.NAME, uid.toBytesRef()));
docsToPurge.add(new DocToPurge(uid.type(), uid.id(), version, fieldsVisitor.routing()));
} catch (Exception e) {
logger.trace("failed to collect doc", e);
}
}
public void setNextReader(AtomicReaderContext context) throws IOException {
this.context = context;
}
public List<DocToPurge> getDocsToPurge() {
return this.docsToPurge;
}
}
| 1no label
|
src_main_java_org_elasticsearch_indices_ttl_IndicesTTLService.java
|
154 |
public class ODoubleSerializer implements OBinarySerializer<Double> {
private static final OBinaryConverter CONVERTER = OBinaryConverterFactory.getConverter();
public static ODoubleSerializer INSTANCE = new ODoubleSerializer();
public static final byte ID = 6;
/**
* size of double value in bytes
*/
public static final int DOUBLE_SIZE = 8;
public int getObjectSize(Double object, Object... hints) {
return DOUBLE_SIZE;
}
public void serialize(Double object, byte[] stream, int startPosition, Object... hints) {
OLongSerializer.INSTANCE.serialize(Double.doubleToLongBits(object), stream, startPosition);
}
public Double deserialize(byte[] stream, int startPosition) {
return Double.longBitsToDouble(OLongSerializer.INSTANCE.deserialize(stream, startPosition));
}
public int getObjectSize(byte[] stream, int startPosition) {
return DOUBLE_SIZE;
}
public byte getId() {
return ID;
}
public int getObjectSizeNative(byte[] stream, int startPosition) {
return DOUBLE_SIZE;
}
public void serializeNative(Double object, byte[] stream, int startPosition, Object... hints) {
CONVERTER.putLong(stream, startPosition, Double.doubleToLongBits(object), ByteOrder.nativeOrder());
}
public Double deserializeNative(byte[] stream, int startPosition) {
return Double.longBitsToDouble(CONVERTER.getLong(stream, startPosition, ByteOrder.nativeOrder()));
}
@Override
public void serializeInDirectMemory(Double object, ODirectMemoryPointer pointer, long offset, Object... hints) {
pointer.setLong(offset, Double.doubleToLongBits(object));
}
@Override
public Double deserializeFromDirectMemory(ODirectMemoryPointer pointer, long offset) {
return Double.longBitsToDouble(pointer.getLong(offset));
}
@Override
public int getObjectSizeInDirectMemory(ODirectMemoryPointer pointer, long offset) {
return DOUBLE_SIZE;
}
public boolean isFixedLength() {
return true;
}
public int getFixedLength() {
return DOUBLE_SIZE;
}
@Override
public Double preprocess(Double value, Object... hints) {
return value;
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_serialization_types_ODoubleSerializer.java
|
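A minimal round-trip sketch of the serializer above, assuming the OrientDB commons classes shown in the sample are on the classpath; the value and offset are illustrative.

import com.orientechnologies.common.serialization.types.ODoubleSerializer;

public class ODoubleSerializerRoundTrip {
    public static void main(String[] args) {
        ODoubleSerializer serializer = ODoubleSerializer.INSTANCE;
        byte[] buffer = new byte[ODoubleSerializer.DOUBLE_SIZE];

        // Write the double as its IEEE 754 long bits starting at offset 0.
        serializer.serialize(3.14159d, buffer, 0);

        // Read it back from the same offset; the value is bit-for-bit identical.
        Double restored = serializer.deserialize(buffer, 0);
        System.out.println(restored); // 3.14159
    }
}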
1,129 |
public class FulfillmentGroupStatusType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, FulfillmentGroupStatusType> TYPES = new LinkedHashMap<String, FulfillmentGroupStatusType>();
/**
* Use FULFILLED, PARTIALLY_FULFILLED, DELIVERED, or PARTIALLY_DELIVERED
* @deprecated
*/
@Deprecated
public static final FulfillmentGroupStatusType SHIPPED = new FulfillmentGroupStatusType("SHIPPED", "Shipped");
/**
* PROCESSING: Used to indicate that the fulfillment group is being processed. For example, during pick or pack processes
* in a warehouse.
*/
public static final FulfillmentGroupStatusType PROCESSING = new FulfillmentGroupStatusType("PROCESSING", "Processing");
/**
* FULFILLED: Used to indicate that the Fulfillment Group is completely fulfilled (e.g. shipped, downloaded, etc.). For some systems,
* this will be the final status on a fulfillment group. For others that want to differentiate between FULFILLED and DELIVERED, usually
* to differentiate between items that have been shipped vs. items that have been received by the customer.
*/
public static final FulfillmentGroupStatusType FULFILLED = new FulfillmentGroupStatusType("FULFILLED", "Fulfilled");
/**
* PARTIALLY_FULFILLED: Used to indicate that one or more items has been fulfilled or partially fulfilled, but that there
* are some items in the fulfillment group that are not fulfilled.
*/
public static final FulfillmentGroupStatusType PARTIALLY_FULFILLED = new FulfillmentGroupStatusType("PARTIALLY_FULFILLED", "Partially Fulfilled");
/**
* DELIVERED: Used to indicate that all items in the fulfillment group have been delivered. This will generally only be used when there is some
* integration with a shipping or fulfillment system to indicate that an item has actually been received by the customer.
*/
public static final FulfillmentGroupStatusType DELIVERED = new FulfillmentGroupStatusType("DELIVERED", "Delivered");
/**
* PARTIALLY_DELIVERED: Indicates that an item or a FulfillmentGroup has been partially received by the customer.
*/
public static final FulfillmentGroupStatusType PARTIALLY_DELIVERED = new FulfillmentGroupStatusType("PARTIALLY_DELIVERED", "Partially Delivered");
public static FulfillmentGroupStatusType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public FulfillmentGroupStatusType() {
//do nothing
}
public FulfillmentGroupStatusType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
@Override
public String getType() {
return type;
}
@Override
public String getFriendlyType() {
return friendlyType;
}
private void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
FulfillmentGroupStatusType other = (FulfillmentGroupStatusType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_service_type_FulfillmentGroupStatusType.java
|
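A short sketch of how the extensible-enum pattern above is typically consumed: each constant self-registers in the static TYPES map via setType(), so getInstance() resolves a persisted string back to the same constant instance. Assumes the Broadleaf framework class from the sample is on the classpath.

import org.broadleafcommerce.core.order.service.type.FulfillmentGroupStatusType;

public class FulfillmentStatusLookup {
    public static void main(String[] args) {
        // The string typically comes from a persisted column value.
        FulfillmentGroupStatusType status = FulfillmentGroupStatusType.getInstance("FULFILLED");

        // Same instance as the static constant, so reference comparison holds here.
        System.out.println(status == FulfillmentGroupStatusType.FULFILLED); // true
        System.out.println(status.getFriendlyType());                       // Fulfilled
    }
}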
128 |
final class ClientServiceProxy implements ClientService {
private final ClientEngineImpl clientEngine;
ClientServiceProxy(ClientEngineImpl clientEngine) {
this.clientEngine = clientEngine;
}
@Override
public Collection<Client> getConnectedClients() {
return clientEngine.getClients();
}
@Override
public String addClientListener(ClientListener clientListener) {
return clientEngine.addClientListener(clientListener);
}
@Override
public boolean removeClientListener(String registrationId) {
return clientEngine.removeClientListener(registrationId);
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_client_ClientServiceProxy.java
|
125 |
public enum Mapping {
DEFAULT,
TEXT,
STRING,
TEXTSTRING;
/**
* Returns the mapping as a parameter so that it can be passed to {@link TitanManagement#addIndexKey(TitanGraphIndex, com.thinkaurelius.titan.core.PropertyKey, Parameter[])}
* @return
*/
public Parameter getParameter() {
return ParameterType.MAPPING.getParameter(this);
}
//------------ USED INTERNALLY -----------
public static Mapping getMapping(KeyInformation information) {
Object value = ParameterType.MAPPING.findParameter(information.getParameters(),null);
if (value==null) return DEFAULT;
else {
Preconditions.checkArgument((value instanceof Mapping || value instanceof String),"Invalid mapping specified: %s",value);
if (value instanceof String) {
value = Mapping.valueOf(value.toString().toUpperCase());
}
return (Mapping)value;
}
}
public static Mapping getMapping(String store, String key, KeyInformation.IndexRetriever informations) {
KeyInformation ki = informations.get(store, key);
Preconditions.checkArgument(ki!=null,"Could not find key information for: %s",key);
return getMapping(ki);
}
}
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_schema_Mapping.java
|
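A brief sketch of how the enum's getParameter() helper is meant to be used when registering an indexed key, following the addIndexKey signature its own javadoc cites; the management, index, and key handles are assumed to come from an already-open TitanGraph and are not shown.

import com.thinkaurelius.titan.core.PropertyKey;
import com.thinkaurelius.titan.core.schema.Mapping;
import com.thinkaurelius.titan.core.schema.TitanGraphIndex;
import com.thinkaurelius.titan.core.schema.TitanManagement;

public class MappingUsageSketch {
    /**
     * Registers a property key on a mixed index with an explicit STRING mapping,
     * i.e. asks the index backend to treat the value as a whole string rather than analyzed text.
     */
    static void addStringMappedKey(TitanManagement mgmt, TitanGraphIndex index, PropertyKey key) {
        mgmt.addIndexKey(index, key, Mapping.STRING.getParameter());
    }
}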
17 |
final class EntryIterator extends AbstractEntryIterator<K, V, Map.Entry<K, V>> {
EntryIterator(final OMVRBTreeEntry<K, V> first) {
super(first);
}
public Map.Entry<K, V> next() {
return nextEntry();
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_collection_OMVRBTree.java
|
5,361 |
public class InternalMin extends MetricsAggregation.SingleValue implements Min {
public final static Type TYPE = new Type("min");
public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
@Override
public InternalMin readResult(StreamInput in) throws IOException {
InternalMin result = new InternalMin();
result.readFrom(in);
return result;
}
};
public static void registerStreams() {
AggregationStreams.registerStream(STREAM, TYPE.stream());
}
private double min;
InternalMin() {} // for serialization
public InternalMin(String name, double min) {
super(name);
this.min = min;
}
@Override
public double value() {
return min;
}
public double getValue() {
return min;
}
@Override
public Type type() {
return TYPE;
}
@Override
public InternalMin reduce(ReduceContext reduceContext) {
List<InternalAggregation> aggregations = reduceContext.aggregations();
if (aggregations.size() == 1) {
return (InternalMin) aggregations.get(0);
}
InternalMin reduced = null;
for (InternalAggregation aggregation : aggregations) {
if (reduced == null) {
reduced = (InternalMin) aggregation;
} else {
reduced.min = Math.min(reduced.min, ((InternalMin) aggregation).min);
}
}
if (reduced != null) {
return reduced;
}
return (InternalMin) aggregations.get(0);
}
@Override
public void readFrom(StreamInput in) throws IOException {
name = in.readString();
valueFormatter = ValueFormatterStreams.readOptional(in);
min = in.readDouble();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
ValueFormatterStreams.writeOptional(valueFormatter, out);
out.writeDouble(min);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(name);
boolean hasValue = !Double.isInfinite(min);
builder.field(CommonFields.VALUE, hasValue ? min : null);
if (hasValue && valueFormatter != null) {
builder.field(CommonFields.VALUE_AS_STRING, valueFormatter.format(min));
}
builder.endObject();
return builder;
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_metrics_min_InternalMin.java
|
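A small standalone sketch of the reduce step the InternalMin aggregation above performs: partial minima produced per shard are folded into a single global minimum. Plain Java, no Elasticsearch types; the infinity sentinel mirrors how the sample's toXContent treats an infinite min as "no value".

import java.util.Arrays;
import java.util.List;

public class MinReduceSketch {
    /** Folds per-shard partial minima into one global minimum, as InternalMin#reduce does. */
    static double reduce(List<Double> shardMins) {
        double reduced = Double.POSITIVE_INFINITY; // "no value yet"
        for (double partial : shardMins) {
            reduced = Math.min(reduced, partial);
        }
        return reduced;
    }

    public static void main(String[] args) {
        System.out.println(reduce(Arrays.asList(3.2, -1.5, 7.0))); // -1.5
    }
}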
1,132 |
public class OrderStatus implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, OrderStatus> TYPES = new LinkedHashMap<String, OrderStatus>();
public static final OrderStatus NAMED = new OrderStatus("NAMED", "Named");
public static final OrderStatus QUOTE = new OrderStatus("QUOTE", "Quote");
public static final OrderStatus IN_PROCESS = new OrderStatus("IN_PROCESS", "In Process");
public static final OrderStatus SUBMITTED = new OrderStatus("SUBMITTED", "Submitted");
public static final OrderStatus CANCELLED = new OrderStatus("CANCELLED", "Cancelled");
public static OrderStatus getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public OrderStatus() {
//do nothing
}
public OrderStatus(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
private void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
OrderStatus other = (OrderStatus) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_service_type_OrderStatus.java
|
5,313 |
public class StringTermsAggregator extends BucketsAggregator {
private final ValuesSource valuesSource;
private final InternalOrder order;
private final int requiredSize;
private final int shardSize;
private final long minDocCount;
protected final BytesRefHash bucketOrds;
private final IncludeExclude includeExclude;
private BytesValues values;
public StringTermsAggregator(String name, AggregatorFactories factories, ValuesSource valuesSource, long estimatedBucketCount,
InternalOrder order, int requiredSize, int shardSize, long minDocCount,
IncludeExclude includeExclude, AggregationContext aggregationContext, Aggregator parent) {
super(name, BucketAggregationMode.PER_BUCKET, factories, estimatedBucketCount, aggregationContext, parent);
this.valuesSource = valuesSource;
this.order = InternalOrder.validate(order, this);
this.requiredSize = requiredSize;
this.shardSize = shardSize;
this.minDocCount = minDocCount;
this.includeExclude = includeExclude;
bucketOrds = new BytesRefHash(estimatedBucketCount, aggregationContext.pageCacheRecycler());
}
@Override
public boolean shouldCollect() {
return true;
}
@Override
public void setNextReader(AtomicReaderContext reader) {
values = valuesSource.bytesValues();
}
@Override
public void collect(int doc, long owningBucketOrdinal) throws IOException {
assert owningBucketOrdinal == 0;
final int valuesCount = values.setDocument(doc);
for (int i = 0; i < valuesCount; ++i) {
final BytesRef bytes = values.nextValue();
if (includeExclude != null && !includeExclude.accept(bytes)) {
continue;
}
final int hash = values.currentValueHash();
assert hash == bytes.hashCode();
long bucketOrdinal = bucketOrds.add(bytes, hash);
if (bucketOrdinal < 0) { // already seen
bucketOrdinal = - 1 - bucketOrdinal;
}
collectBucket(doc, bucketOrdinal);
}
}
/** Returns an iterator over the field data terms. */
private static Iterator<BytesRef> terms(final BytesValues.WithOrdinals bytesValues, boolean reverse) {
final Ordinals.Docs ordinals = bytesValues.ordinals();
if (reverse) {
return new UnmodifiableIterator<BytesRef>() {
long i = ordinals.getMaxOrd() - 1;
@Override
public boolean hasNext() {
return i >= Ordinals.MIN_ORDINAL;
}
@Override
public BytesRef next() {
bytesValues.getValueByOrd(i--);
return bytesValues.copyShared();
}
};
} else {
return new UnmodifiableIterator<BytesRef>() {
long i = Ordinals.MIN_ORDINAL;
@Override
public boolean hasNext() {
return i < ordinals.getMaxOrd();
}
@Override
public BytesRef next() {
bytesValues.getValueByOrd(i++);
return bytesValues.copyShared();
}
};
}
}
@Override
public StringTerms buildAggregation(long owningBucketOrdinal) {
assert owningBucketOrdinal == 0;
if (minDocCount == 0 && (order != InternalOrder.COUNT_DESC || bucketOrds.size() < requiredSize)) {
// we need to fill-in the blanks
List<BytesValues.WithOrdinals> valuesWithOrdinals = Lists.newArrayList();
for (AtomicReaderContext ctx : context.searchContext().searcher().getTopReaderContext().leaves()) {
context.setNextReader(ctx);
final BytesValues values = valuesSource.bytesValues();
if (values instanceof BytesValues.WithOrdinals) {
valuesWithOrdinals.add((BytesValues.WithOrdinals) values);
} else {
// brute force
for (int docId = 0; docId < ctx.reader().maxDoc(); ++docId) {
final int valueCount = values.setDocument(docId);
for (int i = 0; i < valueCount; ++i) {
final BytesRef term = values.nextValue();
if (includeExclude == null || includeExclude.accept(term)) {
bucketOrds.add(term, values.currentValueHash());
}
}
}
}
}
// With ordinals we can be smarter and add just as many terms as necessary to the hash table
// For instance, if sorting by term asc, we only need to get the first `requiredSize` terms as other terms would
// either be excluded by the priority queue or at reduce time.
if (valuesWithOrdinals.size() > 0) {
final boolean reverse = order == InternalOrder.TERM_DESC;
Comparator<BytesRef> comparator = BytesRef.getUTF8SortedAsUnicodeComparator();
if (reverse) {
comparator = Collections.reverseOrder(comparator);
}
Iterator<? extends BytesRef>[] iterators = new Iterator[valuesWithOrdinals.size()];
for (int i = 0; i < valuesWithOrdinals.size(); ++i) {
iterators[i] = terms(valuesWithOrdinals.get(i), reverse);
}
Iterator<BytesRef> terms = Iterators2.mergeSorted(Arrays.asList(iterators), comparator, true);
if (includeExclude != null) {
terms = Iterators.filter(terms, new Predicate<BytesRef>() {
@Override
public boolean apply(BytesRef input) {
return includeExclude.accept(input);
}
});
}
if (order == InternalOrder.COUNT_ASC) {
// let's try to find `shardSize` terms that matched no hit
// this one needs shardSize and not requiredSize because even though terms have a count of 0 here,
// they might have higher counts on other shards
for (int added = 0; added < shardSize && terms.hasNext(); ) {
if (bucketOrds.add(terms.next()) >= 0) {
++added;
}
}
} else if (order == InternalOrder.COUNT_DESC) {
// add terms until there are enough buckets
while (bucketOrds.size() < requiredSize && terms.hasNext()) {
bucketOrds.add(terms.next());
}
} else if (order == InternalOrder.TERM_ASC || order == InternalOrder.TERM_DESC) {
// add the `requiredSize` least terms
for (int i = 0; i < requiredSize && terms.hasNext(); ++i) {
bucketOrds.add(terms.next());
}
} else {
// other orders (aggregations) are not optimizable
while (terms.hasNext()) {
bucketOrds.add(terms.next());
}
}
}
}
final int size = (int) Math.min(bucketOrds.size(), shardSize);
BucketPriorityQueue ordered = new BucketPriorityQueue(size, order.comparator(this));
StringTerms.Bucket spare = null;
for (int i = 0; i < bucketOrds.size(); i++) {
if (spare == null) {
spare = new StringTerms.Bucket(new BytesRef(), 0, null);
}
bucketOrds.get(i, spare.termBytes);
spare.docCount = bucketDocCount(i);
spare.bucketOrd = i;
spare = (StringTerms.Bucket) ordered.insertWithOverflow(spare);
}
final InternalTerms.Bucket[] list = new InternalTerms.Bucket[ordered.size()];
for (int i = ordered.size() - 1; i >= 0; --i) {
final StringTerms.Bucket bucket = (StringTerms.Bucket) ordered.pop();
bucket.aggregations = bucketAggregations(bucket.bucketOrd);
list[i] = bucket;
}
return new StringTerms(name, order, requiredSize, minDocCount, Arrays.asList(list));
}
@Override
public StringTerms buildEmptyAggregation() {
return new StringTerms(name, order, requiredSize, minDocCount, Collections.<InternalTerms.Bucket>emptyList());
}
@Override
public void doRelease() {
Releasables.release(bucketOrds);
}
/**
* Extension of StringTermsAggregator that caches bucket ords using terms ordinals.
*/
public static class WithOrdinals extends StringTermsAggregator {
private final BytesValuesSource.WithOrdinals valuesSource;
private BytesValues.WithOrdinals bytesValues;
private Ordinals.Docs ordinals;
private LongArray ordinalToBucket;
public WithOrdinals(String name, AggregatorFactories factories, BytesValuesSource.WithOrdinals valuesSource, long esitmatedBucketCount,
InternalOrder order, int requiredSize, int shardSize, long minDocCount, AggregationContext aggregationContext, Aggregator parent) {
super(name, factories, valuesSource, esitmatedBucketCount, order, requiredSize, shardSize, minDocCount, null, aggregationContext, parent);
this.valuesSource = valuesSource;
}
@Override
public void setNextReader(AtomicReaderContext reader) {
bytesValues = valuesSource.bytesValues();
ordinals = bytesValues.ordinals();
final long maxOrd = ordinals.getMaxOrd();
if (ordinalToBucket == null || ordinalToBucket.size() < maxOrd) {
if (ordinalToBucket != null) {
ordinalToBucket.release();
}
ordinalToBucket = BigArrays.newLongArray(BigArrays.overSize(maxOrd), context().pageCacheRecycler(), false);
}
ordinalToBucket.fill(0, maxOrd, -1L);
}
@Override
public void collect(int doc, long owningBucketOrdinal) throws IOException {
assert owningBucketOrdinal == 0 : "this is a per_bucket aggregator";
final int valuesCount = ordinals.setDocument(doc);
for (int i = 0; i < valuesCount; ++i) {
final long ord = ordinals.nextOrd();
long bucketOrd = ordinalToBucket.get(ord);
if (bucketOrd < 0) { // unlikely condition on a low-cardinality field
final BytesRef bytes = bytesValues.getValueByOrd(ord);
final int hash = bytesValues.currentValueHash();
assert hash == bytes.hashCode();
bucketOrd = bucketOrds.add(bytes, hash);
if (bucketOrd < 0) { // already seen in another segment
bucketOrd = - 1 - bucketOrd;
}
ordinalToBucket.set(ord, bucketOrd);
}
collectBucket(doc, bucketOrd);
}
}
@Override
public void doRelease() {
Releasables.release(bucketOrds, ordinalToBucket);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_bucket_terms_StringTermsAggregator.java
|
115 |
public interface Combiner<M> {
/**
* Combines two state into a combined state.
*
* @param m1
* @param m2
* @return The combination of m1 and m2
*/
public M combine(M m1, M m2);
}
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_olap_Combiner.java
|
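A minimal example implementation of the Combiner contract above, assuming titan-core is on the classpath; it simply sums two partial integer states, a typical associative OLAP reduction.

import com.thinkaurelius.titan.core.olap.Combiner;

/** Combines two partial sums into one combined sum. */
public class SumCombiner implements Combiner<Integer> {
    @Override
    public Integer combine(Integer m1, Integer m2) {
        return m1 + m2;
    }
}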
526 |
public class OSerializationException extends OException {
private static final long serialVersionUID = -3003977236233691448L;
public OSerializationException(String string) {
super(string);
}
public OSerializationException(String message, Throwable cause) {
super(message, cause);
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_exception_OSerializationException.java
|
1,949 |
public class MapPutRequest extends KeyBasedClientRequest implements Portable, SecureRequest {
protected Data key;
protected Data value;
protected String name;
protected long threadId;
protected long ttl;
protected transient long startTime;
public MapPutRequest() {
}
public MapPutRequest(String name, Data key, Data value, long threadId, long ttl) {
this.name = name;
this.key = key;
this.value = value;
this.threadId = threadId;
this.ttl = ttl;
}
public MapPutRequest(String name, Data key, Data value, long threadId) {
this.name = name;
this.key = key;
this.value = value;
this.threadId = threadId;
this.ttl = -1;
}
public int getFactoryId() {
return MapPortableHook.F_ID;
}
public int getClassId() {
return MapPortableHook.PUT;
}
protected Object getKey() {
return key;
}
@Override
protected void beforeProcess() {
startTime = System.currentTimeMillis();
}
@Override
protected void afterResponse() {
final long latency = System.currentTimeMillis() - startTime;
final MapService mapService = getService();
MapContainer mapContainer = mapService.getMapContainer(name);
if (mapContainer.getMapConfig().isStatisticsEnabled()) {
mapService.getLocalMapStatsImpl(name).incrementPuts(latency);
}
}
@Override
protected Operation prepareOperation() {
PutOperation op = new PutOperation(name, key, value, ttl);
op.setThreadId(threadId);
return op;
}
public String getServiceName() {
return MapService.SERVICE_NAME;
}
public void write(PortableWriter writer) throws IOException {
writer.writeUTF("n", name);
writer.writeLong("t", threadId);
writer.writeLong("ttl", ttl);
final ObjectDataOutput out = writer.getRawDataOutput();
key.writeData(out);
value.writeData(out);
}
public void read(PortableReader reader) throws IOException {
name = reader.readUTF("n");
threadId = reader.readLong("t");
ttl = reader.readLong("ttl");
final ObjectDataInput in = reader.getRawDataInput();
key = new Data();
key.readData(in);
value = new Data();
value.readData(in);
}
public Permission getRequiredPermission() {
return new MapPermission(name, ActionConstants.ACTION_PUT);
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_client_MapPutRequest.java
|
729 |
private static class FindInvocationsVisitor extends Visitor {
private Declaration declaration;
private final Set<Tree.PositionalArgumentList> posResults =
new HashSet<Tree.PositionalArgumentList>();
private final Set<Tree.NamedArgumentList> namedResults =
new HashSet<Tree.NamedArgumentList>();
Set<Tree.PositionalArgumentList> getPositionalArgLists() {
return posResults;
}
Set<Tree.NamedArgumentList> getNamedArgLists() {
return namedResults;
}
private FindInvocationsVisitor(Declaration declaration) {
this.declaration=declaration;
}
@Override
public void visit(Tree.InvocationExpression that) {
super.visit(that);
Tree.Primary primary = that.getPrimary();
if (primary instanceof Tree.MemberOrTypeExpression) {
if (((Tree.MemberOrTypeExpression) primary).getDeclaration()
.refines(declaration)) {
Tree.PositionalArgumentList pal = that.getPositionalArgumentList();
if (pal!=null) {
posResults.add(pal);
}
Tree.NamedArgumentList nal = that.getNamedArgumentList();
if (nal!=null) {
namedResults.add(nal);
}
}
}
}
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_refactor_ChangeParametersRefactoring.java
|
1,121 |
public class ScriptsScoreBenchmark extends BasicScriptBenchmark {
public static void main(String[] args) throws Exception {
int minTerms = 1;
int maxTerms = 50;
int maxIter = 100;
int warmerIter = 10;
boolean runMVEL = false;
init(maxTerms);
List<Results> allResults = new ArrayList<BasicScriptBenchmark.Results>();
Settings settings = settingsBuilder().put("plugin.types", NativeScriptExamplesPlugin.class.getName()).build();
String clusterName = ScriptsScoreBenchmark.class.getSimpleName();
Node node1 = nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings).put("name", "node1")).node();
Client client = node1.client();
client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
indexData(10000, client, false);
client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
Results results = new Results();
results.init(maxTerms - minTerms, "native tfidf script score dense posting list",
"Results for native script score with dense posting list:", "black", "--");
// init native script searches
List<Entry<String, RequestInfo>> searchRequests = initNativeSearchRequests(minTerms, maxTerms,
NativeNaiveTFIDFScoreScript.NATIVE_NAIVE_TFIDF_SCRIPT_SCORE, true);
// run actual benchmark
runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
allResults.add(results);
results = new Results();
results.init(maxTerms - minTerms, "term query dense posting list", "Results for term query with dense posting lists:", "green",
"--");
// init term queries
searchRequests = initTermQueries(minTerms, maxTerms);
// run actual benchmark
runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
allResults.add(results);
if (runMVEL) {
results = new Results();
results.init(maxTerms - minTerms, "mvel tfidf dense posting list", "Results for mvel score with dense posting list:", "red",
"--");
// init native script searches
searchRequests = initNativeSearchRequests(
minTerms,
maxTerms,
"score = 0.0; fi= _terminfo[\"text\"]; for(i=0; i<text.size(); i++){terminfo = fi[text.get(i)]; score = score + terminfo.tf()*fi.getDocCount()/terminfo.df();} return score;",
false);
// run actual benchmark
runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
allResults.add(results);
}
indexData(10000, client, true);
results = new Results();
results.init(maxTerms - minTerms, "native tfidf script score sparse posting list",
"Results for native script scorewith sparse posting list:", "black", "-.");
// init native script searches
searchRequests = initNativeSearchRequests(minTerms, maxTerms, NativeNaiveTFIDFScoreScript.NATIVE_NAIVE_TFIDF_SCRIPT_SCORE, true);
// run actual benchmark
runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
allResults.add(results);
results = new Results();
results.init(maxTerms - minTerms, "term query sparse posting list", "Results for term query with sparse posting lists:", "green",
"-.");
// init term queries
searchRequests = initTermQueries(minTerms, maxTerms);
// run actual benchmark
runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
allResults.add(results);
if (runMVEL) {
results = new Results();
results.init(maxTerms - minTerms, "mvel tfidf sparse posting list", "Results for mvel score with sparse posting list:", "red",
"-.");
// init native script searches
searchRequests = initNativeSearchRequests(
minTerms,
maxTerms,
"score = 0.0; fi= _terminfo[\"text\"]; for(i=0; i<text.size(); i++){terminfo = fi[text.get(i)]; score = score + terminfo.tf()*fi.getDocCount()/terminfo.df();} return score;",
false);
// run actual benchmark
runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
allResults.add(results);
}
printOctaveScript(allResults, args);
client.close();
node1.close();
}
}
| 0true
|
src_test_java_org_elasticsearch_benchmark_scripts_score_ScriptsScoreBenchmark.java
|
1,378 |
public static class Builder {
private String index;
private State state = State.OPEN;
private long version = 1;
private Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
private final ImmutableOpenMap.Builder<String, MappingMetaData> mappings;
private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases;
private final ImmutableOpenMap.Builder<String, Custom> customs;
public Builder(String index) {
this.index = index;
this.mappings = ImmutableOpenMap.builder();
this.aliases = ImmutableOpenMap.builder();
this.customs = ImmutableOpenMap.builder();
}
public Builder(IndexMetaData indexMetaData) {
this.index = indexMetaData.index();
this.state = indexMetaData.state;
this.version = indexMetaData.version;
this.settings = indexMetaData.settings();
this.mappings = ImmutableOpenMap.builder(indexMetaData.mappings);
this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases);
this.customs = ImmutableOpenMap.builder(indexMetaData.customs);
}
public String index() {
return index;
}
public Builder index(String index) {
this.index = index;
return this;
}
public Builder numberOfShards(int numberOfShards) {
settings = settingsBuilder().put(settings).put(SETTING_NUMBER_OF_SHARDS, numberOfShards).build();
return this;
}
public int numberOfShards() {
return settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1);
}
public Builder numberOfReplicas(int numberOfReplicas) {
settings = settingsBuilder().put(settings).put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas).build();
return this;
}
public int numberOfReplicas() {
return settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1);
}
public Builder settings(Settings.Builder settings) {
this.settings = settings.build();
return this;
}
public Builder settings(Settings settings) {
this.settings = settings;
return this;
}
public MappingMetaData mapping(String type) {
return mappings.get(type);
}
public Builder removeMapping(String mappingType) {
mappings.remove(mappingType);
return this;
}
public Builder putMapping(String type, String source) throws IOException {
XContentParser parser = XContentFactory.xContent(source).createParser(source);
try {
putMapping(new MappingMetaData(type, parser.mapOrdered()));
} finally {
parser.close();
}
return this;
}
public Builder putMapping(MappingMetaData mappingMd) {
mappings.put(mappingMd.type(), mappingMd);
return this;
}
public Builder state(State state) {
this.state = state;
return this;
}
public Builder putAlias(AliasMetaData aliasMetaData) {
aliases.put(aliasMetaData.alias(), aliasMetaData);
return this;
}
public Builder putAlias(AliasMetaData.Builder aliasMetaData) {
aliases.put(aliasMetaData.alias(), aliasMetaData.build());
return this;
}
public Builder removerAlias(String alias) {
aliases.remove(alias);
return this;
}
public Builder putCustom(String type, Custom customIndexMetaData) {
this.customs.put(type, customIndexMetaData);
return this;
}
public Builder removeCustom(String type) {
this.customs.remove(type);
return this;
}
public Custom getCustom(String type) {
return this.customs.get(type);
}
public long version() {
return this.version;
}
public Builder version(long version) {
this.version = version;
return this;
}
public IndexMetaData build() {
ImmutableOpenMap.Builder<String, AliasMetaData> tmpAliases = aliases;
Settings tmpSettings = settings;
// For backward compatibility
String[] legacyAliases = settings.getAsArray("index.aliases");
if (legacyAliases.length > 0) {
tmpAliases = ImmutableOpenMap.builder();
for (String alias : legacyAliases) {
AliasMetaData aliasMd = AliasMetaData.newAliasMetaDataBuilder(alias).build();
tmpAliases.put(alias, aliasMd);
}
tmpAliases.putAll(aliases);
// Remove index.aliases from settings once they are migrated to the new data structure
tmpSettings = ImmutableSettings.settingsBuilder().put(settings).putArray("index.aliases").build();
}
// update default mapping on the MappingMetaData
if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) {
MappingMetaData defaultMapping = mappings.get(MapperService.DEFAULT_MAPPING);
for (ObjectCursor<MappingMetaData> cursor : mappings.values()) {
cursor.value.updateDefaultMapping(defaultMapping);
}
}
return new IndexMetaData(index, version, state, tmpSettings, mappings.build(), tmpAliases.build(), customs.build());
}
public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject(indexMetaData.index(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("version", indexMetaData.version());
builder.field("state", indexMetaData.state().toString().toLowerCase(Locale.ENGLISH));
boolean binary = params.paramAsBoolean("binary", false);
builder.startObject("settings");
for (Map.Entry<String, String> entry : indexMetaData.settings().getAsMap().entrySet()) {
builder.field(entry.getKey(), entry.getValue());
}
builder.endObject();
builder.startArray("mappings");
for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.mappings()) {
if (binary) {
builder.value(cursor.value.source().compressed());
} else {
byte[] data = cursor.value.source().uncompressed();
XContentParser parser = XContentFactory.xContent(data).createParser(data);
Map<String, Object> mapping = parser.mapOrdered();
parser.close();
builder.map(mapping);
}
}
builder.endArray();
for (ObjectObjectCursor<String, Custom> cursor : indexMetaData.customs()) {
builder.startObject(cursor.key, XContentBuilder.FieldCaseConversion.NONE);
lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params);
builder.endObject();
}
builder.startObject("aliases");
for (ObjectCursor<AliasMetaData> cursor : indexMetaData.aliases().values()) {
AliasMetaData.Builder.toXContent(cursor.value, builder, params);
}
builder.endObject();
builder.endObject();
}
public static IndexMetaData fromXContent(XContentParser parser) throws IOException {
if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
parser.nextToken();
}
Builder builder = new Builder(parser.currentName());
String currentFieldName = null;
XContentParser.Token token = parser.nextToken();
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("settings".equals(currentFieldName)) {
builder.settings(ImmutableSettings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())));
} else if ("mappings".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
String mappingType = currentFieldName;
Map<String, Object> mappingSource = MapBuilder.<String, Object>newMapBuilder().put(mappingType, parser.mapOrdered()).map();
builder.putMapping(new MappingMetaData(mappingType, mappingSource));
}
}
} else if ("aliases".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
builder.putAlias(AliasMetaData.Builder.fromXContent(parser));
}
} else {
// check if its a custom index metadata
Custom.Factory<Custom> factory = lookupFactory(currentFieldName);
if (factory == null) {
//TODO warn
parser.skipChildren();
} else {
builder.putCustom(factory.type(), factory.fromXContent(parser));
}
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("mappings".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
builder.putMapping(new MappingMetaData(new CompressedString(parser.binaryValue())));
} else {
Map<String, Object> mapping = parser.mapOrdered();
if (mapping.size() == 1) {
String mappingType = mapping.keySet().iterator().next();
builder.putMapping(new MappingMetaData(mappingType, mapping));
}
}
}
}
} else if (token.isValue()) {
if ("state".equals(currentFieldName)) {
builder.state(State.fromString(parser.text()));
} else if ("version".equals(currentFieldName)) {
builder.version(parser.longValue());
}
}
}
return builder.build();
}
public static IndexMetaData readFrom(StreamInput in) throws IOException {
Builder builder = new Builder(in.readString());
builder.version(in.readLong());
builder.state(State.fromId(in.readByte()));
builder.settings(readSettingsFromStream(in));
int mappingsSize = in.readVInt();
for (int i = 0; i < mappingsSize; i++) {
MappingMetaData mappingMd = MappingMetaData.readFrom(in);
builder.putMapping(mappingMd);
}
int aliasesSize = in.readVInt();
for (int i = 0; i < aliasesSize; i++) {
AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in);
builder.putAlias(aliasMd);
}
int customSize = in.readVInt();
for (int i = 0; i < customSize; i++) {
String type = in.readString();
Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in);
builder.putCustom(type, customIndexMetaData);
}
return builder.build();
}
public static void writeTo(IndexMetaData indexMetaData, StreamOutput out) throws IOException {
out.writeString(indexMetaData.index());
out.writeLong(indexMetaData.version());
out.writeByte(indexMetaData.state().id());
writeSettingsToStream(indexMetaData.settings(), out);
out.writeVInt(indexMetaData.mappings().size());
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.mappings().values()) {
MappingMetaData.writeTo(cursor.value, out);
}
out.writeVInt(indexMetaData.aliases().size());
for (ObjectCursor<AliasMetaData> cursor : indexMetaData.aliases().values()) {
AliasMetaData.Builder.writeTo(cursor.value, out);
}
out.writeVInt(indexMetaData.customs().size());
for (ObjectObjectCursor<String, Custom> cursor : indexMetaData.customs()) {
out.writeString(cursor.key);
lookupFactorySafe(cursor.key).writeTo(cursor.value, out);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_cluster_metadata_IndexMetaData.java
|
324 |
public class NodesInfoAction extends ClusterAction<NodesInfoRequest, NodesInfoResponse, NodesInfoRequestBuilder> {
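// Stateless singleton action descriptor, identified by its action name (the usual Elasticsearch client action pattern).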
public static final NodesInfoAction INSTANCE = new NodesInfoAction();
public static final String NAME = "cluster/nodes/info";
private NodesInfoAction() {
super(NAME);
}
@Override
public NodesInfoResponse newResponse() {
return new NodesInfoResponse();
}
@Override
public NodesInfoRequestBuilder newRequestBuilder(ClusterAdminClient client) {
return new NodesInfoRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_info_NodesInfoAction.java
|
36 |
public class SetCommandParser extends TypeAwareCommandParser {
public SetCommandParser(TextCommandConstants.TextCommandType type) {
super(type);
}
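// Parses the memcached storage command line "<cmd> <key> <flags> <exptime> <bytes> [noreply]"; any missing mandatory token yields a client error.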
public TextCommand parser(SocketTextReader socketTextReader, String cmd, int space) {
StringTokenizer st = new StringTokenizer(cmd);
st.nextToken();
String key = null;
int valueLen = 0;
int flag = 0;
int expiration = 0;
boolean noReply = false;
if (st.hasMoreTokens()) {
key = st.nextToken();
} else {
return new ErrorCommand(ERROR_CLIENT);
}
if (st.hasMoreTokens()) {
flag = Integer.parseInt(st.nextToken());
} else {
return new ErrorCommand(ERROR_CLIENT);
}
if (st.hasMoreTokens()) {
expiration = Integer.parseInt(st.nextToken());
} else {
return new ErrorCommand(ERROR_CLIENT);
}
if (st.hasMoreTokens()) {
valueLen = Integer.parseInt(st.nextToken());
} else {
return new ErrorCommand(ERROR_CLIENT);
}
if (st.hasMoreTokens()) {
noReply = "noreply".equals(st.nextToken());
}
return new SetCommand(type, key, flag, expiration, valueLen, noReply);
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_memcache_SetCommandParser.java
|
640 |
public class ShardStatus extends BroadcastShardOperationResponse {
private ShardRouting shardRouting;
IndexShardState state;
ByteSizeValue storeSize;
long translogId = -1;
long translogOperations = -1;
DocsStatus docs;
MergeStats mergeStats;
RefreshStats refreshStats;
FlushStats flushStats;
PeerRecoveryStatus peerRecoveryStatus;
GatewayRecoveryStatus gatewayRecoveryStatus;
GatewaySnapshotStatus gatewaySnapshotStatus;
ShardStatus() {
}
ShardStatus(ShardRouting shardRouting) {
super(shardRouting.index(), shardRouting.id());
this.shardRouting = shardRouting;
}
/**
* The shard routing information (cluster wide shard state).
*/
public ShardRouting getShardRouting() {
return this.shardRouting;
}
/**
* The shard state (index/local state).
*/
public IndexShardState getState() {
return state;
}
/**
* The current size of the shard index storage.
*/
public ByteSizeValue getStoreSize() {
return storeSize;
}
/**
* The transaction log id.
*/
public long getTranslogId() {
return translogId;
}
/**
* The number of transaction operations in the transaction log.
*/
public long getTranslogOperations() {
return translogOperations;
}
/**
* Docs level information for the shard index, <tt>null</tt> if not applicable.
*/
public DocsStatus getDocs() {
return docs;
}
/**
* Index merge statistics.
*/
public MergeStats getMergeStats() {
return this.mergeStats;
}
/**
* Refresh stats.
*/
public RefreshStats getRefreshStats() {
return this.refreshStats;
}
public FlushStats getFlushStats() {
return this.flushStats;
}
/**
* Peer recovery status (<tt>null</tt> if not applicable). Real time while an ongoing recovery
* is in progress, and a summary once it is done.
*/
public PeerRecoveryStatus getPeerRecoveryStatus() {
return peerRecoveryStatus;
}
/**
* Gateway recovery status (<tt>null</tt> if not applicable). Real time while an ongoing recovery
* is in progress, and a summary once it is done.
*/
public GatewayRecoveryStatus getGatewayRecoveryStatus() {
return gatewayRecoveryStatus;
}
/**
* The current on going snapshot to the gateway or the last one if none is on going.
*/
public GatewaySnapshotStatus getGatewaySnapshotStatus() {
return gatewaySnapshotStatus;
}
public static ShardStatus readIndexShardStatus(StreamInput in) throws IOException {
ShardStatus shardStatus = new ShardStatus();
shardStatus.readFrom(in);
return shardStatus;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardRouting.writeTo(out);
out.writeByte(state.id());
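// Each nullable sub-structure is preceded by a boolean presence flag so readFrom() knows whether to read it.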
if (storeSize == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
storeSize.writeTo(out);
}
out.writeLong(translogId);
out.writeLong(translogOperations);
if (docs == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeLong(docs.getNumDocs());
out.writeLong(docs.getMaxDoc());
out.writeLong(docs.getDeletedDocs());
}
if (peerRecoveryStatus == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeByte(peerRecoveryStatus.stage.value());
out.writeVLong(peerRecoveryStatus.startTime);
out.writeVLong(peerRecoveryStatus.time);
out.writeVLong(peerRecoveryStatus.indexSize);
out.writeVLong(peerRecoveryStatus.reusedIndexSize);
out.writeVLong(peerRecoveryStatus.recoveredIndexSize);
out.writeVLong(peerRecoveryStatus.recoveredTranslogOperations);
}
if (gatewayRecoveryStatus == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeByte(gatewayRecoveryStatus.stage.value());
out.writeVLong(gatewayRecoveryStatus.startTime);
out.writeVLong(gatewayRecoveryStatus.time);
out.writeVLong(gatewayRecoveryStatus.indexSize);
out.writeVLong(gatewayRecoveryStatus.reusedIndexSize);
out.writeVLong(gatewayRecoveryStatus.recoveredIndexSize);
out.writeVLong(gatewayRecoveryStatus.recoveredTranslogOperations);
}
if (gatewaySnapshotStatus == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeByte(gatewaySnapshotStatus.stage.value());
out.writeVLong(gatewaySnapshotStatus.startTime);
out.writeVLong(gatewaySnapshotStatus.time);
out.writeVLong(gatewaySnapshotStatus.indexSize);
out.writeVInt(gatewaySnapshotStatus.getExpectedNumberOfOperations());
}
if (mergeStats == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
mergeStats.writeTo(out);
}
if (refreshStats == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
refreshStats.writeTo(out);
}
if (flushStats == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
flushStats.writeTo(out);
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardRouting = readShardRoutingEntry(in);
state = IndexShardState.fromId(in.readByte());
if (in.readBoolean()) {
storeSize = readBytesSizeValue(in);
}
translogId = in.readLong();
translogOperations = in.readLong();
if (in.readBoolean()) {
docs = new DocsStatus();
docs.numDocs = in.readLong();
docs.maxDoc = in.readLong();
docs.deletedDocs = in.readLong();
}
if (in.readBoolean()) {
peerRecoveryStatus = new PeerRecoveryStatus(PeerRecoveryStatus.Stage.fromValue(in.readByte()),
in.readVLong(), in.readVLong(), in.readVLong(), in.readVLong(), in.readVLong(), in.readVLong());
}
if (in.readBoolean()) {
gatewayRecoveryStatus = new GatewayRecoveryStatus(GatewayRecoveryStatus.Stage.fromValue(in.readByte()),
in.readVLong(), in.readVLong(), in.readVLong(), in.readVLong(), in.readVLong(), in.readVLong());
}
if (in.readBoolean()) {
gatewaySnapshotStatus = new GatewaySnapshotStatus(GatewaySnapshotStatus.Stage.fromValue(in.readByte()),
in.readVLong(), in.readVLong(), in.readVLong(), in.readVInt());
}
if (in.readBoolean()) {
mergeStats = MergeStats.readMergeStats(in);
}
if (in.readBoolean()) {
refreshStats = RefreshStats.readRefreshStats(in);
}
if (in.readBoolean()) {
flushStats = FlushStats.readFlushStats(in);
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_status_ShardStatus.java
|
161 |
return executeRead(new Callable<EntryList>() {
@Override
public EntryList call() throws Exception {
return cacheEnabled ? edgeStore.getSlice(query, storeTx)
        : edgeStore.getSliceNoCache(query, storeTx);
}
@Override
public String toString() {
return "EdgeStoreQuery";
}
});
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_diskstorage_BackendTransaction.java
|
581 |
getValuesMinor(toKey, isInclusive, new IndexValuesResultListener() {
@Override
public boolean addResult(OIdentifiable value) {
result.add(value);
return true;
}
});
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_index_OIndexAbstract.java
|
1,518 |
public class RandomAllocationDeciderTests extends ElasticsearchAllocationTestCase {
/* This test makes random allocation decisions on a growing and shrinking
 * cluster, leading to a random distribution of the shards. After a certain
 * number of iterations the test allows every allocation (unless the same shard is
 * already allocated on a node) and balances the cluster to reach an optimal
 * balance. */
@Test
public void testRandomDecisions() {
RandomAllocationDecider randomAllocationDecider = new RandomAllocationDecider(getRandom());
AllocationService strategy = new AllocationService(settingsBuilder().build(), new AllocationDeciders(ImmutableSettings.EMPTY,
new HashSet<AllocationDecider>(Arrays.asList(new SameShardAllocationDecider(ImmutableSettings.EMPTY),
randomAllocationDecider))), new ShardsAllocators(), ClusterInfoService.EMPTY);
int indices = between(1, 20);
Builder metaBuilder = MetaData.builder();
int maxNumReplicas = 1;
int totalNumShards = 0;
for (int i = 0; i < indices; i++) {
int replicas = between(0, 6);
maxNumReplicas = Math.max(maxNumReplicas, replicas + 1);
int numShards = between(1, 20);
totalNumShards += numShards * (replicas + 1);
metaBuilder.put(IndexMetaData.builder("INDEX_" + i).numberOfShards(numShards).numberOfReplicas(replicas));
}
MetaData metaData = metaBuilder.build();
RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
for (int i = 0; i < indices; i++) {
routingTableBuilder.addAsNew(metaData.index("INDEX_" + i));
}
RoutingTable routingTable = routingTableBuilder.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
int numIters = atLeast(20);
int nodeIdCounter = 0;
int atMostNodes = between(Math.max(1, maxNumReplicas), numIters);
final boolean frequentNodes = randomBoolean();
for (int i = 0; i < numIters; i++) {
ClusterState.Builder stateBuilder = ClusterState.builder(clusterState);
DiscoveryNodes.Builder newNodesBuilder = DiscoveryNodes.builder(clusterState.nodes());
if (clusterState.nodes().size() <= atMostNodes &&
(nodeIdCounter == 0 || (frequentNodes ? frequently() : rarely()))) {
int numNodes = atLeast(1);
for (int j = 0; j < numNodes; j++) {
logger.info("adding node [{}]", nodeIdCounter);
newNodesBuilder.put(newNode("NODE_" + (nodeIdCounter++)));
}
}
if (nodeIdCounter > 1 && rarely()) {
int nodeId = between(0, nodeIdCounter - 2);
logger.info("removing node [{}]", nodeId);
newNodesBuilder.remove("NODE_" + nodeId);
}
stateBuilder.nodes(newNodesBuilder.build());
clusterState = stateBuilder.build();
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
if (clusterState.routingNodes().shardsWithState(INITIALIZING).size() > 0) {
routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING))
.routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
}
}
logger.info("Fill up nodes such that every shard can be allocated");
if (clusterState.nodes().size() < maxNumReplicas) {
ClusterState.Builder stateBuilder = ClusterState.builder(clusterState);
DiscoveryNodes.Builder newNodesBuilder = DiscoveryNodes.builder(clusterState.nodes());
for (int j = 0; j < (maxNumReplicas - clusterState.nodes().size()); j++) {
logger.info("adding node [{}]", nodeIdCounter);
newNodesBuilder.put(newNode("NODE_" + (nodeIdCounter++)));
}
stateBuilder.nodes(newNodesBuilder.build());
clusterState = stateBuilder.build();
}
randomAllocationDecider.alwaysSayYes = true;
logger.info("now say YES to everything");
int iterations = 0;
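// Keep rerouting and starting INITIALIZING shards until nothing is left INITIALIZING or UNASSIGNED (capped at 200 iterations, checked by the assertion below).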
do {
iterations++;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
if (clusterState.routingNodes().shardsWithState(INITIALIZING).size() > 0) {
routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING))
.routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
}
} while ((clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size() != 0 ||
clusterState.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size() != 0) && iterations < 200);
logger.info("Done Balancing after [{}] iterations", iterations);
// we stop after 200 iterations; if it didn't stabilize by then, something is likely to be wrong
assertThat("max num iteration exceeded", iterations, Matchers.lessThan(200));
assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(0));
assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(0));
int shards = clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size();
assertThat(shards, equalTo(totalNumShards));
final int numNodes = clusterState.nodes().size();
final int upperBound = (int) Math.round(((shards / numNodes) * 1.10));
final int lowerBound = (int) Math.round(((shards / numNodes) * 0.90));
for (int i = 0; i < nodeIdCounter; i++) {
if (clusterState.getRoutingNodes().node("NODE_" + i) == null) {
continue;
}
assertThat(clusterState.getRoutingNodes().node("NODE_" + i).size(), Matchers.anyOf(
Matchers.anyOf(equalTo((shards / numNodes) + 1), equalTo((shards / numNodes) - 1), equalTo((shards / numNodes))),
Matchers.allOf(Matchers.greaterThanOrEqualTo(lowerBound), Matchers.lessThanOrEqualTo(upperBound))));
}
}
private static final class RandomAllocationDecider extends AllocationDecider {
private final Random random;
public RandomAllocationDecider(Random random) {
super(ImmutableSettings.EMPTY);
this.random = random;
}
public boolean alwaysSayYes = false;
@Override
public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
return getRandomDecision();
}
private Decision getRandomDecision() {
if (alwaysSayYes) {
return Decision.YES;
}
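// Distribution of random decisions: roughly 50% NO, 10% THROTTLE, 30% YES and 10% ALWAYS (the default case).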
switch (random.nextInt(10)) {
case 9:
case 8:
case 7:
case 6:
case 5:
return Decision.NO;
case 4:
return Decision.THROTTLE;
case 3:
case 2:
case 1:
return Decision.YES;
default:
return Decision.ALWAYS;
}
}
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
return getRandomDecision();
}
@Override
public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
return getRandomDecision();
}
}
}
| 0true
|
src_test_java_org_elasticsearch_cluster_routing_allocation_RandomAllocationDeciderTests.java
|
1,378 |
private static class CassandraMapIterator implements Iterator<Entry> {
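// Adapts Cassandra's (ByteBuffer, Column) map entries to Titan StaticArrayEntry instances; removal is not supported.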
private final Iterator<Map.Entry<ByteBuffer, Column>> iterator;
public CassandraMapIterator(final Iterator<Map.Entry<ByteBuffer, Column>> iterator) {
this.iterator = iterator;
}
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public Entry next() {
final Map.Entry<ByteBuffer, Column> entry = iterator.next();
ByteBuffer col = entry.getKey();
ByteBuffer val = entry.getValue().value();
return StaticArrayEntry.of(StaticArrayBuffer.of(col), StaticArrayBuffer.of(val));
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_formats_cassandra_TitanCassandraHadoopGraph.java
|
221 |
public class ClientTestApp implements EntryListener, ItemListener, MessageListener {
private static final int ONE_KB = 1024;
private static final int ONE_THOUSAND = 1000;
private static final int ONE_HUNDRED = 100;
private static final int ONE_HOUR = 3600;
private IQueue<Object> queue;
private ITopic<Object> topic;
private IMap<Object, Object> map;
private MultiMap<Object, Object> multiMap;
private ISet<Object> set;
private IList<Object> list;
private IAtomicLong atomicNumber;
private String namespace = "default";
private boolean silent;
private boolean echo;
private volatile HazelcastInstance hazelcast;
private volatile LineReader lineReader;
private volatile boolean running;
public ClientTestApp(HazelcastInstance hazelcast) {
this.hazelcast = hazelcast;
}
public IQueue<Object> getQueue() {
queue = hazelcast.getQueue(namespace);
return queue;
}
public ITopic<Object> getTopic() {
topic = hazelcast.getTopic(namespace);
return topic;
}
public IMap<Object, Object> getMap() {
map = hazelcast.getMap(namespace);
return map;
}
public MultiMap<Object, Object> getMultiMap() {
multiMap = hazelcast.getMultiMap(namespace);
return multiMap;
}
public IAtomicLong getAtomicNumber() {
atomicNumber = hazelcast.getAtomicLong(namespace);
return atomicNumber;
}
public ISet<Object> getSet() {
set = hazelcast.getSet(namespace);
return set;
}
public IList<Object> getList() {
list = hazelcast.getList(namespace);
return list;
}
public void setHazelcast(HazelcastInstance hazelcast) {
this.hazelcast = hazelcast;
map = null;
list = null;
set = null;
queue = null;
topic = null;
}
public void stop() {
running = false;
}
public void start(String[] args) throws Exception {
getMap().size();
getList().size();
getSet().size();
getQueue().size();
getMultiMap().size();
if (lineReader == null) {
lineReader = new DefaultLineReader();
}
running = true;
while (running) {
print("hazelcast[" + namespace + "] > ");
try {
final String command = lineReader.readLine();
handleCommand(command);
} catch (Throwable e) {
e.printStackTrace();
}
}
}
/**
* A line reader
*/
static class DefaultLineReader implements LineReader {
@edu.umd.cs.findbugs.annotations.SuppressWarnings("DM_DEFAULT_ENCODING")
BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
public String readLine() throws Exception {
return in.readLine();
}
}
//CHECKSTYLE:OFF
/**
* Handle a command
*
* @param commandInputted
*/
protected void handleCommand(String commandInputted) {
String command = commandInputted;
if (command == null) {
return;
}
if (command.contains("__")) {
namespace = command.split("__")[0];
command = command.substring(command.indexOf("__") + 2);
}
if (echo) {
handleEcho(command);
}
if (command == null || command.startsWith("//")) {
return;
}
command = command.trim();
if (command == null || command.length() == 0) {
return;
}
String first = command;
int spaceIndex = command.indexOf(' ');
String[] argsSplit = command.split(" ");
String[] args = new String[argsSplit.length];
for (int i = 0; i < argsSplit.length; i++) {
args[i] = argsSplit[i].trim();
}
if (spaceIndex != -1) {
first = args[0];
}
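// Special prefixes handled below: #<n> repeats a command, &<n> forks threads, @<file> runs a script file, ';' separates multiple commands.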
if (command.startsWith("help")) {
handleHelp(command);
} else if (first.startsWith("#") && first.length() > 1) {
int repeat = Integer.parseInt(first.substring(1));
long t0 = Clock.currentTimeMillis();
for (int i = 0; i < repeat; i++) {
handleCommand(command.substring(first.length()).replaceAll("\\$i", "" + i));
}
println("ops/s = " + repeat * ONE_THOUSAND / (Clock.currentTimeMillis() - t0));
} else if (first.startsWith("&") && first.length() > 1) {
final int fork = Integer.parseInt(first.substring(1));
ExecutorService pool = Executors.newFixedThreadPool(fork);
final String threadCommand = command.substring(first.length());
for (int i = 0; i < fork; i++) {
final int threadID = i;
pool.submit(new Runnable() {
public void run() {
String command = threadCommand;
String[] threadArgs = command.replaceAll("\\$t", "" + threadID).trim()
.split(" ");
// TODO &t #4 m.putmany x k
if ("m.putmany".equals(threadArgs[0])
|| "m.removemany".equals(threadArgs[0])) {
if (threadArgs.length < 4) {
command += " " + Integer.parseInt(threadArgs[1]) * threadID;
}
}
handleCommand(command);
}
});
}
pool.shutdown();
try {
// wait 1h
pool.awaitTermination(ONE_HOUR, TimeUnit.SECONDS);
} catch (Exception e) {
e.printStackTrace();
}
} else if (first.startsWith("@")) {
handleAt(first);
} else if (command.indexOf(';') != -1) {
handleColon(command);
} else if ("silent".equals(first)) {
silent = Boolean.parseBoolean(args[1]);
} else if ("shutdown".equals(first)) {
hazelcast.getLifecycleService().shutdown();
} else if ("echo".equals(first)) {
echo = Boolean.parseBoolean(args[1]);
println("echo: " + echo);
} else if ("ns".equals(first)) {
handleNamespace(args);
} else if ("whoami".equals(first)) {
handleWhoami();
} else if ("who".equals(first)) {
handleWho();
} else if ("jvm".equals(first)) {
handleJvm();
} else if (first.contains("ock") && !first.contains(".")) {
handleLock(args);
} else if (first.contains(".size")) {
handleSize(args);
} else if (first.contains(".clear")) {
handleClear(args);
} else if (first.contains(".destroy")) {
handleDestroy(args);
} else if (first.contains(".iterator")) {
handleIterator(args);
} else if (first.contains(".contains")) {
handleContains(args);
} else if (first.contains(".stats")) {
handStats(args);
} else if ("t.publish".equals(first)) {
handleTopicPublish(args);
} else if ("q.offer".equals(first)) {
handleQOffer(args);
} else if ("q.take".equals(first)) {
handleQTake(args);
} else if ("q.poll".equals(first)) {
handleQPoll(args);
} else if ("q.peek".equals(first)) {
handleQPeek(args);
} else if ("q.capacity".equals(first)) {
handleQCapacity(args);
} else if ("q.offermany".equals(first)) {
handleQOfferMany(args);
} else if ("q.pollmany".equals(first)) {
handleQPollMany(args);
} else if ("s.add".equals(first)) {
handleSetAdd(args);
} else if ("s.remove".equals(first)) {
handleSetRemove(args);
} else if ("s.addmany".equals(first)) {
handleSetAddMany(args);
} else if ("s.removemany".equals(first)) {
handleSetRemoveMany(args);
} else if (first.equals("m.replace")) {
handleMapReplace(args);
} else if (first.equalsIgnoreCase("m.putIfAbsent")) {
handleMapPutIfAbsent(args);
} else if (first.equals("m.putAsync")) {
handleMapPutAsync(args);
} else if (first.equals("m.getAsync")) {
handleMapGetAsync(args);
} else if (first.equals("m.put")) {
handleMapPut(args);
} else if (first.equals("m.get")) {
handleMapGet(args);
} else if (first.equalsIgnoreCase("m.getMapEntry")) {
handleMapGetMapEntry(args);
} else if (first.equals("m.remove")) {
handleMapRemove(args);
} else if (first.equals("m.evict")) {
handleMapEvict(args);
} else if (first.equals("m.putmany") || first.equalsIgnoreCase("m.putAll")) {
handleMapPutMany(args);
} else if (first.equals("m.getmany")) {
handleMapGetMany(args);
} else if (first.equals("m.removemany")) {
handleMapRemoveMany(args);
} else if (command.equalsIgnoreCase("m.localKeys")) {
handleMapLocalKeys();
} else if (command.equalsIgnoreCase("m.localSize")) {
handleMapLocalSize();
} else if (command.equals("m.keys")) {
handleMapKeys();
} else if (command.equals("m.values")) {
handleMapValues();
} else if (command.equals("m.entries")) {
handleMapEntries();
} else if (first.equals("m.lock")) {
handleMapLock(args);
} else if (first.equalsIgnoreCase("m.tryLock")) {
handleMapTryLock(args);
} else if (first.equals("m.unlock")) {
handleMapUnlock(args);
} else if (first.contains(".addListener")) {
handleAddListener(args);
} else if (first.equals("m.removeMapListener")) {
handleRemoveListener(args);
} else if (first.equals("m.unlock")) {
handleMapUnlock(args);
} else if (first.equals("mm.put")) {
handleMultiMapPut(args);
} else if (first.equals("mm.get")) {
handleMultiMapGet(args);
} else if (first.equals("mm.remove")) {
handleMultiMapRemove(args);
} else if (command.equals("mm.keys")) {
handleMultiMapKeys();
} else if (command.equals("mm.values")) {
handleMultiMapValues();
} else if (command.equals("mm.entries")) {
handleMultiMapEntries();
} else if (first.equals("mm.lock")) {
handleMultiMapLock(args);
} else if (first.equalsIgnoreCase("mm.tryLock")) {
handleMultiMapTryLock(args);
} else if (first.equals("mm.unlock")) {
handleMultiMapUnlock(args);
} else if (first.equals("l.add")) {
handleListAdd(args);
} else if (first.equals("l.set")) {
handleListSet(args);
} else if ("l.addmany".equals(first)) {
handleListAddMany(args);
} else if (first.equals("l.remove")) {
handleListRemove(args);
} else if (first.equals("l.contains")) {
handleListContains(args);
} else if ("a.get".equals(first)) {
handleAtomicNumberGet(args);
} else if ("a.set".equals(first)) {
handleAtomicNumberSet(args);
} else if ("a.inc".equals(first)) {
handleAtomicNumberInc(args);
} else if ("a.dec".equals(first)) {
handleAtomicNumberDec(args);
} else if (first.equals("execute")) {
execute(args);
} else if (first.equals("partitions")) {
handlePartitions(args);
// } else if (first.equals("txn")) {
// hazelcast.getTransaction().begin();
// } else if (first.equals("commit")) {
// hazelcast.getTransaction().commit();
// } else if (first.equals("rollback")) {
// hazelcast.getTransaction().rollback();
} else if (first.equalsIgnoreCase("executeOnKey")) {
executeOnKey(args);
} else if (first.equalsIgnoreCase("executeOnMember")) {
executeOnMember(args);
} else if (first.equalsIgnoreCase("executeOnMembers")) {
executeOnMembers(args);
// } else if (first.equalsIgnoreCase("longOther") || first.equalsIgnoreCase("executeLongOther")) {
// executeLongTaskOnOtherMember(args);
//} else if (first.equalsIgnoreCase("long") || first.equalsIgnoreCase("executeLong")) {
// executeLong(args);
} else if (first.equalsIgnoreCase("instances")) {
handleInstances(args);
} else if (first.equalsIgnoreCase("quit") || first.equalsIgnoreCase("exit")) {
System.exit(0);
} else if (first.startsWith("e") && first.endsWith(".simulateLoad")) {
handleExecutorSimulate(args);
} else {
println("type 'help' for help");
}
}
private void handleExecutorSimulate(String[] args) {
String first = args[0];
int threadCount = Integer.parseInt(first.substring(1, first.indexOf(".")));
if (threadCount < 1 || threadCount > 16) {
throw new RuntimeException("threadcount can't be smaller than 1 or larger than 16");
}
int taskCount = Integer.parseInt(args[1]);
int durationSec = Integer.parseInt(args[2]);
long startMs = System.currentTimeMillis();
IExecutorService executor = hazelcast.getExecutorService("e" + threadCount);
List<Future> futures = new LinkedList<Future>();
List<Member> members = new LinkedList<Member>(hazelcast.getCluster().getMembers());
int totalThreadCount = hazelcast.getCluster().getMembers().size() * threadCount;
int latchId = 0;
for (int k = 0; k < taskCount; k++) {
Member member = members.get(k % members.size());
if (taskCount % totalThreadCount == 0) {
latchId = taskCount / totalThreadCount;
hazelcast.getCountDownLatch("latch" + latchId).trySetCount(totalThreadCount);
}
Future f = executor.submitToMember(new SimulateLoadTask(durationSec, k + 1, "latch" + latchId), member);
futures.add(f);
}
for (Future f : futures) {
try {
f.get();
} catch (InterruptedException e) {
e.printStackTrace();
} catch (ExecutionException e) {
e.printStackTrace();
}
}
long durationMs = System.currentTimeMillis() - startMs;
println(format("Executed %s tasks in %s ms", taskCount, durationMs));
}
/**
* A simulated load test
*/
private static final class SimulateLoadTask implements Callable, Serializable, HazelcastInstanceAware {
private static final long serialVersionUID = 1;
private final int delay;
private final int taskId;
private final String latchId;
private transient HazelcastInstance hz;
private SimulateLoadTask(int delay, int taskId, String latchId) {
this.delay = delay;
this.taskId = taskId;
this.latchId = latchId;
}
@Override
public void setHazelcastInstance(HazelcastInstance hazelcastInstance) {
this.hz = hazelcastInstance;
}
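// Sleeps for 'delay' seconds, then counts down the latch named by latchId so the submitter can track completion.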
@Override
public Object call() throws Exception {
try {
Thread.sleep(delay * ONE_THOUSAND);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
hz.getCountDownLatch(latchId).countDown();
System.out.println("Finished task:" + taskId);
return null;
}
}
private void handleColon(String command) {
StringTokenizer st = new StringTokenizer(command, ";");
while (st.hasMoreTokens()) {
handleCommand(st.nextToken());
}
}
@edu.umd.cs.findbugs.annotations.SuppressWarnings("DM_DEFAULT_ENCODING")
private void handleAt(String first) {
if (first.length() == 1) {
println("usage: @<file-name>");
return;
}
File f = new File(first.substring(1));
println("Executing script file " + f.getAbsolutePath());
if (f.exists()) {
try {
BufferedReader br = new BufferedReader(new FileReader(f));
String l = br.readLine();
while (l != null) {
handleCommand(l);
l = br.readLine();
}
br.close();
} catch (IOException e) {
e.printStackTrace();
}
} else {
println("File not found! " + f.getAbsolutePath());
}
}
private void handleEcho(String command) {
if (!Thread.currentThread().getName().toLowerCase().contains("main")) {
println(" [" + Thread.currentThread().getName() + "] " + command);
} else {
println(command);
}
}
private void handleNamespace(String[] args) {
if (args.length > 1) {
namespace = args[1];
println("namespace: " + namespace);
// init();
}
}
@edu.umd.cs.findbugs.annotations.SuppressWarnings("DM_GC")
private void handleJvm() {
System.gc();
println("Memory max: " + Runtime.getRuntime().maxMemory() / ONE_KB / ONE_KB
+ "M");
println("Memory free: "
+ Runtime.getRuntime().freeMemory()
/ ONE_KB
/ ONE_KB
+ "M "
+ (int) (Runtime.getRuntime().freeMemory() * 100 / Runtime.getRuntime()
.maxMemory()) + "%");
long total = Runtime.getRuntime().totalMemory();
long free = Runtime.getRuntime().freeMemory();
println("Used Memory:" + ((total - free) / ONE_KB / ONE_KB) + "MB");
println("# procs: " + Runtime.getRuntime().availableProcessors());
println("OS info: " + ManagementFactory.getOperatingSystemMXBean().getArch()
+ " " + ManagementFactory.getOperatingSystemMXBean().getName() + " "
+ ManagementFactory.getOperatingSystemMXBean().getVersion());
println("JVM: " + ManagementFactory.getRuntimeMXBean().getVmVendor() + " "
+ ManagementFactory.getRuntimeMXBean().getVmName() + " "
+ ManagementFactory.getRuntimeMXBean().getVmVersion());
}
private void handleWhoami() {
println(hazelcast.getCluster().getLocalMember());
}
private void handleWho() {
StringBuilder sb = new StringBuilder("\n\nMembers [");
final Collection<Member> members = hazelcast.getCluster().getMembers();
sb.append(members != null ? members.size() : 0);
sb.append("] {");
if (members != null) {
for (Member member : members) {
sb.append("\n\t").append(member);
}
}
sb.append("\n}\n");
println(sb.toString());
}
private void handleAtomicNumberGet(String[] args) {
println(getAtomicNumber().get());
}
private void handleAtomicNumberSet(String[] args) {
long v = 0;
if (args.length > 1) {
v = Long.valueOf(args[1]);
}
getAtomicNumber().set(v);
println(getAtomicNumber().get());
}
private void handleAtomicNumberInc(String[] args) {
println(getAtomicNumber().incrementAndGet());
}
private void handleAtomicNumberDec(String[] args) {
println(getAtomicNumber().decrementAndGet());
}
protected void handlePartitions(String[] args) {
Set<Partition> partitions = hazelcast.getPartitionService().getPartitions();
Map<Member, Integer> partitionCounts = new HashMap<Member, Integer>();
for (Partition partition : partitions) {
Member owner = partition.getOwner();
if (owner != null) {
Integer count = partitionCounts.get(owner);
int newCount = 1;
if (count != null) {
newCount = count + 1;
}
partitionCounts.put(owner, newCount);
}
println(partition);
}
Set<Entry<Member, Integer>> entries = partitionCounts.entrySet();
for (Entry<Member, Integer> entry : entries) {
println(entry.getKey() + ":" + entry.getValue());
}
}
protected void handleInstances(String[] args) {
Collection<DistributedObject> distributedObjects = hazelcast.getDistributedObjects();
for (DistributedObject distributedObject : distributedObjects) {
println(distributedObject);
}
}
// ==================== list ===================================
protected void handleListContains(String[] args) {
println(getList().contains(args[1]));
}
protected void handleListRemove(String[] args) {
int index = -1;
try {
index = Integer.parseInt(args[1]);
} catch (NumberFormatException e) {
throw new RuntimeException(e);
}
if (index >= 0) {
println(getList().remove(index));
} else {
println(getList().remove(args[1]));
}
}
protected void handleListAdd(String[] args) {
if (args.length == 3) {
final int index = Integer.parseInt(args[1]);
getList().add(index, args[2]);
println("true");
} else {
println(getList().add(args[1]));
}
}
protected void handleListSet(String[] args) {
final int index = Integer.parseInt(args[1]);
println(getList().set(index, args[2]));
}
protected void handleListAddMany(String[] args) {
int count = 1;
if (args.length > 1) {
count = Integer.parseInt(args[1]);
}
int successCount = 0;
long t0 = Clock.currentTimeMillis();
for (int i = 0; i < count; i++) {
boolean success = getList().add("obj" + i);
if (success) {
successCount++;
}
}
long t1 = Clock.currentTimeMillis();
println("Added " + successCount + " objects.");
println("size = " + list.size() + ", " + successCount * ONE_THOUSAND / (t1 - t0)
+ " evt/s");
}
// ==================== map ===================================
protected void handleMapPut(String[] args) {
println(getMap().put(args[1], args[2]));
}
protected void handleMapPutAsync(String[] args) {
try {
println(getMap().putAsync(args[1], args[2]).get());
} catch (InterruptedException e) {
e.printStackTrace();
} catch (ExecutionException e) {
e.printStackTrace();
}
}
protected void handleMapPutIfAbsent(String[] args) {
println(getMap().putIfAbsent(args[1], args[2]));
}
protected void handleMapReplace(String[] args) {
println(getMap().replace(args[1], args[2]));
}
protected void handleMapGet(String[] args) {
println(getMap().get(args[1]));
}
protected void handleMapGetAsync(String[] args) {
try {
println(getMap().getAsync(args[1]).get());
} catch (InterruptedException e) {
e.printStackTrace();
} catch (ExecutionException e) {
e.printStackTrace();
}
}
protected void handleMapGetMapEntry(String[] args) {
println(getMap().getEntryView(args[1]));
}
protected void handleMapRemove(String[] args) {
println(getMap().remove(args[1]));
}
protected void handleMapEvict(String[] args) {
println(getMap().evict(args[1]));
}
protected void handleMapPutMany(String[] args) {
int count = 1;
if (args.length > 1) {
count = Integer.parseInt(args[1]);
}
int b = ONE_HUNDRED;
byte[] value = new byte[b];
if (args.length > 2) {
b = Integer.parseInt(args[2]);
value = new byte[b];
}
int start = getMap().size();
if (args.length > 3) {
start = Integer.parseInt(args[3]);
}
Map theMap = new HashMap(count);
for (int i = 0; i < count; i++) {
theMap.put("key" + (start + i), value);
}
long t0 = Clock.currentTimeMillis();
getMap().putAll(theMap);
long t1 = Clock.currentTimeMillis();
if (t1 - t0 > 1) {
println("size = " + getMap().size() + ", " + count * ONE_THOUSAND / (t1 - t0)
+ " evt/s, " + (count * ONE_THOUSAND / (t1 - t0)) * (b * 8) / ONE_KB + " Kbit/s, "
+ count * b / ONE_KB + " KB added");
}
}
protected void handleMapGetMany(String[] args) {
int count = 1;
if (args.length > 1) {
count = Integer.parseInt(args[1]);
}
for (int i = 0; i < count; i++) {
println(getMap().get("key" + i));
}
}
protected void handleMapRemoveMany(String[] args) {
int count = 1;
if (args.length > 1) {
count = Integer.parseInt(args[1]);
}
int start = 0;
if (args.length > 2) {
start = Integer.parseInt(args[2]);
}
long t0 = Clock.currentTimeMillis();
for (int i = 0; i < count; i++) {
getMap().remove("key" + (start + i));
}
long t1 = Clock.currentTimeMillis();
println("size = " + getMap().size() + ", " + count * ONE_THOUSAND / (t1 - t0) + " evt/s");
}
protected void handleMapLock(String[] args) {
getMap().lock(args[1]);
println("true");
}
protected void handleMapTryLock(String[] args) {
String key = args[1];
long time = (args.length > 2) ? Long.valueOf(args[2]) : 0;
boolean locked;
if (time == 0) {
locked = getMap().tryLock(key);
} else {
try {
locked = getMap().tryLock(key, time, TimeUnit.SECONDS);
} catch (InterruptedException e) {
locked = false;
}
}
println(locked);
}
protected void handleMapUnlock(String[] args) {
getMap().unlock(args[1]);
println("true");
}
protected void handleMapLocalKeys() {
Set set = getMap().localKeySet();
Iterator it = set.iterator();
int count = 0;
while (it.hasNext()) {
count++;
println(it.next());
}
println("Total " + count);
}
protected void handleMapLocalSize() {
println("Local Size = " + getMap().localKeySet().size());
}
protected void handleMapKeys() {
Set set = getMap().keySet();
Iterator it = set.iterator();
int count = 0;
while (it.hasNext()) {
count++;
println(it.next());
}
println("Total " + count);
}
protected void handleMapEntries() {
Set set = getMap().entrySet();
Iterator it = set.iterator();
int count = 0;
while (it.hasNext()) {
count++;
Entry entry = (Entry) it.next();
println(entry.getKey() + " : " + entry.getValue());
}
println("Total " + count);
}
protected void handleMapValues() {
Collection set = getMap().values();
Iterator it = set.iterator();
int count = 0;
while (it.hasNext()) {
count++;
println(it.next());
}
println("Total " + count);
}
// ==================== multimap ===================================
protected void handleMultiMapPut(String[] args) {
println(getMultiMap().put(args[1], args[2]));
}
protected void handleMultiMapGet(String[] args) {
println(getMultiMap().get(args[1]));
}
protected void handleMultiMapRemove(String[] args) {
println(getMultiMap().remove(args[1]));
}
protected void handleMultiMapKeys() {
Set set = getMultiMap().keySet();
Iterator it = set.iterator();
int count = 0;
while (it.hasNext()) {
count++;
println(it.next());
}
println("Total " + count);
}
protected void handleMultiMapEntries() {
Set set = getMultiMap().entrySet();
Iterator it = set.iterator();
int count = 0;
while (it.hasNext()) {
count++;
Entry entry = (Entry) it.next();
println(entry.getKey() + " : " + entry.getValue());
}
println("Total " + count);
}
protected void handleMultiMapValues() {
Collection set = getMultiMap().values();
Iterator it = set.iterator();
int count = 0;
while (it.hasNext()) {
count++;
println(it.next());
}
println("Total " + count);
}
protected void handleMultiMapLock(String[] args) {
getMultiMap().lock(args[1]);
println("true");
}
protected void handleMultiMapTryLock(String[] args) {
String key = args[1];
long time = (args.length > 2) ? Long.valueOf(args[2]) : 0;
boolean locked;
if (time == 0) {
locked = getMultiMap().tryLock(key);
} else {
try {
locked = getMultiMap().tryLock(key, time, TimeUnit.SECONDS);
} catch (InterruptedException e) {
locked = false;
}
}
println(locked);
}
protected void handleMultiMapUnlock(String[] args) {
getMultiMap().unlock(args[1]);
println("true");
}
// =======================================================
private void handStats(String[] args) {
String iteratorStr = args[0];
if (iteratorStr.startsWith("m.")) {
println(getMap().getLocalMapStats());
} else if (iteratorStr.startsWith("mm.")) {
println(getMultiMap().getLocalMultiMapStats());
} else if (iteratorStr.startsWith("q.")) {
println(getQueue().getLocalQueueStats());
}
}
@SuppressWarnings("LockAcquiredButNotSafelyReleased")
protected void handleLock(String[] args) {
String lockStr = args[0];
String key = args[1];
Lock lock = hazelcast.getLock(key);
if (lockStr.equalsIgnoreCase("lock")) {
lock.lock();
println("true");
} else if (lockStr.equalsIgnoreCase("unlock")) {
lock.unlock();
println("true");
} else if (lockStr.equalsIgnoreCase("trylock")) {
String timeout = args.length > 2 ? args[2] : null;
if (timeout == null) {
println(lock.tryLock());
} else {
long time = Long.valueOf(timeout);
try {
println(lock.tryLock(time, TimeUnit.SECONDS));
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
}
protected void handleAddListener(String[] args) {
String first = args[0];
if (first.startsWith("s.")) {
getSet().addItemListener(this, true);
} else if (first.startsWith("m.")) {
if (args.length > 1) {
getMap().addEntryListener(this, args[1], true);
} else {
getMap().addEntryListener(this, true);
}
} else if (first.startsWith("mm.")) {
if (args.length > 1) {
getMultiMap().addEntryListener(this, args[1], true);
} else {
getMultiMap().addEntryListener(this, true);
}
} else if (first.startsWith("q.")) {
getQueue().addItemListener(this, true);
} else if (first.startsWith("t.")) {
getTopic().addMessageListener(this);
} else if (first.startsWith("l.")) {
getList().addItemListener(this, true);
}
}
protected void handleRemoveListener(String[] args) {
// String first = args[0];
// if (first.startsWith("s.")) {
// getSet().removeItemListener(this);
// } else if (first.startsWith("m.")) {
// if (args.length > 1) {
// // todo revise here
// getMap().removeEntryListener(args[1]);
// } else {
// getMap().removeEntryListener(args[0]);
// }
// } else if (first.startsWith("q.")) {
// getQueue().removeItemListener(this);
// } else if (first.startsWith("t.")) {
// getTopic().removeMessageListener(this);
// } else if (first.startsWith("l.")) {
// getList().removeItemListener(this);
// }
}
protected void handleSetAdd(String[] args) {
println(getSet().add(args[1]));
}
protected void handleSetRemove(String[] args) {
println(getSet().remove(args[1]));
}
protected void handleSetAddMany(String[] args) {
int count = 1;
if (args.length > 1) {
count = Integer.parseInt(args[1]);
}
int successCount = 0;
long t0 = Clock.currentTimeMillis();
for (int i = 0; i < count; i++) {
boolean success = getSet().add("obj" + i);
if (success) {
successCount++;
}
}
long t1 = Clock.currentTimeMillis();
println("Added " + successCount + " objects.");
println("size = " + getSet().size() + ", " + successCount * ONE_THOUSAND / (t1 - t0)
+ " evt/s");
}
protected void handleSetRemoveMany(String[] args) {
int count = 1;
if (args.length > 1) {
count = Integer.parseInt(args[1]);
}
int successCount = 0;
long t0 = Clock.currentTimeMillis();
for (int i = 0; i < count; i++) {
boolean success = getSet().remove("obj" + i);
if (success) {
successCount++;
}
}
long t1 = Clock.currentTimeMillis();
println("Removed " + successCount + " objects.");
println("size = " + getSet().size() + ", " + successCount * ONE_THOUSAND / (t1 - t0)
+ " evt/s");
}
protected void handleIterator(String[] args) {
Iterator it = null;
String iteratorStr = args[0];
if (iteratorStr.startsWith("s.")) {
it = getSet().iterator();
} else if (iteratorStr.startsWith("m.")) {
it = getMap().keySet().iterator();
} else if (iteratorStr.startsWith("mm.")) {
it = getMultiMap().keySet().iterator();
} else if (iteratorStr.startsWith("q.")) {
it = getQueue().iterator();
} else if (iteratorStr.startsWith("l.")) {
it = getList().iterator();
}
if (it != null) {
boolean remove = false;
if (args.length > 1) {
String removeStr = args[1];
remove = removeStr.equals("remove");
}
int count = 1;
while (it.hasNext()) {
print(count++ + " " + it.next());
if (remove) {
it.remove();
print(" removed");
}
println("");
}
}
}
protected void handleContains(String[] args) {
String iteratorStr = args[0];
boolean key = false;
boolean value = false;
if (iteratorStr.toLowerCase().endsWith("key")) {
key = true;
} else if (iteratorStr.toLowerCase().endsWith("value")) {
value = true;
}
String data = args[1];
boolean result = false;
if (iteratorStr.startsWith("s.")) {
result = getSet().contains(data);
} else if (iteratorStr.startsWith("m.")) {
result = (key) ? getMap().containsKey(data) : getMap().containsValue(data);
} else if (iteratorStr.startsWith("mmm.")) {
result = (key) ? getMultiMap().containsKey(data) : getMultiMap().containsValue(data);
} else if (iteratorStr.startsWith("q.")) {
result = getQueue().contains(data);
} else if (iteratorStr.startsWith("l.")) {
result = getList().contains(data);
}
println("Contains : " + result);
}
protected void handleSize(String[] args) {
int size = 0;
String iteratorStr = args[0];
if (iteratorStr.startsWith("s.")) {
size = getSet().size();
} else if (iteratorStr.startsWith("m.")) {
size = getMap().size();
} else if (iteratorStr.startsWith("mm.")) {
size = getMultiMap().size();
} else if (iteratorStr.startsWith("q.")) {
size = getQueue().size();
} else if (iteratorStr.startsWith("l.")) {
size = getList().size();
}
println("Size = " + size);
}
protected void handleClear(String[] args) {
String iteratorStr = args[0];
if (iteratorStr.startsWith("s.")) {
getSet().clear();
} else if (iteratorStr.startsWith("m.")) {
getMap().clear();
} else if (iteratorStr.startsWith("mm.")) {
getMultiMap().clear();
} else if (iteratorStr.startsWith("q.")) {
getQueue().clear();
} else if (iteratorStr.startsWith("l.")) {
getList().clear();
}
println("Cleared all.");
}
protected void handleDestroy(String[] args) {
String iteratorStr = args[0];
if (iteratorStr.startsWith("s.")) {
getSet().destroy();
} else if (iteratorStr.startsWith("m.")) {
getMap().destroy();
} else if (iteratorStr.startsWith("mm.")) {
getMultiMap().destroy();
} else if (iteratorStr.startsWith("q.")) {
getQueue().destroy();
} else if (iteratorStr.startsWith("l.")) {
getList().destroy();
} else if (iteratorStr.startsWith("t.")) {
getTopic().destroy();
}
println("Destroyed!");
}
protected void handleQOffer(String[] args) {
long timeout = 0;
if (args.length > 2) {
timeout = Long.valueOf(args[2]);
}
try {
boolean offered = getQueue().offer(args[1], timeout, TimeUnit.SECONDS);
println(offered);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
protected void handleQTake(String[] args) {
try {
println(getQueue().take());
} catch (InterruptedException e) {
e.printStackTrace();
}
}
protected void handleQPoll(String[] args) {
long timeout = 0;
if (args.length > 1) {
timeout = Long.valueOf(args[1]);
}
try {
println(getQueue().poll(timeout, TimeUnit.SECONDS));
} catch (InterruptedException e) {
e.printStackTrace();
}
}
protected void handleTopicPublish(String[] args) {
getTopic().publish(args[1]);
}
protected void handleQOfferMany(String[] args) {
int count = 1;
if (args.length > 1) {
count = Integer.parseInt(args[1]);
}
Object value = null;
if (args.length > 2) {
value = new byte[Integer.parseInt(args[2])];
}
long t0 = Clock.currentTimeMillis();
for (int i = 0; i < count; i++) {
if (value == null) {
getQueue().offer("obj");
} else {
getQueue().offer(value);
}
}
long t1 = Clock.currentTimeMillis();
print("size = " + getQueue().size() + ", " + count * ONE_THOUSAND / (t1 - t0) + " evt/s");
if (value == null) {
println("");
} else {
int b = Integer.parseInt(args[2]);
println(", " + (count * ONE_THOUSAND / (t1 - t0)) * (b * 8) / ONE_KB + " Kbit/s, "
+ count * b / ONE_KB + " KB added");
}
}
protected void handleQPollMany(String[] args) {
int count = 1;
if (args.length > 1) {
count = Integer.parseInt(args[1]);
}
int c = 1;
for (int i = 0; i < count; i++) {
Object obj = getQueue().poll();
if (obj instanceof byte[]) {
println(c++ + " " + ((byte[]) obj).length);
} else {
println(c++ + " " + obj);
}
}
}
protected void handleQPeek(String[] args) {
println(getQueue().peek());
}
protected void handleQCapacity(String[] args) {
println(getQueue().remainingCapacity());
}
private void execute(String[] args) {
// execute <echo-string>
doExecute(false, false, args);
}
private void executeOnKey(String[] args) {
// executeOnKey <echo-string> <key>
doExecute(true, false, args);
}
private void executeOnMember(String[] args) {
// executeOnMember <echo-string> <memberIndex>
doExecute(false, true, args);
}
private void doExecute(boolean onKey, boolean onMember, String[] args) {
// executeOnKey <echo-string> <key>
try {
IExecutorService executorService = hazelcast.getExecutorService("default");
Echo callable = new Echo(args[1]);
Future<String> future;
if (onKey) {
String key = args[2];
future = executorService.submitToKeyOwner(callable, key);
} else if (onMember) {
int memberIndex = Integer.parseInt(args[2]);
List<Member> members = new LinkedList(hazelcast.getCluster().getMembers());
if (memberIndex >= members.size()) {
throw new IndexOutOfBoundsException("Member index: " + memberIndex + " must be smaller than " + members
.size());
}
Member member = members.get(memberIndex);
future = executorService.submitToMember(callable, member);
} else {
future = executorService.submit(callable);
}
println("Result: " + future.get());
} catch (InterruptedException e) {
e.printStackTrace();
} catch (ExecutionException e) {
e.printStackTrace();
}
}
private void executeOnMembers(String[] args) {
// executeOnMembers <echo-string>
try {
IExecutorService executorService = hazelcast.getExecutorService("default");
Echo task = new Echo(args[1]);
Map<Member, Future<String>> results = executorService.submitToAllMembers(task);
for (Future f : results.values()) {
println(f.get());
}
} catch (InterruptedException e) {
e.printStackTrace();
} catch (ExecutionException e) {
e.printStackTrace();
}
}
@Override
public void entryAdded(EntryEvent event) {
println(event);
}
@Override
public void entryRemoved(EntryEvent event) {
println(event);
}
@Override
public void entryUpdated(EntryEvent event) {
println(event);
}
@Override
public void entryEvicted(EntryEvent event) {
println(event);
}
@Override
public void itemAdded(ItemEvent itemEvent) {
println("Item added = " + itemEvent.getItem());
}
@Override
public void itemRemoved(ItemEvent itemEvent) {
println("Item removed = " + itemEvent.getItem());
}
@Override
public void onMessage(Message msg) {
println("Topic received = " + msg.getMessageObject());
}
/**
* Echoes to screen
*/
public static class Echo extends HazelcastInstanceAwareObject implements Callable<String>, DataSerializable {
String input;
public Echo() {
}
public Echo(String input) {
this.input = input;
}
@Override
public String call() {
getHazelcastInstance().getCountDownLatch("latch").countDown();
return getHazelcastInstance().getCluster().getLocalMember().toString() + ":" + input;
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeUTF(input);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
input = in.readUTF();
}
}
/**
* A Hazelcast instance aware object
*/
private static class HazelcastInstanceAwareObject implements HazelcastInstanceAware {
HazelcastInstance hazelcastInstance;
public HazelcastInstance getHazelcastInstance() {
return hazelcastInstance;
}
@Override
public void setHazelcastInstance(HazelcastInstance hazelcastInstance) {
this.hazelcastInstance = hazelcastInstance;
}
}
/**
* Handles the help command
*
* @param command
*/
protected void handleHelp(String command) {
boolean silentBefore = silent;
silent = false;
println("Commands:");
printGeneralCommands();
printQueueCommands();
printSetCommands();
printLockCommands();
printMapCommands();
printMultiMapCommands();
printListCommands();
printAtomicLongCommands();
printExecutorServiceCommands();
silent = silentBefore;
}
private void printGeneralCommands() {
println("-- General commands");
println("echo true|false //turns on/off echo of commands (default false)");
println("silent true|false //turns on/off silent of command output (default false)");
println("#<number> <command> //repeats <number> time <command>, replace $i in <command> with current "
+ "iteration (0..<number-1>)");
println("&<number> <command> //forks <number> threads to execute <command>, "
+ "replace $t in <command> with current thread number (0..<number-1>");
println(" When using #x or &x, is is advised to use silent true as well.");
println(" When using &x with m.putmany and m.removemany, each thread will get a different share of keys unless a "
+ "start key index is specified");
println("jvm //displays info about the runtime");
println("who //displays info about the cluster");
println("whoami //displays info about this cluster member");
println("ns <string> //switch the namespace for using the distributed queue/map/set/list "
+ "<string> (defaults to \"default\"");
println("@<file> //executes the given <file> script. Use '//' for comments in the script");
println("");
}
private void printQueueCommands() {
println("-- Queue commands");
println("q.offer <string> //adds a string object to the queue");
println("q.poll //takes an object from the queue");
println("q.offermany <number> [<size>] //adds indicated number of string objects to the queue ('obj<i>' or "
+ "byte[<size>]) ");
println("q.pollmany <number> //takes indicated number of objects from the queue");
println("q.iterator [remove] //iterates the queue, remove if specified");
println("q.size //size of the queue");
println("q.clear //clears the queue");
println("");
}
private void printSetCommands() {
println("-- Set commands");
println("s.add <string> //adds a string object to the set");
println("s.remove <string> //removes the string object from the set");
println("s.addmany <number> //adds indicated number of string objects to the set ('obj<i>')");
println("s.removemany <number> //takes indicated number of objects from the set");
println("s.iterator [remove] //iterates the set, removes if specified");
println("s.size //size of the set");
println("s.clear //clears the set");
println("");
}
private void printLockCommands() {
println("-- Lock commands");
println("lock <key> //same as Hazelcast.getLock(key).lock()");
println("tryLock <key> //same as Hazelcast.getLock(key).tryLock()");
println("tryLock <key> <time> //same as tryLock <key> with timeout in seconds");
println("unlock <key> //same as Hazelcast.getLock(key).unlock()");
println("");
}
private void printMapCommands() {
println("-- Map commands");
println("m.put <key> <value> //puts an entry to the map");
println("m.remove <key> //removes the entry of given key from the map");
println("m.get <key> //returns the value of given key from the map");
println("m.putmany <number> [<size>] [<index>]//puts indicated number of entries to the map ('key<i>':byte[<size>], "
+ "<index>+(0..<number>)");
println("m.removemany <number> [<index>] //removes indicated number of entries from the map ('key<i>', "
+ "<index>+(0..<number>)");
println(" When using &x with m.putmany and m.removemany, each thread will get a different share of keys unless a "
+ "start key <index> is specified");
println("m.keys //iterates the keys of the map");
println("m.values //iterates the values of the map");
println("m.entries //iterates the entries of the map");
println("m.iterator [remove] //iterates the keys of the map, remove if specified");
println("m.size //size of the map");
println("m.localSize //local size of the map");
println("m.clear //clears the map");
println("m.destroy //destroys the map");
println("m.lock <key> //locks the key");
println("m.tryLock <key> //tries to lock the key and returns immediately");
println("m.tryLock <key> <time> //tries to lock the key within given seconds");
println("m.unlock <key> //unlocks the key");
println("m.stats //shows the local stats of the map");
println("");
}
private void printMultiMapCommands() {
println("-- MultiMap commands");
println("mm.put <key> <value> //puts an entry to the multimap");
println("mm.get <key> //returns the value of given key from the multimap");
println("mm.remove <key> //removes the entry of given key from the multimap");
println("mm.size //size of the multimap");
println("mm.clear //clears the multimap");
println("mm.destroy //destroys the multimap");
println("mm.iterator [remove] //iterates the keys of the multimap, remove if specified");
println("mm.keys //iterates the keys of the multimap");
println("mm.values //iterates the values of the multimap");
println("mm.entries //iterates the entries of the multimap");
println("mm.lock <key> //locks the key");
println("mm.tryLock <key> //tries to lock the key and returns immediately");
println("mm.tryLock <key> <time> //tries to lock the key within given seconds");
println("mm.unlock <key> //unlocks the key");
println("mm.stats //shows the local stats of the multimap");
println("");
}
private void printExecutorServiceCommands() {
println("-- Executor Service commands:");
println("execute <echo-input> //executes an echo task on random member");
println("executeOnKey <echo-input> <key> //executes an echo task on the member that owns the given key");
println("executeOnMember <echo-input> <memberIndex> //executes an echo task on the member with given index");
println("executeOnMembers <echo-input> //executes an echo task on all of the members");
println("e<threadcount>.simulateLoad <task-count> <delaySeconds> //simulates load on executor with given number "
+ "of thread (e1..e16)");
println("");
}
private void printAtomicLongCommands() {
println("-- IAtomicLong commands:");
println("a.get");
println("a.set <long>");
println("a.inc");
println("a.dec");
print("");
}
private void printListCommands() {
println("-- List commands:");
println("l.add <string>");
println("l.add <index> <string>");
println("l.contains <string>");
println("l.remove <string>");
println("l.remove <index>");
println("l.set <index> <string>");
println("l.iterator [remove]");
println("l.size");
println("l.clear");
print("");
}
public void println(Object obj) {
if (!silent) {
System.out.println(obj);
}
}
public void print(Object obj) {
if (!silent) {
System.out.print(obj);
}
}
/**
* Starts the test application. Loads the client config from hazelcast-client.xml on the classpath;
* if it fails to load, the default client config is used.
*
* @param args none
* @throws Exception
*/
public static void main(String[] args) throws Exception {
ClientConfig clientConfig;
try {
clientConfig = new XmlClientConfigBuilder("hazelcast-client.xml").build();
} catch (IllegalArgumentException e) {
clientConfig = new ClientConfig();
}
final HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);
ClientTestApp clientTestApp = new ClientTestApp(client);
clientTestApp.start(args);
}
}
| 0true
|
hazelcast-client_src_main_java_com_hazelcast_client_examples_ClientTestApp.java
|
63 |
{
@Override
public TransactionState create( Transaction tx )
{
return new NoTransactionState()
{
@Override
@SuppressWarnings("deprecation")
public TxIdGenerator getTxIdGenerator()
{
return TxIdGenerator.DEFAULT;
}
};
}
};
| 0true
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestXaFramework.java
|
1,190 |
public class OQueryOperatorMinorEquals extends OQueryOperatorEqualityNotNulls {
public OQueryOperatorMinorEquals() {
super("<=", 5, false);
}
@Override
@SuppressWarnings("unchecked")
protected boolean evaluateExpression(final OIdentifiable iRecord, final OSQLFilterCondition iCondition, final Object iLeft,
final Object iRight, OCommandContext iContext) {
final Object right = OType.convert(iRight, iLeft.getClass());
if (right == null)
return false;
return ((Comparable<Object>) iLeft).compareTo(right) <= 0;
}
@Override
public OIndexReuseType getIndexReuseType(final Object iLeft, final Object iRight) {
if (iRight == null || iLeft == null)
return OIndexReuseType.NO_INDEX;
return OIndexReuseType.INDEX_METHOD;
}
@Override
public Object executeIndexQuery(OCommandContext iContext, OIndex<?> index, INDEX_OPERATION_TYPE iOperationType,
List<Object> keyParams, IndexResultListener resultListener, int fetchLimit) {
final OIndexDefinition indexDefinition = index.getDefinition();
final OIndexInternal<?> internalIndex = index.getInternal();
if (!internalIndex.canBeUsedInEqualityOperators() || !internalIndex.hasRangeQuerySupport())
return null;
final Object result;
if (indexDefinition.getParamCount() == 1) {
final Object key;
if (indexDefinition instanceof OIndexDefinitionMultiValue)
key = ((OIndexDefinitionMultiValue) indexDefinition).createSingleValue(keyParams.get(0));
else
key = indexDefinition.createValue(keyParams);
if (key == null)
return null;
if (INDEX_OPERATION_TYPE.COUNT.equals(iOperationType))
result = index.count(null, false, key, true, fetchLimit);
else if (resultListener != null) {
index.getValuesMinor(key, true, resultListener);
result = resultListener.getResult();
} else
result = index.getValuesMinor(key, true);
} else {
// If we have a situation like "field1 = 1 AND field2 <= 2"
// then we fetch the range whose included left boundary is the smallest composite key in the
// index that contains field1=1, and whose included right boundary
// is the biggest composite key in the index that contains field1=1 and field2=2.
final OCompositeIndexDefinition compositeIndexDefinition = (OCompositeIndexDefinition) indexDefinition;
final Object keyOne = compositeIndexDefinition.createSingleValue(keyParams.subList(0, keyParams.size() - 1));
if (keyOne == null)
return null;
final Object keyTwo = compositeIndexDefinition.createSingleValue(keyParams);
if (keyTwo == null)
return null;
if (INDEX_OPERATION_TYPE.COUNT.equals(iOperationType))
result = index.count(keyOne, true, keyTwo, true, fetchLimit);
else if (resultListener != null) {
index.getValuesBetween(keyOne, true, keyTwo, true, resultListener);
result = resultListener.getResult();
} else
result = index.getValuesBetween(keyOne, true, keyTwo, true);
}
updateProfiler(iContext, index, keyParams, indexDefinition);
return result;
}
@Override
public ORID getBeginRidRange(Object iLeft, Object iRight) {
return null;
}
@Override
public ORID getEndRidRange(final Object iLeft, final Object iRight) {
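// when the condition is "@rid <= <rid>", that RID is the upper bound of the range scan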
if (iLeft instanceof OSQLFilterItemField && ODocumentHelper.ATTRIBUTE_RID.equals(((OSQLFilterItemField) iLeft).getRoot()))
if (iRight instanceof ORID)
return (ORID) iRight;
else {
if (iRight instanceof OSQLFilterItemParameter && ((OSQLFilterItemParameter) iRight).getValue(null, null) instanceof ORID)
return (ORID) ((OSQLFilterItemParameter) iRight).getValue(null, null);
}
return null;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_operator_OQueryOperatorMinorEquals.java
|
44 |
abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
Node<K,V>[] tab; // same as Traverser
Node<K,V> next;
int index;
int baseIndex;
int baseLimit;
final int baseSize;
int batch; // split control
BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
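// a null table means an empty traversal; a task with no parent spans the whole table,
// otherwise it inherits the parent's base size and uses f as its limit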
super(par);
this.batch = b;
this.index = this.baseIndex = i;
if ((this.tab = t) == null)
this.baseSize = this.baseLimit = 0;
else if (par == null)
this.baseSize = this.baseLimit = t.length;
else {
this.baseLimit = f;
this.baseSize = par.baseSize;
}
}
/**
* Same as Traverser version
*/
final Node<K,V> advance() {
Node<K,V> e;
if ((e = next) != null)
e = e.next;
for (;;) {
Node<K,V>[] t; int i, n; K ek; // must use locals in checks
if (e != null)
return next = e;
if (baseIndex >= baseLimit || (t = tab) == null ||
(n = t.length) <= (i = index) || i < 0)
return next = null;
if ((e = tabAt(t, index)) != null && e.hash < 0) {
if (e instanceof ForwardingNode) {
tab = ((ForwardingNode<K,V>)e).nextTable;
e = null;
continue;
}
else if (e instanceof TreeBin)
e = ((TreeBin<K,V>)e).first;
else
e = null;
}
if ((index += baseSize) >= n)
index = ++baseIndex; // visit upper slots if present
}
}
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
320 |
public class OStorageEHClusterConfiguration implements OStorageClusterConfiguration, Serializable {
public transient OStorageConfiguration root;
public int id;
public String name;
public String location;
public int dataSegmentId;
public OStorageEHClusterConfiguration(OStorageConfiguration root, int id, String name, String location, int dataSegmentId) {
this.root = root;
this.id = id;
this.name = name;
this.location = location;
this.dataSegmentId = dataSegmentId;
}
@Override
public int getId() {
return id;
}
@Override
public String getName() {
return name;
}
@Override
public String getLocation() {
return location;
}
@Override
public int getDataSegmentId() {
return dataSegmentId;
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_config_OStorageEHClusterConfiguration.java
|
3,865 |
public class IdsFilterParser implements FilterParser {
public static final String NAME = "ids";
@Inject
public IdsFilterParser() {
}
@Override
public String[] names() {
return new String[]{NAME};
}
@Override
public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
List<BytesRef> ids = new ArrayList<BytesRef>();
Collection<String> types = null;
String filterName = null;
String currentFieldName = null;
XContentParser.Token token;
boolean idsProvided = false;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if ("values".equals(currentFieldName)) {
idsProvided = true;
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
BytesRef value = parser.bytesOrNull();
if (value == null) {
throw new QueryParsingException(parseContext.index(), "No value specified for term filter");
}
ids.add(value);
}
} else if ("types".equals(currentFieldName) || "type".equals(currentFieldName)) {
types = new ArrayList<String>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
String value = parser.textOrNull();
if (value == null) {
throw new QueryParsingException(parseContext.index(), "No type specified for term filter");
}
types.add(value);
}
} else {
throw new QueryParsingException(parseContext.index(), "[ids] filter does not support [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if ("type".equals(currentFieldName) || "_type".equals(currentFieldName)) {
types = ImmutableList.of(parser.text());
} else if ("_name".equals(currentFieldName)) {
filterName = parser.text();
} else {
throw new QueryParsingException(parseContext.index(), "[ids] filter does not support [" + currentFieldName + "]");
}
}
}
if (!idsProvided) {
throw new QueryParsingException(parseContext.index(), "[ids] filter requires providing a values element");
}
if (ids.isEmpty()) {
return Queries.MATCH_NO_FILTER;
}
if (types == null || types.isEmpty()) {
types = parseContext.queryTypes();
} else if (types.size() == 1 && Iterables.getFirst(types, null).equals("_all")) {
types = parseContext.mapperService().types();
}
TermsFilter filter = new TermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(types, ids));
if (filterName != null) {
parseContext.addNamedFilter(filterName, filter);
}
return filter;
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_query_IdsFilterParser.java
|
71 |
public class AuthenticationException extends HazelcastException {
public AuthenticationException() {
super("Wrong group name and password.");
}
public AuthenticationException(String message) {
super(message);
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_client_AuthenticationException.java
|
595 |
public class PlotConstants {
/*
* Default Plot Properties.
*/
public static final int DEFAULT_NUMBER_OF_SUBPLOTS = 1;
public static final boolean LOCAL_CONTROLS_ENABLED_BY_DEFAULT = true;
public static final YAxisMaximumLocationSetting DEFAULT_Y_AXIS_MAX_LOCATION_SETTING = YAxisMaximumLocationSetting.MAXIMUM_AT_TOP;
public static final NonTimeAxisSubsequentBoundsSetting DEFAULT_NON_TIME_AXIS_MIN_SUBSEQUENT_SETTING = NonTimeAxisSubsequentBoundsSetting.SEMI_FIXED;
public static final NonTimeAxisSubsequentBoundsSetting DEFAULT_NON_TIME_AXIS_MAX_SUBSEQUENT_SETTING = NonTimeAxisSubsequentBoundsSetting.SEMI_FIXED;
public static final int MILLISECONDS_IN_SECOND = 1000;
public static final int MILLISECONDS_IN_MIN = MILLISECONDS_IN_SECOND * 60;
public static final int MILLISECONDS_IN_HOUR = MILLISECONDS_IN_MIN * 60;
public static final int MILLISECONDS_IN_DAY = MILLISECONDS_IN_HOUR * 24;
public static final int DEFAUlT_PLOT_SPAN = 30 * 60 * 1000; // 30 mins in Milliseconds
public static final Color ROLL_OVER_PLOT_LINE_COLOR = Color.white;
public static final int DEFAULT_TIME_AXIS_FONT_SIZE = 10;
public static final Font DEFAULT_TIME_AXIS_FONT = new Font("Arial", Font.PLAIN, DEFAULT_TIME_AXIS_FONT_SIZE);
public static final int DEFAULT_PLOTLINE_THICKNESS = 1;
public static final int SELECTED_LINE_THICKNESS = 2;
public static final Color DEFAULT_PLOT_FRAME_BACKGROUND_COLOR = new Color(51, 51, 51);
public static final Color DEFAULT_PLOT_AREA_BACKGROUND_COLOR = Color.black;
public static final int DEFAULT_TIME_AXIS_INTERCEPT = 0;
public static final Color DEFAULT_TIME_AXIS_COLOR = Color.white;
public static final Color DEFAULT_TIME_AXIS_LABEL_COLOR = Color.white;
public static final Color DEFAULT_NON_TIME_AXIS_COLOR= Color.white;
public static final Color DEFAULT_GRID_LINE_COLOR = Color.LIGHT_GRAY;
public static final int DEFAULT_MIN_SAMPLES_FOR_AUTO_SCALE = 0;
public static final double DEFAULT_TIME_AXIS_PADDING = 0.25;
public static final double DEFAULT_TIME_AXIS_PADDING_JUMP_MIN = 0.05;
public static final double DEFAULT_TIME_AXIS_PADDING_JUMP_MAX = 0.25;
public static final double DEFAULT_TIME_AXIS_PADDING_SCRUNCH_MIN = 0.20;
public static final double DEFAULT_TIME_AXIS_PADDING_SCRUNCH_MAX = 0.25;
public static final double DEFAULT_NON_TIME_AXIS_PADDING_MAX = 0.05;
public static final double DEFAULT_NON_TIME_AXIS_PADDING_MIN = 0.05;
public static final double DEFAULT_NON_TIME_AXIS_MIN_VALUE = 0;
public static final double DEFAULT_NON_TIME_AXIS_MAX_VALUE = 1;
public static final long DEFAULT_TIME_AXIS_MIN_VALUE = new GregorianCalendar().getTimeInMillis();
public static final long DEFAULT_TIME_AXIS_MAX_VALUE= DEFAULT_TIME_AXIS_MIN_VALUE + DEFAUlT_PLOT_SPAN;
public static final int MAX_NUMBER_OF_DATA_ITEMS_ON_A_PLOT = 30;
public static final int MAX_NUMBER_SUBPLOTS = 10;
public static final PlotLineDrawingFlags DEFAULT_PLOT_LINE_DRAW = new PlotLineDrawingFlags(true, false);
public static final int MAJOR_TICK_MARK_LENGTH = 3;
public static final int MINOR_TICK_MARK_LENGTH = 1;
public static final String GMT = "GMT";
public static final String DEFAULT_TIME_ZONE = GMT;
public static final String DEFAULT_TIME_AXIS_DATA_FORMAT = "DDD/HH:mm:ss"; // add a z to see the time zone.
// Field names for persistence
public static final String TIME_AXIS_SETTING = "PlotTimeAxisSetting";
public static final String X_AXIS_MAXIMUM_LOCATION_SETTING = "PlotXAxisMaximumLocation";
public static final String Y_AXIS_MAXIMUM_LOCATION_SETTING = "PlotYAxisMaximumLocation";
public static final String TIME_AXIS_SUBSEQUENT_SETTING = "PlotTimeAxisSubsequentSetting";
public static final String NON_TIME_AXIS_SUBSEQUENT_MIN_SETTING = "PlotNonTimeAxisSubsequentMinSetting";
public static final String NON_TIME_AXIS_SUBSEQUENT_MAX_SETTING = "PlotNonTimeAxisSubsequentMaxSetting";
public static final String NON_TIME_MAX = "NonTimeMax";
public static final String NON_TIME_MIN = "NonTimeMin";
public static final String TIME_MAX = "TimeMax";
public static final String TIME_MIN = "TimeMin";
public static final String TIME_PADDING = "TimePadding";
public static final String NON_TIME_MIN_PADDING = "NonTimeMinPadding";
public static final String NON_TIME_MAX_PADDING = "NonTimeMaxPadding";
public static final String GROUP_BY_ORDINAL_POSITION = "GroupByOrdinalPosition";
public static final String PIN_TIME_AXIS = "PinTimeAxis";
public static final String DRAW_LINES = "PlotLineDrawLines";
public static final String DRAW_MARKERS = "PlotLineDrawMarkers";
public static final String DRAW_CHARACTERS = "PlotLineDrawCharacters";
public static final String CONNECTION_TYPE = "PlotLineConnectionType";
public static final String COLOR_ASSIGNMENTS = "PlotColorAssignments";
public static final String LINE_SETTINGS = "PlotLineSettings";
// Delay before firing a request for data at a higher resolution on a window.
public final static int RESIZE_TIMER = 200; // in milliseconds.
// Limit button border settings
public static final int ARROW_BUTTON_BORDER_STYLE_TOP = 1;
public static final int ARROW_BUTTON_BORDER_STYLE_LEFT = 0;
public static final int ARROW_BUTTON_BORDER_STYLE_BOTTOM = 0;
public static final int ARROW_BUTTON_BORDER_STYLE_RIGHT = 0;
// The size below which the plot will not go before it starts to truncate the legends.
public static final int MINIMUM_PLOT_WIDTH = 200; //200;
public static final int MINIMUM_PLOT_HEIGHT = 100;
public static final int Y_AXIS_WHEN_NON_TIME_LABEL_WIDTH = 28;
// Legends
public final static Color LEGEND_BACKGROUND_COLOR = DEFAULT_PLOT_FRAME_BACKGROUND_COLOR;
public static final int PLOT_LEGEND_BUFFER = 5;
public static final int PLOT_LEGEND_WIDTH = 120;
public static final int PLOT_MINIMUM_LEGEND_WIDTH = 40;
public static final int PLOT_LEGEND_OFFSET_FROM_LEFT_HAND_SIDE = 0;
public static final String LEGEND_NEWLINE_CHARACTER = "\n";
public static final String LEGEND_ELLIPSES = "...";
public static final int MAXIMUM_LEGEND_TEXT_SIZE = 20; //maximum width of a legend
public static final DecimalFormat DECIMAL_FORMAT = new DecimalFormat("#0.000");
// Sync line
public static final Color TIME_SYNC_LINE_COLOR = Color.orange;
public static final int TIME_SYNC_LINE_WIDTH = 2;
public static final int SYNC_LINE_STYLE = 9; // ChartConstants.LS_DASH_DOT;
public static final int SHIFT_KEY_MASK = InputEvent.SHIFT_MASK;
public static final int ALT_KEY_MASK = InputEvent.ALT_MASK;
public static final int CTL_KEY_MASK = InputEvent.CTRL_MASK;
// Data Cursor
public static final Color DATA_CURSOR_COLOR = new Color(235, 235, 235);//new Color(51, 102, 153);
public static final int SLOPE_LINE_STYLE = 0; // ChartConstants.LS_SOLID;
public static final int SLOPE_LINE_WIDTH = 1;
public static final String SLOPE_UNIT = "/min";
public static final String REGRESSION_LINE = "RegressionLine";
public static final int NUMBER_REGRESSION_POINTS = 20;
public static final int SLOPE_UNIT_DIVIDER_IN_MS = PlotConstants.MILLISECONDS_IN_MIN; // per minute.
public final static float dash1[] = {10.0f};
// Data Compression
// Sets the default value for data compression which can be overridden by the client.
public static final boolean COMPRESSION_ENABLED_BY_DEFAULT = true;
public static final int MAXIMUM_PLOT_DATA_BUFFER_SLIZE_REQUEST_SIZE = 12 * MILLISECONDS_IN_HOUR ;
// Panning and zooming controls
public static final double PANNING_NON_TIME_AXIS_PERCENTAGE = 25;
public static final double PANNING_TIME_AXIS_PERCENTAGE = 25;
public static final double ZOOMING_NON_TIME_AXIS_PERCENTAGE = 10;
public static final double ZOOMING_TIME_AXIS_PERCENTAGE = 10;
public static final int zoomingTimeAxisIncrementInMiliseconds = 30 * MILLISECONDS_IN_SECOND;
public static final int zoomingNonTimeAxisIncrement = 10;
public static final int LOCAL_CONTROL_HEIGHT = 25;
public static final int LOCAL_CONTROL_WIDTH = 28;
/**
* Orientation of the time axis.
*/
public enum AxisOrientationSetting {
X_AXIS_AS_TIME, Y_AXIS_AS_TIME
}
public enum AxisBounds {
MAX, MIN
}
public enum XAxisMaximumLocationSetting {
MAXIMUM_AT_RIGHT, MAXIMUM_AT_LEFT
}
public enum YAxisMaximumLocationSetting {
MAXIMUM_AT_TOP, MAXIMUM_AT_BOTTOM
}
/**
* Subsequent modes on the time axis.
*/
public enum TimeAxisSubsequentBoundsSetting {
JUMP, SCRUNCH
}
/**
* Subsequent modes on the non-time axis
*/
public enum NonTimeAxisSubsequentBoundsSetting {
AUTO, FIXED, SEMI_FIXED
}
/**
* State that limit alarms can be in.
*/
public enum LimitAlarmState{
NO_ALARM, ALARM_RAISED, ALARM_OPENED_BY_USER, ALARM_CLOSED_BY_USER
}
/**
* Panning actions
*/
public enum PanDirection {
PAN_LOWER_X_AXIS, PAN_HIGHER_X_AXIS, PAN_LOWER_Y_AXIS, PAN_HIGHER_Y_AXIS;
}
/**
* Zoom actions
*/
public enum ZoomDirection {
ZOOM_IN_HIGH_Y_AXIS, ZOOM_OUT_HIGH_Y_AXIS,
ZOOM_IN_CENTER_Y_AXIS, ZOOM_OUT_CENTER_Y_AXIS,
ZOOM_IN_LOW_Y_AXIS, ZOOM_OUT_LOW_Y_AXIS,
ZOOM_IN_LEFT_X_AXIS, ZOOM_OUT_LEFT_X_AXIS,
ZOOM_IN_CENTER_X_AXIS, ZOOM_OUT_CENTER_X_AXIS,
ZOOM_IN_RIGHT_X_AXIS, ZOOM_OUT_RIGHT_X_AXIS;
}
public enum AxisType {
TIME_IN_JUMP_MODE (DEFAULT_TIME_AXIS_PADDING_JUMP_MIN,
DEFAULT_TIME_AXIS_PADDING_JUMP_MAX),
TIME_IN_SCRUNCH_MODE (DEFAULT_TIME_AXIS_PADDING_SCRUNCH_MIN,
DEFAULT_TIME_AXIS_PADDING_SCRUNCH_MAX),
NON_TIME (DEFAULT_NON_TIME_AXIS_PADDING_MIN,
DEFAULT_NON_TIME_AXIS_PADDING_MAX);
private final double minimumDefaultPadding;
private final double maximumDefaultPadding;
AxisType(double minPadding, double maxPadding) {
this.minimumDefaultPadding = minPadding;
this.maximumDefaultPadding = maxPadding;
}
public double getMinimumDefaultPadding() {
return minimumDefaultPadding;
}
public String getMinimumDefaultPaddingAsText() {
String percentString = NumberFormat.getPercentInstance().format(this.minimumDefaultPadding);
return percentString.substring(0, percentString.length()-1);
}
public double getMaximumDefaultPadding() {
return maximumDefaultPadding;
}
public String getMaximumDefaultPaddingAsText() {
String percentString = NumberFormat.getPercentInstance().format(this.maximumDefaultPadding);
return percentString.substring(0, percentString.length()-1);
}
}
/**
* DISPLAY_ONLY optimizes the plot buffering for displaying multiple plots with the minimum buffer wait.
* Switching to USER_INTERACTION mode deepens and widens the plot buffer to support user interactions such
* as panning and zooming.
*/
public enum PlotDisplayState {
DISPLAY_ONLY, USER_INTERACTION;
}
/**
* Indicates whether we will be drawing plot lines, point markers, or both.
*/
public static class PlotLineDrawingFlags {
private boolean line, markers;
public PlotLineDrawingFlags(boolean line, boolean markers) {
this.line = line;
this.markers = markers;
}
public boolean drawLine() {
return line;
}
public boolean drawMarkers() {
return markers;
}
}
/**
* Indicates how to connect plot point with lines.
*/
public enum PlotLineConnectionType {
DIRECT, STEP_X_THEN_Y
}
/**
* Params for Labeling Algorithm
*/
/**
* The regular expression defining the delimiter pattern between words.
* Words are delimited by a sequence of one or more spaces or underscores.
*/
public static final String WORD_DELIMITERS = "[ _]+";
/**
* The compiled regular expression defining the delimiter pattern between
* words.
*/
public static final Pattern WORD_DELIMITER_PATTERN = Pattern.compile(WORD_DELIMITERS);
/**
* The separator to use when concatenating words together to form labels.
*/
public static final String WORD_SEPARATOR = " ";
/**
* The maximum thickness for a plot line's stroke
*/
public static final int MAX_LINE_THICKNESS = 5;
}
| 1no label
|
fastPlotViews_src_main_java_gov_nasa_arc_mct_fastplot_bridge_PlotConstants.java
|
495 |
public interface SiteDao {
/**
* Finds a site by its id.
* @param id
* @return
*/
public Site retrieve(Long id);
/**
* Finds a site by its domain or domain prefix.
* @param domain
* @param prefix
* @return
*/
public Site retrieveSiteByDomainOrDomainPrefix(String domain, String prefix);
/**
* Persists the site changes.
* @param site
* @return
*/
public Site save(Site site);
/**
* Returns a default site. This method returns null in the out-of-the-box implementation of Broadleaf.
* Extend for implementation specific behavior.
*
* @return
*/
public Site retrieveDefaultSite();
/**
* @return a List of all sites in the system
*/
public List<Site> readAllActiveSites();
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_site_dao_SiteDao.java
|
1,218 |
public class MockPageCacheRecycler extends PageCacheRecycler {
private static final ConcurrentMap<Object, Throwable> ACQUIRED_PAGES = Maps.newConcurrentMap();
public static void ensureAllPagesAreReleased() {
if (ACQUIRED_PAGES.size() > 0) {
final Throwable t = ACQUIRED_PAGES.entrySet().iterator().next().getValue();
throw new RuntimeException(ACQUIRED_PAGES.size() + " pages have not been released", t);
}
ACQUIRED_PAGES.clear();
}
private final Random random;
@Inject
public MockPageCacheRecycler(Settings settings, ThreadPool threadPool) {
super(settings, threadPool);
final long seed = settings.getAsLong(TestCluster.SETTING_CLUSTER_NODE_SEED, 0L);
random = new Random(seed);
}
private static <T> V<T> wrap(final V<T> v) {
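// record where the page was acquired so a leak can later be reported with the acquiring stack trace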
ACQUIRED_PAGES.put(v, new Throwable());
final Thread t = Thread.currentThread();
return new V<T>() {
@Override
public boolean release() throws ElasticsearchException {
if (t != Thread.currentThread()) {
// Releasing from a different thread doesn't break anything but this is bad practice as pages should be acquired
// as late as possible and released as soon as possible in a try/finally fashion
throw new RuntimeException("Page was allocated in " + t + " but released in " + Thread.currentThread());
}
final Throwable t = ACQUIRED_PAGES.remove(v);
if (t == null) {
throw new IllegalStateException("Releasing a page that has not been acquired");
}
return v.release();
}
@Override
public T v() {
return v.v();
}
@Override
public boolean isRecycled() {
return v.isRecycled();
}
};
}
@Override
public V<byte[]> bytePage(boolean clear) {
final V<byte[]> page = super.bytePage(clear);
if (!clear) {
random.nextBytes(page.v());
}
return wrap(page);
}
@Override
public V<int[]> intPage(boolean clear) {
final V<int[]> page = super.intPage(clear);
if (!clear) {
for (int i = 0; i < page.v().length; ++i) {
page.v()[i] = random.nextInt();
}
}
return wrap(page);
}
@Override
public V<long[]> longPage(boolean clear) {
final V<long[]> page = super.longPage(clear);
if (!clear) {
for (int i = 0; i < page.v().length; ++i) {
page.v()[i] = random.nextLong();
}
}
return wrap(page);
}
@Override
public V<double[]> doublePage(boolean clear) {
final V<double[]> page = super.doublePage(clear);
if (!clear) {
for (int i = 0; i < page.v().length; ++i) {
page.v()[i] = random.nextDouble() - 0.5;
}
}
return wrap(page);
}
@Override
public V<Object[]> objectPage() {
return wrap(super.objectPage());
}
}
| 1no label
|
src_test_java_org_elasticsearch_cache_recycler_MockPageCacheRecycler.java
|
649 |
public class StatusExposingServletResponse extends HttpServletResponseWrapper {
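// remembers the last status code written through this wrapper (sendError/setStatus/reset) so it can be read back via getStatus()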
private int httpStatus=200;
public StatusExposingServletResponse(HttpServletResponse response) {
super(response);
}
@Override
public void sendError(int sc) throws IOException {
httpStatus = sc;
super.sendError(sc);
}
@Override
public void sendError(int sc, String msg) throws IOException {
httpStatus = sc;
super.sendError(sc, msg);
}
@Override
public void setStatus(int sc) {
httpStatus = sc;
super.setStatus(sc);
}
@Override
public void reset() {
super.reset();
this.httpStatus = SC_OK;
}
@Override
public void setStatus(int status, String string) {
super.setStatus(status, string);
this.httpStatus = status;
}
public int getStatus() {
return httpStatus;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_web_util_StatusExposingServletResponse.java
|
5,470 |
public static Comparator<AtomicArray.Entry<? extends QuerySearchResultProvider>> QUERY_RESULT_ORDERING = new Comparator<AtomicArray.Entry<? extends QuerySearchResultProvider>>() {
@Override
public int compare(AtomicArray.Entry<? extends QuerySearchResultProvider> o1, AtomicArray.Entry<? extends QuerySearchResultProvider> o2) {
int i = o1.value.shardTarget().index().compareTo(o2.value.shardTarget().index());
if (i == 0) {
i = o1.value.shardTarget().shardId() - o2.value.shardTarget().shardId();
}
return i;
}
};
| 1no label
|
src_main_java_org_elasticsearch_search_controller_SearchPhaseController.java
|
516 |
public interface TimeSource {
long timeInMillis();
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_time_TimeSource.java
|
793 |
public class PercolateRequest extends BroadcastOperationRequest<PercolateRequest> {
public static final XContentType contentType = Requests.CONTENT_TYPE;
private String documentType;
private String routing;
private String preference;
private GetRequest getRequest;
private boolean onlyCount;
private BytesReference source;
private boolean unsafe;
private BytesReference docSource;
// Used internally to compute tookInMillis; TransportBroadcastOperationAction itself doesn't provide
// an easy way to hold it temporarily
long startTime;
public PercolateRequest() {
}
public PercolateRequest(PercolateRequest request, BytesReference docSource) {
super(request.indices());
operationThreading(request.operationThreading());
this.documentType = request.documentType();
this.routing = request.routing();
this.preference = request.preference();
this.source = request.source;
this.docSource = docSource;
this.onlyCount = request.onlyCount;
this.startTime = request.startTime;
}
public String documentType() {
return documentType;
}
public void documentType(String type) {
this.documentType = type;
}
public String routing() {
return routing;
}
public PercolateRequest routing(String routing) {
this.routing = routing;
return this;
}
public String preference() {
return preference;
}
public PercolateRequest preference(String preference) {
this.preference = preference;
return this;
}
public GetRequest getRequest() {
return getRequest;
}
public void getRequest(GetRequest getRequest) {
this.getRequest = getRequest;
}
/**
* Before we fork on a local thread, make sure we copy over the bytes if they are unsafe
*/
@Override
public void beforeLocalFork() {
if (unsafe) {
source = source.copyBytesArray();
unsafe = false;
}
}
public BytesReference source() {
return source;
}
public PercolateRequest source(Map document) throws ElasticsearchGenerationException {
return source(document, contentType);
}
public PercolateRequest source(Map document, XContentType contentType) throws ElasticsearchGenerationException {
try {
XContentBuilder builder = XContentFactory.contentBuilder(contentType);
builder.map(document);
return source(builder);
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to generate [" + document + "]", e);
}
}
public PercolateRequest source(String document) {
this.source = new BytesArray(document);
this.unsafe = false;
return this;
}
public PercolateRequest source(XContentBuilder documentBuilder) {
source = documentBuilder.bytes();
unsafe = false;
return this;
}
public PercolateRequest source(byte[] document) {
return source(document, 0, document.length);
}
public PercolateRequest source(byte[] source, int offset, int length) {
return source(source, offset, length, false);
}
public PercolateRequest source(byte[] source, int offset, int length, boolean unsafe) {
return source(new BytesArray(source, offset, length), unsafe);
}
public PercolateRequest source(BytesReference source, boolean unsafe) {
this.source = source;
this.unsafe = unsafe;
return this;
}
public PercolateRequest source(PercolateSourceBuilder sourceBuilder) {
this.source = sourceBuilder.buildAsBytes(contentType);
this.unsafe = false;
return this;
}
public boolean onlyCount() {
return onlyCount;
}
public void onlyCount(boolean onlyCount) {
this.onlyCount = onlyCount;
}
BytesReference docSource() {
return docSource;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = super.validate();
if (documentType == null) {
validationException = addValidationError("type is missing", validationException);
}
if (source == null && getRequest == null) {
validationException = addValidationError("source or get is missing", validationException);
}
if (getRequest != null && getRequest.fields() != null) {
validationException = addValidationError("get fields option isn't supported via percolate request", validationException);
}
return validationException;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
startTime = in.readVLong();
documentType = in.readString();
routing = in.readOptionalString();
preference = in.readOptionalString();
unsafe = false;
source = in.readBytesReference();
docSource = in.readBytesReference();
if (in.readBoolean()) {
getRequest = new GetRequest(null);
getRequest.readFrom(in);
}
onlyCount = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVLong(startTime);
out.writeString(documentType);
out.writeOptionalString(routing);
out.writeOptionalString(preference);
out.writeBytesReference(source);
out.writeBytesReference(docSource);
if (getRequest != null) {
out.writeBoolean(true);
getRequest.writeTo(out);
} else {
out.writeBoolean(false);
}
out.writeBoolean(onlyCount);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_percolate_PercolateRequest.java
|
528 |
class ShardFlushRequest extends BroadcastShardOperationRequest {
private boolean full;
private boolean force;
ShardFlushRequest() {
}
public ShardFlushRequest(String index, int shardId, FlushRequest request) {
super(index, shardId, request);
this.full = request.full();
this.force = request.force();
}
public boolean full() {
return this.full;
}
public boolean force() {
return this.force;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
full = in.readBoolean();
force = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(full);
out.writeBoolean(force);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_flush_ShardFlushRequest.java
|
1,091 |
public final class ODefaultSQLFunctionFactory implements OSQLFunctionFactory {
private static final Map<String, Object> FUNCTIONS = new HashMap<String, Object>();
static {
// MISC FUNCTIONS
FUNCTIONS.put(OSQLFunctionCoalesce.NAME.toUpperCase(Locale.ENGLISH), new OSQLFunctionCoalesce());
FUNCTIONS.put(OSQLFunctionIf.NAME.toUpperCase(Locale.ENGLISH), new OSQLFunctionIf());
FUNCTIONS.put(OSQLFunctionIfNull.NAME.toUpperCase(Locale.ENGLISH), new OSQLFunctionIfNull());
FUNCTIONS.put(OSQLFunctionFormat.NAME.toUpperCase(Locale.ENGLISH), new OSQLFunctionFormat());
FUNCTIONS.put(OSQLFunctionDate.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionDate.class);
FUNCTIONS.put(OSQLFunctionSysdate.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionSysdate.class);
FUNCTIONS.put(OSQLFunctionCount.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionCount.class);
FUNCTIONS.put(OSQLFunctionDocument.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionDocument.class);
FUNCTIONS.put(OSQLFunctionDistinct.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionDistinct.class);
FUNCTIONS.put(OSQLFunctionUnion.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionUnion.class);
FUNCTIONS.put(OSQLFunctionIntersect.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionIntersect.class);
FUNCTIONS.put(OSQLFunctionDifference.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionDifference.class);
FUNCTIONS.put(OSQLFunctionFirst.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionFirst.class);
FUNCTIONS.put(OSQLFunctionLast.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionLast.class);
FUNCTIONS.put(OSQLFunctionList.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionList.class);
FUNCTIONS.put(OSQLFunctionSet.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionSet.class);
FUNCTIONS.put(OSQLFunctionMap.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionMap.class);
FUNCTIONS.put(OSQLFunctionEncode.NAME.toUpperCase(Locale.ENGLISH), new OSQLFunctionEncode());
FUNCTIONS.put(OSQLFunctionDecode.NAME.toUpperCase(Locale.ENGLISH), new OSQLFunctionDecode());
// MATH FUNCTIONS
FUNCTIONS.put(OSQLFunctionMin.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionMin.class);
FUNCTIONS.put(OSQLFunctionMax.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionMax.class);
FUNCTIONS.put(OSQLFunctionSum.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionSum.class);
FUNCTIONS.put(OSQLFunctionAverage.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionAverage.class);
FUNCTIONS.put(OSQLFunctionEval.NAME.toUpperCase(Locale.ENGLISH), OSQLFunctionEval.class);
// GEO FUNCTIONS
FUNCTIONS.put(OSQLFunctionDistance.NAME.toUpperCase(Locale.ENGLISH), new OSQLFunctionDistance());
}
@Override
public Set<String> getFunctionNames() {
return FUNCTIONS.keySet();
}
@Override
public boolean hasFunction(final String name) {
return FUNCTIONS.containsKey(name);
}
@Override
public OSQLFunction createFunction(final String name) {
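// the registry holds either a shared stateless instance or a class to instantiate for each invocation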
final Object obj = FUNCTIONS.get(name);
if (obj == null)
throw new OCommandExecutionException("Unknowned function name :" + name);
if (obj instanceof OSQLFunction)
return (OSQLFunction) obj;
else {
// it's a class
final Class<?> clazz = (Class<?>) obj;
try {
return (OSQLFunction) clazz.newInstance();
} catch (Exception e) {
throw new OCommandExecutionException("Error in creation of function " + name
+ "(). Probably there is not an empty constructor or the constructor generates errors", e);
}
}
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_functions_ODefaultSQLFunctionFactory.java
|
57 |
public class HttpPostCommand extends HttpCommand {
boolean nextLine;
boolean readyToReadData;
private ByteBuffer data;
private ByteBuffer line = ByteBuffer.allocate(500);
private String contentType;
private final SocketTextReader socketTextRequestReader;
private boolean chunked;
public HttpPostCommand(SocketTextReader socketTextRequestReader, String uri) {
super(TextCommandType.HTTP_POST, uri);
this.socketTextRequestReader = socketTextRequestReader;
}
/**
* POST /path HTTP/1.0
* User-Agent: HTTPTool/1.0
* Content-Type: application/x-www-form-urlencoded
* Content-Length: 45
* <next_line>
* <next_line>
* byte[45]
* <next_line>
*
* @param cb
* @return
*/
public boolean readFrom(ByteBuffer cb) {
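// for chunked transfer-encoding, keep consuming chunks from this buffer until the body is complete or the buffer is drained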
boolean complete = doActualRead(cb);
while (!complete && readyToReadData && chunked && cb.hasRemaining()) {
complete = doActualRead(cb);
}
if (complete) {
if (data != null) {
data.flip();
}
}
return complete;
}
public byte[] getData() {
if (data == null) {
return null;
} else {
return data.array();
}
}
public byte[] getContentType() {
if (contentType == null) {
return null;
} else {
return stringToBytes(contentType);
}
}
public boolean doActualRead(ByteBuffer cb) {
if (readyToReadData) {
if (chunked && (data == null || !data.hasRemaining())) {
boolean hasLine = readLine(cb);
String lineStr = null;
if (hasLine) {
lineStr = toStringAndClear(line).trim();
}
if (hasLine) {
// hex string
int dataSize = lineStr.length() == 0 ? 0 : Integer.parseInt(lineStr, 16);
if (dataSize == 0) {
return true;
}
if (data != null) {
ByteBuffer newData = ByteBuffer.allocate(data.capacity() + dataSize);
newData.put(data.array());
data = newData;
} else {
data = ByteBuffer.allocate(dataSize);
}
}
}
IOUtil.copyToHeapBuffer(cb, data);
}
while (!readyToReadData && cb.hasRemaining()) {
byte b = cb.get();
char c = (char) b;
if (c == '\n') {
processLine(toStringAndClear(line).toLowerCase());
if (nextLine) {
readyToReadData = true;
}
nextLine = true;
} else if (c != '\r') {
nextLine = false;
line.put(b);
}
}
return !chunked && ((data != null) && !data.hasRemaining());
}
String toStringAndClear(ByteBuffer bb) {
if (bb == null) {
return "";
}
String result;
if (bb.position() == 0) {
result = "";
} else {
result = StringUtil.bytesToString(bb.array(), 0, bb.position());
}
bb.clear();
return result;
}
boolean readLine(ByteBuffer cb) {
while (cb.hasRemaining()) {
byte b = cb.get();
char c = (char) b;
if (c == '\n') {
return true;
} else if (c != '\r') {
line.put(b);
}
}
return false;
}
private void processLine(String currentLine) {
if (contentType == null && currentLine.startsWith(HEADER_CONTENT_TYPE)) {
contentType = currentLine.substring(currentLine.indexOf(' ') + 1);
} else if (data == null && currentLine.startsWith(HEADER_CONTENT_LENGTH)) {
data = ByteBuffer.allocate(Integer.parseInt(currentLine.substring(currentLine.indexOf(' ') + 1)));
} else if (!chunked && currentLine.startsWith(HEADER_CHUNKED)) {
chunked = true;
} else if (currentLine.startsWith(HEADER_EXPECT_100)) {
socketTextRequestReader.sendResponse(new NoOpCommand(RES_100));
}
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_rest_HttpPostCommand.java
|
124 |
public interface PageRuleProcessor {
/**
* Returns true if the passed in <code>Page</code> is valid according
* to this rule processor.
*
* @param page
* @return
*/
public boolean checkForMatch(PageDTO page, Map<String,Object> valueMap);
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_service_PageRuleProcessor.java
|
335 |
new Thread() {
public void run() {
if (!map.tryLock("key1")) {
latch.countDown();
}
try {
if (map.tryLock("key1", 5, TimeUnit.SECONDS)) {
latch.countDown();
}
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTest.java
|
268 |
public class ElasticsearchIllegalArgumentException extends ElasticsearchException {
public ElasticsearchIllegalArgumentException() {
super(null);
}
public ElasticsearchIllegalArgumentException(String msg) {
super(msg);
}
public ElasticsearchIllegalArgumentException(String msg, Throwable cause) {
super(msg, cause);
}
@Override
public RestStatus status() {
return RestStatus.BAD_REQUEST;
}
}
| 0true
|
src_main_java_org_elasticsearch_ElasticsearchIllegalArgumentException.java
|
826 |
public class SetBackupOperation extends AtomicLongBaseOperation implements BackupOperation {
private long newValue;
public SetBackupOperation() {
}
public SetBackupOperation(String name, long newValue) {
super(name);
this.newValue = newValue;
}
@Override
public void run() throws Exception {
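// replay the primary's set(newValue) on this backup replica's counter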
LongWrapper number = getNumber();
number.set(newValue);
}
@Override
public int getId() {
return AtomicLongDataSerializerHook.SET_BACKUP;
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeLong(newValue);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
newValue = in.readLong();
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_concurrent_atomiclong_operations_SetBackupOperation.java
|
3,451 |
public class LocalIndexShardGateway extends AbstractIndexShardComponent implements IndexShardGateway {
private final ThreadPool threadPool;
private final InternalIndexShard indexShard;
private final RecoveryStatus recoveryStatus = new RecoveryStatus();
private volatile ScheduledFuture flushScheduler;
private final TimeValue syncInterval;
@Inject
public LocalIndexShardGateway(ShardId shardId, @IndexSettings Settings indexSettings, ThreadPool threadPool, IndexShard indexShard) {
super(shardId, indexSettings);
this.threadPool = threadPool;
this.indexShard = (InternalIndexShard) indexShard;
syncInterval = componentSettings.getAsTime("sync", TimeValue.timeValueSeconds(5));
if (syncInterval.millis() > 0) {
this.indexShard.translog().syncOnEachOperation(false);
flushScheduler = threadPool.schedule(syncInterval, ThreadPool.Names.SAME, new Sync());
} else if (syncInterval.millis() == 0) {
flushScheduler = null;
this.indexShard.translog().syncOnEachOperation(true);
} else {
flushScheduler = null;
}
}
@Override
public String toString() {
return "local";
}
@Override
public RecoveryStatus recoveryStatus() {
return recoveryStatus;
}
@Override
public void recover(boolean indexShouldExists, RecoveryStatus recoveryStatus) throws IndexShardGatewayRecoveryException {
recoveryStatus.index().startTime(System.currentTimeMillis());
recoveryStatus.updateStage(RecoveryStatus.Stage.INDEX);
long version = -1;
long translogId = -1;
try {
SegmentInfos si = null;
try {
si = Lucene.readSegmentInfos(indexShard.store().directory());
} catch (Throwable e) {
String files = "_unknown_";
try {
files = Arrays.toString(indexShard.store().directory().listAll());
} catch (Throwable e1) {
files += " (failure=" + ExceptionsHelper.detailedMessage(e1) + ")";
}
if (indexShouldExists && indexShard.store().indexStore().persistent()) {
throw new IndexShardGatewayRecoveryException(shardId(), "shard allocated for local recovery (post api), should exist, but doesn't, current files: " + files, e);
}
}
if (si != null) {
if (indexShouldExists) {
version = si.getVersion();
if (si.getUserData().containsKey(Translog.TRANSLOG_ID_KEY)) {
translogId = Long.parseLong(si.getUserData().get(Translog.TRANSLOG_ID_KEY));
} else {
translogId = version;
}
logger.trace("using existing shard data, translog id [{}]", translogId);
} else {
// it exists in the directory, but shouldn't exist on the FS, it's a leftover (possibly dangling)
// it's a "new index create" API, we have to do something, so better to clean it than use the same data
logger.trace("cleaning existing shard, shouldn't exists");
IndexWriter writer = new IndexWriter(indexShard.store().directory(), new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER).setOpenMode(IndexWriterConfig.OpenMode.CREATE));
writer.close();
}
}
} catch (Throwable e) {
throw new IndexShardGatewayRecoveryException(shardId(), "failed to fetch index version after copying it over", e);
}
recoveryStatus.index().updateVersion(version);
recoveryStatus.index().time(System.currentTimeMillis() - recoveryStatus.index().startTime());
// since we recover from local, just fill the files and size
try {
int numberOfFiles = 0;
long totalSizeInBytes = 0;
for (String name : indexShard.store().directory().listAll()) {
numberOfFiles++;
totalSizeInBytes += indexShard.store().directory().fileLength(name);
}
recoveryStatus.index().files(numberOfFiles, totalSizeInBytes, numberOfFiles, totalSizeInBytes);
} catch (Exception e) {
// ignore
}
recoveryStatus.start().startTime(System.currentTimeMillis());
recoveryStatus.updateStage(RecoveryStatus.Stage.START);
if (translogId == -1) {
// no translog files, bail
indexShard.postRecovery("post recovery from gateway, no translog");
// no index, just start the shard and bail
recoveryStatus.start().time(System.currentTimeMillis() - recoveryStatus.start().startTime());
recoveryStatus.start().checkIndexTime(indexShard.checkIndexTook());
return;
}
// move an existing translog, if exists, to "recovering" state, and start reading from it
FsTranslog translog = (FsTranslog) indexShard.translog();
String translogName = "translog-" + translogId;
String recoverTranslogName = translogName + ".recovering";
File recoveringTranslogFile = null;
for (File translogLocation : translog.locations()) {
File tmpRecoveringFile = new File(translogLocation, recoverTranslogName);
if (!tmpRecoveringFile.exists()) {
File tmpTranslogFile = new File(translogLocation, translogName);
if (tmpTranslogFile.exists()) {
for (int i = 0; i < 3; i++) {
if (tmpTranslogFile.renameTo(tmpRecoveringFile)) {
recoveringTranslogFile = tmpRecoveringFile;
break;
}
}
}
} else {
recoveringTranslogFile = tmpRecoveringFile;
break;
}
}
if (recoveringTranslogFile == null || !recoveringTranslogFile.exists()) {
// no translog to recovery from, start and bail
// no translog files, bail
indexShard.postRecovery("post recovery from gateway, no translog");
// no index, just start the shard and bail
recoveryStatus.start().time(System.currentTimeMillis() - recoveryStatus.start().startTime());
recoveryStatus.start().checkIndexTime(indexShard.checkIndexTook());
return;
}
// recover from the translog file
indexShard.performRecoveryPrepareForTranslog();
recoveryStatus.start().time(System.currentTimeMillis() - recoveryStatus.start().startTime());
recoveryStatus.start().checkIndexTime(indexShard.checkIndexTook());
recoveryStatus.translog().startTime(System.currentTimeMillis());
recoveryStatus.updateStage(RecoveryStatus.Stage.TRANSLOG);
FileInputStream fs = null;
try {
fs = new FileInputStream(recoveringTranslogFile);
InputStreamStreamInput si = new InputStreamStreamInput(fs);
while (true) {
Translog.Operation operation;
try {
int opSize = si.readInt();
operation = TranslogStreams.readTranslogOperation(si);
} catch (EOFException e) {
// ignore, not properly written the last op
break;
} catch (IOException e) {
// ignore, not properly written last op
break;
}
try {
indexShard.performRecoveryOperation(operation);
recoveryStatus.translog().addTranslogOperations(1);
} catch (ElasticsearchException e) {
if (e.status() == RestStatus.BAD_REQUEST) {
// mainly for MapperParsingException and Failure to detect xcontent
logger.info("ignoring recovery of a corrupt translog entry", e);
} else {
throw e;
}
}
}
} catch (Throwable e) {
// we failed to recovery, make sure to delete the translog file (and keep the recovering one)
indexShard.translog().closeWithDelete();
throw new IndexShardGatewayRecoveryException(shardId, "failed to recover shard", e);
} finally {
try {
fs.close();
} catch (IOException e) {
// ignore
}
}
indexShard.performRecoveryFinalization(true);
recoveringTranslogFile.delete();
recoveryStatus.translog().time(System.currentTimeMillis() - recoveryStatus.translog().startTime());
}
@Override
public String type() {
return "local";
}
@Override
public SnapshotStatus snapshot(Snapshot snapshot) {
return null;
}
@Override
public SnapshotStatus lastSnapshotStatus() {
return null;
}
@Override
public SnapshotStatus currentSnapshotStatus() {
return null;
}
@Override
public boolean requiresSnapshot() {
return false;
}
@Override
public boolean requiresSnapshotScheduling() {
return false;
}
@Override
public void close() {
if (flushScheduler != null) {
flushScheduler.cancel(false);
}
}
@Override
public SnapshotLock obtainSnapshotLock() throws Exception {
return NO_SNAPSHOT_LOCK;
}
class Sync implements Runnable {
@Override
public void run() {
// don't re-schedule if it's closed..., we are done
if (indexShard.state() == IndexShardState.CLOSED) {
return;
}
if (indexShard.state() == IndexShardState.STARTED && indexShard.translog().syncNeeded()) {
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new Runnable() {
@Override
public void run() {
try {
indexShard.translog().sync();
} catch (Exception e) {
if (indexShard.state() == IndexShardState.STARTED) {
logger.warn("failed to sync translog", e);
}
}
if (indexShard.state() != IndexShardState.CLOSED) {
flushScheduler = threadPool.schedule(syncInterval, ThreadPool.Names.SAME, Sync.this);
}
}
});
} else {
flushScheduler = threadPool.schedule(syncInterval, ThreadPool.Names.SAME, Sync.this);
}
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_gateway_local_LocalIndexShardGateway.java
|
669 |
constructors[COLLECTION_TXN_ADD_BACKUP] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
public IdentifiedDataSerializable createNew(Integer arg) {
return new CollectionTxnAddBackupOperation();
}
};
| 0true
|
hazelcast_src_main_java_com_hazelcast_collection_CollectionDataSerializerHook.java
|
68 |
static class NodeHolder {
Node node;
public NodeHolder(Node node) {
this.node = node;
}
public NodeHolder getSub(String name) {
if (node != null) {
for (org.w3c.dom.Node node : new AbstractXmlConfigHelper.IterableNodeList(this.node.getChildNodes())) {
String nodeName = cleanNodeName(node.getNodeName());
if (name.equals(nodeName)) {
return new NodeHolder(node);
}
}
}
return new NodeHolder(null);
}
public List<NodeHolder> getSubNodes(String name) {
List<NodeHolder> list = new ArrayList<NodeHolder>();
if (node != null) {
for (org.w3c.dom.Node node : new AbstractXmlConfigHelper.IterableNodeList(this.node.getChildNodes())) {
String nodeName = cleanNodeName(node.getNodeName());
if (name.equals(nodeName)) {
list.add(new NodeHolder(node));
}
}
}
return list;
}
public List<String> getList(String name, AwsConfig awsConfig) {
List<String> list = new ArrayList<String>();
if (node == null) return list;
for (org.w3c.dom.Node node : new AbstractXmlConfigHelper.IterableNodeList(this.node.getChildNodes())) {
String nodeName = cleanNodeName(node.getNodeName());
if (!"item".equals(nodeName)) continue;
final NodeHolder nodeHolder = new NodeHolder(node);
final String state = getState(nodeHolder);
final String ip = getIp(name, nodeHolder);
final String instanceName = getInstanceName(nodeHolder);
if (ip != null) {
if (!acceptState(state)) {
logger.finest(format("Ignoring EC2 instance [%s][%s] reason: the instance is not running but %s", instanceName, ip, state));
} else if (!acceptTag(awsConfig, node)) {
logger.finest(format("Ignoring EC2 instance [%s][%s] reason: tag-key/tag-value don't match", instanceName, ip));
} else if (!acceptGroupName(awsConfig, node)) {
logger.finest(format("Ignoring EC2 instance [%s][%s] reason: security-group-name doesn't match", instanceName, ip));
} else {
list.add(ip);
logger.finest(format("Accepting EC2 instance [%s][%s]",instanceName, ip));
}
}
}
return list;
}
private boolean acceptState(String state) {
return "running".equals(state);
}
private static String getState(NodeHolder nodeHolder) {
final NodeHolder instancestate = nodeHolder.getSub("instancestate");
return instancestate.getSub("name").getNode().getFirstChild().getNodeValue();
}
private static String getInstanceName(NodeHolder nodeHolder) {
final NodeHolder tagSetNode = nodeHolder.getSub("tagset");
if (tagSetNode.getNode() == null) {
return null;
}
final NodeList childNodes = tagSetNode.getNode().getChildNodes();
for (int k = 0; k < childNodes.getLength(); k++) {
Node item = childNodes.item(k);
if (!item.getNodeName().equals("item")) continue;
NodeHolder itemHolder = new NodeHolder(item);
final Node keyNode = itemHolder.getSub("key").getNode();
if (keyNode == null || keyNode.getFirstChild() == null) continue;
final String nodeValue = keyNode.getFirstChild().getNodeValue();
if (!"Name".equals(nodeValue)) continue;
final Node valueNode = itemHolder.getSub("value").getNode();
if (valueNode == null || valueNode.getFirstChild() == null) continue;
return valueNode.getFirstChild().getNodeValue();
}
return null;
}
private static String getIp(String name, NodeHolder nodeHolder) {
final Node node1 = nodeHolder.getSub(name).getNode();
return node1 == null ? null : node1.getFirstChild().getNodeValue();
}
private boolean acceptTag(AwsConfig awsConfig, Node node) {
return applyTagFilter(node, awsConfig.getTagKey(), awsConfig.getTagValue());
}
private boolean acceptGroupName(AwsConfig awsConfig, Node node) {
return applyFilter(node, awsConfig.getSecurityGroupName(), "groupset", "groupname");
}
private boolean applyFilter(Node node, String filter, String set, String filterField) {
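// accept when no filter is configured, or when any <item> under <set> has a <filterField> value equal to the filter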
if (nullOrEmpty(filter)) {
return true;
} else {
for (NodeHolder group : new NodeHolder(node).getSub(set).getSubNodes("item")) {
NodeHolder nh = group.getSub(filterField);
if (nh != null && nh.getNode().getFirstChild() != null && filter.equals(nh.getNode().getFirstChild().getNodeValue())) {
return true;
}
}
return false;
}
}
private boolean applyTagFilter(Node node, String keyExpected, String valueExpected) {
if (nullOrEmpty(keyExpected)) {
return true;
} else {
for (NodeHolder group : new NodeHolder(node).getSub("tagset").getSubNodes("item")) {
if (keyEquals(keyExpected, group) &&
(nullOrEmpty(valueExpected) || valueEquals(valueExpected, group))) {
return true;
}
}
return false;
}
}
private boolean valueEquals(String valueExpected, NodeHolder group) {
NodeHolder nhValue = group.getSub("value");
return nhValue != null && nhValue.getNode().getFirstChild() != null && valueExpected.equals(nhValue.getNode().getFirstChild().getNodeValue());
}
private boolean nullOrEmpty(String keyExpected) {
return keyExpected == null || keyExpected.equals("");
}
private boolean keyEquals(String keyExpected, NodeHolder group) {
NodeHolder nhKey = group.getSub("key");
return nhKey != null && nhKey.getNode().getFirstChild() != null && keyExpected.equals(nhKey.getNode().getFirstChild().getNodeValue());
}
public Node getNode() {
return node;
}
}
| 1no label
|
hazelcast-cloud_src_main_java_com_hazelcast_aws_utility_CloudyUtility.java
|
52 |
final class NestedCompletionProposal implements ICompletionProposal,
ICompletionProposalExtension2 {
private final Declaration dec;
private final int offset;
public NestedCompletionProposal(Declaration dec, int offset) {
super();
this.dec = dec;
this.offset = offset;
}
@Override
public void apply(IDocument document) {
try {
int len = 0;
while (isJavaIdentifierPart(document.getChar(offset+len))) {
len++;
}
document.replace(offset, len, getText(false));
}
catch (BadLocationException e) {
e.printStackTrace();
}
}
@Override
public Point getSelection(IDocument document) {
return null;
}
@Override
public String getAdditionalProposalInfo() {
return null;
}
@Override
public String getDisplayString() {
return getText(true);
}
@Override
public Image getImage() {
return getImageForDeclaration(dec);
}
@Override
public IContextInformation getContextInformation() {
return null;
}
private String getText(boolean description) {
StringBuilder sb = new StringBuilder()
.append(dec.getName());
if (dec instanceof Functional) {
appendPositionalArgs(dec, getUnit(),
sb, false, description);
}
return sb.toString();
}
@Override
public void apply(ITextViewer viewer, char trigger,
int stateMask, int offset) {
apply(viewer.getDocument());
}
@Override
public void selected(ITextViewer viewer, boolean smartToggle) {}
@Override
public void unselected(ITextViewer viewer) {}
@Override
public boolean validate(IDocument document, int currentOffset,
DocumentEvent event) {
if (event==null) {
return true;
}
else {
try {
String content = document.get(offset,
currentOffset-offset);
String filter = content.trim().toLowerCase();
if ((dec.getName().toLowerCase())
.startsWith(filter)) {
return true;
}
}
catch (BadLocationException e) {
// ignore concurrently modified document
}
return false;
}
}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_RefinementCompletionProposal.java
|
49 |
@Component("blStructuredContentTypeCustomPersistenceHandler")
public class StructuredContentTypeCustomPersistenceHandler extends CustomPersistenceHandlerAdapter {
private final Log LOG = LogFactory.getLog(StructuredContentTypeCustomPersistenceHandler.class);
@Resource(name="blStructuredContentService")
protected StructuredContentService structuredContentService;
@Resource(name="blSandBoxService")
protected SandBoxService sandBoxService;
@Resource(name = "blDynamicFieldPersistenceHandlerHelper")
protected DynamicFieldPersistenceHandlerHelper dynamicFieldUtil;
@Override
public Boolean canHandleFetch(PersistencePackage persistencePackage) {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
return
StructuredContentType.class.getName().equals(ceilingEntityFullyQualifiedClassname) &&
persistencePackage.getCustomCriteria() != null &&
persistencePackage.getCustomCriteria().length > 0 &&
persistencePackage.getCustomCriteria()[0].equals("constructForm");
}
@Override
public Boolean canHandleAdd(PersistencePackage persistencePackage) {
return canHandleFetch(persistencePackage);
}
@Override
public Boolean canHandleInspect(PersistencePackage persistencePackage) {
return canHandleFetch(persistencePackage);
}
@Override
public Boolean canHandleRemove(PersistencePackage persistencePackage) {
return false;
}
@Override
public Boolean canHandleUpdate(PersistencePackage persistencePackage) {
return canHandleFetch(persistencePackage);
}
protected SandBox getSandBox() {
return sandBoxService.retrieveSandboxById(SandBoxContext.getSandBoxContext().getSandBoxId());
}
@Override
public DynamicResultSet inspect(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, InspectHelper helper) throws ServiceException {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
try {
String structuredContentTypeId = persistencePackage.getCustomCriteria()[3];
StructuredContentType structuredContentType = structuredContentService.findStructuredContentTypeById(Long.valueOf(structuredContentTypeId));
ClassMetadata metadata = new ClassMetadata();
metadata.setCeilingType(StructuredContentType.class.getName());
ClassTree entities = new ClassTree(StructuredContentTypeImpl.class.getName());
metadata.setPolymorphicEntities(entities);
Property[] properties = dynamicFieldUtil.buildDynamicPropertyList(structuredContentType.getStructuredContentFieldTemplate().getFieldGroups(), StructuredContentType.class);
metadata.setProperties(properties);
DynamicResultSet results = new DynamicResultSet(metadata);
return results;
} catch (Exception e) {
throw new ServiceException("Unable to perform inspect for entity: "+ceilingEntityFullyQualifiedClassname, e);
}
}
@Override
public DynamicResultSet fetch(PersistencePackage persistencePackage, CriteriaTransferObject cto, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
try {
String structuredContentId = persistencePackage.getCustomCriteria()[1];
Entity entity = fetchEntityBasedOnId(structuredContentId);
DynamicResultSet results = new DynamicResultSet(new Entity[]{entity}, 1);
return results;
} catch (Exception e) {
throw new ServiceException("Unable to perform fetch for entity: "+ceilingEntityFullyQualifiedClassname, e);
}
}
protected Entity fetchEntityBasedOnId(String structuredContentId) throws Exception {
StructuredContent structuredContent = structuredContentService.findStructuredContentById(Long.valueOf(structuredContentId));
Map<String, StructuredContentField> structuredContentFieldMap = structuredContent.getStructuredContentFields();
Entity entity = new Entity();
entity.setType(new String[]{StructuredContentType.class.getName()});
List<Property> propertiesList = new ArrayList<Property>();
for (FieldGroup fieldGroup : structuredContent.getStructuredContentType().getStructuredContentFieldTemplate().getFieldGroups()) {
for (FieldDefinition definition : fieldGroup.getFieldDefinitions()) {
Property property = new Property();
propertiesList.add(property);
property.setName(definition.getName());
String value = null;
if (!MapUtils.isEmpty(structuredContentFieldMap)) {
StructuredContentField structuredContentField = structuredContentFieldMap.get(definition.getName());
if (structuredContentField != null) {
value = structuredContentField.getValue();
}
}
property.setValue(value);
}
}
Property property = new Property();
propertiesList.add(property);
property.setName("id");
property.setValue(structuredContentId);
entity.setProperties(propertiesList.toArray(new Property[]{}));
return entity;
}
/**
* Invoked when {@link StructuredContent} is saved in order to fill out the dynamic form for the structured content type
*/
@Override
public Entity update(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException {
return addOrUpdate(persistencePackage, dynamicEntityDao, helper);
}
/**
* Invoked when {@link StructuredContent} is saved in order to fill out the dynamic form for the structured content type
*/
@Override
public Entity add(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException {
return addOrUpdate(persistencePackage, dynamicEntityDao, helper);
}
protected Entity addOrUpdate(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
try {
String structuredContentId = persistencePackage.getCustomCriteria()[1];
StructuredContent structuredContent = structuredContentService.findStructuredContentById(Long.valueOf(structuredContentId));
Property[] properties = dynamicFieldUtil.buildDynamicPropertyList(structuredContent.getStructuredContentType().getStructuredContentFieldTemplate().getFieldGroups(), StructuredContentType.class);
Map<String, FieldMetadata> md = new HashMap<String, FieldMetadata>();
for (Property property : properties) {
md.put(property.getName(), property.getMetadata());
}
boolean validated = helper.validate(persistencePackage.getEntity(), null, md);
if (!validated) {
throw new ValidationException(persistencePackage.getEntity(), "Structured Content dynamic fields failed validation");
}
List<String> templateFieldNames = new ArrayList<String>(20);
for (FieldGroup group : structuredContent.getStructuredContentType().getStructuredContentFieldTemplate().getFieldGroups()) {
for (FieldDefinition definition: group.getFieldDefinitions()) {
templateFieldNames.add(definition.getName());
}
}
Map<String, StructuredContentField> structuredContentFieldMap = structuredContent.getStructuredContentFields();
for (Property property : persistencePackage.getEntity().getProperties()) {
if (templateFieldNames.contains(property.getName())) {
StructuredContentField structuredContentField = structuredContentFieldMap.get(property.getName());
if (structuredContentField != null) {
structuredContentField.setValue(property.getValue());
} else {
structuredContentField = new StructuredContentFieldImpl();
structuredContentFieldMap.put(property.getName(), structuredContentField);
structuredContentField.setFieldKey(property.getName());
structuredContentField.setStructuredContent(structuredContent);
structuredContentField.setValue(property.getValue());
}
}
}
List<String> removeItems = new ArrayList<String>();
for (String key : structuredContentFieldMap.keySet()) {
if (persistencePackage.getEntity().findProperty(key)==null) {
removeItems.add(key);
}
}
if (removeItems.size() > 0) {
for (String removeKey : removeItems) {
StructuredContentField structuredContentField = structuredContentFieldMap.remove(removeKey);
structuredContentField.setStructuredContent(null);
}
}
structuredContentService.updateStructuredContent(structuredContent, getSandBox());
return fetchEntityBasedOnId(structuredContentId);
} catch (ValidationException e) {
throw e;
} catch (Exception e) {
throw new ServiceException("Unable to perform fetch for entity: "+ceilingEntityFullyQualifiedClassname, e);
}
}
}
| 1no label
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_admin_server_handler_StructuredContentTypeCustomPersistenceHandler.java
|
138 |
@Test
public class CharSerializerTest {
private static final int FIELD_SIZE = 2;
private static final Character OBJECT = (char) (new Random()).nextInt();
private OCharSerializer charSerializer;
byte[] stream = new byte[FIELD_SIZE];
@BeforeClass
public void beforeClass() {
charSerializer = new OCharSerializer();
}
public void testFieldSize() {
Assert.assertEquals(charSerializer.getObjectSize(null), FIELD_SIZE);
}
public void testSerialize() {
charSerializer.serialize(OBJECT, stream, 0);
Assert.assertEquals(charSerializer.deserialize(stream, 0), OBJECT);
}
public void testSerializeNative() {
charSerializer.serializeNative(OBJECT, stream, 0);
Assert.assertEquals(charSerializer.deserializeNative(stream, 0), OBJECT);
}
public void testNativeDirectMemoryCompatibility() {
charSerializer.serializeNative(OBJECT, stream, 0);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(stream);
try {
Assert.assertEquals(charSerializer.deserializeFromDirectMemory(pointer, 0), OBJECT);
} finally {
pointer.free();
}
}
}
| 0true
|
commons_src_test_java_com_orientechnologies_common_serialization_types_CharSerializerTest.java
|
2,614 |
private static class FastStringCreator implements UTFEncoderDecoder.StringCreator {
private final Constructor<String> constructor;
private final boolean useOldStringConstructor;
public FastStringCreator(Constructor<String> constructor) {
this.constructor = constructor;
this.useOldStringConstructor = constructor.getParameterTypes().length == 3;
}
@Override
public String buildString(char[] chars) {
try {
if (useOldStringConstructor) {
return constructor.newInstance(0, chars.length, chars);
} else {
return constructor.newInstance(chars, Boolean.TRUE);
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_nio_UTFEncoderDecoder.java
|
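A minimal sketch of how the package-private String constructor assumed by FastStringCreator above could be obtained reflectively. The signature String(char[], boolean) is an assumption about older JDKs; on runtimes where it is absent or inaccessible the sketch falls back to the public copying constructor.
import java.lang.reflect.Constructor;
public class FastStringCreatorSketch {
    public static void main(String[] args) {
        char[] chars = {'h', 'i'};
        String result;
        try {
            // assumed package-private constructor on older JDKs; absent on newer ones
            Constructor<String> c = String.class.getDeclaredConstructor(char[].class, boolean.class);
            c.setAccessible(true); // may be refused under strong encapsulation
            result = c.newInstance(chars, Boolean.TRUE);
        } catch (Exception e) {
            result = new String(chars); // safe fallback that copies the array
        }
        System.out.println(result);
    }
}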
191 |
public interface TransferQueue<E> extends BlockingQueue<E> {
/**
* Transfers the element to a waiting consumer immediately, if possible.
*
* <p>More precisely, transfers the specified element immediately
* if there exists a consumer already waiting to receive it (in
* {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
* otherwise returning {@code false} without enqueuing the element.
*
* @param e the element to transfer
* @return {@code true} if the element was transferred, else
* {@code false}
* @throws ClassCastException if the class of the specified element
* prevents it from being added to this queue
* @throws NullPointerException if the specified element is null
* @throws IllegalArgumentException if some property of the specified
* element prevents it from being added to this queue
*/
boolean tryTransfer(E e);
/**
* Transfers the element to a consumer, waiting if necessary to do so.
*
* <p>More precisely, transfers the specified element immediately
* if there exists a consumer already waiting to receive it (in
* {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
* else waits until the element is received by a consumer.
*
* @param e the element to transfer
* @throws InterruptedException if interrupted while waiting,
* in which case the element is not left enqueued
* @throws ClassCastException if the class of the specified element
* prevents it from being added to this queue
* @throws NullPointerException if the specified element is null
* @throws IllegalArgumentException if some property of the specified
* element prevents it from being added to this queue
*/
void transfer(E e) throws InterruptedException;
/**
* Transfers the element to a consumer if it is possible to do so
* before the timeout elapses.
*
* <p>More precisely, transfers the specified element immediately
* if there exists a consumer already waiting to receive it (in
* {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
* else waits until the element is received by a consumer,
* returning {@code false} if the specified wait time elapses
* before the element can be transferred.
*
* @param e the element to transfer
* @param timeout how long to wait before giving up, in units of
* {@code unit}
* @param unit a {@code TimeUnit} determining how to interpret the
* {@code timeout} parameter
* @return {@code true} if successful, or {@code false} if
* the specified waiting time elapses before completion,
* in which case the element is not left enqueued
* @throws InterruptedException if interrupted while waiting,
* in which case the element is not left enqueued
* @throws ClassCastException if the class of the specified element
* prevents it from being added to this queue
* @throws NullPointerException if the specified element is null
* @throws IllegalArgumentException if some property of the specified
* element prevents it from being added to this queue
*/
boolean tryTransfer(E e, long timeout, TimeUnit unit)
throws InterruptedException;
/**
* Returns {@code true} if there is at least one consumer waiting
* to receive an element via {@link #take} or
* timed {@link #poll(long,TimeUnit) poll}.
* The return value represents a momentary state of affairs.
*
* @return {@code true} if there is at least one waiting consumer
*/
boolean hasWaitingConsumer();
/**
* Returns an estimate of the number of consumers waiting to
* receive elements via {@link #take} or timed
* {@link #poll(long,TimeUnit) poll}. The return value is an
* approximation of a momentary state of affairs, that may be
* inaccurate if consumers have completed or given up waiting.
* The value may be useful for monitoring and heuristics, but
* not for synchronization control. Implementations of this
* method are likely to be noticeably slower than those for
* {@link #hasWaitingConsumer}.
*
* @return the number of consumers waiting to receive elements
*/
int getWaitingConsumerCount();
}
| 0true
|
src_main_java_jsr166y_TransferQueue.java
|
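A minimal usage sketch for the TransferQueue interface above, using java.util.concurrent.LinkedTransferQueue as a stand-in implementation (the entry itself is the jsr166y variant, which has the same contract): tryTransfer returns false when no consumer is waiting, while transfer blocks until a consumer receives the element.
import java.util.concurrent.LinkedTransferQueue;
import java.util.concurrent.TransferQueue;
public class TransferQueueSketch {
    public static void main(String[] args) throws InterruptedException {
        final TransferQueue<String> queue = new LinkedTransferQueue<String>();
        System.out.println(queue.tryTransfer("ignored")); // false: nobody is waiting yet, element is not enqueued
        Thread consumer = new Thread(new Runnable() {
            public void run() {
                try {
                    System.out.println("received: " + queue.take());
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        });
        consumer.start();
        queue.transfer("hello"); // blocks until the consumer takes the element
        consumer.join();
    }
}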
644 |
@Component("blAuthenticationFailureRedirectStrategy")
public class BroadleafAuthenticationFailureRedirectStrategy implements RedirectStrategy {
private RedirectStrategy redirectStrategy = new DefaultRedirectStrategy();
@Override
public void sendRedirect(HttpServletRequest request, HttpServletResponse response, String url) throws IOException {
if (BroadleafControllerUtility.isAjaxRequest(request)) {
url = updateUrlForAjax(url);
}
redirectStrategy.sendRedirect(request, response, url);
}
public String updateUrlForAjax(String url) {
String blcAjax = BroadleafControllerUtility.BLC_AJAX_PARAMETER;
if (url != null && url.indexOf("?") > 0) {
url = url + "&" + blcAjax + "=true";
} else {
url = url + "?" + blcAjax + "=true";
}
return url;
}
public RedirectStrategy getRedirectStrategy() {
return redirectStrategy;
}
public void setRedirectStrategy(RedirectStrategy redirectStrategy) {
this.redirectStrategy = redirectStrategy;
}
}
| 0true
|
core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_common_web_security_BroadleafAuthenticationFailureRedirectStrategy.java
|
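A standalone restatement of the URL rewriting done by updateUrlForAjax above, useful for seeing the two branches. The literal "blcAjax" stands in for BroadleafControllerUtility.BLC_AJAX_PARAMETER and is an assumption; note that the indexOf("?") > 0 check treats a null URL, or a '?' at position 0, as having no existing query string.
public class AjaxUrlSketch {
    // assumed value of BroadleafControllerUtility.BLC_AJAX_PARAMETER
    private static final String BLC_AJAX = "blcAjax";
    static String updateUrlForAjax(String url) {
        if (url != null && url.indexOf("?") > 0) {
            return url + "&" + BLC_AJAX + "=true"; // URL already has a query string
        }
        return url + "?" + BLC_AJAX + "=true"; // start a new query string
    }
    public static void main(String[] args) {
        System.out.println(updateUrlForAjax("/login?error=true")); // /login?error=true&blcAjax=true
        System.out.println(updateUrlForAjax("/login"));            // /login?blcAjax=true
    }
}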
1,011 |
private class ShardTransportHandler extends BaseTransportRequestHandler<ShardSingleOperationRequest> {
@Override
public ShardSingleOperationRequest newInstance() {
return new ShardSingleOperationRequest();
}
@Override
public String executor() {
return executor;
}
@Override
public void messageReceived(final ShardSingleOperationRequest request, final TransportChannel channel) throws Exception {
Response response = shardOperation(request.request(), request.shardId());
channel.sendResponse(response);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_support_single_custom_TransportSingleCustomOperationAction.java
|
1,491 |
public interface ShardsIterator {
/**
* Resets the iterator to its initial state.
*/
void reset();
/**
* The number of shard routing instances.
*
* @return number of shard routing instances in this iterator
*/
int size();
/**
* The number of active shard routing instances
*
* @return number of active shard routing instances
*/
int sizeActive();
/**
     * Returns the number of replicas in this iterator that are not in the
     * {@link ShardRoutingState#UNASSIGNED} state. The returned value double-counts
     * replicas that are in the {@link ShardRoutingState#RELOCATING} state.
*/
int assignedReplicasIncludingRelocating();
/**
* Returns the next shard, or <tt>null</tt> if none available.
*/
ShardRouting nextOrNull();
/**
* Returns the first shard, or <tt>null</tt>, without
* incrementing the iterator.
*
* @see ShardRouting#assignedToNode()
*/
ShardRouting firstOrNull();
/**
* Return the number of shards remaining in this {@link ShardsIterator}
*
     * @return number of shards remaining
*/
int remaining();
@Override
int hashCode();
@Override
boolean equals(Object other);
Iterable<ShardRouting> asUnordered();
}
| 0true
|
src_main_java_org_elasticsearch_cluster_routing_ShardsIterator.java
|
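A minimal consumer sketch for the ShardsIterator interface above; the iterator instance is assumed to come from elsewhere (for example a routing table lookup), and the loop relies only on methods declared in the interface.
    static void drain(ShardsIterator it) {
        it.reset(); // start from the initial state
        ShardRouting shard;
        while ((shard = it.nextOrNull()) != null) {
            // inspect the routing entry, e.g. shard.assignedToNode()
        }
    }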
420 |
public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder<RestoreSnapshotRequest, RestoreSnapshotResponse, RestoreSnapshotRequestBuilder> {
/**
* Constructs new restore snapshot request builder
*
* @param clusterAdminClient cluster admin client
*/
public RestoreSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient) {
super((InternalClusterAdminClient) clusterAdminClient, new RestoreSnapshotRequest());
}
/**
* Constructs new restore snapshot request builder with specified repository and snapshot names
*
* @param clusterAdminClient cluster admin client
     * @param repository repository name
* @param name snapshot name
*/
public RestoreSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient, String repository, String name) {
super((InternalClusterAdminClient) clusterAdminClient, new RestoreSnapshotRequest(repository, name));
}
/**
* Sets snapshot name
*
* @param snapshot snapshot name
* @return this builder
*/
public RestoreSnapshotRequestBuilder setSnapshot(String snapshot) {
request.snapshot(snapshot);
return this;
}
/**
* Sets repository name
*
* @param repository repository name
* @return this builder
*/
public RestoreSnapshotRequestBuilder setRepository(String repository) {
request.repository(repository);
return this;
}
/**
* Sets the list of indices that should be restored from snapshot
* <p/>
     * The list of indices supports multi-index syntax. For example: "+test*", "-test42" will restore all indices with
     * prefix "test" except the index "test42". Aliases are not supported. An empty list or {"_all"} will restore all open
* indices in the snapshot.
*
* @param indices list of indices
* @return this builder
*/
public RestoreSnapshotRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}
/**
* Specifies what type of requested indices to ignore and how to deal with wildcard expressions.
* For example indices that don't exist.
*
* @param indicesOptions the desired behaviour regarding indices to ignore and wildcard indices expressions
* @return this request
*/
public RestoreSnapshotRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
request.indicesOptions(indicesOptions);
return this;
}
/**
* Sets rename pattern that should be applied to restored indices.
* <p/>
* Indices that match the rename pattern will be renamed according to {@link #setRenameReplacement(String)}. The
* rename pattern is applied according to the {@link java.util.regex.Matcher#appendReplacement(StringBuffer, String)}
     * The request will fail if two or more indices are renamed to the same name.
*
* @param renamePattern rename pattern
* @return this builder
*/
public RestoreSnapshotRequestBuilder setRenamePattern(String renamePattern) {
request.renamePattern(renamePattern);
return this;
}
/**
* Sets rename replacement
* <p/>
* See {@link #setRenamePattern(String)} for more information.
*
* @param renameReplacement rename replacement
     * @return this builder
*/
public RestoreSnapshotRequestBuilder setRenameReplacement(String renameReplacement) {
request.renameReplacement(renameReplacement);
return this;
}
/**
* Sets repository-specific restore settings.
* <p/>
* See repository documentation for more information.
*
* @param settings repository-specific snapshot settings
* @return this builder
*/
public RestoreSnapshotRequestBuilder setSettings(Settings settings) {
request.settings(settings);
return this;
}
/**
* Sets repository-specific restore settings.
* <p/>
* See repository documentation for more information.
*
* @param settings repository-specific snapshot settings
* @return this builder
*/
public RestoreSnapshotRequestBuilder setSettings(Settings.Builder settings) {
request.settings(settings);
return this;
}
/**
* Sets repository-specific restore settings in JSON, YAML or properties format
* <p/>
* See repository documentation for more information.
*
* @param source repository-specific snapshot settings
* @return this builder
*/
public RestoreSnapshotRequestBuilder setSettings(String source) {
request.settings(source);
return this;
}
/**
* Sets repository-specific restore settings
* <p/>
* See repository documentation for more information.
*
* @param source repository-specific snapshot settings
* @return this builder
*/
public RestoreSnapshotRequestBuilder setSettings(Map<String, Object> source) {
request.settings(source);
return this;
}
/**
     * If this parameter is set to true, the operation will wait for the restore process to complete before returning.
*
* @param waitForCompletion if true the operation will wait for completion
* @return this builder
*/
public RestoreSnapshotRequestBuilder setWaitForCompletion(boolean waitForCompletion) {
request.waitForCompletion(waitForCompletion);
return this;
}
/**
* If set to true the restore procedure will restore global cluster state.
* <p/>
* The global cluster state includes persistent settings and index template definitions.
*
* @param restoreGlobalState true if global state should be restored from the snapshot
* @return this request
*/
public RestoreSnapshotRequestBuilder setRestoreGlobalState(boolean restoreGlobalState) {
request.includeGlobalState(restoreGlobalState);
return this;
}
@Override
protected void doExecute(ActionListener<RestoreSnapshotResponse> listener) {
((ClusterAdminClient) client).restoreSnapshot(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_snapshots_restore_RestoreSnapshotRequestBuilder.java
|
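A usage sketch for the builder above: a typical restore call, assuming a ClusterAdminClient obtained elsewhere and the execute()/actionGet() chain inherited from the request-builder base class; every setter shown is declared in the builder itself.
    RestoreSnapshotResponse response =
            new RestoreSnapshotRequestBuilder(clusterAdminClient, "my_backup_repo", "snapshot_1")
                    .setIndices("logs-*")
                    .setRenamePattern("logs-(.+)")
                    .setRenameReplacement("restored-logs-$1")
                    .setWaitForCompletion(true)
                    .execute()
                    .actionGet();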
1,181 |
threads[i] = new Thread(new Runnable() {
@Override
public void run() {
for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
UUID.randomUUID().toString();
}
latch.countDown();
}
});
| 0true
|
src_test_java_org_elasticsearch_benchmark_uuid_SimpleUuidBenchmark.java
|
39 |
@SuppressWarnings("unchecked")
public class OMVRBTreeSet<E> extends AbstractSet<E> implements ONavigableSet<E>, Cloneable, java.io.Serializable {
/**
* The backing map.
*/
private transient ONavigableMap<E, Object> m;
// Dummy value to associate with an Object in the backing Map
private static final Object PRESENT = new Object();
/**
* Constructs a set backed by the specified navigable map.
*/
OMVRBTreeSet(ONavigableMap<E, Object> m) {
this.m = m;
}
/**
* Constructs a new, empty tree set, sorted according to the natural ordering of its elements. All elements inserted into the set
* must implement the {@link Comparable} interface. Furthermore, all such elements must be <i>mutually comparable</i>:
* {@code e1.compareTo(e2)} must not throw a {@code ClassCastException} for any elements {@code e1} and {@code e2} in the set. If
* the user attempts to add an element to the set that violates this constraint (for example, the user attempts to add a string
* element to a set whose elements are integers), the {@code add} call will throw a {@code ClassCastException}.
*/
public OMVRBTreeSet() {
this(new OMVRBTreeMemory<E, Object>());
}
/**
* Constructs a new, empty tree set, sorted according to the specified comparator. All elements inserted into the set must be
* <i>mutually comparable</i> by the specified comparator: {@code comparator.compare(e1, e2)} must not throw a
* {@code ClassCastException} for any elements {@code e1} and {@code e2} in the set. If the user attempts to add an element to the
* set that violates this constraint, the {@code add} call will throw a {@code ClassCastException}.
*
* @param comparator
* the comparator that will be used to order this set. If {@code null}, the {@linkplain Comparable natural ordering} of
* the elements will be used.
*/
public OMVRBTreeSet(Comparator<? super E> comparator) {
this(new OMVRBTreeMemory<E, Object>(comparator));
}
/**
* Constructs a new tree set containing the elements in the specified collection, sorted according to the <i>natural ordering</i>
* of its elements. All elements inserted into the set must implement the {@link Comparable} interface. Furthermore, all such
* elements must be <i>mutually comparable</i>: {@code e1.compareTo(e2)} must not throw a {@code ClassCastException} for any
* elements {@code e1} and {@code e2} in the set.
*
* @param c
* collection whose elements will comprise the new set
* @throws ClassCastException
* if the elements in {@code c} are not {@link Comparable}, or are not mutually comparable
* @throws NullPointerException
* if the specified collection is null
*/
public OMVRBTreeSet(Collection<? extends E> c) {
this();
addAll(c);
}
/**
* Constructs a new tree set containing the same elements and using the same ordering as the specified sorted set.
*
* @param s
* sorted set whose elements will comprise the new set
* @throws NullPointerException
* if the specified sorted set is null
*/
public OMVRBTreeSet(SortedSet<E> s) {
this(s.comparator());
addAll(s);
}
/**
* Returns an iterator over the elements in this set in ascending order.
*
* @return an iterator over the elements in this set in ascending order
*/
@Override
public OLazyIterator<E> iterator() {
return m.navigableKeySet().iterator();
}
/**
* Returns an iterator over the elements in this set in descending order.
*
* @return an iterator over the elements in this set in descending order
* @since 1.6
*/
public OLazyIterator<E> descendingIterator() {
return m.descendingKeySet().iterator();
}
/**
* @since 1.6
*/
public ONavigableSet<E> descendingSet() {
return new OMVRBTreeSet<E>(m.descendingMap());
}
/**
* Returns the number of elements in this set (its cardinality).
*
* @return the number of elements in this set (its cardinality)
*/
@Override
public int size() {
return m.size();
}
/**
* Returns {@code true} if this set contains no elements.
*
* @return {@code true} if this set contains no elements
*/
@Override
public boolean isEmpty() {
return m.isEmpty();
}
/**
* Returns {@code true} if this set contains the specified element. More formally, returns {@code true} if and only if this set
* contains an element {@code e} such that <tt>(o==null ? e==null : o.equals(e))</tt>.
*
* @param o
* object to be checked for containment in this set
* @return {@code true} if this set contains the specified element
* @throws ClassCastException
* if the specified object cannot be compared with the elements currently in the set
* @throws NullPointerException
* if the specified element is null and this set uses natural ordering, or its comparator does not permit null elements
*/
@Override
public boolean contains(Object o) {
return m.containsKey(o);
}
/**
* Adds the specified element to this set if it is not already present. More formally, adds the specified element {@code e} to
* this set if the set contains no element {@code e2} such that <tt>(e==null ? e2==null : e.equals(e2))</tt>.
* If this set already contains the element, the call leaves the set unchanged and returns {@code false}.
*
* @param e
* element to be added to this set
* @return {@code true} if this set did not already contain the specified element
* @throws ClassCastException
* if the specified object cannot be compared with the elements currently in this set
* @throws NullPointerException
* if the specified element is null and this set uses natural ordering, or its comparator does not permit null elements
*/
@Override
public boolean add(E e) {
return m.put(e, PRESENT) == null;
}
/**
* Removes the specified element from this set if it is present. More formally, removes an element {@code e} such that
* <tt>(o==null ? e==null : o.equals(e))</tt>, if this set contains such an element. Returns {@code true} if
* this set contained the element (or equivalently, if this set changed as a result of the call). (This set will not contain the
* element once the call returns.)
*
* @param o
* object to be removed from this set, if present
* @return {@code true} if this set contained the specified element
* @throws ClassCastException
* if the specified object cannot be compared with the elements currently in this set
* @throws NullPointerException
* if the specified element is null and this set uses natural ordering, or its comparator does not permit null elements
*/
@Override
public boolean remove(Object o) {
return m.remove(o) == PRESENT;
}
/**
* Removes all of the elements from this set. The set will be empty after this call returns.
*/
@Override
public void clear() {
m.clear();
}
/**
* Adds all of the elements in the specified collection to this set.
*
* @param c
* collection containing elements to be added to this set
* @return {@code true} if this set changed as a result of the call
* @throws ClassCastException
* if the elements provided cannot be compared with the elements currently in the set
* @throws NullPointerException
* if the specified collection is null or if any element is null and this set uses natural ordering, or its comparator
* does not permit null elements
*/
@Override
public boolean addAll(Collection<? extends E> c) {
// Use linear-time version if applicable
if (m.size() == 0 && c.size() > 0 && c instanceof SortedSet && m instanceof OMVRBTree) {
SortedSet<? extends E> set = (SortedSet<? extends E>) c;
OMVRBTree<E, Object> map = (OMVRBTree<E, Object>) m;
Comparator<? super E> cc = (Comparator<? super E>) set.comparator();
Comparator<? super E> mc = map.comparator();
if (cc == mc || (cc != null && cc.equals(mc))) {
map.addAllForOTreeSet(set, PRESENT);
return true;
}
}
return super.addAll(c);
}
/**
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* if {@code fromElement} or {@code toElement} is null and this set uses natural ordering, or its comparator does not
* permit null elements
* @throws IllegalArgumentException
* {@inheritDoc}
* @since 1.6
*/
public ONavigableSet<E> subSet(E fromElement, boolean fromInclusive, E toElement, boolean toInclusive) {
return new OMVRBTreeSet<E>(m.subMap(fromElement, fromInclusive, toElement, toInclusive));
}
/**
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* if {@code toElement} is null and this set uses natural ordering, or its comparator does not permit null elements
* @throws IllegalArgumentException
* {@inheritDoc}
* @since 1.6
*/
public ONavigableSet<E> headSet(E toElement, boolean inclusive) {
return new OMVRBTreeSet<E>(m.headMap(toElement, inclusive));
}
/**
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* if {@code fromElement} is null and this set uses natural ordering, or its comparator does not permit null elements
* @throws IllegalArgumentException
* {@inheritDoc}
* @since 1.6
*/
public ONavigableSet<E> tailSet(E fromElement, boolean inclusive) {
return new OMVRBTreeSet<E>(m.tailMap(fromElement, inclusive));
}
/**
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* if {@code fromElement} or {@code toElement} is null and this set uses natural ordering, or its comparator does not
* permit null elements
* @throws IllegalArgumentException
* {@inheritDoc}
*/
public SortedSet<E> subSet(E fromElement, E toElement) {
return subSet(fromElement, true, toElement, false);
}
/**
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* if {@code toElement} is null and this set uses natural ordering, or its comparator does not permit null elements
* @throws IllegalArgumentException
* {@inheritDoc}
*/
public SortedSet<E> headSet(E toElement) {
return headSet(toElement, false);
}
/**
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* if {@code fromElement} is null and this set uses natural ordering, or its comparator does not permit null elements
* @throws IllegalArgumentException
* {@inheritDoc}
*/
public SortedSet<E> tailSet(E fromElement) {
return tailSet(fromElement, true);
}
public Comparator<? super E> comparator() {
return m.comparator();
}
/**
* @throws NoSuchElementException
* {@inheritDoc}
*/
public E first() {
return m.firstKey();
}
/**
* @throws NoSuchElementException
* {@inheritDoc}
*/
public E last() {
return m.lastKey();
}
// ONavigableSet API methods
/**
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* if the specified element is null and this set uses natural ordering, or its comparator does not permit null elements
* @since 1.6
*/
public E lower(E e) {
return m.lowerKey(e);
}
/**
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* if the specified element is null and this set uses natural ordering, or its comparator does not permit null elements
* @since 1.6
*/
public E floor(E e) {
return m.floorKey(e);
}
/**
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* if the specified element is null and this set uses natural ordering, or its comparator does not permit null elements
* @since 1.6
*/
public E ceiling(E e) {
return m.ceilingKey(e);
}
/**
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* if the specified element is null and this set uses natural ordering, or its comparator does not permit null elements
* @since 1.6
*/
public E higher(E e) {
return m.higherKey(e);
}
/**
* @since 1.6
*/
public E pollFirst() {
Map.Entry<E, ?> e = m.pollFirstEntry();
return (e == null) ? null : e.getKey();
}
/**
* @since 1.6
*/
public E pollLast() {
Map.Entry<E, ?> e = m.pollLastEntry();
return (e == null) ? null : e.getKey();
}
/**
* Returns a shallow copy of this {@code OTreeSet} instance. (The elements themselves are not cloned.)
*
* @return a shallow copy of this set
*/
@Override
public Object clone() {
OMVRBTreeSet<E> clone = null;
try {
clone = (OMVRBTreeSet<E>) super.clone();
} catch (CloneNotSupportedException e) {
throw new InternalError();
}
clone.m = new OMVRBTreeMemory<E, Object>(m);
return clone;
}
/**
* Save the state of the {@code OTreeSet} instance to a stream (that is, serialize it).
*
* @serialData Emits the comparator used to order this set, or {@code null} if it obeys its elements' natural ordering (Object),
* followed by the size of the set (the number of elements it contains) (int), followed by all of its elements (each
* an Object) in order (as determined by the set's Comparator, or by the elements' natural ordering if the set has no
* Comparator).
*/
private void writeObject(java.io.ObjectOutputStream s) throws java.io.IOException {
// Write out any hidden stuff
s.defaultWriteObject();
// Write out Comparator
s.writeObject(m.comparator());
// Write out size
s.writeInt(m.size());
// Write out all elements in the proper order.
for (Iterator<E> i = m.keySet().iterator(); i.hasNext();)
s.writeObject(i.next());
}
/**
* Reconstitute the {@code OTreeSet} instance from a stream (that is, deserialize it).
*/
private void readObject(java.io.ObjectInputStream s) throws java.io.IOException, ClassNotFoundException {
// Read in any hidden stuff
s.defaultReadObject();
// Read in Comparator
Comparator<? super E> c = (Comparator<? super E>) s.readObject();
// Create backing OMVRBTree
OMVRBTree<E, Object> tm;
if (c == null)
tm = new OMVRBTreeMemory<E, Object>();
else
tm = new OMVRBTreeMemory<E, Object>(c);
m = tm;
// Read in size
int size = s.readInt();
tm.readOTreeSet(size, s, PRESENT);
}
private static final long serialVersionUID = -2479143000061671589L;
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_collection_OMVRBTreeSet.java
|
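A short usage sketch for the OMVRBTreeSet above, using natural String ordering; the printed views rely on the AbstractSet toString the class inherits.
    OMVRBTreeSet<String> names = new OMVRBTreeSet<String>();
    names.add("bob");
    names.add("alice");
    names.add("carol");
    System.out.println(names.first());          // alice (natural ordering)
    System.out.println(names.headSet("carol")); // elements strictly before "carol": [alice, bob]
    System.out.println(names.descendingSet());  // reverse-order view: [carol, bob, alice]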
2,934 |
public static final class Factory implements TokenFilterFactory {
private final int maxShingleSize;
private final boolean outputUnigrams;
private final boolean outputUnigramsIfNoShingles;
private final String tokenSeparator;
private int minShingleSize;
private final String name;
public Factory(String name) {
this(name, ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, true, false, ShingleFilter.TOKEN_SEPARATOR);
}
Factory(String name, int minShingleSize, int maxShingleSize, boolean outputUnigrams, boolean outputUnigramsIfNoShingles, String tokenSeparator) {
this.maxShingleSize = maxShingleSize;
this.outputUnigrams = outputUnigrams;
this.outputUnigramsIfNoShingles = outputUnigramsIfNoShingles;
this.tokenSeparator = tokenSeparator;
this.minShingleSize = minShingleSize;
this.name = name;
}
public TokenStream create(TokenStream tokenStream) {
ShingleFilter filter = new ShingleFilter(tokenStream, minShingleSize, maxShingleSize);
filter.setOutputUnigrams(outputUnigrams);
filter.setOutputUnigramsIfNoShingles(outputUnigramsIfNoShingles);
filter.setTokenSeparator(tokenSeparator);
return filter;
}
public int getMaxShingleSize() {
return maxShingleSize;
}
public int getMinShingleSize() {
return minShingleSize;
}
public boolean getOutputUnigrams() {
return outputUnigrams;
}
public boolean getOutputUnigramsIfNoShingles() {
return outputUnigramsIfNoShingles;
}
@Override
public String name() {
return name;
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_analysis_ShingleTokenFilterFactory.java
|
3,290 |
abstract class AbstractGeoPointIndexFieldData extends AbstractIndexFieldData<AtomicGeoPointFieldData<ScriptDocValues>> implements IndexGeoPointFieldData<AtomicGeoPointFieldData<ScriptDocValues>> {
protected static class Empty extends AtomicGeoPointFieldData<ScriptDocValues> {
private final int numDocs;
Empty(int numDocs) {
this.numDocs = numDocs;
}
@Override
public boolean isMultiValued() {
return false;
}
@Override
public boolean isValuesOrdered() {
return false;
}
@Override
public long getNumberUniqueValues() {
return 0;
}
@Override
public long getMemorySizeInBytes() {
return 0;
}
@Override
public BytesValues getBytesValues(boolean needsHashes) {
return BytesValues.EMPTY;
}
@Override
public GeoPointValues getGeoPointValues() {
return GeoPointValues.EMPTY;
}
@Override
public ScriptDocValues getScriptValues() {
return ScriptDocValues.EMPTY;
}
@Override
public int getNumDocs() {
return numDocs;
}
@Override
public void close() {
// no-op
}
}
protected static class GeoPointEnum {
private final BytesRefIterator termsEnum;
private final GeoPoint next;
private final CharsRef spare;
protected GeoPointEnum(BytesRefIterator termsEnum) {
this.termsEnum = termsEnum;
next = new GeoPoint();
spare = new CharsRef();
}
public GeoPoint next() throws IOException {
final BytesRef term = termsEnum.next();
if (term == null) {
return null;
}
UnicodeUtil.UTF8toUTF16(term, spare);
int commaIndex = -1;
for (int i = 0; i < spare.length; i++) {
                if (spare.chars[spare.offset + i] == ',') { // saves a String creation
commaIndex = i;
break;
}
}
if (commaIndex == -1) {
assert false;
return next.reset(0, 0);
}
final double lat = Double.parseDouble(new String(spare.chars, spare.offset, (commaIndex - spare.offset)));
final double lon = Double.parseDouble(new String(spare.chars, (spare.offset + (commaIndex + 1)), spare.length - ((commaIndex + 1) - spare.offset)));
return next.reset(lat, lon);
}
}
public AbstractGeoPointIndexFieldData(Index index, Settings indexSettings, Names fieldNames, FieldDataType fieldDataType, IndexFieldDataCache cache) {
super(index, indexSettings, fieldNames, fieldDataType, cache);
}
@Override
public boolean valuesOrdered() {
// because we might have single values? we can dynamically update a flag to reflect that
// based on the atomic field data loaded
return false;
}
@Override
public final XFieldComparatorSource comparatorSource(@Nullable Object missingValue, SortMode sortMode) {
throw new ElasticsearchIllegalArgumentException("can't sort on geo_point field without using specific sorting feature, like geo_distance");
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_fielddata_plain_AbstractGeoPointIndexFieldData.java
|
1,215 |
NONE {
@Override
<T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
return none(c);
}
};
| 0true
|
src_main_java_org_elasticsearch_cache_recycler_CacheRecycler.java
|
851 |
public class GetAndSetRequest extends ModifyRequest {
public GetAndSetRequest() {
}
public GetAndSetRequest(String name, Data update) {
super(name, update);
}
@Override
protected Operation prepareOperation() {
return new GetAndSetOperation(name, update);
}
@Override
public int getClassId() {
return AtomicReferencePortableHook.GET_AND_SET;
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_concurrent_atomicreference_client_GetAndSetRequest.java
|
3,022 |
public class QueueContainer implements IdentifiedDataSerializable {
private LinkedList<QueueItem> itemQueue;
private HashMap<Long, QueueItem> backupMap;
private final Map<Long, TxQueueItem> txMap = new HashMap<Long, TxQueueItem>();
private final HashMap<Long, Data> dataMap = new HashMap<Long, Data>();
private QueueConfig config;
private QueueStoreWrapper store;
private NodeEngine nodeEngine;
private QueueService service;
private ILogger logger;
private long idGenerator;
private final QueueWaitNotifyKey pollWaitNotifyKey;
private final QueueWaitNotifyKey offerWaitNotifyKey;
private String name;
private long minAge = Long.MAX_VALUE;
private long maxAge = Long.MIN_VALUE;
private long totalAge;
private long totalAgedCount;
private boolean isEvictionScheduled;
public QueueContainer(String name) {
this.name = name;
pollWaitNotifyKey = new QueueWaitNotifyKey(name, "poll");
offerWaitNotifyKey = new QueueWaitNotifyKey(name, "offer");
}
public QueueContainer(String name, QueueConfig config, NodeEngine nodeEngine, QueueService service) throws Exception {
this(name);
setConfig(config, nodeEngine, service);
}
public void init(boolean fromBackup) {
if (!fromBackup && store.isEnabled()) {
Set<Long> keys = store.loadAllKeys();
if (keys != null) {
long maxId = -1;
for (Long key : keys) {
QueueItem item = new QueueItem(this, key, null);
getItemQueue().offer(item);
maxId = Math.max(maxId, key);
}
idGenerator = maxId + 1;
}
}
}
//TX Methods
public boolean txnEnsureReserve(long itemId) {
if (txMap.get(itemId) == null) {
throw new TransactionException("No reserve for itemId: " + itemId);
}
return true;
}
//TX Poll
public QueueItem txnPollReserve(long reservedOfferId, String transactionId) {
QueueItem item = getItemQueue().peek();
if (item == null) {
TxQueueItem txItem = txMap.remove(reservedOfferId);
if (txItem == null) {
return null;
}
item = new QueueItem(this, txItem.getItemId(), txItem.getData());
return item;
}
if (store.isEnabled() && item.getData() == null) {
try {
load(item);
} catch (Exception e) {
throw new HazelcastException(e);
}
}
getItemQueue().poll();
txMap.put(item.getItemId(), new TxQueueItem(item).setPollOperation(true).setTransactionId(transactionId));
return item;
}
public boolean txnPollBackupReserve(long itemId, String transactionId) {
QueueItem item = getBackupMap().remove(itemId);
if (item == null) {
throw new TransactionException("Backup reserve failed: " + itemId);
}
txMap.put(itemId, new TxQueueItem(item).setPollOperation(true).setTransactionId(transactionId));
return true;
}
public Data txnCommitPoll(long itemId) {
final Data result = txnCommitPollBackup(itemId);
scheduleEvictionIfEmpty();
return result;
}
public Data txnCommitPollBackup(long itemId) {
TxQueueItem item = txMap.remove(itemId);
if (item == null) {
logger.warning("txnCommitPoll operation-> No txn item for itemId: " + itemId);
return null;
}
if (store.isEnabled()) {
try {
store.delete(item.getItemId());
} catch (Exception e) {
logger.severe("Error during store delete: " + item.getItemId(), e);
}
}
return item.getData();
}
public boolean txnRollbackPoll(long itemId, boolean backup) {
QueueItem item = txMap.remove(itemId);
if (item == null) {
return false;
}
if (!backup) {
getItemQueue().offerFirst(item);
}
cancelEvictionIfExists();
return true;
}
//TX Offer
public long txnOfferReserve(String transactionId) {
TxQueueItem item = new TxQueueItem(this, nextId(), null).setTransactionId(transactionId).setPollOperation(false);
txMap.put(item.getItemId(), item);
return item.getItemId();
}
public void txnOfferBackupReserve(long itemId, String transactionId) {
QueueItem item = new QueueItem(this, itemId, null);
Object o = txMap.put(itemId, new TxQueueItem(item).setPollOperation(false).setTransactionId(transactionId));
if (o != null) {
logger.severe("txnOfferBackupReserve operation-> Item exists already at txMap for itemId: " + itemId);
}
}
public boolean txnCommitOffer(long itemId, Data data, boolean backup) {
QueueItem item = txMap.remove(itemId);
if (item == null && !backup) {
throw new TransactionException("No reserve :" + itemId);
} else if (item == null) {
item = new QueueItem(this, itemId, data);
}
item.setData(data);
if (!backup) {
getItemQueue().offer(item);
cancelEvictionIfExists();
} else {
getBackupMap().put(itemId, item);
}
if (store.isEnabled() && !backup) {
try {
store.store(item.getItemId(), data);
} catch (Exception e) {
logger.warning("Exception during store", e);
}
}
return true;
}
public boolean txnRollbackOffer(long itemId) {
final boolean result = txnRollbackOfferBackup(itemId);
scheduleEvictionIfEmpty();
return result;
}
public boolean txnRollbackOfferBackup(long itemId) {
QueueItem item = txMap.remove(itemId);
if (item == null) {
logger.warning("txnRollbackOffer operation-> No txn item for itemId: " + itemId);
return false;
}
return true;
}
public QueueItem txnPeek(long offerId, String transactionId) {
QueueItem item = getItemQueue().peek();
if (item == null) {
if (offerId == -1) {
return null;
}
TxQueueItem txItem = txMap.get(offerId);
if (txItem == null) {
return null;
}
item = new QueueItem(this, txItem.getItemId(), txItem.getData());
return item;
}
if (store.isEnabled() && item.getData() == null) {
try {
load(item);
} catch (Exception e) {
throw new HazelcastException(e);
}
}
return item;
}
//TX Methods Ends
public long offer(Data data) {
QueueItem item = new QueueItem(this, nextId(), null);
if (store.isEnabled()) {
try {
store.store(item.getItemId(), data);
} catch (Exception e) {
throw new HazelcastException(e);
}
}
if (!store.isEnabled() || store.getMemoryLimit() > getItemQueue().size()) {
item.setData(data);
}
getItemQueue().offer(item);
cancelEvictionIfExists();
return item.getItemId();
}
public void offerBackup(Data data, long itemId) {
QueueItem item = new QueueItem(this, itemId, null);
if (!store.isEnabled() || store.getMemoryLimit() > getItemQueue().size()) {
item.setData(data);
}
getBackupMap().put(itemId, item);
}
public Map<Long, Data> addAll(Collection<Data> dataList) {
Map<Long, Data> map = new HashMap<Long, Data>(dataList.size());
List<QueueItem> list = new ArrayList<QueueItem>(dataList.size());
for (Data data : dataList) {
QueueItem item = new QueueItem(this, nextId(), null);
if (!store.isEnabled() || store.getMemoryLimit() > getItemQueue().size()) {
item.setData(data);
}
map.put(item.getItemId(), data);
list.add(item);
}
if (store.isEnabled() && !map.isEmpty()) {
try {
store.storeAll(map);
} catch (Exception e) {
throw new HazelcastException(e);
}
}
if (!list.isEmpty()) {
getItemQueue().addAll(list);
cancelEvictionIfExists();
}
return map;
}
public void addAllBackup(Map<Long, Data> dataMap) {
for (Map.Entry<Long, Data> entry : dataMap.entrySet()) {
QueueItem item = new QueueItem(this, entry.getKey(), null);
if (!store.isEnabled() || store.getMemoryLimit() > getItemQueue().size()) {
item.setData(entry.getValue());
}
getBackupMap().put(item.getItemId(), item);
}
}
public QueueItem peek() {
QueueItem item = getItemQueue().peek();
if (item == null) {
return null;
}
if (store.isEnabled() && item.getData() == null) {
try {
load(item);
} catch (Exception e) {
throw new HazelcastException(e);
}
}
return item;
}
public QueueItem poll() {
QueueItem item = peek();
if (item == null) {
return null;
}
if (store.isEnabled()) {
try {
store.delete(item.getItemId());
} catch (Exception e) {
throw new HazelcastException(e);
}
}
getItemQueue().poll();
age(item, Clock.currentTimeMillis());
scheduleEvictionIfEmpty();
return item;
}
public void pollBackup(long itemId) {
QueueItem item = getBackupMap().remove(itemId);
if (item != null) {
//For Stats
age(item, Clock.currentTimeMillis());
}
}
public Map<Long, Data> drain(int maxSize) {
if (maxSize < 0 || maxSize > getItemQueue().size()) {
maxSize = getItemQueue().size();
}
LinkedHashMap<Long, Data> map = new LinkedHashMap<Long, Data>(maxSize);
Iterator<QueueItem> iter = getItemQueue().iterator();
for (int i = 0; i < maxSize; i++) {
QueueItem item = iter.next();
if (store.isEnabled() && item.getData() == null) {
try {
load(item);
} catch (Exception e) {
throw new HazelcastException(e);
}
}
map.put(item.getItemId(), item.getData());
}
if (store.isEnabled() && maxSize != 0) {
try {
store.deleteAll(map.keySet());
} catch (Exception e) {
throw new HazelcastException(e);
}
}
long current = Clock.currentTimeMillis();
for (int i = 0; i < maxSize; i++) {
QueueItem item = getItemQueue().poll();
//For Stats
age(item, current);
}
if (maxSize != 0) {
scheduleEvictionIfEmpty();
}
return map;
}
public void drainFromBackup(Set<Long> itemIdSet) {
for (Long itemId : itemIdSet) {
pollBackup(itemId);
}
dataMap.clear();
}
public int size() {
return Math.min(config.getMaxSize(), getItemQueue().size());
}
public int backupSize() {
return getBackupMap().size();
}
public Map<Long, Data> clear() {
long current = Clock.currentTimeMillis();
LinkedHashMap<Long, Data> map = new LinkedHashMap<Long, Data>(getItemQueue().size());
for (QueueItem item : getItemQueue()) {
map.put(item.getItemId(), item.getData());
// For stats
age(item, current);
}
if (store.isEnabled() && !map.isEmpty()) {
try {
store.deleteAll(map.keySet());
} catch (Exception e) {
throw new HazelcastException(e);
}
}
getItemQueue().clear();
dataMap.clear();
scheduleEvictionIfEmpty();
return map;
}
public void clearBackup(Set<Long> itemIdSet) {
drainFromBackup(itemIdSet);
}
/**
     * Iterates over all items and checks each one for equality with the given data.
     * This method does not trigger a store load.
*/
public long remove(Data data) {
Iterator<QueueItem> iter = getItemQueue().iterator();
while (iter.hasNext()) {
QueueItem item = iter.next();
if (data.equals(item.getData())) {
if (store.isEnabled()) {
try {
store.delete(item.getItemId());
} catch (Exception e) {
throw new HazelcastException(e);
}
}
iter.remove();
//For Stats
age(item, Clock.currentTimeMillis());
scheduleEvictionIfEmpty();
return item.getItemId();
}
}
return -1;
}
public void removeBackup(long itemId) {
getBackupMap().remove(itemId);
}
/**
     * This method does not trigger a store load.
*/
public boolean contains(Collection<Data> dataSet) {
for (Data data : dataSet) {
boolean contains = false;
for (QueueItem item : getItemQueue()) {
if (item.getData() != null && item.getData().equals(data)) {
contains = true;
break;
}
}
if (!contains) {
return false;
}
}
return true;
}
/**
     * This method triggers a store load.
*/
public List<Data> getAsDataList() {
List<Data> dataList = new ArrayList<Data>(getItemQueue().size());
for (QueueItem item : getItemQueue()) {
if (store.isEnabled() && item.getData() == null) {
try {
load(item);
} catch (Exception e) {
throw new HazelcastException(e);
}
}
dataList.add(item.getData());
}
return dataList;
}
/**
     * This method triggers a store load.
*/
public Map<Long, Data> compareAndRemove(Collection<Data> dataList, boolean retain) {
LinkedHashMap<Long, Data> map = new LinkedHashMap<Long, Data>();
for (QueueItem item : getItemQueue()) {
if (item.getData() == null && store.isEnabled()) {
try {
load(item);
} catch (Exception e) {
throw new HazelcastException(e);
}
}
boolean contains = dataList.contains(item.getData());
if ((retain && !contains) || (!retain && contains)) {
map.put(item.getItemId(), item.getData());
}
}
if (map.size() > 0) {
if (store.isEnabled()) {
try {
store.deleteAll(map.keySet());
} catch (Exception e) {
throw new HazelcastException(e);
}
}
Iterator<QueueItem> iter = getItemQueue().iterator();
while (iter.hasNext()) {
QueueItem item = iter.next();
if (map.containsKey(item.getItemId())) {
iter.remove();
//For Stats
age(item, Clock.currentTimeMillis());
}
}
scheduleEvictionIfEmpty();
}
return map;
}
public void compareAndRemoveBackup(Set<Long> itemIdSet) {
drainFromBackup(itemIdSet);
}
private void load(QueueItem item) throws Exception {
int bulkLoad = store.getBulkLoad();
bulkLoad = Math.min(getItemQueue().size(), bulkLoad);
if (bulkLoad == 1) {
item.setData(store.load(item.getItemId()));
} else if (bulkLoad > 1) {
ListIterator<QueueItem> iter = getItemQueue().listIterator();
HashSet<Long> keySet = new HashSet<Long>(bulkLoad);
for (int i = 0; i < bulkLoad; i++) {
keySet.add(iter.next().getItemId());
}
Map<Long, Data> values = store.loadAll(keySet);
dataMap.putAll(values);
item.setData(getDataFromMap(item.getItemId()));
}
}
public boolean hasEnoughCapacity() {
return hasEnoughCapacity(1);
}
public boolean hasEnoughCapacity(int delta) {
return (getItemQueue().size() + delta) <= config.getMaxSize();
}
LinkedList<QueueItem> getItemQueue() {
if (itemQueue == null) {
itemQueue = new LinkedList<QueueItem>();
if (backupMap != null && !backupMap.isEmpty()) {
List<QueueItem> values = new ArrayList<QueueItem>(backupMap.values());
Collections.sort(values);
itemQueue.addAll(values);
backupMap.clear();
backupMap = null;
}
}
return itemQueue;
}
Map<Long, QueueItem> getBackupMap() {
if (backupMap == null) {
backupMap = new HashMap<Long, QueueItem>();
if (itemQueue != null) {
for (QueueItem item : itemQueue) {
backupMap.put(item.getItemId(), item);
}
itemQueue.clear();
itemQueue = null;
}
}
return backupMap;
}
public Data getDataFromMap(long itemId) {
return dataMap.remove(itemId);
}
public void setConfig(QueueConfig config, NodeEngine nodeEngine, QueueService service) {
this.nodeEngine = nodeEngine;
this.service = service;
logger = nodeEngine.getLogger(QueueContainer.class);
store = new QueueStoreWrapper(nodeEngine.getSerializationService());
this.config = new QueueConfig(config);
QueueStoreConfig storeConfig = config.getQueueStoreConfig();
store.setConfig(storeConfig, name);
}
long nextId() {
return idGenerator++;
}
void setId(long itemId) {
idGenerator = Math.max(itemId + 1, idGenerator);
}
public QueueWaitNotifyKey getPollWaitNotifyKey() {
return pollWaitNotifyKey;
}
public QueueWaitNotifyKey getOfferWaitNotifyKey() {
return offerWaitNotifyKey;
}
public QueueConfig getConfig() {
return config;
}
private void age(QueueItem item, long currentTime) {
long elapsed = currentTime - item.getCreationTime();
if (elapsed <= 0) {
            // elapsed time cannot be negative; a negative value most likely indicates a system clock problem, so it is ignored
return;
}
totalAgedCount++;
totalAge += elapsed;
minAge = Math.min(minAge, elapsed);
maxAge = Math.max(maxAge, elapsed);
}
public void setStats(LocalQueueStatsImpl stats) {
stats.setMinAge(minAge);
stats.setMaxAge(maxAge);
long totalAgedCountVal = Math.max(totalAgedCount, 1);
stats.setAveAge(totalAge / totalAgedCountVal);
}
private void scheduleEvictionIfEmpty() {
final int emptyQueueTtl = config.getEmptyQueueTtl();
if (emptyQueueTtl < 0) {
return;
}
if (getItemQueue().isEmpty() && txMap.isEmpty() && !isEvictionScheduled) {
if (emptyQueueTtl == 0) {
nodeEngine.getProxyService().destroyDistributedObject(QueueService.SERVICE_NAME, name);
} else if (emptyQueueTtl > 0) {
service.scheduleEviction(name, TimeUnit.SECONDS.toMillis(emptyQueueTtl));
isEvictionScheduled = true;
}
}
}
public void cancelEvictionIfExists() {
if (isEvictionScheduled) {
service.cancelEviction(name);
isEvictionScheduled = false;
}
}
public boolean isEvictable() {
return getItemQueue().isEmpty() && txMap.isEmpty();
}
public void rollbackTransaction(String transactionId) {
final Iterator<TxQueueItem> iterator = txMap.values().iterator();
while (iterator.hasNext()) {
final TxQueueItem item = iterator.next();
if (transactionId.equals(item.getTransactionId())) {
iterator.remove();
if (item.isPollOperation()) {
getItemQueue().offerFirst(item);
cancelEvictionIfExists();
}
}
}
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeUTF(name);
out.writeInt(getItemQueue().size());
for (QueueItem item : getItemQueue()) {
out.writeObject(item);
}
out.writeInt(txMap.size());
for (TxQueueItem item : txMap.values()) {
item.writeData(out);
}
}
@Override
public void readData(ObjectDataInput in) throws IOException {
name = in.readUTF();
int size = in.readInt();
for (int j = 0; j < size; j++) {
QueueItem item = in.readObject();
getItemQueue().offer(item);
setId(item.getItemId());
}
int txSize = in.readInt();
for (int j = 0; j < txSize; j++) {
TxQueueItem item = new TxQueueItem(this, -1, null);
item.readData(in);
txMap.put(item.getItemId(), item);
setId(item.getItemId());
}
}
public void destroy() {
if (itemQueue != null) {
itemQueue.clear();
}
if (backupMap != null) {
backupMap.clear();
}
txMap.clear();
dataMap.clear();
}
@Override
public int getFactoryId() {
return QueueDataSerializerHook.F_ID;
}
@Override
public int getId() {
return QueueDataSerializerHook.QUEUE_CONTAINER;
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_queue_QueueContainer.java
|
495 |
return scheduledExecutor.schedule(new Runnable() {
public void run() {
executeInternal(command);
}
}, delay, unit);
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_spi_impl_ClientExecutionServiceImpl.java
|
450 |
trackedSet.addChangeListener(new OMultiValueChangeListener<String, String>() {
public void onAfterRecordChanged(final OMultiValueChangeEvent<String, String> event) {
changed.value = true;
}
});
| 0true
|
core_src_test_java_com_orientechnologies_orient_core_db_record_TrackedSetTest.java
|
209 |
@Deprecated
public class HydratedCache extends Hashtable<String, Object> {
private static final long serialVersionUID = 1L;
private String cacheName;
private String cacheRegion;
public HydratedCache(String cacheRegion, String cacheName) {
this.cacheRegion = cacheRegion;
this.cacheName = cacheName;
}
public String getCacheName() {
return cacheName;
}
public String getCacheRegion() {
return cacheRegion;
}
public void setCacheRegion(String cacheRegion) {
this.cacheRegion = cacheRegion;
}
public HydratedCacheElement getCacheElement(String cacheRegion, String cacheName, Serializable key) {
return (HydratedCacheElement) get(cacheRegion + "_" + cacheName + "_" + key);
}
public HydratedCacheElement removeCacheElement(String cacheRegion, String cacheName, Serializable key) {
String myKey = cacheRegion + "_" + cacheName + "_" + key;
return (HydratedCacheElement) remove(myKey);
}
public void addCacheElement(String cacheRegion, String cacheName, Serializable key, Object value) {
String myKey = cacheRegion + "_" + cacheName + "_" + key;
put(myKey, value);
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_cache_engine_HydratedCache.java
|
72 |
public interface TitanVertexQuery<Q extends TitanVertexQuery<Q>> extends BaseVertexQuery<Q>, VertexQuery {
/* ---------------------------------------------------------------
* Query Specification (override to merge BaseVertexQuery with Blueprints' VertexQuery)
* ---------------------------------------------------------------
*/
@Override
public Q adjacent(TitanVertex vertex);
@Override
public Q types(RelationType... type);
@Override
public Q labels(String... labels);
@Override
public Q keys(String... keys);
@Override
public Q direction(Direction d);
@Override
public Q has(PropertyKey key, Object value);
@Override
public Q has(EdgeLabel label, TitanVertex vertex);
@Override
public Q has(String key);
@Override
public Q hasNot(String key);
@Override
public Q has(String type, Object value);
@Override
public Q hasNot(String key, Object value);
@Override
public Q has(PropertyKey key, Predicate predicate, Object value);
@Override
public Q has(String key, Predicate predicate, Object value);
@Override
public <T extends Comparable<?>> Q interval(String key, T start, T end);
@Override
public <T extends Comparable<?>> Q interval(PropertyKey key, T start, T end);
@Override
public Q limit(int limit);
@Override
public Q orderBy(String key, Order order);
@Override
public Q orderBy(PropertyKey key, Order order);
/* ---------------------------------------------------------------
* Query execution
* ---------------------------------------------------------------
*/
/**
* Returns an iterable over all incident edges that match this query
*
* @return Iterable over all incident edges that match this query
*/
public Iterable<Edge> edges();
/**
* Returns an iterable over all incident edges that match this query. Returns edges as {@link TitanEdge}.
*
* @return Iterable over all incident edges that match this query
*/
public Iterable<TitanEdge> titanEdges();
/**
* Returns an iterable over all incident properties that match this query
*
* @return Iterable over all incident properties that match this query
*/
public Iterable<TitanProperty> properties();
/**
* Returns an iterable over all incident relations that match this query
*
* @return Iterable over all incident relations that match this query
*/
public Iterable<TitanRelation> relations();
/**
* Returns the number of edges that match this query
*
* @return Number of edges that match this query
*/
public long count();
/**
* Returns the number of properties that match this query
*
* @return Number of properties that match this query
*/
public long propertyCount();
/**
* Retrieves all vertices connected to this query's base vertex by edges
* matching the conditions defined in this query.
* <p/>
* The query engine will determine the most efficient way to retrieve the vertices that match this query.
*
* @return A list of all vertices connected to this query's base vertex by matching edges
*/
@Override
public VertexList vertexIds();
}
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_TitanVertexQuery.java
|
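A hedged usage sketch of the query interface above. It assumes a TitanVertex named vertex whose query() method returns a TitanVertexQuery; the "knows" label and "since" key are made-up examples rather than anything from the original file.
// Illustrative only: a vertex-centric query built from the methods declared above.
long knowsSince2010 = vertex.query()          // 'vertex' is an assumed TitanVertex
        .labels("knows")                      // restrict to the "knows" edge label
        .direction(Direction.OUT)             // outgoing edges only
        .has("since", 2010)                   // equality condition on an edge property
        .count();                             // number of matching edges

for (TitanEdge edge : vertex.query()
        .labels("knows")
        .direction(Direction.OUT)
        .limit(10)                            // cap the result size
        .titanEdges()) {                      // iterate the matching edges
    // process each edge
}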
979 |
public abstract class ShardReplicationOperationRequest<T extends ShardReplicationOperationRequest> extends ActionRequest<T> {
public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES);
protected TimeValue timeout = DEFAULT_TIMEOUT;
protected String index;
private boolean threadedOperation = true;
private ReplicationType replicationType = ReplicationType.DEFAULT;
private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT;
protected ShardReplicationOperationRequest() {
}
public ShardReplicationOperationRequest(ActionRequest request) {
super(request);
}
public ShardReplicationOperationRequest(T request) {
super(request);
this.timeout = request.timeout();
this.index = request.index();
this.threadedOperation = request.operationThreaded();
this.replicationType = request.replicationType();
this.consistencyLevel = request.consistencyLevel();
}
/**
* Controls if the operation will be executed on a separate thread when executed locally.
*/
public final boolean operationThreaded() {
return threadedOperation;
}
/**
* Controls if the operation will be executed on a separate thread when executed locally. Defaults
* to <tt>true</tt> when running in embedded mode.
*/
@SuppressWarnings("unchecked")
public final T operationThreaded(boolean threadedOperation) {
this.threadedOperation = threadedOperation;
return (T) this;
}
/**
* A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
*/
@SuppressWarnings("unchecked")
public final T timeout(TimeValue timeout) {
this.timeout = timeout;
return (T) this;
}
/**
* A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
*/
public final T timeout(String timeout) {
return timeout(TimeValue.parseTimeValue(timeout, null));
}
public TimeValue timeout() {
return timeout;
}
public String index() {
return this.index;
}
@SuppressWarnings("unchecked")
public final T index(String index) {
this.index = index;
return (T) this;
}
/**
* The replication type.
*/
public ReplicationType replicationType() {
return this.replicationType;
}
/**
* Sets the replication type.
*/
@SuppressWarnings("unchecked")
public final T replicationType(ReplicationType replicationType) {
this.replicationType = replicationType;
return (T) this;
}
/**
* Sets the replication type.
*/
public final T replicationType(String replicationType) {
return replicationType(ReplicationType.fromString(replicationType));
}
public WriteConsistencyLevel consistencyLevel() {
return this.consistencyLevel;
}
/**
* Sets the consistency level of write. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT}
*/
@SuppressWarnings("unchecked")
public final T consistencyLevel(WriteConsistencyLevel consistencyLevel) {
this.consistencyLevel = consistencyLevel;
return (T) this;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (index == null) {
validationException = addValidationError("index is missing", validationException);
}
return validationException;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
replicationType = ReplicationType.fromId(in.readByte());
consistencyLevel = WriteConsistencyLevel.fromId(in.readByte());
timeout = TimeValue.readTimeValue(in);
index = in.readSharedString();
// no need to serialize threaded* parameters, since they only matter locally
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeByte(replicationType.id());
out.writeByte(consistencyLevel.id());
timeout.writeTo(out);
out.writeSharedString(index);
}
/**
* Called before the request gets forked into a local thread.
*/
public void beforeLocalFork() {
}
}
| 0true
|
src_main_java_org_elasticsearch_action_support_replication_ShardReplicationOperationRequest.java
|
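A minimal sketch of how a concrete request could extend the abstract class above and use its fluent setters; DemoShardRequest and its example() method are invented for illustration and are not part of Elasticsearch.
// Illustrative only: a trivial concrete request plus typical fluent configuration.
public class DemoShardRequest extends ShardReplicationOperationRequest<DemoShardRequest> {

    public static DemoShardRequest example() {
        DemoShardRequest request = new DemoShardRequest()
                .index("orders")                                 // required, checked by validate()
                .timeout("5s")                                   // wait up to 5 seconds
                .replicationType(ReplicationType.ASYNC)          // don't wait for replicas
                .consistencyLevel(WriteConsistencyLevel.QUORUM); // quorum of shard copies
        assert request.validate() == null;                       // no validation error once index is set
        return request;
    }
}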
524 |
public class OSecurityAccessException extends OSecurityException {
private static final long serialVersionUID = -8486291378415776372L;
private String databaseName;
public OSecurityAccessException(final String iDatabasename, final String message, final Throwable cause) {
super(message, cause);
databaseName = iDatabasename;
}
public OSecurityAccessException(final String iDatabasename, final String message) {
super(message);
databaseName = iDatabasename;
}
public OSecurityAccessException(final String message) {
super(message);
}
public String getDatabaseName() {
return databaseName;
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_exception_OSecurityAccessException.java
|
456 |
executor.execute(new Runnable() {
@Override
public void run() {
for (int i = 0; i < operations; i++) {
map2.put("foo-" + i, "bar2");
}
}
}, 60, EntryEventType.UPDATED, operations, 0.75, map1, map2);
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_replicatedmap_ClientReplicatedMapTest.java
|
2,219 |
public class BoostScoreFunction extends ScoreFunction {
private final float boost;
public BoostScoreFunction(float boost) {
super(CombineFunction.MULT);
this.boost = boost;
}
public float getBoost() {
return boost;
}
@Override
public void setNextReader(AtomicReaderContext context) {
// nothing to do here...
}
@Override
public double score(int docId, float subQueryScore) {
return boost;
}
@Override
public Explanation explainScore(int docId, Explanation subQueryExpl) {
Explanation exp = new Explanation(boost, "static boost factor");
exp.addDetail(new Explanation(boost, "boostFactor"));
return exp;
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
BoostScoreFunction that = (BoostScoreFunction) o;
if (Float.compare(that.boost, boost) != 0)
return false;
return true;
}
@Override
public int hashCode() {
return (boost != +0.0f ? Float.floatToIntBits(boost) : 0);
}
@Override
public String toString() {
return "boost[" + boost + "]";
}
}
| 1no label
|
src_main_java_org_elasticsearch_common_lucene_search_function_BoostScoreFunction.java
|
831 |
public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, SearchResponse, SearchRequestBuilder> {
private SearchSourceBuilder sourceBuilder;
public SearchRequestBuilder(Client client) {
super((InternalClient) client, new SearchRequest());
}
/**
* Sets the indices the search will be executed on.
*/
public SearchRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}
/**
* The document types to execute the search against. Defaults to be executed against
* all types.
*/
public SearchRequestBuilder setTypes(String... types) {
request.types(types);
return this;
}
/**
* The search type to execute, defaults to {@link org.elasticsearch.action.search.SearchType#DEFAULT}.
*/
public SearchRequestBuilder setSearchType(SearchType searchType) {
request.searchType(searchType);
return this;
}
/**
* A string representation of the search type to execute, defaults to {@link SearchType#DEFAULT}. Can be
* one of "dfs_query_then_fetch"/"dfsQueryThenFetch", "dfs_query_and_fetch"/"dfsQueryAndFetch",
* "query_then_fetch"/"queryThenFetch", and "query_and_fetch"/"queryAndFetch".
*/
public SearchRequestBuilder setSearchType(String searchType) throws ElasticsearchIllegalArgumentException {
request.searchType(searchType);
return this;
}
/**
* If set, will enable scrolling of the search request.
*/
public SearchRequestBuilder setScroll(Scroll scroll) {
request.scroll(scroll);
return this;
}
/**
* If set, will enable scrolling of the search request for the specified timeout.
*/
public SearchRequestBuilder setScroll(TimeValue keepAlive) {
request.scroll(keepAlive);
return this;
}
/**
* If set, will enable scrolling of the search request for the specified timeout.
*/
public SearchRequestBuilder setScroll(String keepAlive) {
request.scroll(keepAlive);
return this;
}
/**
* An optional timeout to control how long search is allowed to take.
*/
public SearchRequestBuilder setTimeout(TimeValue timeout) {
sourceBuilder().timeout(timeout);
return this;
}
/**
* An optional timeout to control how long search is allowed to take.
*/
public SearchRequestBuilder setTimeout(String timeout) {
sourceBuilder().timeout(timeout);
return this;
}
/**
* A comma separated list of routing values to control the shards the search will be executed on.
*/
public SearchRequestBuilder setRouting(String routing) {
request.routing(routing);
return this;
}
/**
* The routing values to control the shards that the search will be executed on.
*/
public SearchRequestBuilder setRouting(String... routing) {
request.routing(routing);
return this;
}
/**
* Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
* <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
* a custom value, which guarantees that the same order will be used across different requests.
*/
public SearchRequestBuilder setPreference(String preference) {
request.preference(preference);
return this;
}
/**
* Controls the search operation threading model.
*/
public SearchRequestBuilder setOperationThreading(SearchOperationThreading operationThreading) {
request.operationThreading(operationThreading);
return this;
}
/**
* Sets the string representation of the operation threading model. Can be one of
* "no_threads", "single_thread" and "thread_per_shard".
*/
public SearchRequestBuilder setOperationThreading(String operationThreading) {
request.operationThreading(operationThreading);
return this;
}
/**
* Specifies what types of requested indices to ignore and how wildcard indices expressions are expanded.
*
* For example indices that don't exist.
*/
public SearchRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
request().indicesOptions(indicesOptions);
return this;
}
/**
* Constructs a new search source builder with a search query.
*
* @see org.elasticsearch.index.query.QueryBuilders
*/
public SearchRequestBuilder setQuery(QueryBuilder queryBuilder) {
sourceBuilder().query(queryBuilder);
return this;
}
/**
* Constructs a new search source builder with a raw search query.
*/
public SearchRequestBuilder setQuery(String query) {
sourceBuilder().query(query);
return this;
}
/**
* Constructs a new search source builder with a raw search query.
*/
public SearchRequestBuilder setQuery(BytesReference queryBinary) {
sourceBuilder().query(queryBinary);
return this;
}
/**
* Constructs a new search source builder with a raw search query.
*/
public SearchRequestBuilder setQuery(byte[] queryBinary) {
sourceBuilder().query(queryBinary);
return this;
}
/**
* Constructs a new search source builder with a raw search query.
*/
public SearchRequestBuilder setQuery(byte[] queryBinary, int queryBinaryOffset, int queryBinaryLength) {
sourceBuilder().query(queryBinary, queryBinaryOffset, queryBinaryLength);
return this;
}
/**
* Constructs a new search source builder with a raw search query.
*/
public SearchRequestBuilder setQuery(XContentBuilder query) {
sourceBuilder().query(query);
return this;
}
/**
* Constructs a new search source builder with a raw search query.
*/
public SearchRequestBuilder setQuery(Map query) {
sourceBuilder().query(query);
return this;
}
/**
* Sets a filter that will be executed after the query has been executed and only has an effect on the search hits
* (not aggregations or facets). This filter is always executed as the last filtering mechanism.
*/
public SearchRequestBuilder setPostFilter(FilterBuilder postFilter) {
sourceBuilder().postFilter(postFilter);
return this;
}
/**
* Sets a filter on the query executed that only applies to the search query
* (and not facets for example).
*/
public SearchRequestBuilder setPostFilter(String postFilter) {
sourceBuilder().postFilter(postFilter);
return this;
}
/**
* Sets a filter on the query executed that only applies to the search query
* (and not facets for example).
*/
public SearchRequestBuilder setPostFilter(BytesReference postFilter) {
sourceBuilder().postFilter(postFilter);
return this;
}
/**
* Sets a filter on the query executed that only applies to the search query
* (and not facets for example).
*/
public SearchRequestBuilder setPostFilter(byte[] postFilter) {
sourceBuilder().postFilter(postFilter);
return this;
}
/**
* Sets a filter on the query executed that only applies to the search query
* (and not facets for example).
*/
public SearchRequestBuilder setPostFilter(byte[] postFilter, int postFilterOffset, int postFilterLength) {
sourceBuilder().postFilter(postFilter, postFilterOffset, postFilterLength);
return this;
}
/**
* Sets a filter on the query executed that only applies to the search query
* (and not facets for example).
*/
public SearchRequestBuilder setPostFilter(XContentBuilder postFilter) {
sourceBuilder().postFilter(postFilter);
return this;
}
/**
* Sets a filter on the query executed that only applies to the search query
* (and not facets for example).
*/
public SearchRequestBuilder setPostFilter(Map postFilter) {
sourceBuilder().postFilter(postFilter);
return this;
}
/**
* Sets the minimum score below which docs will be filtered out.
*/
public SearchRequestBuilder setMinScore(float minScore) {
sourceBuilder().minScore(minScore);
return this;
}
/**
* The index to start returning search hits from. Defaults to <tt>0</tt>.
*/
public SearchRequestBuilder setFrom(int from) {
sourceBuilder().from(from);
return this;
}
/**
* The number of search hits to return. Defaults to <tt>10</tt>.
*/
public SearchRequestBuilder setSize(int size) {
sourceBuilder().size(size);
return this;
}
/**
* Should each {@link org.elasticsearch.search.SearchHit} be returned with an
* explanation of the hit (ranking).
*/
public SearchRequestBuilder setExplain(boolean explain) {
sourceBuilder().explain(explain);
return this;
}
/**
* Should each {@link org.elasticsearch.search.SearchHit} be returned with its
* version.
*/
public SearchRequestBuilder setVersion(boolean version) {
sourceBuilder().version(version);
return this;
}
/**
* Sets the boost a specific index will receive when the query is executed against it.
*
* @param index The index to apply the boost against
* @param indexBoost The boost to apply to the index
*/
public SearchRequestBuilder addIndexBoost(String index, float indexBoost) {
sourceBuilder().indexBoost(index, indexBoost);
return this;
}
/**
* The stats groups this request will be aggregated under.
*/
public SearchRequestBuilder setStats(String... statsGroups) {
sourceBuilder().stats(statsGroups);
return this;
}
/**
* Sets no fields to be loaded, resulting in only the id and type being returned per hit.
*/
public SearchRequestBuilder setNoFields() {
sourceBuilder().noFields();
return this;
}
/**
* Indicates whether the response should contain the stored _source for every hit
*
* @param fetch whether the stored _source should be returned for every hit
* @return this builder for chaining
*/
public SearchRequestBuilder setFetchSource(boolean fetch) {
sourceBuilder().fetchSource(fetch);
return this;
}
/**
* Indicate that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @param include An optional include (optionally wildcarded) pattern to filter the returned _source
* @param exclude An optional exclude (optionally wildcarded) pattern to filter the returned _source
*/
public SearchRequestBuilder setFetchSource(@Nullable String include, @Nullable String exclude) {
sourceBuilder().fetchSource(include, exclude);
return this;
}
/**
* Indicate that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @param includes An optional list of include (optionally wildcarded) pattern to filter the returned _source
* @param excludes An optional list of exclude (optionally wildcarded) pattern to filter the returned _source
*/
public SearchRequestBuilder setFetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
sourceBuilder().fetchSource(includes, excludes);
return this;
}
/**
* Adds a field to load and return (note, it must be stored) as part of the search request.
* If none are specified, the source of the document will be returned.
*/
public SearchRequestBuilder addField(String field) {
sourceBuilder().field(field);
return this;
}
/**
* Adds a field data based field to load and return. The field does not have to be stored,
* but it's recommended to use non-analyzed or numeric fields.
*
* @param name The field to get from the field data cache
*/
public SearchRequestBuilder addFieldDataField(String name) {
sourceBuilder().fieldDataField(name);
return this;
}
/**
* Adds a script based field to load and return. The field does not have to be stored,
* but it's recommended to use non-analyzed or numeric fields.
*
* @param name The name that will represent this value in the return hit
* @param script The script to use
*/
public SearchRequestBuilder addScriptField(String name, String script) {
sourceBuilder().scriptField(name, script);
return this;
}
/**
* Adds a script based field to load and return. The field does not have to be stored,
* but it's recommended to use non-analyzed or numeric fields.
*
* @param name The name that will represent this value in the return hit
* @param script The script to use
* @param params Parameters that the script can use.
*/
public SearchRequestBuilder addScriptField(String name, String script, Map<String, Object> params) {
sourceBuilder().scriptField(name, script, params);
return this;
}
/**
* Adds a partial field based on _source, with an "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @deprecated since 1.0.0
* use {@link org.elasticsearch.action.search.SearchRequestBuilder#setFetchSource(String, String)} instead
*
* @param name The name of the field
* @param include An optional include (optionally wildcarded) pattern from _source
* @param exclude An optional exclude (optionally wildcarded) pattern from _source
*/
@Deprecated
public SearchRequestBuilder addPartialField(String name, @Nullable String include, @Nullable String exclude) {
sourceBuilder().partialField(name, include, exclude);
return this;
}
/**
* Adds a partial field based on _source, with an "includes" and/or "excludes" set which can include simple wildcard
* elements.
*
* @deprecated since 1.0.0
* use {@link org.elasticsearch.action.search.SearchRequestBuilder#setFetchSource(String[], String[])} instead
*
* @param name The name of the field
* @param includes An optional list of includes (optionally wildcarded) patterns from _source
* @param excludes An optional list of excludes (optionally wildcarded) patterns from _source
*/
@Deprecated
public SearchRequestBuilder addPartialField(String name, @Nullable String[] includes, @Nullable String[] excludes) {
sourceBuilder().partialField(name, includes, excludes);
return this;
}
/**
* Adds a script based field to load and return. The field does not have to be stored,
* but it's recommended to use non-analyzed or numeric fields.
*
* @param name The name that will represent this value in the return hit
* @param lang The language of the script
* @param script The script to use
* @param params Parameters that the script can use (can be <tt>null</tt>).
*/
public SearchRequestBuilder addScriptField(String name, String lang, String script, Map<String, Object> params) {
sourceBuilder().scriptField(name, lang, script, params);
return this;
}
/**
* Adds a sort against the given field name and the sort ordering.
*
* @param field The name of the field
* @param order The sort ordering
*/
public SearchRequestBuilder addSort(String field, SortOrder order) {
sourceBuilder().sort(field, order);
return this;
}
/**
* Adds a generic sort builder.
*
* @see org.elasticsearch.search.sort.SortBuilders
*/
public SearchRequestBuilder addSort(SortBuilder sort) {
sourceBuilder().sort(sort);
return this;
}
/**
* Applies when sorting, and controls if scores will be tracked as well. Defaults to
* <tt>false</tt>.
*/
public SearchRequestBuilder setTrackScores(boolean trackScores) {
sourceBuilder().trackScores(trackScores);
return this;
}
/**
* Adds the fields to load and return as part of the search request. If none are specified,
* the source of the document will be returned.
*/
public SearchRequestBuilder addFields(String... fields) {
sourceBuilder().fields(fields);
return this;
}
/**
* Adds a facet to the search operation.
*/
public SearchRequestBuilder addFacet(FacetBuilder facet) {
sourceBuilder().facet(facet);
return this;
}
/**
* Sets a raw (xcontent) binary representation of facets to use.
*/
public SearchRequestBuilder setFacets(BytesReference facets) {
sourceBuilder().facets(facets);
return this;
}
/**
* Sets a raw (xcontent) binary representation of facets to use.
*/
public SearchRequestBuilder setFacets(byte[] facets) {
sourceBuilder().facets(facets);
return this;
}
/**
* Sets a raw (xcontent) binary representation of facets to use.
*/
public SearchRequestBuilder setFacets(byte[] facets, int facetsOffset, int facetsLength) {
sourceBuilder().facets(facets, facetsOffset, facetsLength);
return this;
}
/**
* Sets a raw (xcontent) binary representation of facets to use.
*/
public SearchRequestBuilder setFacets(XContentBuilder facets) {
sourceBuilder().facets(facets);
return this;
}
/**
* Sets a raw (xcontent) binary representation of facets to use.
*/
public SearchRequestBuilder setFacets(Map facets) {
sourceBuilder().facets(facets);
return this;
}
/**
* Adds an aggregation to the search operation.
*/
public SearchRequestBuilder addAggregation(AbstractAggregationBuilder aggregation) {
sourceBuilder().aggregation(aggregation);
return this;
}
/**
* Sets a raw (xcontent) binary representation of the aggregations to use.
*/
public SearchRequestBuilder setAggregations(BytesReference aggregations) {
sourceBuilder().aggregations(aggregations);
return this;
}
/**
* Sets a raw (xcontent) binary representation of the aggregations to use.
*/
public SearchRequestBuilder setAggregations(byte[] aggregations) {
sourceBuilder().aggregations(aggregations);
return this;
}
/**
* Sets a raw (xcontent) binary representation of the aggregations to use.
*/
public SearchRequestBuilder setAggregations(byte[] aggregations, int aggregationsOffset, int aggregationsLength) {
sourceBuilder().aggregations(aggregations, aggregationsOffset, aggregationsLength);
return this;
}
/**
* Sets a raw (xcontent) binary representation of the aggregations to use.
*/
public SearchRequestBuilder setAggregations(XContentBuilder aggregations) {
sourceBuilder().aggregations(aggregations);
return this;
}
/**
* Sets a raw (xcontent) binary representation of the aggregations to use.
*/
public SearchRequestBuilder setAggregations(Map aggregations) {
sourceBuilder().aggregations(aggregations);
return this;
}
/**
* Adds a field to be highlighted with default fragment size of 100 characters, and
* default number of fragments of 5.
*
* @param name The field to highlight
*/
public SearchRequestBuilder addHighlightedField(String name) {
highlightBuilder().field(name);
return this;
}
/**
* Adds a field to be highlighted with a provided fragment size (in characters), and
* default number of fragments of 5.
*
* @param name The field to highlight
* @param fragmentSize The size of a fragment in characters
*/
public SearchRequestBuilder addHighlightedField(String name, int fragmentSize) {
highlightBuilder().field(name, fragmentSize);
return this;
}
/**
* Adds a field to be highlighted with a provided fragment size (in characters), and
* a provided (maximum) number of fragments.
*
* @param name The field to highlight
* @param fragmentSize The size of a fragment in characters
* @param numberOfFragments The (maximum) number of fragments
*/
public SearchRequestBuilder addHighlightedField(String name, int fragmentSize, int numberOfFragments) {
highlightBuilder().field(name, fragmentSize, numberOfFragments);
return this;
}
/**
* Adds a field to be highlighted with a provided fragment size (in characters),
* a provided (maximum) number of fragments and an offset for the highlight.
*
* @param name The field to highlight
* @param fragmentSize The size of a fragment in characters
* @param numberOfFragments The (maximum) number of fragments
*/
public SearchRequestBuilder addHighlightedField(String name, int fragmentSize, int numberOfFragments,
int fragmentOffset) {
highlightBuilder().field(name, fragmentSize, numberOfFragments, fragmentOffset);
return this;
}
/**
* Adds a highlighted field.
*/
public SearchRequestBuilder addHighlightedField(HighlightBuilder.Field field) {
highlightBuilder().field(field);
return this;
}
/**
* Sets a tag scheme that encapsulates built-in pre and post tags. The allowed schemes
* are <tt>styled</tt> and <tt>default</tt>.
*
* @param schemaName The tag scheme name
*/
public SearchRequestBuilder setHighlighterTagsSchema(String schemaName) {
highlightBuilder().tagsSchema(schemaName);
return this;
}
/**
* Explicitly set the pre tags that will be used for highlighting.
*/
public SearchRequestBuilder setHighlighterPreTags(String... preTags) {
highlightBuilder().preTags(preTags);
return this;
}
/**
* Explicitly set the post tags that will be used for highlighting.
*/
public SearchRequestBuilder setHighlighterPostTags(String... postTags) {
highlightBuilder().postTags(postTags);
return this;
}
/**
* The order of fragments per field. By default, ordered by the order in the
* highlighted text. Can be <tt>score</tt>, in which case fragments will be ordered
* by their score.
*/
public SearchRequestBuilder setHighlighterOrder(String order) {
highlightBuilder().order(order);
return this;
}
/**
* The encoder to set for highlighting
*/
public SearchRequestBuilder setHighlighterEncoder(String encoder) {
highlightBuilder().encoder(encoder);
return this;
}
/**
* Sets a query to be used for highlighting all fields instead of the search query.
*/
public SearchRequestBuilder setHighlighterQuery(QueryBuilder highlightQuery) {
highlightBuilder().highlightQuery(highlightQuery);
return this;
}
public SearchRequestBuilder setHighlighterRequireFieldMatch(boolean requireFieldMatch) {
highlightBuilder().requireFieldMatch(requireFieldMatch);
return this;
}
/**
* The highlighter type to use.
*/
public SearchRequestBuilder setHighlighterType(String type) {
highlightBuilder().highlighterType(type);
return this;
}
/**
* Sets the size of the fragment to return from the beginning of the field if there are no matches to
* highlight and the field doesn't also define noMatchSize.
* @param noMatchSize integer to set or null to leave out of request. default is null.
* @return this builder for chaining
*/
public SearchRequestBuilder setHighlighterNoMatchSize(Integer noMatchSize) {
highlightBuilder().noMatchSize(noMatchSize);
return this;
}
public SearchRequestBuilder setHighlighterOptions(Map<String, Object> options) {
highlightBuilder().options(options);
return this;
}
/**
* Delegates to {@link org.elasticsearch.search.suggest.SuggestBuilder#setText(String)}.
*/
public SearchRequestBuilder setSuggestText(String globalText) {
suggestBuilder().setText(globalText);
return this;
}
/**
* Delegates to {@link org.elasticsearch.search.suggest.SuggestBuilder#addSuggestion(org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder)}.
*/
public SearchRequestBuilder addSuggestion(SuggestBuilder.SuggestionBuilder<?> suggestion) {
suggestBuilder().addSuggestion(suggestion);
return this;
}
/**
* Clears all rescorers on the builder and sets the first one. To use multiple rescore windows use
* {@link #addRescorer(org.elasticsearch.search.rescore.RescoreBuilder.Rescorer, int)}.
* @param rescorer rescorer configuration
* @return this for chaining
*/
public SearchRequestBuilder setRescorer(RescoreBuilder.Rescorer rescorer) {
sourceBuilder().clearRescorers();
return addRescorer(rescorer);
}
/**
* Clears all rescorers on the builder and sets the first one. To use multiple rescore windows use
* {@link #addRescorer(org.elasticsearch.search.rescore.RescoreBuilder.Rescorer, int)}.
* @param rescorer rescorer configuration
* @param window rescore window
* @return this for chaining
*/
public SearchRequestBuilder setRescorer(RescoreBuilder.Rescorer rescorer, int window) {
sourceBuilder().clearRescorers();
return addRescorer(rescorer, window);
}
/**
* Adds a new rescorer.
* @param rescorer rescorer configuration
* @return this for chaining
*/
public SearchRequestBuilder addRescorer(RescoreBuilder.Rescorer rescorer) {
sourceBuilder().addRescorer(new RescoreBuilder().rescorer(rescorer));
return this;
}
/**
* Adds a new rescorer.
* @param rescorer rescorer configuration
* @param window rescore window
* @return this for chaining
*/
public SearchRequestBuilder addRescorer(RescoreBuilder.Rescorer rescorer, int window) {
sourceBuilder().addRescorer(new RescoreBuilder().rescorer(rescorer).windowSize(window));
return this;
}
/**
* Clears all rescorers from the builder.
* @return this for chaining
*/
public SearchRequestBuilder clearRescorers() {
sourceBuilder().clearRescorers();
return this;
}
/**
* Sets the rescore window for all rescorers that don't specify a window when added.
* @param window rescore window
* @return this for chaining
*/
public SearchRequestBuilder setRescoreWindow(int window) {
sourceBuilder().defaultRescoreWindowSize(window);
return this;
}
/**
* Sets the source of the request as a json string. Note, setting anything other
* than the search type will cause this source to be overridden, consider using
* {@link #setExtraSource(String)}.
*/
public SearchRequestBuilder setSource(String source) {
request.source(source);
return this;
}
/**
* Sets the source of the request as a json string. Allows other parameters to be set.
*/
public SearchRequestBuilder setExtraSource(String source) {
request.extraSource(source);
return this;
}
/**
* Sets the source of the request as a json string. Note, setting anything other
* than the search type will cause this source to be overridden, consider using
* {@link #setExtraSource(BytesReference)}.
*/
public SearchRequestBuilder setSource(BytesReference source) {
request.source(source, false);
return this;
}
/**
* Sets the source of the request as a json string. Note, setting anything other
* than the search type will cause this source to be overridden, consider using
* {@link #setExtraSource(BytesReference)}.
*/
public SearchRequestBuilder setSource(BytesReference source, boolean unsafe) {
request.source(source, unsafe);
return this;
}
/**
* Sets the source of the request as a json string. Note, setting anything other
* than the search type will cause this source to be overridden, consider using
* {@link #setExtraSource(byte[])}.
*/
public SearchRequestBuilder setSource(byte[] source) {
request.source(source);
return this;
}
/**
* Sets the source of the request as a json string. Allows other parameters to be set.
*/
public SearchRequestBuilder setExtraSource(BytesReference source) {
request.extraSource(source, false);
return this;
}
/**
* Sets the source of the request as a json string. Allows other parameters to be set.
*/
public SearchRequestBuilder setExtraSource(BytesReference source, boolean unsafe) {
request.extraSource(source, unsafe);
return this;
}
/**
* Sets the source of the request as a json string. Allows other parameters to be set.
*/
public SearchRequestBuilder setExtraSource(byte[] source) {
request.extraSource(source);
return this;
}
/**
* Sets the source of the request as a json string. Note, setting anything other
* than the search type will cause this source to be overridden, consider using
* {@link #setExtraSource(byte[])}.
*/
public SearchRequestBuilder setSource(byte[] source, int offset, int length) {
request.source(source, offset, length);
return this;
}
/**
* Sets the source of the request as a json string. Allows other parameters to be set.
*/
public SearchRequestBuilder setExtraSource(byte[] source, int offset, int length) {
request.extraSource(source, offset, length);
return this;
}
/**
* Sets the source of the request as a json string. Note, setting anything other
* than the search type will cause this source to be overridden, consider using
* {@link #setExtraSource(XContentBuilder)}.
*/
public SearchRequestBuilder setSource(XContentBuilder builder) {
request.source(builder);
return this;
}
/**
* Sets the source of the request as a json string. Allows other parameters to be set.
*/
public SearchRequestBuilder setExtraSource(XContentBuilder builder) {
request.extraSource(builder);
return this;
}
/**
* Sets the source of the request as a map. Note, setting anything other than the
* search type will cause this source to be overridden, consider using
* {@link #setExtraSource(java.util.Map)}.
*/
public SearchRequestBuilder setSource(Map source) {
request.source(source);
return this;
}
public SearchRequestBuilder setExtraSource(Map source) {
request.extraSource(source);
return this;
}
/**
* Sets the source builder to be used with this request. Note, any operations done
* on this request builder before are discarded, as this internal builder replaces
* what has been built up until this point.
*/
public SearchRequestBuilder internalBuilder(SearchSourceBuilder sourceBuilder) {
this.sourceBuilder = sourceBuilder;
return this;
}
/**
* Returns the internal search source builder used to construct the request.
*/
public SearchSourceBuilder internalBuilder() {
return sourceBuilder();
}
@Override
public String toString() {
return internalBuilder().toString();
}
@Override
public SearchRequest request() {
if (sourceBuilder != null) {
request.source(sourceBuilder());
}
return request;
}
@Override
protected void doExecute(ActionListener<SearchResponse> listener) {
if (sourceBuilder != null) {
request.source(sourceBuilder());
}
((Client) client).search(request, listener);
}
private SearchSourceBuilder sourceBuilder() {
if (sourceBuilder == null) {
sourceBuilder = new SearchSourceBuilder();
}
return sourceBuilder;
}
private HighlightBuilder highlightBuilder() {
return sourceBuilder().highlighter();
}
private SuggestBuilder suggestBuilder() {
return sourceBuilder().suggest();
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_search_SearchRequestBuilder.java
|
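A short usage sketch of the builder above, driven through a Client obtained elsewhere; the index, type, and field names are placeholders rather than anything from the original file.
// Illustrative only: typical fluent use of SearchRequestBuilder.
SearchResponse response = client.prepareSearch("products")   // 'client' is an assumed Client instance
        .setTypes("product")
        .setQuery(QueryBuilders.termQuery("category", "books"))
        .setPostFilter(FilterBuilders.rangeFilter("price").from(10).to(50))
        .addSort("price", SortOrder.ASC)
        .setFrom(0)
        .setSize(20)
        .addHighlightedField("description")
        .execute()
        .actionGet();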
615 |
new OIndexEngine.EntriesResultListener() {
@Override
public boolean addResult(ODocument entry) {
return entriesResultListener.addResult(entry);
}
});
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_index_OIndexMultiValues.java
|
381 |
public interface LocaleService {
/**
* @return the locale for the passed in code
*/
public Locale findLocaleByCode(String localeCode);
/**
* @return the default locale
*/
public Locale findDefaultLocale();
/**
* @return a list of all known locales
*/
public List<Locale> findAllLocales();
/**
* Persists the given locale
*
* @param locale
* @return the persisted locale
*/
public Locale save(Locale locale);
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_locale_service_LocaleService.java
|
183 |
public class StaticAssetView implements View {
private static final Log LOG = LogFactory.getLog(StaticAssetView.class);
protected boolean browserAssetCachingEnabled = true;
protected long cacheSeconds = 60 * 60 * 24;
@Override
public String getContentType() {
return null;
}
@Override
public void render(Map<String, ?> model, HttpServletRequest request, HttpServletResponse response) throws Exception {
String cacheFilePath = (String) model.get("cacheFilePath");
BufferedInputStream bis = new BufferedInputStream(new FileInputStream(cacheFilePath));
try {
String mimeType = (String) model.get("mimeType");
response.setContentType(mimeType);
if (!browserAssetCachingEnabled) {
response.setHeader("Cache-Control","no-cache");
response.setHeader("Pragma","no-cache");
response.setDateHeader ("Expires", 0);
} else {
response.setHeader("Cache-Control","public");
response.setHeader("Pragma","cache");
if (!StringUtils.isEmpty(request.getHeader("If-Modified-Since"))) {
long lastModified = request.getDateHeader("If-Modified-Since");
Calendar last = Calendar.getInstance();
last.setTime(new Date(lastModified));
Calendar check = Calendar.getInstance();
check.add(Calendar.SECOND, -2 * new Long(cacheSeconds).intValue());
if (check.compareTo(last) < 0) {
response.setStatus(HttpServletResponse.SC_NOT_MODIFIED);
return;
}
} else {
Calendar check = Calendar.getInstance();
check.add(Calendar.SECOND, -1 * new Long(cacheSeconds).intValue());
response.setDateHeader ("Last-Modified", check.getTimeInMillis());
}
Calendar cal = Calendar.getInstance();
cal.add(Calendar.SECOND, new Long(cacheSeconds).intValue());
response.setDateHeader ("Expires", cal.getTimeInMillis());
}
OutputStream os = response.getOutputStream();
boolean eof = false;
while (!eof) {
int temp = bis.read();
if (temp < 0) {
eof = true;
} else {
os.write(temp);
}
}
os.flush();
} catch (Exception e) {
if (e.getCause() instanceof SocketException) {
if (LOG.isDebugEnabled()) {
LOG.debug("Unable to stream asset", e);
}
} else {
LOG.error("Unable to stream asset", e);
throw e;
}
} finally {
try {
bis.close();
} catch (Throwable e) {
//do nothing
}
}
}
public boolean isBrowserAssetCachingEnabled() {
return browserAssetCachingEnabled;
}
public void setBrowserAssetCachingEnabled(boolean browserAssetCachingEnabled) {
this.browserAssetCachingEnabled = browserAssetCachingEnabled;
}
public long getCacheSeconds() {
return cacheSeconds;
}
public void setCacheSeconds(long cacheSeconds) {
this.cacheSeconds = cacheSeconds;
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_web_file_StaticAssetView.java
|
100 |
clientConfig.addListenerConfig(new ListenerConfig().setImplementation(new InitialMembershipListener() {
public void init(InitialMembershipEvent event) {
for (int i = 0; i < event.getMembers().size(); i++) {
latch.countDown();
}
}
public void memberAdded(MembershipEvent membershipEvent) {
}
public void memberRemoved(MembershipEvent membershipEvent) {
}
public void memberAttributeChanged(MemberAttributeEvent memberAttributeEvent) {
}
}));
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_ClientIssueTest.java
|
271 |
public class NullEmailInfo extends EmailInfo {
private static final long serialVersionUID = 1L;
public NullEmailInfo() throws IOException {
super();
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_email_service_info_NullEmailInfo.java
|
523 |
public class BLCRequestUtils {
private static String OK_TO_USE_SESSION = "blOkToUseSession";
/**
* Broadleaf "Resolver" and "Filter" classes may need to know if they are allowed to utilize the session.
* BLC uses a pattern where we will store an attribute in the request indicating whether or not the
* session can be used. For example, when using the REST APIs, we typically do not want to utilize the
* session.
*
*/
public static boolean isOKtoUseSession(WebRequest request) {
Boolean useSessionForRequestProcessing = (Boolean) request.getAttribute(OK_TO_USE_SESSION, WebRequest.SCOPE_REQUEST);
if (useSessionForRequestProcessing == null) {
// by default we will use the session
return true;
} else {
return useSessionForRequestProcessing.booleanValue();
}
}
/**
* Sets whether or not Broadleaf can utilize the session in request processing. Used by the REST API
* flow so that RESTful calls do not utilize the session.
*
*/
public static void setOKtoUseSession(WebRequest request, Boolean value) {
request.setAttribute(OK_TO_USE_SESSION, value, WebRequest.SCOPE_REQUEST);
}
/**
* Get header or url parameter. Will obtain the parameter from a header variable or a URL parameter, preferring
* header values if they are set.
*
*/
public static String getURLorHeaderParameter(WebRequest request, String varName) {
String returnValue = request.getHeader(varName);
if (returnValue == null) {
returnValue = request.getParameter(varName);
}
return returnValue;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_util_BLCRequestUtils.java
|
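A hedged sketch of how a resolver or filter might consult the helper above before touching the session; the DemoLocaleResolver class and the blLocaleCode parameter name are assumptions for illustration.
// Illustrative only: guard session access and read a parameter that may
// arrive either as a header or as a URL parameter.
public class DemoLocaleResolver {

    public void resolve(org.springframework.web.context.request.WebRequest request) {
        String localeCode = BLCRequestUtils.getURLorHeaderParameter(request, "blLocaleCode");

        if (BLCRequestUtils.isOKtoUseSession(request)) {
            // safe to fall back to (or populate) session state here
        } else {
            // REST-style call: rely on the request-scoped value only
        }
    }
}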
3,424 |
private static class DistributedObjectFuture {
volatile DistributedObject proxy;
DistributedObject get() {
if (proxy == null) {
boolean interrupted = false;
synchronized (this) {
while (proxy == null) {
try {
wait();
} catch (InterruptedException e) {
interrupted = true;
}
}
}
if (interrupted) {
Thread.currentThread().interrupt();
}
}
return proxy;
}
void set(DistributedObject o) {
if (o == null) {
throw new IllegalArgumentException();
}
synchronized (this) {
proxy = o;
notifyAll();
}
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_spi_impl_ProxyServiceImpl.java
|
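The class above is a hand-rolled future: readers block in get() until set() publishes the proxy. Below is a standalone sketch of the same wait/notify handshake with a generic payload; the ValueFuture name is an assumption, and this variant propagates InterruptedException instead of restoring the interrupt flag.
// Illustrative only: same blocking-until-set pattern with wait/notifyAll.
public class ValueFuture<T> {
    private volatile T value;

    public T get() throws InterruptedException {
        if (value == null) {
            synchronized (this) {
                while (value == null) {
                    wait();                // released and re-acquired around notifyAll()
                }
            }
        }
        return value;
    }

    public void set(T newValue) {
        if (newValue == null) {
            throw new IllegalArgumentException();
        }
        synchronized (this) {
            value = newValue;
            notifyAll();                   // wake every thread blocked in get()
        }
    }
}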
3,741 |
public class WanReplicationServiceImpl
implements WanReplicationService {
private final Node node;
private final ILogger logger;
private final Map<String, WanReplicationPublisherDelegate> wanReplications = initializeWanReplicationPublisherMapping();
public WanReplicationServiceImpl(Node node) {
this.node = node;
this.logger = node.getLogger(WanReplicationServiceImpl.class.getName());
}
@Override
@SuppressWarnings("SynchronizeOnThis")
public WanReplicationPublisher getWanReplicationPublisher(String name) {
WanReplicationPublisherDelegate wr = wanReplications.get(name);
if (wr != null) {
return wr;
}
synchronized (this) {
wr = wanReplications.get(name);
if (wr != null) {
return wr;
}
WanReplicationConfig wanReplicationConfig = node.getConfig().getWanReplicationConfig(name);
if (wanReplicationConfig == null) {
return null;
}
List<WanTargetClusterConfig> targets = wanReplicationConfig.getTargetClusterConfigs();
WanReplicationEndpoint[] targetEndpoints = new WanReplicationEndpoint[targets.size()];
int count = 0;
for (WanTargetClusterConfig targetClusterConfig : targets) {
WanReplicationEndpoint target;
if (targetClusterConfig.getReplicationImpl() != null) {
try {
target = ClassLoaderUtil
.newInstance(node.getConfigClassLoader(), targetClusterConfig.getReplicationImpl());
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
} else {
target = new WanNoDelayReplication();
}
String groupName = targetClusterConfig.getGroupName();
String password = targetClusterConfig.getGroupPassword();
String[] addresses = new String[targetClusterConfig.getEndpoints().size()];
targetClusterConfig.getEndpoints().toArray(addresses);
target.init(node, groupName, password, addresses);
targetEndpoints[count++] = target;
}
wr = new WanReplicationPublisherDelegate(name, targetEndpoints);
wanReplications.put(name, wr);
return wr;
}
}
@Override
public void handleEvent(final Packet packet) {
// todo execute in which thread
node.nodeEngine.getExecutionService().execute("hz:wan", new Runnable() {
@Override
public void run() {
final Data data = packet.getData();
try {
WanReplicationEvent replicationEvent = (WanReplicationEvent) node.nodeEngine.toObject(data);
String serviceName = replicationEvent.getServiceName();
ReplicationSupportingService service = node.nodeEngine.getService(serviceName);
service.onReplicationEvent(replicationEvent);
} catch (Exception e) {
logger.severe(e);
}
}
});
}
@Override
public void shutdown() {
synchronized (this) {
for (WanReplicationPublisherDelegate wanReplication : wanReplications.values()) {
WanReplicationEndpoint[] wanReplicationEndpoints = wanReplication.getEndpoints();
if (wanReplicationEndpoints != null) {
for (WanReplicationEndpoint wanReplicationEndpoint : wanReplicationEndpoints) {
if (wanReplicationEndpoint != null) {
wanReplicationEndpoint.shutdown();
}
}
}
}
wanReplications.clear();
}
}
private ConcurrentHashMap<String, WanReplicationPublisherDelegate> initializeWanReplicationPublisherMapping() {
return new ConcurrentHashMap<String, WanReplicationPublisherDelegate>(2);
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_wan_impl_WanReplicationServiceImpl.java
|
316 |
public class NodeHotThreads extends NodeOperationResponse {
private String hotThreads;
NodeHotThreads() {
}
public NodeHotThreads(DiscoveryNode node, String hotThreads) {
super(node);
this.hotThreads = hotThreads;
}
public String getHotThreads() {
return this.hotThreads;
}
public static NodeHotThreads readNodeHotThreads(StreamInput in) throws IOException {
NodeHotThreads node = new NodeHotThreads();
node.readFrom(in);
return node;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
hotThreads = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(hotThreads);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_hotthreads_NodeHotThreads.java
|
80 |
public class ClientDisconnectionOperation extends AbstractOperation implements UrgentSystemOperation {
private String clientUuid;
public ClientDisconnectionOperation() {
}
public ClientDisconnectionOperation(String clientUuid) {
this.clientUuid = clientUuid;
}
@Override
public void run() throws Exception {
ClientEngineImpl engine = getService();
Set<ClientEndpoint> endpoints = engine.getEndpoints(clientUuid);
for (ClientEndpoint endpoint : endpoints) {
Connection connection = endpoint.getConnection();
engine.removeEndpoint(connection, true);
}
NodeEngineImpl nodeEngine = (NodeEngineImpl) getNodeEngine();
nodeEngine.onClientDisconnected(clientUuid);
Collection<ClientAwareService> services = nodeEngine.getServices(ClientAwareService.class);
for (ClientAwareService service : services) {
service.clientDisconnected(clientUuid);
}
}
@Override
public boolean returnsResponse() {
return false;
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeUTF(clientUuid);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
clientUuid = in.readUTF();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_client_ClientDisconnectionOperation.java
|
5,841 |
public class DocIdSetCollector extends XCollector {
private final DocSetCache docSetCache;
private final Collector collector;
private final List<ContextDocIdSet> docSets;
private boolean currentHasDocs;
private ContextDocIdSet currentContext;
private FixedBitSet currentSet;
public DocIdSetCollector(DocSetCache docSetCache, Collector collector) {
this.docSetCache = docSetCache;
this.collector = collector;
this.docSets = new ArrayList<ContextDocIdSet>();
}
public List<ContextDocIdSet> docSets() {
return docSets;
}
public void release() {
for (ContextDocIdSet docSet : docSets) {
docSetCache.release(docSet);
}
}
@Override
public void setScorer(Scorer scorer) throws IOException {
collector.setScorer(scorer);
}
@Override
public void collect(int doc) throws IOException {
collector.collect(doc);
currentHasDocs = true;
currentSet.set(doc);
}
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
collector.setNextReader(context);
if (currentContext != null) {
if (currentHasDocs) {
docSets.add(currentContext);
} else {
docSetCache.release(currentContext);
}
}
currentContext = docSetCache.obtain(context);
currentSet = (FixedBitSet) currentContext.docSet;
currentHasDocs = false;
}
@Override
public void postCollection() {
if (collector instanceof XCollector) {
((XCollector) collector).postCollection();
}
if (currentContext != null) {
if (currentHasDocs) {
docSets.add(currentContext);
} else {
docSetCache.release(currentContext);
}
currentContext = null;
currentSet = null;
currentHasDocs = false;
}
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_internal_DocIdSetCollector.java
|
3,368 |
public abstract class PackedArrayAtomicFieldData extends AbstractAtomicNumericFieldData {
public static PackedArrayAtomicFieldData empty(int numDocs) {
return new Empty(numDocs);
}
private final int numDocs;
protected long size = -1;
public PackedArrayAtomicFieldData(int numDocs) {
super(false);
this.numDocs = numDocs;
}
@Override
public void close() {
}
@Override
public int getNumDocs() {
return numDocs;
}
static class Empty extends PackedArrayAtomicFieldData {
Empty(int numDocs) {
super(numDocs);
}
@Override
public LongValues getLongValues() {
return LongValues.EMPTY;
}
@Override
public DoubleValues getDoubleValues() {
return DoubleValues.EMPTY;
}
@Override
public boolean isMultiValued() {
return false;
}
@Override
public boolean isValuesOrdered() {
return false;
}
@Override
public long getMemorySizeInBytes() {
return 0;
}
@Override
public long getNumberUniqueValues() {
return 0;
}
@Override
public BytesValues getBytesValues(boolean needsHashes) {
return BytesValues.EMPTY;
}
@Override
public ScriptDocValues getScriptValues() {
return ScriptDocValues.EMPTY;
}
}
public static class WithOrdinals extends PackedArrayAtomicFieldData {
private final MonotonicAppendingLongBuffer values;
private final Ordinals ordinals;
public WithOrdinals(MonotonicAppendingLongBuffer values, int numDocs, Ordinals ordinals) {
super(numDocs);
this.values = values;
this.ordinals = ordinals;
}
@Override
public boolean isMultiValued() {
return ordinals.isMultiValued();
}
@Override
public boolean isValuesOrdered() {
return true;
}
@Override
public long getMemorySizeInBytes() {
if (size == -1) {
size = RamUsageEstimator.NUM_BYTES_INT/*size*/ + RamUsageEstimator.NUM_BYTES_INT/*numDocs*/ + values.ramBytesUsed() + ordinals.getMemorySizeInBytes();
}
return size;
}
@Override
public long getNumberUniqueValues() {
return ordinals.getNumOrds();
}
@Override
public LongValues getLongValues() {
return new LongValues(values, ordinals.ordinals());
}
@Override
public DoubleValues getDoubleValues() {
return new DoubleValues(values, ordinals.ordinals());
}
static class LongValues extends org.elasticsearch.index.fielddata.LongValues.WithOrdinals {
private final MonotonicAppendingLongBuffer values;
LongValues(MonotonicAppendingLongBuffer values, Ordinals.Docs ordinals) {
super(ordinals);
this.values = values;
}
@Override
public long getValueByOrd(long ord) {
assert ord != Ordinals.MISSING_ORDINAL;
return values.get(ord - 1);
}
}
static class DoubleValues extends org.elasticsearch.index.fielddata.DoubleValues.WithOrdinals {
private final MonotonicAppendingLongBuffer values;
DoubleValues(MonotonicAppendingLongBuffer values, Ordinals.Docs ordinals) {
super(ordinals);
this.values = values;
}
@Override
public double getValueByOrd(long ord) {
assert ord != Ordinals.MISSING_ORDINAL;
return values.get(ord - 1);
}
}
}
/**
* A single valued case, where not all values are "set", so we have a special
* value which encodes the fact that the document has no value.
*/
public static class SingleSparse extends PackedArrayAtomicFieldData {
private final PackedInts.Mutable values;
private final long minValue;
private final long missingValue;
private final long numOrds;
public SingleSparse(PackedInts.Mutable values, long minValue, int numDocs, long missingValue, long numOrds) {
super(numDocs);
this.values = values;
this.minValue = minValue;
this.missingValue = missingValue;
this.numOrds = numOrds;
}
@Override
public boolean isMultiValued() {
return false;
}
@Override
public boolean isValuesOrdered() {
return false;
}
@Override
public long getNumberUniqueValues() {
return numOrds;
}
@Override
public long getMemorySizeInBytes() {
if (size == -1) {
size = values.ramBytesUsed() + 2 * RamUsageEstimator.NUM_BYTES_LONG;
}
return size;
}
@Override
public LongValues getLongValues() {
return new LongValues(values, minValue, missingValue);
}
@Override
public DoubleValues getDoubleValues() {
return new DoubleValues(values, minValue, missingValue);
}
static class LongValues extends org.elasticsearch.index.fielddata.LongValues {
private final PackedInts.Mutable values;
private final long minValue;
private final long missingValue;
LongValues(PackedInts.Mutable values, long minValue, long missingValue) {
super(false);
this.values = values;
this.minValue = minValue;
this.missingValue = missingValue;
}
@Override
public int setDocument(int docId) {
this.docId = docId;
return values.get(docId) != missingValue ? 1 : 0;
}
@Override
public long nextValue() {
return minValue + values.get(docId);
}
}
static class DoubleValues extends org.elasticsearch.index.fielddata.DoubleValues {
private final PackedInts.Mutable values;
private final long minValue;
private final long missingValue;
DoubleValues(PackedInts.Mutable values, long minValue, long missingValue) {
super(false);
this.values = values;
this.minValue = minValue;
this.missingValue = missingValue;
}
@Override
public int setDocument(int docId) {
this.docId = docId;
return values.get(docId) != missingValue ? 1 : 0;
}
@Override
public double nextValue() {
return minValue + values.get(docId);
}
}
}
/**
* Assumes all the values are "set", and docId is used as the index to the value array.
*/
public static class Single extends PackedArrayAtomicFieldData {
private final PackedInts.Mutable values;
private final long minValue;
private final long numOrds;
/**
* Note, here, we assume that there is no offset by 1 from docId, so position 0
* is the value for docId 0.
*/
public Single(PackedInts.Mutable values, long minValue, int numDocs, long numOrds) {
super(numDocs);
this.values = values;
this.minValue = minValue;
this.numOrds = numOrds;
}
@Override
public boolean isMultiValued() {
return false;
}
@Override
public boolean isValuesOrdered() {
return false;
}
@Override
public long getNumberUniqueValues() {
return numOrds;
}
@Override
public long getMemorySizeInBytes() {
if (size == -1) {
size = values.ramBytesUsed();
}
return size;
}
@Override
public LongValues getLongValues() {
return new LongValues(values, minValue);
}
@Override
public DoubleValues getDoubleValues() {
return new DoubleValues(values, minValue);
}
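// Dense case: every document has exactly one value, stored at index docId as an offset from minValue.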
static class LongValues extends DenseLongValues {
private final PackedInts.Mutable values;
private final long minValue;
LongValues(PackedInts.Mutable values, long minValue) {
super(false);
this.values = values;
this.minValue = minValue;
}
@Override
public long nextValue() {
return minValue + values.get(docId);
}
}
static class DoubleValues extends org.elasticsearch.index.fielddata.DoubleValues {
private final PackedInts.Mutable values;
private final long minValue;
DoubleValues(PackedInts.Mutable values, long minValue) {
super(false);
this.values = values;
this.minValue = minValue;
}
@Override
public int setDocument(int docId) {
this.docId = docId;
return 1;
}
@Override
public double nextValue() {
return minValue + values.get(docId);
}
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_fielddata_plain_PackedArrayAtomicFieldData.java
|
4,738 |
public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository> implements Repository, RateLimiterListener {
private ImmutableBlobContainer snapshotsBlobContainer;
protected final String repositoryName;
private static final String SNAPSHOT_PREFIX = "snapshot-";
private static final String SNAPSHOTS_FILE = "index";
private static final String METADATA_PREFIX = "metadata-";
private final BlobStoreIndexShardRepository indexShardRepository;
private final ToXContent.Params globalOnlyFormatParams;
private final RateLimiter snapshotRateLimiter;
private final RateLimiter restoreRateLimiter;
private final CounterMetric snapshotRateLimitingTimeInNanos = new CounterMetric();
private final CounterMetric restoreRateLimitingTimeInNanos = new CounterMetric();
/**
* Constructs new BlobStoreRepository
*
* @param repositoryName repository name
* @param repositorySettings repository settings
* @param indexShardRepository an instance of IndexShardRepository
*/
protected BlobStoreRepository(String repositoryName, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository) {
super(repositorySettings.globalSettings());
this.repositoryName = repositoryName;
this.indexShardRepository = (BlobStoreIndexShardRepository) indexShardRepository;
Map<String, String> globalOnlyParams = Maps.newHashMap();
globalOnlyParams.put(MetaData.GLOBAL_PERSISTENT_ONLY_PARAM, "true");
globalOnlyFormatParams = new ToXContent.MapParams(globalOnlyParams);
snapshotRateLimiter = getRateLimiter(repositorySettings, "max_snapshot_bytes_per_sec", new ByteSizeValue(20, ByteSizeUnit.MB));
restoreRateLimiter = getRateLimiter(repositorySettings, "max_restore_bytes_per_sec", new ByteSizeValue(20, ByteSizeUnit.MB));
}
/**
* {@inheritDoc}
*/
@Override
protected void doStart() throws ElasticsearchException {
this.snapshotsBlobContainer = blobStore().immutableBlobContainer(basePath());
indexShardRepository.initialize(blobStore(), basePath(), chunkSize(), snapshotRateLimiter, restoreRateLimiter, this);
}
/**
* {@inheritDoc}
*/
@Override
protected void doStop() throws ElasticsearchException {
}
/**
* {@inheritDoc}
*/
@Override
protected void doClose() throws ElasticsearchException {
try {
blobStore().close();
} catch (Throwable t) {
logger.warn("cannot close blob store", t);
}
}
/**
* Returns initialized and ready to use BlobStore
* <p/>
* This method is first called in the {@link #doStart()} method.
*
* @return blob store
*/
abstract protected BlobStore blobStore();
/**
* Returns base path of the repository
*/
abstract protected BlobPath basePath();
/**
* Returns true if metadata and snapshot files should be compressed
*
* @return true if compression is needed
*/
protected boolean isCompress() {
return false;
}
/**
* Returns data file chunk size.
* <p/>
* This method should return null if no chunking is needed.
*
* @return chunk size
*/
protected ByteSizeValue chunkSize() {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void initializeSnapshot(SnapshotId snapshotId, ImmutableList<String> indices, MetaData metaData) {
try {
BlobStoreSnapshot blobStoreSnapshot = BlobStoreSnapshot.builder()
.name(snapshotId.getSnapshot())
.indices(indices)
.startTime(System.currentTimeMillis())
.build();
BytesStreamOutput bStream = writeSnapshot(blobStoreSnapshot);
String snapshotBlobName = snapshotBlobName(snapshotId);
if (snapshotsBlobContainer.blobExists(snapshotBlobName)) {
// TODO: Can we make it atomic?
throw new InvalidSnapshotNameException(snapshotId, "snapshot with such name already exists");
}
snapshotsBlobContainer.writeBlob(snapshotBlobName, bStream.bytes().streamInput(), bStream.bytes().length());
// Write Global MetaData
// TODO: Check if metadata needs to be written
bStream = writeGlobalMetaData(metaData);
snapshotsBlobContainer.writeBlob(metaDataBlobName(snapshotId), bStream.bytes().streamInput(), bStream.bytes().length());
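// Store each index's metadata in its own blob under indices/<index>, keyed by the snapshot name.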
for (String index : indices) {
IndexMetaData indexMetaData = metaData.index(index);
BlobPath indexPath = basePath().add("indices").add(index);
ImmutableBlobContainer indexMetaDataBlobContainer = blobStore().immutableBlobContainer(indexPath);
bStream = new BytesStreamOutput();
StreamOutput stream = bStream;
if (isCompress()) {
stream = CompressorFactory.defaultCompressor().streamOutput(stream);
}
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
builder.startObject();
IndexMetaData.Builder.toXContent(indexMetaData, builder, ToXContent.EMPTY_PARAMS);
builder.endObject();
builder.close();
indexMetaDataBlobContainer.writeBlob(snapshotBlobName(snapshotId), bStream.bytes().streamInput(), bStream.bytes().length());
}
} catch (IOException ex) {
throw new SnapshotCreationException(snapshotId, ex);
}
}
/**
* {@inheritDoc}
*/
@Override
public void deleteSnapshot(SnapshotId snapshotId) {
Snapshot snapshot = readSnapshot(snapshotId);
MetaData metaData = readSnapshotMetaData(snapshotId, snapshot.indices());
try {
String blobName = snapshotBlobName(snapshotId);
// Delete the snapshot file first so we don't end up with a partially deleted snapshot that looks OK
snapshotsBlobContainer.deleteBlob(blobName);
snapshotsBlobContainer.deleteBlob(metaDataBlobName(snapshotId));
// Delete snapshot from the snapshot list
ImmutableList<SnapshotId> snapshotIds = snapshots();
if (snapshotIds.contains(snapshotId)) {
ImmutableList.Builder<SnapshotId> builder = ImmutableList.builder();
for (SnapshotId id : snapshotIds) {
if (!snapshotId.equals(id)) {
builder.add(id);
}
}
snapshotIds = builder.build();
}
writeSnapshotList(snapshotIds);
// Now delete all indices
for (String index : snapshot.indices()) {
BlobPath indexPath = basePath().add("indices").add(index);
ImmutableBlobContainer indexMetaDataBlobContainer = blobStore().immutableBlobContainer(indexPath);
try {
indexMetaDataBlobContainer.deleteBlob(blobName);
} catch (IOException ex) {
throw new SnapshotException(snapshotId, "failed to delete metadata", ex);
}
IndexMetaData indexMetaData = metaData.index(index);
for (int i = 0; i < indexMetaData.getNumberOfShards(); i++) {
indexShardRepository.delete(snapshotId, new ShardId(index, i));
}
}
} catch (IOException ex) {
throw new RepositoryException(this.repositoryName, "failed to update snapshot in repository", ex);
}
}
/**
* {@inheritDoc}
*/
@Override
public Snapshot finalizeSnapshot(SnapshotId snapshotId, String failure, int totalShards, ImmutableList<SnapshotShardFailure> shardFailures) {
BlobStoreSnapshot snapshot = (BlobStoreSnapshot) readSnapshot(snapshotId);
if (snapshot == null) {
throw new SnapshotMissingException(snapshotId);
}
if (snapshot.state().completed()) {
throw new SnapshotException(snapshotId, "snapshot is already closed");
}
try {
String blobName = snapshotBlobName(snapshotId);
BlobStoreSnapshot.Builder updatedSnapshot = BlobStoreSnapshot.builder().snapshot(snapshot);
if (failure == null) {
updatedSnapshot.success();
updatedSnapshot.failures(totalShards, shardFailures);
} else {
updatedSnapshot.failed(failure);
}
updatedSnapshot.endTime(System.currentTimeMillis());
snapshot = updatedSnapshot.build();
BytesStreamOutput bStream = writeSnapshot(snapshot);
snapshotsBlobContainer.writeBlob(blobName, bStream.bytes().streamInput(), bStream.bytes().length());
ImmutableList<SnapshotId> snapshotIds = snapshots();
if (!snapshotIds.contains(snapshotId)) {
snapshotIds = ImmutableList.<SnapshotId>builder().addAll(snapshotIds).add(snapshotId).build();
}
writeSnapshotList(snapshotIds);
return snapshot;
} catch (IOException ex) {
throw new RepositoryException(this.repositoryName, "failed to update snapshot in repository", ex);
}
}
/**
* {@inheritDoc}
*/
@Override
public ImmutableList<SnapshotId> snapshots() {
try {
List<SnapshotId> snapshots = newArrayList();
ImmutableMap<String, BlobMetaData> blobs;
try {
blobs = snapshotsBlobContainer.listBlobsByPrefix(SNAPSHOT_PREFIX);
} catch (UnsupportedOperationException ex) {
// Fall back in case listBlobsByPrefix isn't supported by the blob store
return readSnapshotList();
}
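// Recover each snapshot name by stripping the "snapshot-" prefix from the blob name.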
int prefixLength = SNAPSHOT_PREFIX.length();
for (BlobMetaData md : blobs.values()) {
String name = md.name().substring(prefixLength);
snapshots.add(new SnapshotId(repositoryName, name));
}
return ImmutableList.copyOf(snapshots);
} catch (IOException ex) {
throw new RepositoryException(repositoryName, "failed to list snapshots in repository", ex);
}
}
/**
* {@inheritDoc}
*/
@Override
public MetaData readSnapshotMetaData(SnapshotId snapshotId, ImmutableList<String> indices) {
MetaData metaData;
try {
byte[] data = snapshotsBlobContainer.readBlobFully(metaDataBlobName(snapshotId));
metaData = readMetaData(data);
} catch (FileNotFoundException ex) {
throw new SnapshotMissingException(snapshotId, ex);
} catch (IOException ex) {
throw new SnapshotException(snapshotId, "failed to get snapshots", ex);
}
MetaData.Builder metaDataBuilder = MetaData.builder(metaData);
for (String index : indices) {
BlobPath indexPath = basePath().add("indices").add(index);
ImmutableBlobContainer indexMetaDataBlobContainer = blobStore().immutableBlobContainer(indexPath);
XContentParser parser = null;
try {
byte[] data = indexMetaDataBlobContainer.readBlobFully(snapshotBlobName(snapshotId));
parser = XContentHelper.createParser(data, 0, data.length);
XContentParser.Token token;
if ((token = parser.nextToken()) == XContentParser.Token.START_OBJECT) {
IndexMetaData indexMetaData = IndexMetaData.Builder.fromXContent(parser);
if ((token = parser.nextToken()) == XContentParser.Token.END_OBJECT) {
metaDataBuilder.put(indexMetaData, false);
continue;
}
}
throw new ElasticsearchParseException("unexpected token [" + token + "]");
} catch (IOException ex) {
throw new SnapshotException(snapshotId, "failed to read metadata", ex);
} finally {
if (parser != null) {
parser.close();
}
}
}
return metaDataBuilder.build();
}
/**
* {@inheritDoc}
*/
@Override
public Snapshot readSnapshot(SnapshotId snapshotId) {
try {
String blobName = snapshotBlobName(snapshotId);
byte[] data = snapshotsBlobContainer.readBlobFully(blobName);
return readSnapshot(data);
} catch (FileNotFoundException ex) {
throw new SnapshotMissingException(snapshotId, ex);
} catch (IOException ex) {
throw new SnapshotException(snapshotId, "failed to get snapshots", ex);
}
}
/**
* Configures RateLimiter based on repository and global settings
* @param repositorySettings repository settings
* @param setting setting to use to configure rate limiter
* @param defaultRate default limiting rate
* @return rate limiter or null if no throttling is needed
*/
private RateLimiter getRateLimiter(RepositorySettings repositorySettings, String setting, ByteSizeValue defaultRate) {
ByteSizeValue maxSnapshotBytesPerSec = repositorySettings.settings().getAsBytesSize(setting,
componentSettings.getAsBytesSize(setting, defaultRate));
if (maxSnapshotBytesPerSec.bytes() <= 0) {
return null;
} else {
return new RateLimiter.SimpleRateLimiter(maxSnapshotBytesPerSec.mbFrac());
}
}
/**
* Parses JSON containing snapshot description
*
* @param data snapshot description in JSON format
* @return parsed snapshot description
* @throws IOException parse exceptions
*/
private BlobStoreSnapshot readSnapshot(byte[] data) throws IOException {
XContentParser parser = null;
try {
parser = XContentHelper.createParser(data, 0, data.length);
XContentParser.Token token;
if ((token = parser.nextToken()) == XContentParser.Token.START_OBJECT) {
if ((token = parser.nextToken()) == XContentParser.Token.FIELD_NAME) {
parser.nextToken();
BlobStoreSnapshot snapshot = BlobStoreSnapshot.Builder.fromXContent(parser);
if ((token = parser.nextToken()) == XContentParser.Token.END_OBJECT) {
return snapshot;
}
}
}
throw new ElasticsearchParseException("unexpected token [" + token + "]");
} finally {
if (parser != null) {
parser.close();
}
}
}
/**
* Parses JSON containing cluster metadata
*
* @param data cluster metadata in JSON format
* @return parsed metadata
* @throws IOException parse exceptions
*/
private MetaData readMetaData(byte[] data) throws IOException {
XContentParser parser = null;
try {
parser = XContentHelper.createParser(data, 0, data.length);
XContentParser.Token token;
if ((token = parser.nextToken()) == XContentParser.Token.START_OBJECT) {
if ((token = parser.nextToken()) == XContentParser.Token.FIELD_NAME) {
parser.nextToken();
MetaData metaData = MetaData.Builder.fromXContent(parser);
if ((token = parser.nextToken()) == XContentParser.Token.END_OBJECT) {
return metaData;
}
}
}
throw new ElasticsearchParseException("unexpected token [" + token + "]");
} finally {
if (parser != null) {
parser.close();
}
}
}
/**
* Returns name of snapshot blob
*
* @param snapshotId snapshot id
* @return name of snapshot blob
*/
private String snapshotBlobName(SnapshotId snapshotId) {
return SNAPSHOT_PREFIX + snapshotId.getSnapshot();
}
/**
* Returns name of metadata blob
*
* @param snapshotId snapshot id
* @return name of metadata blob
*/
private String metaDataBlobName(SnapshotId snapshotId) {
return METADATA_PREFIX + snapshotId.getSnapshot();
}
/**
* Serializes BlobStoreSnapshot into JSON
*
* @param snapshot - snapshot description
* @return BytesStreamOutput representing JSON serialized BlobStoreSnapshot
* @throws IOException
*/
private BytesStreamOutput writeSnapshot(BlobStoreSnapshot snapshot) throws IOException {
BytesStreamOutput bStream = new BytesStreamOutput();
StreamOutput stream = bStream;
if (isCompress()) {
stream = CompressorFactory.defaultCompressor().streamOutput(stream);
}
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
builder.startObject();
BlobStoreSnapshot.Builder.toXContent(snapshot, builder, globalOnlyFormatParams);
builder.endObject();
builder.close();
return bStream;
}
/**
* Serializes global MetaData into JSON
*
* @param metaData - metaData
* @return BytesStreamOutput representing JSON serialized global MetaData
* @throws IOException
*/
private BytesStreamOutput writeGlobalMetaData(MetaData metaData) throws IOException {
BytesStreamOutput bStream = new BytesStreamOutput();
StreamOutput stream = bStream;
if (isCompress()) {
stream = CompressorFactory.defaultCompressor().streamOutput(stream);
}
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
builder.startObject();
MetaData.Builder.toXContent(metaData, builder, globalOnlyFormatParams);
builder.endObject();
builder.close();
return bStream;
}
/**
* Writes snapshot index file
* <p/>
* This file can be used by read-only repositories that are unable to list files in the repository
*
* @param snapshots list of snapshot ids
* @throws IOException I/O errors
*/
protected void writeSnapshotList(ImmutableList<SnapshotId> snapshots) throws IOException {
BytesStreamOutput bStream = new BytesStreamOutput();
StreamOutput stream = bStream;
if (isCompress()) {
stream = CompressorFactory.defaultCompressor().streamOutput(stream);
}
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
builder.startObject();
builder.startArray("snapshots");
for (SnapshotId snapshot : snapshots) {
builder.value(snapshot.getSnapshot());
}
builder.endArray();
builder.endObject();
builder.close();
snapshotsBlobContainer.writeBlob(SNAPSHOTS_FILE, bStream.bytes().streamInput(), bStream.bytes().length());
}
/**
* Reads snapshot index file
* <p/>
* This file can be used by read-only repositories that are unable to list files in the repository
*
* @return list of snapshots in the repository
* @throws IOException I/O errors
*/
protected ImmutableList<SnapshotId> readSnapshotList() throws IOException {
byte[] data = snapshotsBlobContainer.readBlobFully(SNAPSHOTS_FILE);
ArrayList<SnapshotId> snapshots = new ArrayList<SnapshotId>();
XContentParser parser = null;
try {
parser = XContentHelper.createParser(data, 0, data.length);
if (parser.nextToken() == XContentParser.Token.START_OBJECT) {
if (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
String currentFieldName = parser.currentName();
if ("snapshots".equals(currentFieldName)) {
if (parser.nextToken() == XContentParser.Token.START_ARRAY) {
while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
snapshots.add(new SnapshotId(repositoryName, parser.text()));
}
}
}
}
}
} finally {
if (parser != null) {
parser.close();
}
}
return ImmutableList.copyOf(snapshots);
}
@Override
public void onRestorePause(long nanos) {
restoreRateLimitingTimeInNanos.inc(nanos);
}
@Override
public void onSnapshotPause(long nanos) {
snapshotRateLimitingTimeInNanos.inc(nanos);
}
@Override
public long snapshotThrottleTimeInNanos() {
return snapshotRateLimitingTimeInNanos.count();
}
@Override
public long restoreThrottleTimeInNanos() {
return restoreRateLimitingTimeInNanos.count();
}
}
| 1no label
|
src_main_java_org_elasticsearch_repositories_blobstore_BlobStoreRepository.java
|
576 |
public interface IndexEntriesResultListener {
boolean addResult(ODocument entry);
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_index_OIndex.java
|
5 |
public class AbbreviationsManager {
/** A regular expression used to separate alternative abbreviations. (\s == any whitespace) */
private static final Pattern ABBREVIATION_SEPARATOR = Pattern.compile("\\s*\\|\\s*");
/** A regular expression used to separate words. */
private static final Pattern WORD_SEPARATOR = Pattern.compile("\\s+");
private Map<String, List<String>> abbreviations = new HashMap<String, List<String>>();
/**
* Creates a new abbreviations manager configured with a set of abbreviation
* properties. Abbreviation properties are of the form:
* <pre>
* phrase = alt1 | alt2 | ...
* </pre>
* Whitespace around the "=" and "|" separators is removed. The phrase is
* converted to lower case, but the alternatives are used verbatim.
*
* @param abbreviationProperties the abbreviation properties
*/
public AbbreviationsManager(Properties abbreviationProperties) {
@SuppressWarnings("unchecked")
Enumeration<String> e = (Enumeration<String>) abbreviationProperties.propertyNames();
while (e.hasMoreElements()) {
String phrase = e.nextElement();
String lcPhrase = phrase.toLowerCase();
String[] alternatives = ABBREVIATION_SEPARATOR.split(abbreviationProperties.getProperty(phrase).trim());
List<String> abbreviationsForPhrase = new ArrayList<String>(Arrays.asList(alternatives));
Collections.sort(abbreviationsForPhrase, new Comparator<String>() {
@Override
public int compare(String o1, String o2) {
return o1.length() - o2.length();
}
});
abbreviations.put(lcPhrase, abbreviationsForPhrase);
}
}
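// A minimal usage sketch (hypothetical property values, purely illustrative of the format documented above):
//   Properties props = new Properties();
//   props.setProperty("Amperes", "Amps | A");
//   AbbreviationsManager manager = new AbbreviationsManager(props);
//   manager.getAlternatives("Amperes"); // -> [Amperes, A, Amps]: the original phrase first, then alternatives shortest-first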
/**
* Gets the alternative abbreviations for a phrase. The original phrase is always the
* the first alternative returned. If no abbreviations are found for the phrase, returns
* a list with one element, the original phrase. The phrase is converted to lower case
* before looking up its alternatives.
*
* @param phrase the phrase to abbreviate
* @return a list of alternative abbreviations, with the original phrase as the first element
*/
public List<String> getAlternatives(String phrase) {
List<String> result = new ArrayList<String>();
result.add(phrase);
List<String> alternatives = abbreviations.get(phrase.toLowerCase());
if (alternatives != null) {
result.addAll(alternatives);
}
return result;
}
/**
* Finds the phrases within a string that can be abbreviated, and returns
* a structure with those phrases and the alternatives for each phrase.
* A phrase is a sequence of one or more words in the original string, where
* words are delimited by whitespace. At each point in the original string,
* the longest phrase for which there are abbreviations is found.
*
* @param s the string to find abbreviations for
* @return a structure describing the available abbreviations
*/
public Abbreviations getAbbreviations(String s) {
AbbreviationsImpl abbrev = new AbbreviationsImpl(s);
List<String> phrases = getPhrasesWithAbbreviations(s);
for (String phrase : phrases) {
abbrev.addPhrase(phrase, getAlternatives(phrase));
}
return abbrev;
}
/**
* Constructs a partition of a string into phrases, along word boundaries,
* where each phrase has one or more alternative abbreviations, and each
* phrase is the longest match against the abbreviations at that position
* in the original string.
*
* @param s the original string to partition into phrases
* @return a list of phrases
*/
private List<String> getPhrasesWithAbbreviations(String s) {
int phraseStart = 0;
List<String> phrasesWithAbbreviations = new ArrayList<String>();
Matcher wordBoundary = WORD_SEPARATOR.matcher(s);
while (phraseStart < s.length()) {
int phraseLength = getLongestPhraseLength(s.substring(phraseStart));
phrasesWithAbbreviations.add(s.substring(phraseStart, phraseStart + phraseLength));
if (wordBoundary.find(phraseStart + phraseLength)) {
phraseStart = wordBoundary.end();
} else {
phraseStart = s.length();
}
}
return phrasesWithAbbreviations;
}
/**
* Finds the longest phrase within a string that has abbreviations. The first word
* is always a possibility, even if no alternatives exist for that word.
*
* @param s the string for which to find the longest phrase with alternatives
* @return the length of the longest phrase with alternative abbreviations
*/
private int getLongestPhraseLength(String s) {
// If the entire string matches, then it is obviously the longest matching phrase.
if (abbreviations.containsKey(s.toLowerCase())) {
return s.length();
}
Matcher wordBoundary = WORD_SEPARATOR.matcher(s);
if (!wordBoundary.find()) {
// No word boundaries found. Entire string is only possible phrase.
return s.length();
}
// First word is always an abbreviation candidate, perhaps with no
// alternatives but itself.
int longestMatchLength = wordBoundary.start();
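// Extend the candidate phrase one word boundary at a time, keeping the longest prefix that is a known phrase.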
while (wordBoundary.find()) {
if (abbreviations.containsKey(s.substring(0, wordBoundary.start()).toLowerCase())) {
longestMatchLength = wordBoundary.start();
}
}
return longestMatchLength;
}
}
| 0true
|
tableViews_src_main_java_gov_nasa_arc_mct_abbreviation_impl_AbbreviationsManager.java
|
978 |
public enum ReplicationType {
/**
* Sync replication, wait till all replicas have performed the operation.
*/
SYNC((byte) 0),
/**
* Async replication. Will send the request to replicas, but will not wait for it
*/
ASYNC((byte) 1),
/**
* Use the default replication type configured for this node.
*/
DEFAULT((byte) 2);
private byte id;
ReplicationType(byte id) {
this.id = id;
}
/**
* The internal representation of the operation type.
*/
public byte id() {
return id;
}
/**
* Constructs the operation type from its internal representation.
*/
public static ReplicationType fromId(byte id) {
if (id == 0) {
return SYNC;
} else if (id == 1) {
return ASYNC;
} else if (id == 2) {
return DEFAULT;
} else {
throw new ElasticsearchIllegalArgumentException("No type match for [" + id + "]");
}
}
/**
* Parse the replication type from string.
*/
public static ReplicationType fromString(String type) {
if ("async".equals(type)) {
return ASYNC;
} else if ("sync".equals(type)) {
return SYNC;
} else if ("default".equals(type)) {
return DEFAULT;
}
throw new ElasticsearchIllegalArgumentException("No replication type match for [" + type + "], should be either `async`, or `sync`");
}
}
| 0true
|
src_main_java_org_elasticsearch_action_support_replication_ReplicationType.java
|
519 |
public class OGraphException extends OException {
private static final long serialVersionUID = -2655748565531836968L;
public OGraphException(String string) {
super(string);
}
public OGraphException(String message, Throwable cause) {
super(message, cause);
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_exception_OGraphException.java
|
1,049 |
public class MultiTermVectorsTests extends AbstractTermVectorTests {
@Test
public void testDuelESLucene() throws Exception {
AbstractTermVectorTests.TestFieldSetting[] testFieldSettings = getFieldSettings();
createIndexBasedOnFieldSettings(testFieldSettings, -1);
AbstractTermVectorTests.TestDoc[] testDocs = generateTestDocs(5, testFieldSettings);
DirectoryReader directoryReader = indexDocsWithLucene(testDocs);
AbstractTermVectorTests.TestConfig[] testConfigs = generateTestConfigs(20, testDocs, testFieldSettings);
MultiTermVectorsRequestBuilder requestBuilder = client().prepareMultiTermVectors();
for (AbstractTermVectorTests.TestConfig test : testConfigs) {
requestBuilder.add(getRequestForConfig(test).request());
}
MultiTermVectorsItemResponse[] responseItems = requestBuilder.get().getResponses();
for (int i = 0; i < testConfigs.length; i++) {
TestConfig test = testConfigs[i];
try {
MultiTermVectorsItemResponse item = responseItems[i];
if (test.expectedException != null) {
assertTrue(item.isFailed());
continue;
} else if (item.isFailed()) {
fail(item.getFailure().getMessage());
}
Fields luceneTermVectors = getTermVectorsFromLucene(directoryReader, test.doc);
validateResponse(item.getResponse(), luceneTermVectors, test);
} catch (Throwable t) {
throw new Exception("Test exception while running " + test.toString(), t);
}
}
}
public void testMissingIndexThrowsMissingIndex() throws Exception {
TermVectorRequestBuilder requestBuilder = client().prepareTermVector("testX", "typeX", Integer.toString(1));
MultiTermVectorsRequestBuilder mtvBuilder = new MultiTermVectorsRequestBuilder(client());
mtvBuilder.add(requestBuilder.request());
MultiTermVectorsResponse response = mtvBuilder.execute().actionGet();
assertThat(response.getResponses().length, equalTo(1));
assertThat(response.getResponses()[0].getFailure().getMessage(), equalTo("[" + response.getResponses()[0].getIndex() + "] missing"));
}
}
| 0true
|
src_test_java_org_elasticsearch_action_termvector_MultiTermVectorsTests.java
|
2,768 |
public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpServerTransport> implements HttpServerTransport {
static {
NettyStaticSetup.setup();
}
private final NetworkService networkService;
final ByteSizeValue maxContentLength;
final ByteSizeValue maxInitialLineLength;
final ByteSizeValue maxHeaderSize;
final ByteSizeValue maxChunkSize;
private final int workerCount;
private final boolean blockingServer;
final boolean compression;
private final int compressionLevel;
final boolean resetCookies;
private final String port;
private final String bindHost;
private final String publishHost;
private final Boolean tcpNoDelay;
private final Boolean tcpKeepAlive;
private final Boolean reuseAddress;
private final ByteSizeValue tcpSendBufferSize;
private final ByteSizeValue tcpReceiveBufferSize;
private final ReceiveBufferSizePredictorFactory receiveBufferSizePredictorFactory;
final ByteSizeValue maxCumulationBufferCapacity;
final int maxCompositeBufferComponents;
private volatile ServerBootstrap serverBootstrap;
private volatile BoundTransportAddress boundAddress;
private volatile Channel serverChannel;
OpenChannelsHandler serverOpenChannels;
private volatile HttpServerAdapter httpServerAdapter;
@Inject
public NettyHttpServerTransport(Settings settings, NetworkService networkService) {
super(settings);
this.networkService = networkService;
if (settings.getAsBoolean("netty.epollBugWorkaround", false)) {
System.setProperty("org.jboss.netty.epollBugWorkaround", "true");
}
ByteSizeValue maxContentLength = componentSettings.getAsBytesSize("max_content_length", settings.getAsBytesSize("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB)));
this.maxChunkSize = componentSettings.getAsBytesSize("max_chunk_size", settings.getAsBytesSize("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB)));
this.maxHeaderSize = componentSettings.getAsBytesSize("max_header_size", settings.getAsBytesSize("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB)));
this.maxInitialLineLength = componentSettings.getAsBytesSize("max_initial_line_length", settings.getAsBytesSize("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB)));
// don't reset cookies by default, since I don't think we really need to
// note, parsing cookies was fixed in netty 3.5.1 regarding stack allocation, but still, currently, we don't need cookies
this.resetCookies = componentSettings.getAsBoolean("reset_cookies", settings.getAsBoolean("http.reset_cookies", false));
this.maxCumulationBufferCapacity = componentSettings.getAsBytesSize("max_cumulation_buffer_capacity", null);
this.maxCompositeBufferComponents = componentSettings.getAsInt("max_composite_buffer_components", -1);
this.workerCount = componentSettings.getAsInt("worker_count", EsExecutors.boundedNumberOfProcessors(settings) * 2);
this.blockingServer = settings.getAsBoolean("http.blocking_server", settings.getAsBoolean(TCP_BLOCKING_SERVER, settings.getAsBoolean(TCP_BLOCKING, false)));
this.port = componentSettings.get("port", settings.get("http.port", "9200-9300"));
this.bindHost = componentSettings.get("bind_host", settings.get("http.bind_host", settings.get("http.host")));
this.publishHost = componentSettings.get("publish_host", settings.get("http.publish_host", settings.get("http.host")));
this.tcpNoDelay = componentSettings.getAsBoolean("tcp_no_delay", settings.getAsBoolean(TCP_NO_DELAY, true));
this.tcpKeepAlive = componentSettings.getAsBoolean("tcp_keep_alive", settings.getAsBoolean(TCP_KEEP_ALIVE, true));
this.reuseAddress = componentSettings.getAsBoolean("reuse_address", settings.getAsBoolean(TCP_REUSE_ADDRESS, NetworkUtils.defaultReuseAddress()));
this.tcpSendBufferSize = componentSettings.getAsBytesSize("tcp_send_buffer_size", settings.getAsBytesSize(TCP_SEND_BUFFER_SIZE, TCP_DEFAULT_SEND_BUFFER_SIZE));
this.tcpReceiveBufferSize = componentSettings.getAsBytesSize("tcp_receive_buffer_size", settings.getAsBytesSize(TCP_RECEIVE_BUFFER_SIZE, TCP_DEFAULT_RECEIVE_BUFFER_SIZE));
long defaultReceiverPredictor = 512 * 1024;
if (JvmInfo.jvmInfo().mem().directMemoryMax().bytes() > 0) {
// we can guess a better default...
long l = (long) ((0.3 * JvmInfo.jvmInfo().mem().directMemoryMax().bytes()) / workerCount);
defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024));
}
// See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one
ByteSizeValue receivePredictorMin = componentSettings.getAsBytesSize("receive_predictor_min", componentSettings.getAsBytesSize("receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor)));
ByteSizeValue receivePredictorMax = componentSettings.getAsBytesSize("receive_predictor_max", componentSettings.getAsBytesSize("receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor)));
if (receivePredictorMax.bytes() == receivePredictorMin.bytes()) {
receiveBufferSizePredictorFactory = new FixedReceiveBufferSizePredictorFactory((int) receivePredictorMax.bytes());
} else {
receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory((int) receivePredictorMin.bytes(), (int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes());
}
this.compression = settings.getAsBoolean("http.compression", false);
this.compressionLevel = settings.getAsInt("http.compression_level", 6);
// validate max content length
if (maxContentLength.bytes() > Integer.MAX_VALUE) {
logger.warn("maxContentLength[" + maxContentLength + "] is set too high, resetting it to [100mb]");
maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB);
}
this.maxContentLength = maxContentLength;
logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}], receive_predictor[{}->{}]",
maxChunkSize, maxHeaderSize, maxInitialLineLength, this.maxContentLength, receivePredictorMin, receivePredictorMax);
}
public Settings settings() {
return this.settings;
}
public void httpServerAdapter(HttpServerAdapter httpServerAdapter) {
this.httpServerAdapter = httpServerAdapter;
}
@Override
protected void doStart() throws ElasticsearchException {
this.serverOpenChannels = new OpenChannelsHandler(logger);
if (blockingServer) {
serverBootstrap = new ServerBootstrap(new OioServerSocketChannelFactory(
Executors.newCachedThreadPool(daemonThreadFactory(settings, "http_server_boss")),
Executors.newCachedThreadPool(daemonThreadFactory(settings, "http_server_worker"))
));
} else {
serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory(
Executors.newCachedThreadPool(daemonThreadFactory(settings, "http_server_boss")),
Executors.newCachedThreadPool(daemonThreadFactory(settings, "http_server_worker")),
workerCount));
}
serverBootstrap.setPipelineFactory(new MyChannelPipelineFactory(this));
if (tcpNoDelay != null) {
serverBootstrap.setOption("child.tcpNoDelay", tcpNoDelay);
}
if (tcpKeepAlive != null) {
serverBootstrap.setOption("child.keepAlive", tcpKeepAlive);
}
if (tcpSendBufferSize != null && tcpSendBufferSize.bytes() > 0) {
serverBootstrap.setOption("child.sendBufferSize", tcpSendBufferSize.bytes());
}
if (tcpReceiveBufferSize != null && tcpReceiveBufferSize.bytes() > 0) {
serverBootstrap.setOption("child.receiveBufferSize", tcpReceiveBufferSize.bytes());
}
serverBootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
serverBootstrap.setOption("child.receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
if (reuseAddress != null) {
serverBootstrap.setOption("reuseAddress", reuseAddress);
serverBootstrap.setOption("child.reuseAddress", reuseAddress);
}
// Bind and start to accept incoming connections.
InetAddress hostAddressX;
try {
hostAddressX = networkService.resolveBindHostAddress(bindHost);
} catch (IOException e) {
throw new BindHttpException("Failed to resolve host [" + bindHost + "]", e);
}
final InetAddress hostAddress = hostAddressX;
PortsRange portsRange = new PortsRange(port);
final AtomicReference<Exception> lastException = new AtomicReference<Exception>();
boolean success = portsRange.iterate(new PortsRange.PortCallback() {
@Override
public boolean onPortNumber(int portNumber) {
try {
serverChannel = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber));
} catch (Exception e) {
lastException.set(e);
return false;
}
return true;
}
});
if (!success) {
throw new BindHttpException("Failed to bind to [" + port + "]", lastException.get());
}
InetSocketAddress boundAddress = (InetSocketAddress) serverChannel.getLocalAddress();
InetSocketAddress publishAddress;
try {
publishAddress = new InetSocketAddress(networkService.resolvePublishHostAddress(publishHost), boundAddress.getPort());
} catch (Exception e) {
throw new BindTransportException("Failed to resolve publish address", e);
}
this.boundAddress = new BoundTransportAddress(new InetSocketTransportAddress(boundAddress), new InetSocketTransportAddress(publishAddress));
}
@Override
protected void doStop() throws ElasticsearchException {
if (serverChannel != null) {
serverChannel.close().awaitUninterruptibly();
serverChannel = null;
}
if (serverOpenChannels != null) {
serverOpenChannels.close();
serverOpenChannels = null;
}
if (serverBootstrap != null) {
serverBootstrap.releaseExternalResources();
serverBootstrap = null;
}
}
@Override
protected void doClose() throws ElasticsearchException {
}
public BoundTransportAddress boundAddress() {
return this.boundAddress;
}
@Override
public HttpInfo info() {
return new HttpInfo(boundAddress(), maxContentLength.bytes());
}
@Override
public HttpStats stats() {
OpenChannelsHandler channels = serverOpenChannels;
return new HttpStats(channels == null ? 0 : channels.numberOfOpenChannels(), channels == null ? 0 : channels.totalChannels());
}
void dispatchRequest(HttpRequest request, HttpChannel channel) {
httpServerAdapter.dispatchRequest(request, channel);
}
void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
if (e.getCause() instanceof ReadTimeoutException) {
if (logger.isTraceEnabled()) {
logger.trace("Connection timeout [{}]", ctx.getChannel().getRemoteAddress());
}
ctx.getChannel().close();
} else {
if (!lifecycle.started()) {
// ignore
return;
}
if (!NetworkExceptionHelper.isCloseConnectionException(e.getCause())) {
logger.warn("Caught exception while handling client http traffic, closing connection {}", e.getCause(), ctx.getChannel());
ctx.getChannel().close();
} else {
logger.debug("Caught exception while handling client http traffic, closing connection {}", e.getCause(), ctx.getChannel());
ctx.getChannel().close();
}
}
}
static class MyChannelPipelineFactory implements ChannelPipelineFactory {
private final NettyHttpServerTransport transport;
private final HttpRequestHandler requestHandler;
MyChannelPipelineFactory(NettyHttpServerTransport transport) {
this.transport = transport;
this.requestHandler = new HttpRequestHandler(transport);
}
@Override
public ChannelPipeline getPipeline() throws Exception {
ChannelPipeline pipeline = Channels.pipeline();
pipeline.addLast("openChannels", transport.serverOpenChannels);
HttpRequestDecoder requestDecoder = new HttpRequestDecoder(
(int) transport.maxInitialLineLength.bytes(),
(int) transport.maxHeaderSize.bytes(),
(int) transport.maxChunkSize.bytes()
);
if (transport.maxCumulationBufferCapacity != null) {
if (transport.maxCumulationBufferCapacity.bytes() > Integer.MAX_VALUE) {
requestDecoder.setMaxCumulationBufferCapacity(Integer.MAX_VALUE);
} else {
requestDecoder.setMaxCumulationBufferCapacity((int) transport.maxCumulationBufferCapacity.bytes());
}
}
if (transport.maxCompositeBufferComponents != -1) {
requestDecoder.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents);
}
pipeline.addLast("decoder", requestDecoder);
if (transport.compression) {
pipeline.addLast("decoder_compress", new HttpContentDecompressor());
}
HttpChunkAggregator httpChunkAggregator = new HttpChunkAggregator((int) transport.maxContentLength.bytes());
if (transport.maxCompositeBufferComponents != -1) {
httpChunkAggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents);
}
pipeline.addLast("aggregator", httpChunkAggregator);
pipeline.addLast("encoder", new HttpResponseEncoder());
if (transport.compression) {
pipeline.addLast("encoder_compress", new HttpContentCompressor(transport.compressionLevel));
}
pipeline.addLast("handler", requestHandler);
return pipeline;
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_http_netty_NettyHttpServerTransport.java
|
1,147 |
public class UpdateOrderMultishipOptionActivity extends BaseActivity<CartOperationContext> {
@Resource(name = "blOrderMultishipOptionService")
protected OrderMultishipOptionService orderMultishipOptionService;
@Resource(name = "blOrderItemService")
protected OrderItemService orderItemService;
@Override
public CartOperationContext execute(CartOperationContext context) throws Exception {
CartOperationRequest request = context.getSeedData();
Long orderItemId = request.getItemRequest().getOrderItemId();
Integer orderItemQuantityDelta = request.getOrderItemQuantityDelta();
if (orderItemQuantityDelta < 0) {
int numToDelete = -1 * orderItemQuantityDelta;
//find the qty in the default fg
OrderItem orderItem = orderItemService.readOrderItemById(orderItemId);
int qty = 0;
if (!CollectionUtils.isEmpty(orderItem.getOrder().getFulfillmentGroups())) {
FulfillmentGroup fg = orderItem.getOrder().getFulfillmentGroups().get(0);
if (fg.getAddress() == null && fg.getFulfillmentOption() == null) {
for (FulfillmentGroupItem fgItem : fg.getFulfillmentGroupItems()) {
if (fgItem.getOrderItem().getId().equals(orderItemId)) {
qty += fgItem.getQuantity();
}
}
}
}
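// The quantity still sitting in the default (unassigned) fulfillment group absorbs part of the removal; only the excess needs its multiship options deleted.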
if (numToDelete >= qty) {
orderMultishipOptionService.deleteOrderItemOrderMultishipOptions(orderItemId, numToDelete - qty);
}
}
return context;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_service_workflow_update_UpdateOrderMultishipOptionActivity.java
|
4,276 |
public class SimpleFsTranslogFile implements FsTranslogFile {
private final long id;
private final ShardId shardId;
private final RafReference raf;
private final AtomicInteger operationCounter = new AtomicInteger();
private final AtomicLong lastPosition = new AtomicLong(0);
private final AtomicLong lastWrittenPosition = new AtomicLong(0);
private volatile long lastSyncPosition = 0;
public SimpleFsTranslogFile(ShardId shardId, long id, RafReference raf) throws IOException {
this.shardId = shardId;
this.id = id;
this.raf = raf;
raf.raf().setLength(0);
}
public long id() {
return this.id;
}
public int estimatedNumberOfOperations() {
return operationCounter.get();
}
public long translogSizeInBytes() {
return lastWrittenPosition.get();
}
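// Atomically reserves the byte range via lastPosition, then writes at that offset so concurrent appenders never overlap.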
public Translog.Location add(byte[] data, int from, int size) throws IOException {
long position = lastPosition.getAndAdd(size);
raf.channel().write(ByteBuffer.wrap(data, from, size), position);
lastWrittenPosition.getAndAdd(size);
operationCounter.incrementAndGet();
return new Translog.Location(id, position, size);
}
public byte[] read(Translog.Location location) throws IOException {
ByteBuffer buffer = ByteBuffer.allocate(location.size);
raf.channel().read(buffer, location.translogLocation);
return buffer.array();
}
public void close(boolean delete) {
sync();
raf.decreaseRefCount(delete);
}
/**
* Returns a snapshot on this file, <tt>null</tt> if it failed to snapshot.
*/
public FsChannelSnapshot snapshot() throws TranslogException {
try {
if (!raf.increaseRefCount()) {
return null;
}
return new FsChannelSnapshot(this.id, raf, lastWrittenPosition.get(), operationCounter.get());
} catch (Exception e) {
throw new TranslogException(shardId, "Failed to snapshot", e);
}
}
@Override
public boolean syncNeeded() {
return lastWrittenPosition.get() != lastSyncPosition;
}
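// Force the channel only when bytes were written since the last sync; sync failures are intentionally ignored.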
public void sync() {
try {
// check if we really need to sync here...
long last = lastWrittenPosition.get();
if (last == lastSyncPosition) {
return;
}
lastSyncPosition = last;
raf.channel().force(false);
} catch (Exception e) {
// ignore
}
}
@Override
public void reuse(FsTranslogFile other) {
// nothing to do there
}
@Override
public void updateBufferSize(int bufferSize) throws TranslogException {
// nothing to do here...
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_translog_fs_SimpleFsTranslogFile.java
|
397 |
public class ORecordMultiValueHelper {
public enum MULTIVALUE_CONTENT_TYPE {
EMPTY, ALL_RECORDS, ALL_RIDS, HYBRID
}
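// Tracks what kind of elements a multi-value container holds; once mixed content is seen the type degrades to HYBRID and never changes back.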
public static MULTIVALUE_CONTENT_TYPE updateContentType(final MULTIVALUE_CONTENT_TYPE iPreviousStatus, final Object iValue) {
if (iPreviousStatus == MULTIVALUE_CONTENT_TYPE.HYBRID) {
// DO NOTHING
} else if (iPreviousStatus == MULTIVALUE_CONTENT_TYPE.EMPTY) {
if (iValue instanceof ORID)
return MULTIVALUE_CONTENT_TYPE.ALL_RIDS;
else if (iValue instanceof ORecord<?>)
return MULTIVALUE_CONTENT_TYPE.ALL_RECORDS;
else
return MULTIVALUE_CONTENT_TYPE.HYBRID;
} else if (iPreviousStatus == MULTIVALUE_CONTENT_TYPE.ALL_RECORDS) {
if (iValue instanceof ORID)
return MULTIVALUE_CONTENT_TYPE.HYBRID;
} else if (iPreviousStatus == MULTIVALUE_CONTENT_TYPE.ALL_RIDS) {
if (!(iValue instanceof ORID))
return MULTIVALUE_CONTENT_TYPE.HYBRID;
}
return iPreviousStatus;
}
public static String toString(final ORecordLazyMultiValue iMultivalue) {
final boolean previousAutoConvertSetting = iMultivalue.isAutoConvertToRecord();
iMultivalue.setAutoConvertToRecord(false);
final String result = OMultiValue.toString(iMultivalue);
iMultivalue.setAutoConvertToRecord(previousAutoConvertSetting);
return result;
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_db_record_ORecordMultiValueHelper.java
|
133 |
public enum SchemaStatus {
/**
* The index is installed in the system but not yet registered with all instances in the cluster
*/
INSTALLED,
/**
* The index is registered with all instances in the cluster but not (yet) enabled
*/
REGISTERED,
/**
* The index is enabled and in use
*/
ENABLED,
/**
* The index is disabled and no longer in use
*/
DISABLED;
}
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_schema_SchemaStatus.java
|
107 |
static final class ValuesView<K,V> extends CollectionView<K,V,V>
implements Collection<V>, java.io.Serializable {
private static final long serialVersionUID = 2249069246763182397L;
ValuesView(ConcurrentHashMapV8<K,V> map) { super(map); }
public final boolean contains(Object o) {
return map.containsValue(o);
}
public final boolean remove(Object o) {
if (o != null) {
for (Iterator<V> it = iterator(); it.hasNext();) {
if (o.equals(it.next())) {
it.remove();
return true;
}
}
}
return false;
}
public final Iterator<V> iterator() {
ConcurrentHashMapV8<K,V> m = map;
Node<K,V>[] t;
int f = (t = m.table) == null ? 0 : t.length;
return new ValueIterator<K,V>(t, f, 0, f, m);
}
public final boolean add(V e) {
throw new UnsupportedOperationException();
}
public final boolean addAll(Collection<? extends V> c) {
throw new UnsupportedOperationException();
}
public ConcurrentHashMapSpliterator<V> spliteratorJSR166() {
Node<K,V>[] t;
ConcurrentHashMapV8<K,V> m = map;
long n = m.sumCount();
int f = (t = m.table) == null ? 0 : t.length;
return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
}
public void forEach(Action<? super V> action) {
if (action == null) throw new NullPointerException();
Node<K,V>[] t;
if ((t = map.table) != null) {
Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
for (Node<K,V> p; (p = it.advance()) != null; )
action.apply(p.val);
}
}
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
528 |
ex.execute(new Runnable() {
public void run() {
multiMap.put(key, "value");
final TransactionContext context = client.newTransactionContext();
try {
context.beginTransaction();
final TransactionalMultiMap txnMultiMap = context.getMultiMap(mapName);
txnMultiMap.put(key, "value");
txnMultiMap.put(key, "value1");
txnMultiMap.put(key, "value2");
assertEquals(3, txnMultiMap.get(key).size());
context.commitTransaction();
assertEquals(3, multiMap.get(key).size());
} catch (Exception e) {
error.compareAndSet(null, e);
} finally {
latch.countDown();
}
}
});
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_txn_ClientTxnMultiMapTest.java
|
1,411 |
clusterService.submitStateUpdateTask("close-indices " + indicesAsString, Priority.URGENT, new AckedClusterStateUpdateTask() {
@Override
public boolean mustAck(DiscoveryNode discoveryNode) {
return true;
}
@Override
public void onAllNodesAcked(@Nullable Throwable t) {
listener.onResponse(new ClusterStateUpdateResponse(true));
}
@Override
public void onAckTimeout() {
listener.onResponse(new ClusterStateUpdateResponse(false));
}
@Override
public TimeValue ackTimeout() {
return request.ackTimeout();
}
@Override
public TimeValue timeout() {
return request.masterNodeTimeout();
}
@Override
public void onFailure(String source, Throwable t) {
listener.onFailure(t);
}
@Override
public ClusterState execute(ClusterState currentState) {
List<String> indicesToClose = new ArrayList<String>();
for (String index : request.indices()) {
IndexMetaData indexMetaData = currentState.metaData().index(index);
if (indexMetaData == null) {
throw new IndexMissingException(new Index(index));
}
if (indexMetaData.state() != IndexMetaData.State.CLOSE) {
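// An index may be closed only if every primary shard has been allocated at least once; otherwise closing it is rejected.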
IndexRoutingTable indexRoutingTable = currentState.routingTable().index(index);
for (IndexShardRoutingTable shard : indexRoutingTable) {
if (!shard.primaryAllocatedPostApi()) {
throw new IndexPrimaryShardNotAllocatedException(new Index(index));
}
}
indicesToClose.add(index);
}
}
if (indicesToClose.isEmpty()) {
return currentState;
}
logger.info("closing indices [{}]", indicesAsString);
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder()
.blocks(currentState.blocks());
for (String index : indicesToClose) {
mdBuilder.put(IndexMetaData.builder(currentState.metaData().index(index)).state(IndexMetaData.State.CLOSE));
blocksBuilder.addIndexBlock(index, INDEX_CLOSED_BLOCK);
}
ClusterState updatedState = ClusterState.builder(currentState).metaData(mdBuilder).blocks(blocksBuilder).build();
RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable());
for (String index : indicesToClose) {
rtBuilder.remove(index);
}
RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(rtBuilder).build());
//no explicit wait for other nodes needed as we use AckedClusterStateUpdateTask
return ClusterState.builder(updatedState).routingResult(routingResult).build();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
}
});
| 0true
|
src_main_java_org_elasticsearch_cluster_metadata_MetaDataIndexStateService.java
|
468 |
public interface SandBox extends Serializable {
public Long getId();
public void setId(Long id);
/**
* The name of the sandbox.
* Certain sandbox names are reserved in the system. User created
* sandboxes cannot start with "", "approve_", or "deploy_".
*
* @return String sandbox name
*/
public String getName();
public void setName(String name);
public SandBoxType getSandBoxType();
public void setSandBoxType(SandBoxType sandBoxType);
public Site getSite();
public void setSite(Site site);
public Long getAuthor();
public void setAuthor(Long author);
public SandBox clone();
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_sandbox_domain_SandBox.java
|
1,335 |
public interface ClusterStateUpdateListener {
/**
* Called when the cluster state update is acknowledged
*/
void onResponse(ClusterStateUpdateResponse response);
/**
* Called when any error is thrown during the cluster state update processing
*/
void onFailure(Throwable t);
}
| 0true
|
src_main_java_org_elasticsearch_cluster_ack_ClusterStateUpdateListener.java
|