Unnamed: 0 (int64, 0-6.45k) | func (string, lengths 29-253k) | target (class label, 2 classes) | project (string, lengths 36-167)
---|---|---|---|
288 | public abstract class GenericAction<Request extends ActionRequest, Response extends ActionResponse> {
private final String name;
/**
* @param name The name of the action, must be unique across actions.
*/
protected GenericAction(String name) {
this.name = name;
}
/**
* The name of the action. Must be unique across actions.
*/
public String name() {
return this.name;
}
/**
* Creates a new response instance.
*/
public abstract Response newResponse();
/**
* Optional request options for the action.
*/
public TransportRequestOptions transportOptions(Settings settings) {
return TransportRequestOptions.EMPTY;
}
@Override
public boolean equals(Object o) {
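// note: this assumes o is a non-null GenericAction; a null argument or a
// foreign type would throw here rather than return false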
return name.equals(((GenericAction) o).name());
}
@Override
public int hashCode() {
return name.hashCode();
}
} | 0true
| src_main_java_org_elasticsearch_action_GenericAction.java |
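As a sketch of how this base class is meant to be extended (the action name and the PingRequest/PingResponse types below are hypothetical, not taken from the Elasticsearch source):
public class PingAction extends GenericAction<PingRequest, PingResponse> {
    public static final PingAction INSTANCE = new PingAction();
    private PingAction() {
        super("cluster/ping"); // hypothetical name; must be unique across actions
    }
    @Override
    public PingResponse newResponse() {
        return new PingResponse(); // assumes PingResponse extends ActionResponse
    }
}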
1,360 | public abstract class OClusterMemory extends OSharedResourceAdaptive implements OCluster {
public static final String TYPE = "MEMORY";
private OStorage storage;
private int id;
private String name;
private int dataSegmentId;
public OClusterMemory() {
super(OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean());
}
public void configure(final OStorage iStorage, final OStorageClusterConfiguration iConfig) throws IOException {
configure(iStorage, iConfig.getId(), iConfig.getName(), iConfig.getLocation(), iConfig.getDataSegmentId());
}
public void configure(final OStorage iStorage, final int iId, final String iClusterName, final String iLocation,
final int iDataSegmentId, final Object... iParameters) {
this.storage = iStorage;
this.id = iId;
this.name = iClusterName;
this.dataSegmentId = iDataSegmentId;
}
public int getDataSegmentId() {
acquireSharedLock();
try {
return dataSegmentId;
} finally {
releaseSharedLock();
}
}
@Override
public boolean useWal() {
return false;
}
@Override
public float recordGrowFactor() {
return 1;
}
@Override
public float recordOverflowGrowFactor() {
return 1;
}
@Override
public String compression() {
return ONothingCompression.NAME;
}
public OClusterEntryIterator absoluteIterator() {
return new OClusterEntryIterator(this);
}
public void close() {
acquireExclusiveLock();
try {
clear();
} finally {
releaseExclusiveLock();
}
}
@Override
public void close(boolean flush) throws IOException {
close();
}
@Override
public OPhysicalPosition createRecord(byte[] content, ORecordVersion recordVersion, byte recordType) throws IOException {
throw new UnsupportedOperationException("createRecord");
}
@Override
public boolean deleteRecord(OClusterPosition clusterPosition) throws IOException {
throw new UnsupportedOperationException("deleteRecord");
}
@Override
public void updateRecord(OClusterPosition clusterPosition, byte[] content, ORecordVersion recordVersion, byte recordType)
throws IOException {
throw new UnsupportedOperationException("updateRecord");
}
@Override
public ORawBuffer readRecord(OClusterPosition clusterPosition) throws IOException {
throw new UnsupportedOperationException("readRecord");
}
@Override
public boolean exists() {
throw new UnsupportedOperationException("exists");
}
public void open() throws IOException {
}
public void create(final int iStartSize) throws IOException {
}
public void delete() throws IOException {
acquireExclusiveLock();
try {
close();
} finally {
releaseExclusiveLock();
}
}
public void truncate() throws IOException {
storage.checkForClusterPermissions(getName());
acquireExclusiveLock();
try {
clear();
} finally {
releaseExclusiveLock();
}
}
public void set(OCluster.ATTRIBUTES iAttribute, Object iValue) throws IOException {
if (iAttribute == null)
throw new IllegalArgumentException("attribute is null");
final String stringValue = iValue != null ? iValue.toString() : null;
switch (iAttribute) {
case NAME:
name = stringValue;
break;
case DATASEGMENT:
dataSegmentId = storage.getDataSegmentIdByName(stringValue);
break;
}
}
public int getId() {
return id;
}
public String getName() {
return name;
}
public void synch() {
}
public void setSoftlyClosed(boolean softlyClosed) throws IOException {
}
@Override
public boolean wasSoftlyClosed() throws IOException {
return true;
}
public void lock() {
acquireSharedLock();
}
public void unlock() {
releaseSharedLock();
}
public String getType() {
return TYPE;
}
protected abstract void clear();
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_storage_impl_memory_OClusterMemory.java |
60 | final Iterator<String> keysToMangle = Iterators.filter(configuration.getKeys(), new Predicate<String>() {
@Override
public boolean apply(String key) {
if (null == key)
return false;
return p.matcher(key).matches();
}
}); | 0true
| titan-core_src_main_java_com_thinkaurelius_titan_core_TitanFactory.java |
271 | public abstract class OCommandProcess<C extends OCommand, T, R> {
protected final C command;
protected T target;
/**
* Create the process defining command and target.
*/
public OCommandProcess(final C iCommand, final T iTarget) {
command = iCommand;
target = iTarget;
}
public abstract R process();
public T getTarget() {
return target;
}
@Override
public String toString() {
return target.toString();
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_command_OCommandProcess.java |
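A minimal sketch of a concrete process built on this base class (the subclass and its logic are hypothetical):
public class OUppercaseProcess extends OCommandProcess<OCommand, String, String> {
    public OUppercaseProcess(final OCommand iCommand, final String iTarget) {
        super(iCommand, iTarget);
    }
    @Override
    public String process() {
        // hypothetical logic: derive the result from the configured target
        return getTarget().toUpperCase();
    }
}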
208 | public class InterceptingXaLogicalLog extends XaLogicalLog
{
private final XaDataSource ds;
private final TransactionInterceptorProviders providers;
public InterceptingXaLogicalLog( File fileName, XaResourceManager xaRm,
XaCommandFactory cf, XaTransactionFactory xaTf,
TransactionInterceptorProviders providers,
Monitors monitors, FileSystemAbstraction fileSystem, Logging logging,
LogPruneStrategy pruneStrategy, TransactionStateFactory stateFactory,
KernelHealth kernelHealth, long rotateAtSize, InjectedTransactionValidator injectedTxValidator )
{
super( fileName, xaRm, cf, xaTf, fileSystem, monitors, logging, pruneStrategy,
stateFactory, kernelHealth, rotateAtSize, injectedTxValidator );
this.providers = providers;
this.ds = xaRm.getDataSource();
}
@Override
protected LogDeserializer getLogDeserializer( ReadableByteChannel byteChannel )
{
// This is created every time because transaction interceptors can be stateful
final TransactionInterceptor interceptor = providers.resolveChain( ds );
LogDeserializer toReturn = new LogDeserializer( byteChannel, bufferMonitor )
{
@Override
protected void intercept( List<LogEntry> entries )
{
for ( LogEntry entry : entries )
{
if ( entry instanceof LogEntry.Command )
{
LogEntry.Command commandEntry = (LogEntry.Command) entry;
if ( commandEntry.getXaCommand() instanceof Command )
{
( (Command) commandEntry.getXaCommand() ).accept( interceptor );
}
}
else if ( entry instanceof LogEntry.Start )
{
interceptor.setStartEntry( (LogEntry.Start) entry );
}
else if ( entry instanceof LogEntry.Commit )
{
interceptor.setCommitEntry( (LogEntry.Commit) entry );
}
}
interceptor.complete();
}
};
return toReturn;
}
} | 0true
| community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_InterceptingXaLogicalLog.java |
424 | trackedList.addChangeListener(new OMultiValueChangeListener<Integer, String>() {
public void onAfterRecordChanged(final OMultiValueChangeEvent<Integer, String> event) {
changed.value = true;
}
}); | 0true
| core_src_test_java_com_orientechnologies_orient_core_db_record_TrackedListTest.java |
2,362 | private class PartitionProcessor
implements Runnable {
@Override
public void run() {
KeyValueSource<KeyIn, ValueIn> delegate = keyValueSource;
if (supervisor.getConfiguration().isCommunicateStats()) {
delegate = new KeyValueSourceFacade<KeyIn, ValueIn>(keyValueSource, supervisor);
}
while (true) {
if (cancelled.get()) {
return;
}
Integer partitionId = findNewPartitionProcessing();
if (partitionId == null) {
// Job's done
return;
}
// Migration event occurred, just retry
if (partitionId == -1) {
continue;
}
try {
// This call cannot be delegated
((PartitionIdAware) keyValueSource).setPartitionId(partitionId);
delegate.reset();
if (delegate.open(nodeEngine)) {
DefaultContext<KeyOut, ValueOut> context = supervisor.getOrCreateContext(MapCombineTask.this);
processMapping(partitionId, context, delegate);
delegate.close();
finalizeMapping(partitionId, context);
} else {
// Partition assignment might not be ready yet, postpone the processing and retry later
postponePartitionProcessing(partitionId);
}
} catch (Throwable t) {
handleProcessorThrowable(t);
}
}
}
private Integer findNewPartitionProcessing() {
try {
RequestPartitionResult result = mapReduceService
.processRequest(supervisor.getJobOwner(), new RequestPartitionMapping(name, jobId), name);
// JobSupervisor doesn't exist anymore on the jobOwner; job done?
if (result.getResultState() == NO_SUPERVISOR) {
return null;
} else if (result.getResultState() == CHECK_STATE_FAILED) {
// retry
return -1;
} else if (result.getResultState() == NO_MORE_PARTITIONS) {
return null;
} else {
return result.getPartitionId();
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_mapreduce_impl_task_MapCombineTask.java |
2,049 | public interface ProviderInstanceBinding<T> extends Binding<T>, HasDependencies {
/**
* Returns the user-supplied, unscoped provider.
*/
Provider<? extends T> getProviderInstance();
/**
* Returns the field and method injection points of the provider, injected at injector-creation
* time only.
*
* @return a possibly empty set
*/
Set<InjectionPoint> getInjectionPoints();
} | 0true
| src_main_java_org_elasticsearch_common_inject_spi_ProviderInstanceBinding.java |
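A hedged sketch of where such a binding comes from, using the Guice-style API this package mirrors; the Greeting type and GreetingProvider are hypothetical:
Injector injector = Guice.createInjector(new AbstractModule() {
    @Override
    protected void configure() {
        // binding to a provider *instance* (rather than a provider class)
        // is what yields a ProviderInstanceBinding
        bind(Greeting.class).toProvider(new GreetingProvider());
    }
});
Binding<Greeting> binding = injector.getBinding(Greeting.class);
if (binding instanceof ProviderInstanceBinding) {
    Provider<? extends Greeting> unscoped =
            ((ProviderInstanceBinding<Greeting>) binding).getProviderInstance();
}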
3,369 | static class Empty extends PackedArrayAtomicFieldData {
Empty(int numDocs) {
super(numDocs);
}
@Override
public LongValues getLongValues() {
return LongValues.EMPTY;
}
@Override
public DoubleValues getDoubleValues() {
return DoubleValues.EMPTY;
}
@Override
public boolean isMultiValued() {
return false;
}
@Override
public boolean isValuesOrdered() {
return false;
}
@Override
public long getMemorySizeInBytes() {
return 0;
}
@Override
public long getNumberUniqueValues() {
return 0;
}
@Override
public BytesValues getBytesValues(boolean needsHashes) {
return BytesValues.EMPTY;
}
@Override
public ScriptDocValues getScriptValues() {
return ScriptDocValues.EMPTY;
}
} | 1no label
| src_main_java_org_elasticsearch_index_fielddata_plain_PackedArrayAtomicFieldData.java |
795 | public class PercolateResponse extends BroadcastOperationResponse implements Iterable<PercolateResponse.Match>, ToXContent {
public static final Match[] EMPTY = new Match[0];
private long tookInMillis;
private Match[] matches;
private long count;
private InternalFacets facets;
private InternalAggregations aggregations;
public PercolateResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures,
Match[] matches, long count, long tookInMillis, InternalFacets facets, InternalAggregations aggregations) {
super(totalShards, successfulShards, failedShards, shardFailures);
this.tookInMillis = tookInMillis;
this.matches = matches;
this.count = count;
this.facets = facets;
this.aggregations = aggregations;
}
public PercolateResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures, long tookInMillis, Match[] matches) {
super(totalShards, successfulShards, failedShards, shardFailures);
this.tookInMillis = tookInMillis;
this.matches = matches;
}
PercolateResponse() {
}
public PercolateResponse(Match[] matches) {
this.matches = matches;
}
/**
* How long the percolate took.
*/
public TimeValue getTook() {
return new TimeValue(tookInMillis);
}
/**
* How long the percolate took in milliseconds.
*/
public long getTookInMillis() {
return tookInMillis;
}
/**
* @return The queries that match with the document being percolated. This can return <code>null</code>.
*/
public Match[] getMatches() {
return this.matches;
}
/**
* @return The total number of queries that have matched with the document being percolated.
*/
public long getCount() {
return count;
}
/**
* @return Any facet that has been executed on the query metadata. This can return <code>null</code>.
*/
public InternalFacets getFacets() {
return facets;
}
/**
* @return Any aggregations that have been executed on the query metadata. This can return <code>null</code>.
*/
public InternalAggregations getAggregations() {
return aggregations;
}
@Override
public Iterator<Match> iterator() {
return Arrays.asList(matches).iterator();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Fields.TOOK, tookInMillis);
RestActions.buildBroadcastShardsHeader(builder, this);
builder.field(Fields.TOTAL, count);
if (matches != null) {
builder.startArray(Fields.MATCHES);
boolean justIds = "ids".equals(params.param("percolate_format"));
if (justIds) {
for (PercolateResponse.Match match : matches) {
builder.value(match.getId());
}
} else {
for (PercolateResponse.Match match : matches) {
builder.startObject();
builder.field(Fields._INDEX, match.getIndex());
builder.field(Fields._ID, match.getId());
float score = match.getScore();
if (score != PercolatorService.NO_SCORE) {
builder.field(Fields._SCORE, match.getScore());
}
if (match.getHighlightFields() != null) {
builder.startObject(Fields.HIGHLIGHT);
for (HighlightField field : match.getHighlightFields().values()) {
builder.field(field.name());
if (field.fragments() == null) {
builder.nullValue();
} else {
builder.startArray();
for (Text fragment : field.fragments()) {
builder.value(fragment);
}
builder.endArray();
}
}
builder.endObject();
}
builder.endObject();
}
}
builder.endArray();
}
if (facets != null) {
facets.toXContent(builder, params);
}
if (aggregations != null) {
aggregations.toXContent(builder, params);
}
builder.endObject();
return builder;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
tookInMillis = in.readVLong();
count = in.readVLong();
int size = in.readVInt();
if (size != -1) {
matches = new Match[size];
for (int i = 0; i < size; i++) {
matches[i] = new Match();
matches[i].readFrom(in);
}
}
facets = InternalFacets.readOptionalFacets(in);
aggregations = InternalAggregations.readOptionalAggregations(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVLong(tookInMillis);
out.writeVLong(count);
if (matches == null) {
out.writeVInt(-1);
} else {
out.writeVInt(matches.length);
for (Match match : matches) {
match.writeTo(out);
}
}
out.writeOptionalStreamable(facets);
out.writeOptionalStreamable(aggregations);
}
public static class Match implements Streamable {
private Text index;
private Text id;
private float score;
private Map<String, HighlightField> hl;
public Match(Text index, Text id, float score, Map<String, HighlightField> hl) {
this.id = id;
this.score = score;
this.index = index;
this.hl = hl;
}
public Match(Text index, Text id, float score) {
this.id = id;
this.score = score;
this.index = index;
}
Match() {
}
public Text getIndex() {
return index;
}
public Text getId() {
return id;
}
public float getScore() {
return score;
}
public Map<String, HighlightField> getHighlightFields() {
return hl;
}
@Override
public void readFrom(StreamInput in) throws IOException {
id = in.readText();
index = in.readText();
score = in.readFloat();
int size = in.readVInt();
if (size > 0) {
hl = new HashMap<String, HighlightField>(size);
for (int j = 0; j < size; j++) {
hl.put(in.readString(), HighlightField.readHighlightField(in));
}
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeText(id);
out.writeText(index);
out.writeFloat(score);
if (hl != null) {
out.writeVInt(hl.size());
for (Map.Entry<String, HighlightField> entry : hl.entrySet()) {
out.writeString(entry.getKey());
entry.getValue().writeTo(out);
}
} else {
out.writeVInt(0);
}
}
}
static final class Fields {
static final XContentBuilderString TOOK = new XContentBuilderString("took");
static final XContentBuilderString TOTAL = new XContentBuilderString("total");
static final XContentBuilderString MATCHES = new XContentBuilderString("matches");
static final XContentBuilderString _INDEX = new XContentBuilderString("_index");
static final XContentBuilderString _ID = new XContentBuilderString("_id");
static final XContentBuilderString _SCORE = new XContentBuilderString("_score");
static final XContentBuilderString HIGHLIGHT = new XContentBuilderString("highlight");
}
} | 0true
| src_main_java_org_elasticsearch_action_percolate_PercolateResponse.java |
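Since the response is Iterable over its matches, consuming it can be as simple as the following sketch (the response variable is assumed to come from a percolate call):
for (PercolateResponse.Match match : response) {
    System.out.println(match.getIndex() + "/" + match.getId()
            + " score=" + match.getScore());
}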
5,436 | public class ScriptBytesValues extends BytesValues implements ScriptValues {
final SearchScript script;
private Iterator<?> iter;
private Object value;
private BytesRef scratch = new BytesRef();
public ScriptBytesValues(SearchScript script) {
super(true); // assume multi-valued
this.script = script;
}
@Override
public SearchScript script() {
return script;
}
@Override
public int setDocument(int docId) {
this.docId = docId;
script.setNextDocId(docId);
value = script.run();
if (value == null) {
iter = Iterators.emptyIterator();
return 0;
}
if (value.getClass().isArray()) {
final int length = Array.getLength(value);
// don't use Arrays.asList because the array may be an array of primitives
iter = new Iterator<Object>() {
int i = 0;
@Override
public boolean hasNext() {
return i < length;
}
@Override
public Object next() {
return Array.get(value, i++);
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
return length;
}
if (value instanceof Collection) {
final Collection<?> coll = (Collection<?>) value;
iter = coll.iterator();
return coll.size();
}
iter = Iterators.singletonIterator(value);
return 1;
}
@Override
public BytesRef nextValue() {
final String next = iter.next().toString();
scratch.copyChars(next);
return scratch;
}
} | 1no label
| src_main_java_org_elasticsearch_search_aggregations_support_bytes_ScriptBytesValues.java |
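The comment in setDocument alludes to a standard Java pitfall: Arrays.asList over a primitive array wraps the whole array as a single element instead of exposing its values, which is why java.lang.reflect.Array is used instead. A self-contained demonstration:
import java.util.Arrays;
import java.util.List;

public class PrimitiveArrayGotcha {
    public static void main(String[] args) {
        // a boxed array is exposed element by element
        List<Integer> boxed = Arrays.asList(new Integer[] {1, 2, 3});
        System.out.println(boxed.size()); // 3
        // a primitive array becomes a single-element list of int[]
        List<int[]> wrapped = Arrays.asList(new int[] {1, 2, 3});
        System.out.println(wrapped.size()); // 1
    }
}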
769 | @Deprecated
@Repository("blAvailabilityDao")
public class AvailabilityDaoImpl extends BatchRetrieveDao implements AvailabilityDao {
@PersistenceContext(unitName="blPU")
protected EntityManager em;
@Override
public List<SkuAvailability> readSKUAvailability(List<Long> skuIds, boolean realTime) {
Query query = em.createNamedQuery("BC_READ_SKU_AVAILABILITIES_BY_SKU_IDS");
if (! realTime) {
query.setHint(QueryHints.HINT_CACHEABLE, true);
}
return batchExecuteReadQuery(query, skuIds, "skuIds");
}
@Override
public List<SkuAvailability> readSKUAvailabilityForLocation(List<Long> skuIds, Long locationId, boolean realTime) {
Query query = em.createNamedQuery("BC_READ_SKU_AVAILABILITIES_BY_LOCATION_ID_AND_SKU_IDS");
if (! realTime) {
query.setHint(QueryHints.HINT_CACHEABLE, true);
}
query.setParameter("locationId", locationId);
return batchExecuteReadQuery(query, skuIds, "skuIds");
}
@Override
public void save(SkuAvailability skuAvailability) {
em.merge(skuAvailability);
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_inventory_dao_AvailabilityDaoImpl.java |
1,632 | public interface PropertyBuilder {
public Map<String, FieldMetadata> execute(Boolean overridePopulateManyToOne);
} | 0true
| admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_dao_PropertyBuilder.java |
536 | public class OJSONFetchListener implements OFetchListener {
public void processStandardField(final ORecordSchemaAware<?> iRecord, final Object iFieldValue, final String iFieldName,
final OFetchContext iContext, final Object iusObject, final String iFormat) {
try {
((OJSONFetchContext) iContext).getJsonWriter().writeAttribute(((OJSONFetchContext) iContext).getIndentLevel(), true,
iFieldName, iFieldValue);
} catch (IOException e) {
throw new OFetchException("Error processing field '" + iFieldValue + " of record " + iRecord.getIdentity(), e);
}
}
public void processStandardCollectionValue(final Object iFieldValue, final OFetchContext iContext) throws OFetchException {
try {
((OJSONFetchContext) iContext).getJsonWriter().writeValue(((OJSONFetchContext) iContext).getIndentLevel(), true,
OJSONWriter.encode(iFieldValue));
} catch (IOException e) {
e.printStackTrace();
}
}
public Object fetchLinked(final ORecordSchemaAware<?> iRecord, final Object iUserObject, final String iFieldName,
final ORecordSchemaAware<?> iLinked, final OFetchContext iContext) throws OFetchException {
return iLinked;
}
public Object fetchLinkedMapEntry(final ORecordSchemaAware<?> iRecord, final Object iUserObject, final String iFieldName,
final String iKey, final ORecordSchemaAware<?> iLinked, final OFetchContext iContext) throws OFetchException {
return iLinked;
}
public void parseLinked(final ORecordSchemaAware<?> iRootRecord, final OIdentifiable iLinked, final Object iUserObject,
final String iFieldName, final OFetchContext iContext) throws OFetchException {
try {
((OJSONFetchContext) iContext).writeLinkedAttribute(iLinked, iFieldName);
} catch (IOException e) {
throw new OFetchException("Error writing linked field " + iFieldName + " (record:" + iLinked.getIdentity() + ") of record "
+ iRootRecord.getIdentity(), e);
}
}
public void parseLinkedCollectionValue(ORecordSchemaAware<?> iRootRecord, OIdentifiable iLinked, Object iUserObject,
String iFieldName, OFetchContext iContext) throws OFetchException {
try {
if (((OJSONFetchContext) iContext).isInCollection(iRootRecord)) {
((OJSONFetchContext) iContext).writeLinkedValue(iLinked, iFieldName);
} else {
((OJSONFetchContext) iContext).writeLinkedAttribute(iLinked, iFieldName);
}
} catch (IOException e) {
throw new OFetchException("Error writing linked field " + iFieldName + " (record:" + iLinked.getIdentity() + ") of record "
+ iRootRecord.getIdentity(), e);
}
}
public Object fetchLinkedCollectionValue(ORecordSchemaAware<?> iRoot, Object iUserObject, String iFieldName,
ORecordSchemaAware<?> iLinked, OFetchContext iContext) throws OFetchException {
return iLinked;
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_fetch_json_OJSONFetchListener.java |
3,242 | public final class BytesRefValComparator extends NestedWrappableComparator<BytesRef> {
private final IndexFieldData<?> indexFieldData;
private final SortMode sortMode;
private final BytesRef missingValue;
private final BytesRef[] values;
private BytesRef bottom;
private BytesValues docTerms;
BytesRefValComparator(IndexFieldData<?> indexFieldData, int numHits, SortMode sortMode, BytesRef missingValue) {
this.sortMode = sortMode;
values = new BytesRef[numHits];
this.indexFieldData = indexFieldData;
this.missingValue = missingValue;
}
@Override
public int compare(int slot1, int slot2) {
final BytesRef val1 = values[slot1];
final BytesRef val2 = values[slot2];
return compareValues(val1, val2);
}
@Override
public int compareBottom(int doc) throws IOException {
BytesRef val2 = sortMode.getRelevantValue(docTerms, doc, missingValue);
return compareValues(bottom, val2);
}
@Override
public void copy(int slot, int doc) throws IOException {
BytesRef relevantValue = sortMode.getRelevantValue(docTerms, doc, missingValue);
if (relevantValue == missingValue) {
values[slot] = missingValue;
} else {
if (values[slot] == null || values[slot] == missingValue) {
values[slot] = new BytesRef();
}
values[slot].copyBytes(relevantValue);
}
}
@Override
public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException {
docTerms = indexFieldData.load(context).getBytesValues(false);
return this;
}
@Override
public void setBottom(final int bottom) {
this.bottom = values[bottom];
}
@Override
public BytesRef value(int slot) {
return values[slot];
}
@Override
public int compareValues(BytesRef val1, BytesRef val2) {
if (val1 == null) {
if (val2 == null) {
return 0;
}
return -1;
} else if (val2 == null) {
return 1;
}
return val1.compareTo(val2);
}
@Override
public int compareDocToValue(int doc, BytesRef value) {
return sortMode.getRelevantValue(docTerms, doc, missingValue).compareTo(value);
}
@Override
public void missing(int slot) {
values[slot] = missingValue;
}
@Override
public int compareBottomMissing() {
return compareValues(bottom, missingValue);
}
} | 1no label
| src_main_java_org_elasticsearch_index_fielddata_fieldcomparator_BytesRefValComparator.java |
298 | public abstract class LocalStoreManager extends AbstractStoreManager {
protected final File directory;
public LocalStoreManager(Configuration storageConfig) throws BackendException {
super(storageConfig);
String storageDir = storageConfig.get(STORAGE_DIRECTORY);
if (null == storageDir) {
directory = null;
} else {
directory = DirectoryUtil.getOrCreateDataDirectory(storageDir);
}
}
} | 0true
| titan-core_src_main_java_com_thinkaurelius_titan_diskstorage_common_LocalStoreManager.java |
3,240 | perSegComp = new PerSegmentComparator(termsIndex) {
@Override
protected long getOrd(int doc) {
return getRelevantOrd(readerOrds, doc, sortMode);
}
}; | 0true
| src_main_java_org_elasticsearch_index_fielddata_fieldcomparator_BytesRefOrdValComparator.java |
785 | @Repository("blOfferAuditDao")
public class OfferAuditDaoImpl implements OfferAuditDao {
protected static final Log LOG = LogFactory.getLog(OfferAuditDaoImpl.class);
@PersistenceContext(unitName="blPU")
protected EntityManager em;
@Resource(name="blEntityConfiguration")
protected EntityConfiguration entityConfiguration;
@Override
public OfferAudit create() {
return ((OfferAudit) entityConfiguration.createEntityInstance(OfferAudit.class.getName()));
}
@Override
public void delete(final OfferAudit offerAudit) {
OfferAudit loa = offerAudit;
if (!em.contains(loa)) {
loa = readAuditById(offerAudit.getId());
}
em.remove(loa);
}
@Override
public OfferAudit save(final OfferAudit offerAudit) {
return em.merge(offerAudit);
}
@Override
public OfferAudit readAuditById(final Long offerAuditId) {
return em.find(OfferAuditImpl.class, offerAuditId);
}
@Override
public Long countUsesByCustomer(Long customerId, Long offerId) {
TypedQuery<Long> query = new TypedQueryBuilder<OfferAudit>(OfferAudit.class, "offerAudit")
.addRestriction("offerAudit.customerId", "=", customerId)
.addRestriction("offerAudit.offerId", "=", offerId)
.toCountQuery(em);
Long result = query.getSingleResult();
return result;
}
@Override
public Long countOfferCodeUses(Long offerCodeId) {
OfferAudit check = new OfferAuditImpl();
try {
check.getOfferCodeId();
} catch (UnsupportedOperationException e) {
LOG.warn("Checking for offer code max usage has not been enabled in your Broadleaf installation. This warning" +
" will only appear in the Broadleaf 3.0 line, versions 3.0.6-GA and above. In order to fix your" +
" version of Broadleaf to enable this functionality, refer to the OfferAuditWeaveImpl or directly to" +
" https://github.com/BroadleafCommerce/BroadleafCommerce/pull/195.");
LOG.warn("Returning unlimited usage for offer code ID " + offerCodeId);
return -1L;
}
TypedQuery<Long> query = new TypedQueryBuilder<OfferAudit>(OfferAudit.class, "offerAudit")
.addRestriction("offerAudit.offerCodeId", "=", offerCodeId)
.toCountQuery(em);
Long result = query.getSingleResult();
return result;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_dao_OfferAuditDaoImpl.java |
454 | public static class Builder {
private boolean unorderedScan;
private boolean orderedScan;
private boolean multiQuery;
private boolean locking;
private boolean batchMutation;
private boolean localKeyPartition;
private boolean keyOrdered;
private boolean distributed;
private boolean transactional;
private boolean timestamps;
private Timestamps preferredTimestamps;
private boolean cellLevelTTL;
private boolean storeLevelTTL;
private boolean visibility;
private boolean keyConsistent;
private Configuration keyConsistentTxConfig;
private Configuration localKeyConsistentTxConfig;
/**
* Construct a Builder with everything disabled/unsupported/false/null.
*/
public Builder() { }
/**
* Construct a Builder whose default values exactly match the values on
* the supplied {@code template}.
*/
public Builder(StoreFeatures template) {
unorderedScan(template.hasUnorderedScan());
orderedScan(template.hasOrderedScan());
multiQuery(template.hasMultiQuery());
locking(template.hasLocking());
batchMutation(template.hasBatchMutation());
localKeyPartition(template.hasLocalKeyPartition());
keyOrdered(template.isKeyOrdered());
distributed(template.isDistributed());
transactional(template.hasTxIsolation());
timestamps(template.hasTimestamps());
preferredTimestamps(template.getPreferredTimestamps());
cellTTL(template.hasCellTTL());
storeTTL(template.hasStoreTTL());
visibility(template.hasVisibility());
if (template.isKeyConsistent()) {
keyConsistent(template.getKeyConsistentTxConfig(), template.getLocalKeyConsistentTxConfig());
}
}
public Builder unorderedScan(boolean b) {
unorderedScan = b;
return this;
}
public Builder orderedScan(boolean b) {
orderedScan = b;
return this;
}
public Builder multiQuery(boolean b) {
multiQuery = b;
return this;
}
public Builder locking(boolean b) {
locking = b;
return this;
}
public Builder batchMutation(boolean b) {
batchMutation = b;
return this;
}
public Builder localKeyPartition(boolean b) {
localKeyPartition = b;
return this;
}
public Builder keyOrdered(boolean b) {
keyOrdered = b;
return this;
}
public Builder distributed(boolean b) {
distributed = b;
return this;
}
public Builder transactional(boolean b) {
transactional = b;
return this;
}
public Builder timestamps(boolean b) {
timestamps = b;
return this;
}
public Builder preferredTimestamps(Timestamps t) {
preferredTimestamps = t;
return this;
}
public Builder cellTTL(boolean b) {
cellLevelTTL = b;
return this;
}
public Builder storeTTL(boolean b) {
storeLevelTTL = b;
return this;
}
public Builder visibility(boolean b) {
visibility = b;
return this;
}
public Builder keyConsistent(Configuration c) {
keyConsistent = true;
keyConsistentTxConfig = c;
return this;
}
public Builder keyConsistent(Configuration global, Configuration local) {
keyConsistent = true;
keyConsistentTxConfig = global;
localKeyConsistentTxConfig = local;
return this;
}
public Builder notKeyConsistent() {
keyConsistent = false;
return this;
}
public StandardStoreFeatures build() {
return new StandardStoreFeatures(unorderedScan, orderedScan,
multiQuery, locking, batchMutation, localKeyPartition,
keyOrdered, distributed, transactional, keyConsistent,
timestamps, preferredTimestamps, cellLevelTTL,
storeLevelTTL, visibility, keyConsistentTxConfig,
localKeyConsistentTxConfig);
}
} | 0true
| titan-core_src_main_java_com_thinkaurelius_titan_diskstorage_keycolumnvalue_StandardStoreFeatures.java |
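A minimal usage sketch of the builder above; the particular feature flags chosen here are illustrative, not a recommended configuration:
StandardStoreFeatures features = new StandardStoreFeatures.Builder()
        .orderedScan(true)
        .keyOrdered(true)
        .batchMutation(true)
        .timestamps(true)
        .build();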
334 | new Thread() {
public void run() {
map.tryPut("key1", "value2", 5, TimeUnit.SECONDS);
latch.countDown();
}
}.start(); | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTest.java |
3,564 | public static class Builder extends AbstractFieldMapper.Builder<Builder, CompletionFieldMapper> {
private boolean preserveSeparators = Defaults.DEFAULT_PRESERVE_SEPARATORS;
private boolean payloads = Defaults.DEFAULT_HAS_PAYLOADS;
private boolean preservePositionIncrements = Defaults.DEFAULT_POSITION_INCREMENTS;
private int maxInputLength = Defaults.DEFAULT_MAX_INPUT_LENGTH;
public Builder(String name) {
super(name, new FieldType(Defaults.FIELD_TYPE));
builder = this;
}
public Builder payloads(boolean payloads) {
this.payloads = payloads;
return this;
}
public Builder preserveSeparators(boolean preserveSeparators) {
this.preserveSeparators = preserveSeparators;
return this;
}
public Builder preservePositionIncrements(boolean preservePositionIncrements) {
this.preservePositionIncrements = preservePositionIncrements;
return this;
}
public Builder maxInputLength(int maxInputLength) {
if (maxInputLength <= 0) {
throw new ElasticsearchIllegalArgumentException(Fields.MAX_INPUT_LENGTH.getPreferredName() + " must be > 0 but was [" + maxInputLength + "]");
}
this.maxInputLength = maxInputLength;
return this;
}
@Override
public CompletionFieldMapper build(Mapper.BuilderContext context) {
return new CompletionFieldMapper(buildNames(context), indexAnalyzer, searchAnalyzer, postingsProvider, similarity, payloads,
preserveSeparators, preservePositionIncrements, maxInputLength, multiFieldsBuilder.build(this, context), copyTo);
}
} | 0true
| src_main_java_org_elasticsearch_index_mapper_core_CompletionFieldMapper.java |
441 | public static class JvmStats implements Streamable, ToXContent {
ObjectIntOpenHashMap<JvmVersion> versions;
long threads;
long maxUptime;
long heapUsed;
long heapMax;
JvmStats() {
versions = new ObjectIntOpenHashMap<JvmVersion>();
threads = 0;
maxUptime = 0;
heapMax = 0;
heapUsed = 0;
}
public ObjectIntOpenHashMap<JvmVersion> getVersions() {
return versions;
}
/**
* The total number of threads in the cluster
*/
public long getThreads() {
return threads;
}
/**
* The maximum uptime of a node in the cluster
*/
public TimeValue getMaxUpTime() {
return new TimeValue(maxUptime);
}
/**
* Total heap used in the cluster
*/
public ByteSizeValue getHeapUsed() {
return new ByteSizeValue(heapUsed);
}
/**
* Maximum total heap available to the cluster
*/
public ByteSizeValue getHeapMax() {
return new ByteSizeValue(heapMax);
}
public void addNodeInfoStats(NodeInfo nodeInfo, NodeStats nodeStats) {
versions.addTo(new JvmVersion(nodeInfo.getJvm()), 1);
org.elasticsearch.monitor.jvm.JvmStats js = nodeStats.getJvm();
if (js == null) {
return;
}
if (js.threads() != null) {
threads += js.threads().count();
}
maxUptime = Math.max(maxUptime, js.uptime().millis());
if (js.mem() != null) {
heapUsed += js.mem().getHeapUsed().bytes();
heapMax += js.mem().getHeapMax().bytes();
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
int size = in.readVInt();
versions = new ObjectIntOpenHashMap<JvmVersion>(size);
for (; size > 0; size--) {
versions.addTo(JvmVersion.readJvmVersion(in), in.readVInt());
}
threads = in.readVLong();
maxUptime = in.readVLong();
heapUsed = in.readVLong();
heapMax = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(versions.size());
for (ObjectIntCursor<JvmVersion> v : versions) {
v.key.writeTo(out);
out.writeVInt(v.value);
}
out.writeVLong(threads);
out.writeVLong(maxUptime);
out.writeVLong(heapUsed);
out.writeVLong(heapMax);
}
public static JvmStats readJvmStats(StreamInput in) throws IOException {
JvmStats jvmStats = new JvmStats();
jvmStats.readFrom(in);
return jvmStats;
}
static final class Fields {
static final XContentBuilderString VERSIONS = new XContentBuilderString("versions");
static final XContentBuilderString VERSION = new XContentBuilderString("version");
static final XContentBuilderString VM_NAME = new XContentBuilderString("vm_name");
static final XContentBuilderString VM_VERSION = new XContentBuilderString("vm_version");
static final XContentBuilderString VM_VENDOR = new XContentBuilderString("vm_vendor");
static final XContentBuilderString COUNT = new XContentBuilderString("count");
static final XContentBuilderString THREADS = new XContentBuilderString("threads");
static final XContentBuilderString MAX_UPTIME = new XContentBuilderString("max_uptime");
static final XContentBuilderString MAX_UPTIME_IN_MILLIS = new XContentBuilderString("max_uptime_in_millis");
static final XContentBuilderString MEM = new XContentBuilderString("mem");
static final XContentBuilderString HEAP_USED = new XContentBuilderString("heap_used");
static final XContentBuilderString HEAP_USED_IN_BYTES = new XContentBuilderString("heap_used_in_bytes");
static final XContentBuilderString HEAP_MAX = new XContentBuilderString("heap_max");
static final XContentBuilderString HEAP_MAX_IN_BYTES = new XContentBuilderString("heap_max_in_bytes");
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.timeValueField(Fields.MAX_UPTIME_IN_MILLIS, Fields.MAX_UPTIME, maxUptime);
builder.startArray(Fields.VERSIONS);
for (ObjectIntCursor<JvmVersion> v : versions) {
builder.startObject();
builder.field(Fields.VERSION, v.key.version);
builder.field(Fields.VM_NAME, v.key.vmName);
builder.field(Fields.VM_VERSION, v.key.vmVersion);
builder.field(Fields.VM_VENDOR, v.key.vmVendor);
builder.field(Fields.COUNT, v.value);
builder.endObject();
}
builder.endArray();
builder.startObject(Fields.MEM);
builder.byteSizeField(Fields.HEAP_USED_IN_BYTES, Fields.HEAP_USED, heapUsed);
builder.byteSizeField(Fields.HEAP_MAX_IN_BYTES, Fields.HEAP_MAX, heapMax);
builder.endObject();
builder.field(Fields.THREADS, threads);
return builder;
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_cluster_stats_ClusterStatsNodes.java |
3,340 | final class GeoPointBinaryDVAtomicFieldData extends AtomicGeoPointFieldData<ScriptDocValues> {
private final AtomicReader reader;
private final BinaryDocValues values;
GeoPointBinaryDVAtomicFieldData(AtomicReader reader, BinaryDocValues values) {
super();
this.reader = reader;
this.values = values == null ? BinaryDocValues.EMPTY : values;
}
@Override
public boolean isMultiValued() {
return false;
}
@Override
public boolean isValuesOrdered() {
return false;
}
@Override
public int getNumDocs() {
return reader.maxDoc();
}
@Override
public long getNumberUniqueValues() {
return Long.MAX_VALUE;
}
@Override
public long getMemorySizeInBytes() {
return -1; // not exposed by Lucene
}
@Override
public ScriptDocValues getScriptValues() {
return new ScriptDocValues.GeoPoints(getGeoPointValues());
}
@Override
public void close() {
// no-op
}
@Override
public GeoPointValues getGeoPointValues() {
return new GeoPointValues(true) {
final BytesRef bytes = new BytesRef();
int i = Integer.MAX_VALUE;
int valueCount = 0;
final GeoPoint point = new GeoPoint();
@Override
public int setDocument(int docId) {
values.get(docId, bytes);
assert bytes.length % 16 == 0;
i = 0;
return valueCount = (bytes.length >>> 4);
}
@Override
public GeoPoint nextValue() {
assert i < 2 * valueCount;
final double lat = ByteUtils.readDoubleLE(bytes.bytes, bytes.offset + i++ * 8);
final double lon = ByteUtils.readDoubleLE(bytes.bytes, bytes.offset + i++ * 8);
return point.reset(lat, lon);
}
};
}
} | 0true
| src_main_java_org_elasticsearch_index_fielddata_plain_GeoPointBinaryDVAtomicFieldData.java |
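getGeoPointValues above assumes a fixed binary layout: each point occupies 16 bytes, latitude then longitude, both little-endian doubles. A sketch of the matching encode side under that assumption (encodePoint is a hypothetical helper, not from the source):
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

static byte[] encodePoint(double lat, double lon) {
    return ByteBuffer.allocate(16)
            .order(ByteOrder.LITTLE_ENDIAN)
            .putDouble(lat)   // read back by ByteUtils.readDoubleLE at offset 0
            .putDouble(lon)   // read back at offset 8
            .array();
}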
2,746 | public abstract class SharedStorageGateway extends AbstractLifecycleComponent<Gateway> implements Gateway, ClusterStateListener {
private final ClusterService clusterService;
private final ThreadPool threadPool;
private ExecutorService writeStateExecutor;
private volatile MetaData currentMetaData;
private NodeEnvironment nodeEnv;
private NodeIndexDeletedAction nodeIndexDeletedAction;
public SharedStorageGateway(Settings settings, ThreadPool threadPool, ClusterService clusterService) {
super(settings);
this.threadPool = threadPool;
this.clusterService = clusterService;
this.writeStateExecutor = newSingleThreadExecutor(daemonThreadFactory(settings, "gateway#writeMetaData"));
clusterService.addLast(this);
logger.warn("shared gateway has been deprecated, please use the (default) local gateway");
}
@Inject
public void setNodeEnv(NodeEnvironment nodeEnv) {
this.nodeEnv = nodeEnv;
}
// setter injection is used here so as not to break backward compatibility with extensions of this class
@Inject
public void setNodeIndexDeletedAction(NodeIndexDeletedAction nodeIndexDeletedAction) {
this.nodeIndexDeletedAction = nodeIndexDeletedAction;
}
@Override
protected void doStart() throws ElasticsearchException {
}
@Override
protected void doStop() throws ElasticsearchException {
}
@Override
protected void doClose() throws ElasticsearchException {
clusterService.remove(this);
writeStateExecutor.shutdown();
try {
writeStateExecutor.awaitTermination(10, TimeUnit.SECONDS);
} catch (InterruptedException e) {
// ignore
}
}
@Override
public void performStateRecovery(final GatewayStateRecoveredListener listener) throws GatewayException {
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
logger.debug("reading state from gateway {} ...", this);
StopWatch stopWatch = new StopWatch().start();
MetaData metaData;
try {
metaData = read();
logger.debug("read state from gateway {}, took {}", this, stopWatch.stop().totalTime());
if (metaData == null) {
logger.debug("no state read from gateway");
listener.onSuccess(ClusterState.builder().build());
} else {
listener.onSuccess(ClusterState.builder().metaData(metaData).build());
}
} catch (Exception e) {
logger.error("failed to read from gateway", e);
listener.onFailure(ExceptionsHelper.detailedMessage(e));
}
}
});
}
@Override
public void clusterChanged(final ClusterChangedEvent event) {
if (!lifecycle.started()) {
return;
}
// nothing to do until we actually recover from the gateway, or any other block indicates we need to disable persistence
if (event.state().blocks().disableStatePersistence()) {
this.currentMetaData = null;
return;
}
if (!event.metaDataChanged()) {
return;
}
writeStateExecutor.execute(new Runnable() {
@Override
public void run() {
Set<String> indicesDeleted = Sets.newHashSet();
if (event.localNodeMaster()) {
logger.debug("writing to gateway {} ...", this);
StopWatch stopWatch = new StopWatch().start();
try {
write(event.state().metaData());
logger.debug("wrote to gateway {}, took {}", this, stopWatch.stop().totalTime());
// TODO, we need to remember that we failed, maybe add a retry scheduler?
} catch (Exception e) {
logger.error("failed to write to gateway", e);
}
if (currentMetaData != null) {
for (IndexMetaData current : currentMetaData) {
if (!event.state().metaData().hasIndex(current.index())) {
delete(current);
indicesDeleted.add(current.index());
}
}
}
}
if (nodeEnv != null && nodeEnv.hasNodeFile()) {
if (currentMetaData != null) {
for (IndexMetaData current : currentMetaData) {
if (!event.state().metaData().hasIndex(current.index())) {
FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(current.index())));
indicesDeleted.add(current.index());
}
}
}
}
currentMetaData = event.state().metaData();
for (String indexDeleted : indicesDeleted) {
try {
nodeIndexDeletedAction.nodeIndexStoreDeleted(event.state(), indexDeleted, event.state().nodes().localNodeId());
} catch (Exception e) {
logger.debug("[{}] failed to notify master on local index store deletion", e, indexDeleted);
}
}
}
});
}
protected abstract MetaData read() throws ElasticsearchException;
protected abstract void write(MetaData metaData) throws ElasticsearchException;
protected abstract void delete(IndexMetaData indexMetaData) throws ElasticsearchException;
} | 0true
| src_main_java_org_elasticsearch_gateway_shared_SharedStorageGateway.java |
103 | static final class TreeBin<K,V> extends Node<K,V> {
TreeNode<K,V> root;
volatile TreeNode<K,V> first;
volatile Thread waiter;
volatile int lockState;
// values for lockState
static final int WRITER = 1; // set while holding write lock
static final int WAITER = 2; // set when waiting for write lock
static final int READER = 4; // increment value for setting read lock
/**
* Tie-breaking utility for ordering insertions when equal
* hashCodes and non-comparable. We don't require a total
* order, just a consistent insertion rule to maintain
* equivalence across rebalancings. Tie-breaking further than
* necessary simplifies testing a bit.
*/
static int tieBreakOrder(Object a, Object b) {
int d;
if (a == null || b == null ||
(d = a.getClass().getName().
compareTo(b.getClass().getName())) == 0)
d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
-1 : 1);
return d;
}
/**
* Creates bin with initial set of nodes headed by b.
*/
TreeBin(TreeNode<K,V> b) {
super(TREEBIN, null, null, null);
this.first = b;
TreeNode<K,V> r = null;
for (TreeNode<K,V> x = b, next; x != null; x = next) {
next = (TreeNode<K,V>)x.next;
x.left = x.right = null;
if (r == null) {
x.parent = null;
x.red = false;
r = x;
}
else {
K k = x.key;
int h = x.hash;
Class<?> kc = null;
for (TreeNode<K,V> p = r;;) {
int dir, ph;
K pk = p.key;
if ((ph = p.hash) > h)
dir = -1;
else if (ph < h)
dir = 1;
else if ((kc == null &&
(kc = comparableClassFor(k)) == null) ||
(dir = compareComparables(kc, k, pk)) == 0)
dir = tieBreakOrder(k, pk);
TreeNode<K,V> xp = p;
if ((p = (dir <= 0) ? p.left : p.right) == null) {
x.parent = xp;
if (dir <= 0)
xp.left = x;
else
xp.right = x;
r = balanceInsertion(r, x);
break;
}
}
}
}
this.root = r;
assert checkInvariants(root);
}
/**
* Acquires write lock for tree restructuring.
*/
private final void lockRoot() {
if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
contendedLock(); // offload to separate method
}
/**
* Releases write lock for tree restructuring.
*/
private final void unlockRoot() {
lockState = 0;
}
/**
* Possibly blocks awaiting root lock.
*/
private final void contendedLock() {
boolean waiting = false;
for (int s;;) {
if (((s = lockState) & ~WAITER) == 0) {
if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
if (waiting)
waiter = null;
return;
}
}
else if ((s & WAITER) == 0) {
if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
waiting = true;
waiter = Thread.currentThread();
}
}
else if (waiting)
LockSupport.park(this);
}
}
/**
* Returns matching node or null if none. Tries to search
* using tree comparisons from root, but continues linear
* search when lock not available.
*/
final Node<K,V> find(int h, Object k) {
if (k != null) {
for (Node<K,V> e = first; e != null; ) {
int s; K ek;
if (((s = lockState) & (WAITER|WRITER)) != 0) {
if (e.hash == h &&
((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
e = e.next;
}
else if (U.compareAndSwapInt(this, LOCKSTATE, s,
s + READER)) {
TreeNode<K,V> r, p;
try {
p = ((r = root) == null ? null :
r.findTreeNode(h, k, null));
} finally {
Thread w;
int ls;
do {} while (!U.compareAndSwapInt
(this, LOCKSTATE,
ls = lockState, ls - READER));
if (ls == (READER|WAITER) && (w = waiter) != null)
LockSupport.unpark(w);
}
return p;
}
}
}
return null;
}
/**
* Finds or adds a node.
* @return null if added
*/
final TreeNode<K,V> putTreeVal(int h, K k, V v) {
Class<?> kc = null;
boolean searched = false;
for (TreeNode<K,V> p = root;;) {
int dir, ph; K pk;
if (p == null) {
first = root = new TreeNode<K,V>(h, k, v, null, null);
break;
}
else if ((ph = p.hash) > h)
dir = -1;
else if (ph < h)
dir = 1;
else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
return p;
else if ((kc == null &&
(kc = comparableClassFor(k)) == null) ||
(dir = compareComparables(kc, k, pk)) == 0) {
if (!searched) {
TreeNode<K,V> q, ch;
searched = true;
if (((ch = p.left) != null &&
(q = ch.findTreeNode(h, k, kc)) != null) ||
((ch = p.right) != null &&
(q = ch.findTreeNode(h, k, kc)) != null))
return q;
}
dir = tieBreakOrder(k, pk);
}
TreeNode<K,V> xp = p;
if ((p = (dir <= 0) ? p.left : p.right) == null) {
TreeNode<K,V> x, f = first;
first = x = new TreeNode<K,V>(h, k, v, f, xp);
if (f != null)
f.prev = x;
if (dir <= 0)
xp.left = x;
else
xp.right = x;
if (!xp.red)
x.red = true;
else {
lockRoot();
try {
root = balanceInsertion(root, x);
} finally {
unlockRoot();
}
}
break;
}
}
assert checkInvariants(root);
return null;
}
/**
* Removes the given node, that must be present before this
* call. This is messier than typical red-black deletion code
* because we cannot swap the contents of an interior node
* with a leaf successor that is pinned by "next" pointers
* that are accessible independently of lock. So instead we
* swap the tree linkages.
*
* @return true if now too small, so should be untreeified
*/
final boolean removeTreeNode(TreeNode<K,V> p) {
TreeNode<K,V> next = (TreeNode<K,V>)p.next;
TreeNode<K,V> pred = p.prev; // unlink traversal pointers
TreeNode<K,V> r, rl;
if (pred == null)
first = next;
else
pred.next = next;
if (next != null)
next.prev = pred;
if (first == null) {
root = null;
return true;
}
if ((r = root) == null || r.right == null || // too small
(rl = r.left) == null || rl.left == null)
return true;
lockRoot();
try {
TreeNode<K,V> replacement;
TreeNode<K,V> pl = p.left;
TreeNode<K,V> pr = p.right;
if (pl != null && pr != null) {
TreeNode<K,V> s = pr, sl;
while ((sl = s.left) != null) // find successor
s = sl;
boolean c = s.red; s.red = p.red; p.red = c; // swap colors
TreeNode<K,V> sr = s.right;
TreeNode<K,V> pp = p.parent;
if (s == pr) { // p was s's direct parent
p.parent = s;
s.right = p;
}
else {
TreeNode<K,V> sp = s.parent;
if ((p.parent = sp) != null) {
if (s == sp.left)
sp.left = p;
else
sp.right = p;
}
if ((s.right = pr) != null)
pr.parent = s;
}
p.left = null;
if ((p.right = sr) != null)
sr.parent = p;
if ((s.left = pl) != null)
pl.parent = s;
if ((s.parent = pp) == null)
r = s;
else if (p == pp.left)
pp.left = s;
else
pp.right = s;
if (sr != null)
replacement = sr;
else
replacement = p;
}
else if (pl != null)
replacement = pl;
else if (pr != null)
replacement = pr;
else
replacement = p;
if (replacement != p) {
TreeNode<K,V> pp = replacement.parent = p.parent;
if (pp == null)
r = replacement;
else if (p == pp.left)
pp.left = replacement;
else
pp.right = replacement;
p.left = p.right = p.parent = null;
}
root = (p.red) ? r : balanceDeletion(r, replacement);
if (p == replacement) { // detach pointers
TreeNode<K,V> pp;
if ((pp = p.parent) != null) {
if (p == pp.left)
pp.left = null;
else if (p == pp.right)
pp.right = null;
p.parent = null;
}
}
} finally {
unlockRoot();
}
assert checkInvariants(root);
return false;
}
/* ------------------------------------------------------------ */
// Red-black tree methods, all adapted from CLR
static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
TreeNode<K,V> p) {
TreeNode<K,V> r, pp, rl;
if (p != null && (r = p.right) != null) {
if ((rl = p.right = r.left) != null)
rl.parent = p;
if ((pp = r.parent = p.parent) == null)
(root = r).red = false;
else if (pp.left == p)
pp.left = r;
else
pp.right = r;
r.left = p;
p.parent = r;
}
return root;
}
static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
TreeNode<K,V> p) {
TreeNode<K,V> l, pp, lr;
if (p != null && (l = p.left) != null) {
if ((lr = p.left = l.right) != null)
lr.parent = p;
if ((pp = l.parent = p.parent) == null)
(root = l).red = false;
else if (pp.right == p)
pp.right = l;
else
pp.left = l;
l.right = p;
p.parent = l;
}
return root;
}
static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
TreeNode<K,V> x) {
x.red = true;
for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
if ((xp = x.parent) == null) {
x.red = false;
return x;
}
else if (!xp.red || (xpp = xp.parent) == null)
return root;
if (xp == (xppl = xpp.left)) {
if ((xppr = xpp.right) != null && xppr.red) {
xppr.red = false;
xp.red = false;
xpp.red = true;
x = xpp;
}
else {
if (x == xp.right) {
root = rotateLeft(root, x = xp);
xpp = (xp = x.parent) == null ? null : xp.parent;
}
if (xp != null) {
xp.red = false;
if (xpp != null) {
xpp.red = true;
root = rotateRight(root, xpp);
}
}
}
}
else {
if (xppl != null && xppl.red) {
xppl.red = false;
xp.red = false;
xpp.red = true;
x = xpp;
}
else {
if (x == xp.left) {
root = rotateRight(root, x = xp);
xpp = (xp = x.parent) == null ? null : xp.parent;
}
if (xp != null) {
xp.red = false;
if (xpp != null) {
xpp.red = true;
root = rotateLeft(root, xpp);
}
}
}
}
}
}
static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
TreeNode<K,V> x) {
for (TreeNode<K,V> xp, xpl, xpr;;) {
if (x == null || x == root)
return root;
else if ((xp = x.parent) == null) {
x.red = false;
return x;
}
else if (x.red) {
x.red = false;
return root;
}
else if ((xpl = xp.left) == x) {
if ((xpr = xp.right) != null && xpr.red) {
xpr.red = false;
xp.red = true;
root = rotateLeft(root, xp);
xpr = (xp = x.parent) == null ? null : xp.right;
}
if (xpr == null)
x = xp;
else {
TreeNode<K,V> sl = xpr.left, sr = xpr.right;
if ((sr == null || !sr.red) &&
(sl == null || !sl.red)) {
xpr.red = true;
x = xp;
}
else {
if (sr == null || !sr.red) {
if (sl != null)
sl.red = false;
xpr.red = true;
root = rotateRight(root, xpr);
xpr = (xp = x.parent) == null ?
null : xp.right;
}
if (xpr != null) {
xpr.red = (xp == null) ? false : xp.red;
if ((sr = xpr.right) != null)
sr.red = false;
}
if (xp != null) {
xp.red = false;
root = rotateLeft(root, xp);
}
x = root;
}
}
}
else { // symmetric
if (xpl != null && xpl.red) {
xpl.red = false;
xp.red = true;
root = rotateRight(root, xp);
xpl = (xp = x.parent) == null ? null : xp.left;
}
if (xpl == null)
x = xp;
else {
TreeNode<K,V> sl = xpl.left, sr = xpl.right;
if ((sl == null || !sl.red) &&
(sr == null || !sr.red)) {
xpl.red = true;
x = xp;
}
else {
if (sl == null || !sl.red) {
if (sr != null)
sr.red = false;
xpl.red = true;
root = rotateLeft(root, xpl);
xpl = (xp = x.parent) == null ?
null : xp.left;
}
if (xpl != null) {
xpl.red = (xp == null) ? false : xp.red;
if ((sl = xpl.left) != null)
sl.red = false;
}
if (xp != null) {
xp.red = false;
root = rotateRight(root, xp);
}
x = root;
}
}
}
}
}
/**
* Recursive invariant check
*/
static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
tb = t.prev, tn = (TreeNode<K,V>)t.next;
if (tb != null && tb.next != t)
return false;
if (tn != null && tn.prev != t)
return false;
if (tp != null && t != tp.left && t != tp.right)
return false;
if (tl != null && (tl.parent != t || tl.hash > t.hash))
return false;
if (tr != null && (tr.parent != t || tr.hash < t.hash))
return false;
if (t.red && tl != null && tl.red && tr != null && tr.red)
return false;
if (tl != null && !checkInvariants(tl))
return false;
if (tr != null && !checkInvariants(tr))
return false;
return true;
}
private static final sun.misc.Unsafe U;
private static final long LOCKSTATE;
static {
try {
U = getUnsafe();
Class<?> k = TreeBin.class;
LOCKSTATE = U.objectFieldOffset
(k.getDeclaredField("lockState"));
} catch (Exception e) {
throw new Error(e);
}
}
} | 0true
| src_main_java_jsr166e_ConcurrentHashMapV8.java |
1,833 | @Component("blMatchesFieldValidator")
public class MatchesFieldValidator extends ValidationConfigurationBasedPropertyValidator {
@Override
public boolean validateInternal(Entity entity,
Serializable instance,
Map<String, FieldMetadata> entityFieldMetadata,
Map<String, String> validationConfiguration,
BasicFieldMetadata propertyMetadata,
String propertyName,
String value) {
String otherField = validationConfiguration.get("otherField");
return StringUtils.equals(entity.getPMap().get(otherField).getValue(), value);
}
} | 1no label
| admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_service_persistence_validation_MatchesFieldValidator.java |
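A hedged sketch of the configuration contract assumed by validateInternal: the validator looks up, under the "otherField" key, the name of the other property whose value must match. A confirm-password check might be configured along these lines (field names are hypothetical):
Map<String, String> validationConfiguration = new HashMap<String, String>();
validationConfiguration.put("otherField", "password"); // value must equal the "password" property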
2,575 | clusterService.submitStateUpdateTask("zen-disco-master_failed (" + masterNode + ")", Priority.URGENT, new ProcessedClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
if (!masterNode.id().equals(currentState.nodes().masterNodeId())) {
// master got switched on us, no need to send anything
return currentState;
}
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(currentState.nodes())
// make sure the old master node, which has failed, is not part of the nodes we publish
.remove(masterNode.id())
.masterNodeId(null).build();
if (!electMaster.hasEnoughMasterNodes(discoveryNodes)) {
return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "not enough master nodes after master left (reason = " + reason + ")");
}
final DiscoveryNode electedMaster = electMaster.electMaster(discoveryNodes); // elect master
if (localNode.equals(electedMaster)) {
master = true;
masterFD.stop("got elected as new master since master left (reason = " + reason + ")");
nodesFD.start();
discoveryNodes = DiscoveryNodes.builder(discoveryNodes).masterNodeId(localNode.id()).build();
latestDiscoNodes = discoveryNodes;
return ClusterState.builder(currentState).nodes(latestDiscoNodes).build();
} else {
nodesFD.stop();
if (electedMaster != null) {
discoveryNodes = DiscoveryNodes.builder(discoveryNodes).masterNodeId(electedMaster.id()).build();
masterFD.restart(electedMaster, "possible elected master since master left (reason = " + reason + ")");
latestDiscoNodes = discoveryNodes;
return ClusterState.builder(currentState)
.nodes(latestDiscoNodes)
.build();
} else {
return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "master_left and no other node elected to become master");
}
}
}
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
sendInitialStateEventIfNeeded();
}
}); | 1no label
| src_main_java_org_elasticsearch_discovery_zen_ZenDiscovery.java |
1,014 | public interface NullOrderFactory {
public Order getNullOrder();
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_domain_NullOrderFactory.java |
180 | public class LinkedTransferQueue<E> extends AbstractQueue<E>
implements TransferQueue<E>, java.io.Serializable {
private static final long serialVersionUID = -3223113410248163686L;
/*
* *** Overview of Dual Queues with Slack ***
*
* Dual Queues, introduced by Scherer and Scott
* (http://www.cs.rice.edu/~wns1/papers/2004-DISC-DDS.pdf) are
* (linked) queues in which nodes may represent either data or
* requests. When a thread tries to enqueue a data node, but
* encounters a request node, it instead "matches" and removes it;
* and vice versa for enqueuing requests. Blocking Dual Queues
* arrange that threads enqueuing unmatched requests block until
* other threads provide the match. Dual Synchronous Queues (see
* Scherer, Lea, & Scott
* http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf)
* additionally arrange that threads enqueuing unmatched data also
* block. Dual Transfer Queues support all of these modes, as
* dictated by callers.
*
* A FIFO dual queue may be implemented using a variation of the
* Michael & Scott (M&S) lock-free queue algorithm
* (http://www.cs.rochester.edu/u/scott/papers/1996_PODC_queues.pdf).
* It maintains two pointer fields, "head", pointing to a
* (matched) node that in turn points to the first actual
* (unmatched) queue node (or null if empty); and "tail" that
* points to the last node on the queue (or again null if
* empty). For example, here is a possible queue with four data
* elements:
*
* head tail
* | |
* v v
* M -> U -> U -> U -> U
*
* The M&S queue algorithm is known to be prone to scalability and
* overhead limitations when maintaining (via CAS) these head and
* tail pointers. This has led to the development of
* contention-reducing variants such as elimination arrays (see
* Moir et al http://portal.acm.org/citation.cfm?id=1074013) and
* optimistic back pointers (see Ladan-Mozes & Shavit
* http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf).
* However, the nature of dual queues enables a simpler tactic for
* improving M&S-style implementations when dual-ness is needed.
*
* In a dual queue, each node must atomically maintain its match
* status. While there are other possible variants, we implement
* this here as: for a data-mode node, matching entails CASing an
* "item" field from a non-null data value to null upon match, and
* vice-versa for request nodes, CASing from null to a data
* value. (Note that the linearization properties of this style of
* queue are easy to verify -- elements are made available by
* linking, and unavailable by matching.) Compared to plain M&S
* queues, this property of dual queues requires one additional
* successful atomic operation per enq/deq pair. But it also
* enables lower cost variants of queue maintenance mechanics. (A
* variation of this idea applies even for non-dual queues that
* support deletion of interior elements, such as
* j.u.c.ConcurrentLinkedQueue.)
*
* Once a node is matched, its match status can never again
* change. We may thus arrange that the linked list of them
* contain a prefix of zero or more matched nodes, followed by a
* suffix of zero or more unmatched nodes. (Note that we allow
* both the prefix and suffix to be zero length, which in turn
* means that we do not use a dummy header.) If we were not
* concerned with either time or space efficiency, we could
* correctly perform enqueue and dequeue operations by traversing
* from a pointer to the initial node; CASing the item of the
* first unmatched node on match and CASing the next field of the
* trailing node on appends. (Plus some special-casing when
* initially empty). While this would be a terrible idea in
* itself, it does have the benefit of not requiring ANY atomic
* updates on head/tail fields.
*
* We introduce here an approach that lies between the extremes of
* never versus always updating queue (head and tail) pointers.
* This offers a tradeoff between sometimes requiring extra
* traversal steps to locate the first and/or last unmatched
* nodes, versus the reduced overhead and contention of fewer
* updates to queue pointers. For example, a possible snapshot of
* a queue is:
*
* head tail
* | |
* v v
* M -> M -> U -> U -> U -> U
*
* The best value for this "slack" (the targeted maximum distance
* between the value of "head" and the first unmatched node, and
* similarly for "tail") is an empirical matter. We have found
* that using very small constants in the range of 1-3 work best
* over a range of platforms. Larger values introduce increasing
* costs of cache misses and risks of long traversal chains, while
* smaller values increase CAS contention and overhead.
*
* Dual queues with slack differ from plain M&S dual queues by
* virtue of only sometimes updating head or tail pointers when
* matching, appending, or even traversing nodes; in order to
* maintain a targeted slack. The idea of "sometimes" may be
* operationalized in several ways. The simplest is to use a
* per-operation counter incremented on each traversal step, and
* to try (via CAS) to update the associated queue pointer
* whenever the count exceeds a threshold. Another, that requires
* more overhead, is to use random number generators to update
* with a given probability per traversal step.
*
* In any strategy along these lines, because CASes updating
* fields may fail, the actual slack may exceed targeted
* slack. However, they may be retried at any time to maintain
* targets. Even when using very small slack values, this
* approach works well for dual queues because it allows all
* operations up to the point of matching or appending an item
* (hence potentially allowing progress by another thread) to be
* read-only, thus not introducing any further contention. As
* described below, we implement this by performing slack
* maintenance retries only after these points.
*
* As an accompaniment to such techniques, traversal overhead can
* be further reduced without increasing contention of head
* pointer updates: Threads may sometimes shortcut the "next" link
* path from the current "head" node to be closer to the currently
* known first unmatched node, and similarly for tail. Again, this
* may be triggered with using thresholds or randomization.
*
* These ideas must be further extended to avoid unbounded amounts
* of costly-to-reclaim garbage caused by the sequential "next"
* links of nodes starting at old forgotten head nodes: As first
* described in detail by Boehm
* (http://portal.acm.org/citation.cfm?doid=503272.503282) if a GC
* delays noticing that any arbitrarily old node has become
* garbage, all newer dead nodes will also be unreclaimed.
* (Similar issues arise in non-GC environments.) To cope with
* this in our implementation, upon CASing to advance the head
* pointer, we set the "next" link of the previous head to point
* only to itself; thus limiting the length of connected dead lists.
* (We also take similar care to wipe out possibly garbage
* retaining values held in other Node fields.) However, doing so
* adds some further complexity to traversal: If any "next"
* pointer links to itself, it indicates that the current thread
* has lagged behind a head-update, and so the traversal must
* continue from the "head". Traversals trying to find the
* current tail starting from "tail" may also encounter
* self-links, in which case they also continue at "head".
*
* It is tempting in a slack-based scheme to not even use CAS for
* updates (similarly to Ladan-Mozes & Shavit). However, this
* cannot be done for head updates under the above link-forgetting
* mechanics because an update may leave head at a detached node.
* And while direct writes are possible for tail updates, they
* increase the risk of long retraversals, and hence long garbage
* chains, which can be much more costly than is worthwhile
* considering that the cost difference of performing a CAS vs
* write is smaller when they are not triggered on each operation
* (especially considering that writes and CASes equally require
* additional GC bookkeeping ("write barriers") that are sometimes
* more costly than the writes themselves because of contention).
*
* *** Overview of implementation ***
*
* We use a threshold-based approach to updates, with a slack
* threshold of two -- that is, we update head/tail when the
* current pointer appears to be two or more steps away from the
* first/last node. The slack value is hard-wired: a path greater
* than one is naturally implemented by checking equality of
* traversal pointers except when the list has only one element,
* in which case we keep slack threshold at one. Avoiding tracking
* explicit counts across method calls slightly simplifies an
* already-messy implementation. Using randomization would
* probably work better if there were a low-quality dirt-cheap
* per-thread one available, but even ThreadLocalRandom is too
* heavy for these purposes.
*
* With such a small slack threshold value, it is not worthwhile
* to augment this with path short-circuiting (i.e., unsplicing
* interior nodes) except in the case of cancellation/removal (see
* below).
*
* We allow both the head and tail fields to be null before any
* nodes are enqueued; initializing upon first append. This
* simplifies some other logic, as well as providing more
* efficient explicit control paths instead of letting JVMs insert
* implicit NullPointerExceptions when they are null. While not
* currently fully implemented, we also leave open the possibility
* of re-nulling these fields when empty (which is complicated to
* arrange, for little benefit.)
*
* All enqueue/dequeue operations are handled by the single method
* "xfer" with parameters indicating whether to act as some form
* of offer, put, poll, take, or transfer (each possibly with
* timeout). The relative complexity of using one monolithic
* method outweighs the code bulk and maintenance problems of
* using separate methods for each case.
*
* Operation consists of up to three phases. The first is
* implemented within method xfer, the second in tryAppend, and
* the third in method awaitMatch.
*
* 1. Try to match an existing node
*
* Starting at head, skip already-matched nodes until finding
* an unmatched node of opposite mode, if one exists, in which
* case matching it and returning, also if necessary updating
* head to one past the matched node (or the node itself if the
* list has no other unmatched nodes). If the CAS misses, then
* a loop retries advancing head by two steps until either
* success or the slack is at most two. By requiring that each
* attempt advances head by two (if applicable), we ensure that
* the slack does not grow without bound. Traversals also check
* if the initial head is now off-list, in which case they
* start at the new head.
*
* If no candidates are found and the call was untimed
* poll/offer (argument "how" is NOW), return.
*
* 2. Try to append a new node (method tryAppend)
*
* Starting at current tail pointer, find the actual last node
* and try to append a new node (or if head was null, establish
* the first node). Nodes can be appended only if their
* predecessors are either already matched or are of the same
* mode. If we detect otherwise, then a new node with opposite
* mode must have been appended during traversal, so we must
* restart at phase 1. The traversal and update steps are
* otherwise similar to phase 1: Retrying upon CAS misses and
* checking for staleness. In particular, if a self-link is
* encountered, then we can safely jump to a node on the list
* by continuing the traversal at current head.
*
* On successful append, if the call was ASYNC, return.
*
* 3. Await match or cancellation (method awaitMatch)
*
* Wait for another thread to match node; instead cancelling if
* the current thread was interrupted or the wait timed out. On
* multiprocessors, we use front-of-queue spinning: If a node
* appears to be the first unmatched node in the queue, it
* spins a bit before blocking. In either case, before blocking
* it tries to unsplice any nodes between the current "head"
* and the first unmatched node.
*
* Front-of-queue spinning vastly improves performance of
* heavily contended queues. And so long as it is relatively
* brief and "quiet", spinning does not much impact performance
* of less-contended queues. During spins threads check their
* interrupt status and generate a thread-local random number
* to decide to occasionally perform a Thread.yield. While
* yield has underdefined specs, we assume that it might help,
* and will not hurt, in limiting impact of spinning on busy
* systems. We also use smaller (1/2) spins for nodes that are
* not known to be front but whose predecessors have not
* blocked -- these "chained" spins avoid artifacts of
* front-of-queue rules which otherwise lead to alternating
* nodes spinning vs blocking. Further, front threads that
* represent phase changes (from data to request node or vice
* versa) compared to their predecessors receive additional
* chained spins, reflecting longer paths typically required to
* unblock threads during phase changes.
*
*
* ** Unlinking removed interior nodes **
*
* In addition to minimizing garbage retention via self-linking
* described above, we also unlink removed interior nodes. These
* may arise due to timed out or interrupted waits, or calls to
* remove(x) or Iterator.remove. Normally, given a node that was
* at one time known to be the predecessor of some node s that is
* to be removed, we can unsplice s by CASing the next field of
* its predecessor if it still points to s (otherwise s must
* already have been removed or is now offlist). But there are two
* situations in which we cannot guarantee to make node s
* unreachable in this way: (1) If s is the trailing node of list
* (i.e., with null next), then it is pinned as the target node
* for appends, so can only be removed later after other nodes are
* appended. (2) We cannot necessarily unlink s given a
* predecessor node that is matched (including the case of being
* cancelled): the predecessor may already be unspliced, in which
* case some previous reachable node may still point to s.
* (For further explanation see Herlihy & Shavit "The Art of
* Multiprocessor Programming" chapter 9). Although, in both
* cases, we can rule out the need for further action if either s
* or its predecessor are (or can be made to be) at, or fall off
* from, the head of list.
*
* Without taking these into account, it would be possible for an
* unbounded number of supposedly removed nodes to remain
* reachable. Situations leading to such buildup are uncommon but
* can occur in practice; for example when a series of short timed
* calls to poll repeatedly time out but never otherwise fall off
* the list because of an untimed call to take at the front of the
* queue.
*
* When these cases arise, rather than always retraversing the
* entire list to find an actual predecessor to unlink (which
* won't help for case (1) anyway), we record a conservative
* estimate of possible unsplice failures (in "sweepVotes").
* We trigger a full sweep when the estimate exceeds a threshold
* ("SWEEP_THRESHOLD") indicating the maximum number of estimated
* removal failures to tolerate before sweeping through, unlinking
* cancelled nodes that were not unlinked upon initial removal.
* We perform sweeps by the thread hitting threshold (rather than
* background threads or by spreading work to other threads)
* because in the main contexts in which removal occurs, the
* caller is already timed-out, cancelled, or performing a
* potentially O(n) operation (e.g. remove(x)), none of which are
* time-critical enough to warrant the overhead that alternatives
* would impose on other threads.
*
* Because the sweepVotes estimate is conservative, and because
* nodes become unlinked "naturally" as they fall off the head of
* the queue, and because we allow votes to accumulate even while
* sweeps are in progress, there are typically significantly fewer
* such nodes than estimated. Choice of a threshold value
* balances the likelihood of wasted effort and contention, versus
* providing a worst-case bound on retention of interior nodes in
* quiescent queues. The value defined below was chosen
* empirically to balance these under various timeout scenarios.
*
* Note that we cannot self-link unlinked interior nodes during
* sweeps. However, the associated garbage chains terminate when
* some successor ultimately falls off the head of the list and is
* self-linked.
*/
/** True if on multiprocessor */
private static final boolean MP =
Runtime.getRuntime().availableProcessors() > 1;
/**
* The number of times to spin (with randomly interspersed calls
* to Thread.yield) on multiprocessor before blocking when a node
* is apparently the first waiter in the queue. See above for
* explanation. Must be a power of two. The value is empirically
* derived -- it works pretty well across a variety of processors,
* numbers of CPUs, and OSes.
*/
private static final int FRONT_SPINS = 1 << 7;
/**
* The number of times to spin before blocking when a node is
* preceded by another node that is apparently spinning. Also
* serves as an increment to FRONT_SPINS on phase changes, and as
* base average frequency for yielding during spins. Must be a
* power of two.
*/
private static final int CHAINED_SPINS = FRONT_SPINS >>> 1;
/**
* The maximum number of estimated removal failures (sweepVotes)
* to tolerate before sweeping through the queue unlinking
* cancelled nodes that were not unlinked upon initial
* removal. See above for explanation. The value must be at least
* two to avoid useless sweeps when removing trailing nodes.
*/
static final int SWEEP_THRESHOLD = 32;
/**
* Queue nodes. Uses Object, not E, for items to allow forgetting
* them after use. Relies heavily on Unsafe mechanics to minimize
* unnecessary ordering constraints: Writes that are intrinsically
* ordered wrt other accesses or CASes use simple relaxed forms.
*/
static final class Node {
final boolean isData; // false if this is a request node
volatile Object item; // initially non-null if isData; CASed to match
volatile Node next;
volatile Thread waiter; // null until waiting
// CAS methods for fields
final boolean casNext(Node cmp, Node val) {
return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
}
final boolean casItem(Object cmp, Object val) {
// assert cmp == null || cmp.getClass() != Node.class;
return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
}
/**
* Constructs a new node. Uses relaxed write because item can
* only be seen after publication via casNext.
*/
Node(Object item, boolean isData) {
UNSAFE.putObject(this, itemOffset, item); // relaxed write
this.isData = isData;
}
/**
* Links node to itself to avoid garbage retention. Called
* only after CASing head field, so uses relaxed write.
*/
final void forgetNext() {
UNSAFE.putObject(this, nextOffset, this);
}
/**
* Sets item to self and waiter to null, to avoid garbage
* retention after matching or cancelling. Uses relaxed writes
* because order is already constrained in the only calling
* contexts: item is forgotten only after volatile/atomic
* mechanics that extract items. Similarly, clearing waiter
* follows either CAS or return from park (if ever parked;
* else we don't care).
*/
final void forgetContents() {
UNSAFE.putObject(this, itemOffset, this);
UNSAFE.putObject(this, waiterOffset, null);
}
/**
* Returns true if this node has been matched, including the
* case of artificial matches due to cancellation.
*/
final boolean isMatched() {
Object x = item;
return (x == this) || ((x == null) == isData);
}
/**
* Returns true if this is an unmatched request node.
*/
final boolean isUnmatchedRequest() {
return !isData && item == null;
}
/**
* Returns true if a node with the given mode cannot be
* appended to this node because this node is unmatched and
* has opposite data mode.
*/
final boolean cannotPrecede(boolean haveData) {
boolean d = isData;
Object x;
return d != haveData && (x = item) != this && (x != null) == d;
}
/**
* Tries to artificially match a data node -- used by remove.
*/
final boolean tryMatchData() {
// assert isData;
Object x = item;
if (x != null && x != this && casItem(x, null)) {
LockSupport.unpark(waiter);
return true;
}
return false;
}
private static final long serialVersionUID = -3375979862319811754L;
// Unsafe mechanics
private static final sun.misc.Unsafe UNSAFE;
private static final long itemOffset;
private static final long nextOffset;
private static final long waiterOffset;
static {
try {
UNSAFE = getUnsafe();
Class<?> k = Node.class;
itemOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("item"));
nextOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("next"));
waiterOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("waiter"));
} catch (Exception e) {
throw new Error(e);
}
}
}
/** head of the queue; null until first enqueue */
transient volatile Node head;
/** tail of the queue; null until first append */
private transient volatile Node tail;
/** The number of apparent failures to unsplice removed nodes */
private transient volatile int sweepVotes;
// CAS methods for fields
private boolean casTail(Node cmp, Node val) {
return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val);
}
private boolean casHead(Node cmp, Node val) {
return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val);
}
private boolean casSweepVotes(int cmp, int val) {
return UNSAFE.compareAndSwapInt(this, sweepVotesOffset, cmp, val);
}
/*
* Possible values for "how" argument in xfer method.
*/
private static final int NOW = 0; // for untimed poll, tryTransfer
private static final int ASYNC = 1; // for offer, put, add
private static final int SYNC = 2; // for transfer, take
private static final int TIMED = 3; // for timed poll, tryTransfer
@SuppressWarnings("unchecked")
static <E> E cast(Object item) {
// assert item == null || item.getClass() != Node.class;
return (E) item;
}
/**
* Implements all queuing methods. See above for explanation.
*
* @param e the item or null for take
* @param haveData true if this is a put, else a take
* @param how NOW, ASYNC, SYNC, or TIMED
* @param nanos timeout in nanosecs, used only if mode is TIMED
* @return an item if matched, else e
* @throws NullPointerException if haveData mode but e is null
*/
private E xfer(E e, boolean haveData, int how, long nanos) {
if (haveData && (e == null))
throw new NullPointerException();
Node s = null; // the node to append, if needed
retry:
for (;;) { // restart on append race
for (Node h = head, p = h; p != null;) { // find & match first node
boolean isData = p.isData;
Object item = p.item;
if (item != p && (item != null) == isData) { // unmatched
if (isData == haveData) // can't match
break;
if (p.casItem(item, e)) { // match
for (Node q = p; q != h;) {
Node n = q.next; // update by 2 unless singleton
if (head == h && casHead(h, n == null ? q : n)) {
h.forgetNext();
break;
} // advance and retry
if ((h = head) == null ||
(q = h.next) == null || !q.isMatched())
break; // unless slack < 2
}
LockSupport.unpark(p.waiter);
return LinkedTransferQueue.<E>cast(item);
}
}
Node n = p.next;
p = (p != n) ? n : (h = head); // Use head if p offlist
}
if (how != NOW) { // No matches available
if (s == null)
s = new Node(e, haveData);
Node pred = tryAppend(s, haveData);
if (pred == null)
continue retry; // lost race vs opposite mode
if (how != ASYNC)
return awaitMatch(s, pred, e, (how == TIMED), nanos);
}
return e; // not waiting
}
}
/**
* Tries to append node s as tail.
*
* @param s the node to append
* @param haveData true if appending in data mode
* @return null on failure due to losing race with append in
* different mode, else s's predecessor, or s itself if no
* predecessor
*/
private Node tryAppend(Node s, boolean haveData) {
for (Node t = tail, p = t;;) { // move p to last node and append
Node n, u; // temps for reads of next & tail
if (p == null && (p = head) == null) {
if (casHead(null, s))
return s; // initialize
}
else if (p.cannotPrecede(haveData))
return null; // lost race vs opposite mode
else if ((n = p.next) != null) // not last; keep traversing
p = p != t && t != (u = tail) ? (t = u) : // stale tail
(p != n) ? n : null; // restart if off list
else if (!p.casNext(null, s))
p = p.next; // re-read on CAS failure
else {
if (p != t) { // update if slack now >= 2
while ((tail != t || !casTail(t, s)) &&
(t = tail) != null &&
(s = t.next) != null && // advance and retry
(s = s.next) != null && s != t);
}
return p;
}
}
}
/**
* Spins/yields/blocks until node s is matched or caller gives up.
*
* @param s the waiting node
* @param pred the predecessor of s, or s itself if it has no
* predecessor, or null if unknown (the null case does not occur
* in any current calls but may in possible future extensions)
* @param e the comparison value for checking match
* @param timed if true, wait only until timeout elapses
* @param nanos timeout in nanosecs, used only if timed is true
* @return matched item, or e if unmatched on interrupt or timeout
*/
private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) {
long lastTime = timed ? System.nanoTime() : 0L;
Thread w = Thread.currentThread();
int spins = -1; // initialized after first item and cancel checks
ThreadLocalRandom randomYields = null; // bound if needed
for (;;) {
Object item = s.item;
if (item != e) { // matched
// assert item != s;
s.forgetContents(); // avoid garbage
return LinkedTransferQueue.<E>cast(item);
}
if ((w.isInterrupted() || (timed && nanos <= 0)) &&
s.casItem(e, s)) { // cancel
unsplice(pred, s);
return e;
}
if (spins < 0) { // establish spins at/near front
if ((spins = spinsFor(pred, s.isData)) > 0)
randomYields = ThreadLocalRandom.current();
}
else if (spins > 0) { // spin
--spins;
if (randomYields.nextInt(CHAINED_SPINS) == 0)
Thread.yield(); // occasionally yield
}
else if (s.waiter == null) {
s.waiter = w; // request unpark then recheck
}
else if (timed) {
long now = System.nanoTime();
if ((nanos -= now - lastTime) > 0)
LockSupport.parkNanos(this, nanos);
lastTime = now;
}
else {
LockSupport.park(this);
}
}
}
/**
* Returns spin/yield value for a node with given predecessor and
* data mode. See above for explanation.
*/
private static int spinsFor(Node pred, boolean haveData) {
if (MP && pred != null) {
if (pred.isData != haveData) // phase change
return FRONT_SPINS + CHAINED_SPINS;
if (pred.isMatched()) // probably at front
return FRONT_SPINS;
if (pred.waiter == null) // pred apparently spinning
return CHAINED_SPINS;
}
return 0;
}
/* -------------- Traversal methods -------------- */
/**
* Returns the successor of p, or the head node if p.next has been
* linked to self, which will only be true if traversing with a
* stale pointer that is now off the list.
*/
final Node succ(Node p) {
Node next = p.next;
return (p == next) ? head : next;
}
/**
* Returns the first unmatched node of the given mode, or null if
* none. Used by methods isEmpty, hasWaitingConsumer.
*/
private Node firstOfMode(boolean isData) {
for (Node p = head; p != null; p = succ(p)) {
if (!p.isMatched())
return (p.isData == isData) ? p : null;
}
return null;
}
/**
* Returns the item in the first unmatched node with isData; or
* null if none. Used by peek.
*/
private E firstDataItem() {
for (Node p = head; p != null; p = succ(p)) {
Object item = p.item;
if (p.isData) {
if (item != null && item != p)
return LinkedTransferQueue.<E>cast(item);
}
else if (item == null)
return null;
}
return null;
}
/**
* Traverses and counts unmatched nodes of the given mode.
* Used by methods size and getWaitingConsumerCount.
*/
private int countOfMode(boolean data) {
int count = 0;
for (Node p = head; p != null; ) {
if (!p.isMatched()) {
if (p.isData != data)
return 0;
if (++count == Integer.MAX_VALUE) // saturated
break;
}
Node n = p.next;
if (n != p)
p = n;
else {
count = 0;
p = head;
}
}
return count;
}
final class Itr implements Iterator<E> {
private Node nextNode; // next node to return item for
private E nextItem; // the corresponding item
private Node lastRet; // last returned node, to support remove
private Node lastPred; // predecessor to unlink lastRet
/**
* Moves to next node after prev, or first node if prev null.
*/
private void advance(Node prev) {
/*
* To track and avoid buildup of deleted nodes in the face
* of calls to both Queue.remove and Itr.remove, we must
* include variants of unsplice and sweep upon each
* advance: Upon Itr.remove, we may need to catch up links
* from lastPred, and upon other removes, we might need to
* skip ahead from stale nodes and unsplice deleted ones
* found while advancing.
*/
Node r, b; // reset lastPred upon possible deletion of lastRet
if ((r = lastRet) != null && !r.isMatched())
lastPred = r; // next lastPred is old lastRet
else if ((b = lastPred) == null || b.isMatched())
lastPred = null; // at start of list
else {
Node s, n; // help with removal of lastPred.next
while ((s = b.next) != null &&
s != b && s.isMatched() &&
(n = s.next) != null && n != s)
b.casNext(s, n);
}
this.lastRet = prev;
for (Node p = prev, s, n;;) {
s = (p == null) ? head : p.next;
if (s == null)
break;
else if (s == p) {
p = null;
continue;
}
Object item = s.item;
if (s.isData) {
if (item != null && item != s) {
nextItem = LinkedTransferQueue.<E>cast(item);
nextNode = s;
return;
}
}
else if (item == null)
break;
// assert s.isMatched();
if (p == null)
p = s;
else if ((n = s.next) == null)
break;
else if (s == n)
p = null;
else
p.casNext(s, n);
}
nextNode = null;
nextItem = null;
}
Itr() {
advance(null);
}
public final boolean hasNext() {
return nextNode != null;
}
public final E next() {
Node p = nextNode;
if (p == null) throw new NoSuchElementException();
E e = nextItem;
advance(p);
return e;
}
public final void remove() {
final Node lastRet = this.lastRet;
if (lastRet == null)
throw new IllegalStateException();
this.lastRet = null;
if (lastRet.tryMatchData())
unsplice(lastPred, lastRet);
}
}
/* -------------- Removal methods -------------- */
/**
* Unsplices (now or later) the given deleted/cancelled node with
* the given predecessor.
*
* @param pred a node that was at one time known to be the
* predecessor of s, or null or s itself if s is/was at head
* @param s the node to be unspliced
*/
final void unsplice(Node pred, Node s) {
s.forgetContents(); // forget unneeded fields
/*
* See above for rationale. Briefly: if pred still points to
* s, try to unlink s. If s cannot be unlinked, because it is
* trailing node or pred might be unlinked, and neither pred
* nor s are head or offlist, add to sweepVotes, and if enough
* votes have accumulated, sweep.
*/
if (pred != null && pred != s && pred.next == s) {
Node n = s.next;
if (n == null ||
(n != s && pred.casNext(s, n) && pred.isMatched())) {
for (;;) { // check if at, or could be, head
Node h = head;
if (h == pred || h == s || h == null)
return; // at head or list empty
if (!h.isMatched())
break;
Node hn = h.next;
if (hn == null)
return; // now empty
if (hn != h && casHead(h, hn))
h.forgetNext(); // advance head
}
if (pred.next != pred && s.next != s) { // recheck if offlist
for (;;) { // sweep now if enough votes
int v = sweepVotes;
if (v < SWEEP_THRESHOLD) {
if (casSweepVotes(v, v + 1))
break;
}
else if (casSweepVotes(v, 0)) {
sweep();
break;
}
}
}
}
}
}
/**
* Unlinks matched (typically cancelled) nodes encountered in a
* traversal from head.
*/
private void sweep() {
for (Node p = head, s, n; p != null && (s = p.next) != null; ) {
if (!s.isMatched())
// Unmatched nodes are never self-linked
p = s;
else if ((n = s.next) == null) // trailing node is pinned
break;
else if (s == n) // stale
// No need to also check for p == s, since that implies s == n
p = head;
else
p.casNext(s, n);
}
}
/**
* Main implementation of remove(Object)
*/
private boolean findAndRemove(Object e) {
if (e != null) {
for (Node pred = null, p = head; p != null; ) {
Object item = p.item;
if (p.isData) {
if (item != null && item != p && e.equals(item) &&
p.tryMatchData()) {
unsplice(pred, p);
return true;
}
}
else if (item == null)
break;
pred = p;
if ((p = p.next) == pred) { // stale
pred = null;
p = head;
}
}
}
return false;
}
/**
* Creates an initially empty {@code LinkedTransferQueue}.
*/
public LinkedTransferQueue() {
}
/**
* Creates a {@code LinkedTransferQueue}
* initially containing the elements of the given collection,
* added in traversal order of the collection's iterator.
*
* @param c the collection of elements to initially contain
* @throws NullPointerException if the specified collection or any
* of its elements are null
*/
public LinkedTransferQueue(Collection<? extends E> c) {
this();
addAll(c);
}
/**
* Inserts the specified element at the tail of this queue.
* As the queue is unbounded, this method will never block.
*
* @throws NullPointerException if the specified element is null
*/
public void put(E e) {
xfer(e, true, ASYNC, 0);
}
/**
* Inserts the specified element at the tail of this queue.
* As the queue is unbounded, this method will never block or
* return {@code false}.
*
* @return {@code true} (as specified by
* {@link java.util.concurrent.BlockingQueue#offer(Object,long,TimeUnit)
* BlockingQueue.offer})
* @throws NullPointerException if the specified element is null
*/
public boolean offer(E e, long timeout, TimeUnit unit) {
xfer(e, true, ASYNC, 0);
return true;
}
/**
* Inserts the specified element at the tail of this queue.
* As the queue is unbounded, this method will never return {@code false}.
*
* @return {@code true} (as specified by {@link Queue#offer})
* @throws NullPointerException if the specified element is null
*/
public boolean offer(E e) {
xfer(e, true, ASYNC, 0);
return true;
}
/**
* Inserts the specified element at the tail of this queue.
* As the queue is unbounded, this method will never throw
* {@link IllegalStateException} or return {@code false}.
*
* @return {@code true} (as specified by {@link Collection#add})
* @throws NullPointerException if the specified element is null
*/
public boolean add(E e) {
xfer(e, true, ASYNC, 0);
return true;
}
/**
* Transfers the element to a waiting consumer immediately, if possible.
*
* <p>More precisely, transfers the specified element immediately
* if there exists a consumer already waiting to receive it (in
* {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
* otherwise returning {@code false} without enqueuing the element.
*
* @throws NullPointerException if the specified element is null
*/
public boolean tryTransfer(E e) {
return xfer(e, true, NOW, 0) == null;
}
/**
* Transfers the element to a consumer, waiting if necessary to do so.
*
* <p>More precisely, transfers the specified element immediately
* if there exists a consumer already waiting to receive it (in
* {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
* else inserts the specified element at the tail of this queue
* and waits until the element is received by a consumer.
*
* @throws NullPointerException if the specified element is null
*/
public void transfer(E e) throws InterruptedException {
if (xfer(e, true, SYNC, 0) != null) {
Thread.interrupted(); // failure possible only due to interrupt
throw new InterruptedException();
}
}
/**
* Transfers the element to a consumer if it is possible to do so
* before the timeout elapses.
*
* <p>More precisely, transfers the specified element immediately
* if there exists a consumer already waiting to receive it (in
* {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
* else inserts the specified element at the tail of this queue
* and waits until the element is received by a consumer,
* returning {@code false} if the specified wait time elapses
* before the element can be transferred.
*
* @throws NullPointerException if the specified element is null
*/
public boolean tryTransfer(E e, long timeout, TimeUnit unit)
throws InterruptedException {
if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null)
return true;
if (!Thread.interrupted())
return false;
throw new InterruptedException();
}
public E take() throws InterruptedException {
E e = xfer(null, false, SYNC, 0);
if (e != null)
return e;
Thread.interrupted();
throw new InterruptedException();
}
public E poll(long timeout, TimeUnit unit) throws InterruptedException {
E e = xfer(null, false, TIMED, unit.toNanos(timeout));
if (e != null || !Thread.interrupted())
return e;
throw new InterruptedException();
}
public E poll() {
return xfer(null, false, NOW, 0);
}
/**
* @throws NullPointerException {@inheritDoc}
* @throws IllegalArgumentException {@inheritDoc}
*/
public int drainTo(Collection<? super E> c) {
if (c == null)
throw new NullPointerException();
if (c == this)
throw new IllegalArgumentException();
int n = 0;
for (E e; (e = poll()) != null;) {
c.add(e);
++n;
}
return n;
}
/**
* @throws NullPointerException {@inheritDoc}
* @throws IllegalArgumentException {@inheritDoc}
*/
public int drainTo(Collection<? super E> c, int maxElements) {
if (c == null)
throw new NullPointerException();
if (c == this)
throw new IllegalArgumentException();
int n = 0;
for (E e; n < maxElements && (e = poll()) != null;) {
c.add(e);
++n;
}
return n;
}
/**
* Returns an iterator over the elements in this queue in proper sequence.
* The elements will be returned in order from first (head) to last (tail).
*
* <p>The returned iterator is a "weakly consistent" iterator that
* will never throw {@link java.util.ConcurrentModificationException
* ConcurrentModificationException}, and guarantees to traverse
* elements as they existed upon construction of the iterator, and
* may (but is not guaranteed to) reflect any modifications
* subsequent to construction.
*
* @return an iterator over the elements in this queue in proper sequence
*/
public Iterator<E> iterator() {
return new Itr();
}
public E peek() {
return firstDataItem();
}
/**
* Returns {@code true} if this queue contains no elements.
*
* @return {@code true} if this queue contains no elements
*/
public boolean isEmpty() {
for (Node p = head; p != null; p = succ(p)) {
if (!p.isMatched())
return !p.isData;
}
return true;
}
public boolean hasWaitingConsumer() {
return firstOfMode(false) != null;
}
/**
* Returns the number of elements in this queue. If this queue
* contains more than {@code Integer.MAX_VALUE} elements, returns
* {@code Integer.MAX_VALUE}.
*
* <p>Beware that, unlike in most collections, this method is
* <em>NOT</em> a constant-time operation. Because of the
* asynchronous nature of these queues, determining the current
* number of elements requires an O(n) traversal.
*
* @return the number of elements in this queue
*/
public int size() {
return countOfMode(true);
}
public int getWaitingConsumerCount() {
return countOfMode(false);
}
/**
* Removes a single instance of the specified element from this queue,
* if it is present. More formally, removes an element {@code e} such
* that {@code o.equals(e)}, if this queue contains one or more such
* elements.
* Returns {@code true} if this queue contained the specified element
* (or equivalently, if this queue changed as a result of the call).
*
* @param o element to be removed from this queue, if present
* @return {@code true} if this queue changed as a result of the call
*/
public boolean remove(Object o) {
return findAndRemove(o);
}
/**
* Returns {@code true} if this queue contains the specified element.
* More formally, returns {@code true} if and only if this queue contains
* at least one element {@code e} such that {@code o.equals(e)}.
*
* @param o object to be checked for containment in this queue
* @return {@code true} if this queue contains the specified element
*/
public boolean contains(Object o) {
if (o == null) return false;
for (Node p = head; p != null; p = succ(p)) {
Object item = p.item;
if (p.isData) {
if (item != null && item != p && o.equals(item))
return true;
}
else if (item == null)
break;
}
return false;
}
/**
* Always returns {@code Integer.MAX_VALUE} because a
* {@code LinkedTransferQueue} is not capacity constrained.
*
* @return {@code Integer.MAX_VALUE} (as specified by
* {@link java.util.concurrent.BlockingQueue#remainingCapacity()
* BlockingQueue.remainingCapacity})
*/
public int remainingCapacity() {
return Integer.MAX_VALUE;
}
/**
* Saves the state to a stream (that is, serializes it).
*
* @serialData All of the elements (each an {@code E}) in
* the proper order, followed by a null
* @param s the stream
*/
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
s.defaultWriteObject();
for (E e : this)
s.writeObject(e);
// Use trailing null as sentinel
s.writeObject(null);
}
/**
* Reconstitutes the Queue instance from a stream (that is,
* deserializes it).
*
* @param s the stream
*/
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
for (;;) {
@SuppressWarnings("unchecked")
E item = (E) s.readObject();
if (item == null)
break;
else
offer(item);
}
}
// Unsafe mechanics
private static final sun.misc.Unsafe UNSAFE;
private static final long headOffset;
private static final long tailOffset;
private static final long sweepVotesOffset;
static {
try {
UNSAFE = getUnsafe();
Class<?> k = LinkedTransferQueue.class;
headOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("head"));
tailOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("tail"));
sweepVotesOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("sweepVotes"));
} catch (Exception e) {
throw new Error(e);
}
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException tryReflectionInstead) {}
try {
return java.security.AccessController.doPrivileged
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
} | 0true
| src_main_java_jsr166y_LinkedTransferQueue.java |
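The xfer machinery above is easiest to see from the outside: transfer blocks until a consumer matches the element, tryTransfer in NOW mode never enqueues, and offer/put never block. A small demo against the equivalent JDK class (java.util.concurrent.LinkedTransferQueue, which this jsr166y backport mirrors) follows.

import java.util.concurrent.LinkedTransferQueue;
import java.util.concurrent.TimeUnit;

public class TransferDemo {
    public static void main(String[] args) throws InterruptedException {
        LinkedTransferQueue<String> q = new LinkedTransferQueue<>();

        // tryTransfer in NOW mode: no waiting consumer, so nothing is enqueued.
        System.out.println(q.tryTransfer("dropped")); // false
        System.out.println(q.size());                 // 0

        // offer is ASYNC: the element is appended without blocking.
        q.offer("queued");
        System.out.println(q.poll());                 // "queued"

        // transfer is SYNC: the producer parks until a consumer takes the item.
        Thread consumer = new Thread(() -> {
            try {
                System.out.println("took " + q.take());
            } catch (InterruptedException ignored) { }
        });
        consumer.start();
        long start = System.nanoTime();
        q.transfer("handoff"); // returns only once the consumer matched it
        System.out.printf("handoff took %d ms%n",
                TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
        consumer.join();
    }
}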
1,642 | @ClusterScope(scope=Scope.SUITE, numNodes=2)
public class ClusterSearchShardsTests extends ElasticsearchIntegrationTest {
@Override
protected Settings nodeSettings(int nodeOrdinal) {
switch(nodeOrdinal) {
case 1:
return settingsBuilder().put("node.tag", "B").build();
case 0:
return settingsBuilder().put("node.tag", "A").build();
}
return super.nodeSettings(nodeOrdinal);
}
@Test
public void testSingleShardAllocation() throws Exception {
client().admin().indices().prepareCreate("test").setSettings(settingsBuilder()
.put("index.number_of_shards", "1").put("index.number_of_replicas", 0).put("index.routing.allocation.include.tag", "A")).execute().actionGet();
ensureGreen();
ClusterSearchShardsResponse response = client().admin().cluster().prepareSearchShards("test").execute().actionGet();
assertThat(response.getGroups().length, equalTo(1));
assertThat(response.getGroups()[0].getIndex(), equalTo("test"));
assertThat(response.getGroups()[0].getShardId(), equalTo(0));
assertThat(response.getGroups()[0].getShards().length, equalTo(1));
assertThat(response.getNodes().length, equalTo(1));
assertThat(response.getGroups()[0].getShards()[0].currentNodeId(), equalTo(response.getNodes()[0].getId()));
response = client().admin().cluster().prepareSearchShards("test").setRouting("A").execute().actionGet();
assertThat(response.getGroups().length, equalTo(1));
assertThat(response.getGroups()[0].getIndex(), equalTo("test"));
assertThat(response.getGroups()[0].getShardId(), equalTo(0));
assertThat(response.getGroups()[0].getShards().length, equalTo(1));
assertThat(response.getNodes().length, equalTo(1));
assertThat(response.getGroups()[0].getShards()[0].currentNodeId(), equalTo(response.getNodes()[0].getId()));
}
@Test
public void testMultipleShardsSingleNodeAllocation() throws Exception {
client().admin().indices().prepareCreate("test").setSettings(settingsBuilder()
.put("index.number_of_shards", "4").put("index.number_of_replicas", 0).put("index.routing.allocation.include.tag", "A")).execute().actionGet();
ensureGreen();
ClusterSearchShardsResponse response = client().admin().cluster().prepareSearchShards("test").execute().actionGet();
assertThat(response.getGroups().length, equalTo(4));
assertThat(response.getGroups()[0].getIndex(), equalTo("test"));
assertThat(response.getNodes().length, equalTo(1));
assertThat(response.getGroups()[0].getShards()[0].currentNodeId(), equalTo(response.getNodes()[0].getId()));
response = client().admin().cluster().prepareSearchShards("test").setRouting("ABC").execute().actionGet();
assertThat(response.getGroups().length, equalTo(1));
response = client().admin().cluster().prepareSearchShards("test").setPreference("_shards:2").execute().actionGet();
assertThat(response.getGroups().length, equalTo(1));
assertThat(response.getGroups()[0].getShardId(), equalTo(2));
}
@Test
public void testMultipleIndicesAllocation() throws Exception {
client().admin().indices().prepareCreate("test1").setSettings(settingsBuilder()
.put("index.number_of_shards", "4").put("index.number_of_replicas", 1)).execute().actionGet();
client().admin().indices().prepareCreate("test2").setSettings(settingsBuilder()
.put("index.number_of_shards", "4").put("index.number_of_replicas", 1)).execute().actionGet();
client().admin().indices().prepareAliases()
.addAliasAction(AliasAction.newAddAliasAction("test1", "routing_alias").routing("ABC"))
.addAliasAction(AliasAction.newAddAliasAction("test2", "routing_alias").routing("EFG"))
.execute().actionGet();
client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
ClusterSearchShardsResponse response = client().admin().cluster().prepareSearchShards("routing_alias").execute().actionGet();
assertThat(response.getGroups().length, equalTo(2));
assertThat(response.getGroups()[0].getShards().length, equalTo(2));
assertThat(response.getGroups()[1].getShards().length, equalTo(2));
boolean seenTest1 = false;
boolean seenTest2 = false;
for (ClusterSearchShardsGroup group : response.getGroups()) {
if (group.getIndex().equals("test1")) {
seenTest1 = true;
assertThat(group.getShards().length, equalTo(2));
} else if (group.getIndex().equals("test2")) {
seenTest2 = true;
assertThat(group.getShards().length, equalTo(2));
} else {
fail();
}
}
assertThat(seenTest1, equalTo(true));
assertThat(seenTest2, equalTo(true));
assertThat(response.getNodes().length, equalTo(2));
}
} | 0true
| src_test_java_org_elasticsearch_cluster_shards_ClusterSearchShardsTests.java |
759 | public class MultiGetShardRequest extends SingleShardOperationRequest<MultiGetShardRequest> {
private int shardId;
private String preference;
Boolean realtime;
boolean refresh;
IntArrayList locations;
List<String> types;
List<String> ids;
List<String[]> fields;
LongArrayList versions;
List<VersionType> versionTypes;
List<FetchSourceContext> fetchSourceContexts;
MultiGetShardRequest() {
}
MultiGetShardRequest(String index, int shardId) {
super(index);
this.shardId = shardId;
locations = new IntArrayList();
types = new ArrayList<String>();
ids = new ArrayList<String>();
fields = new ArrayList<String[]>();
versions = new LongArrayList();
versionTypes = new ArrayList<VersionType>();
fetchSourceContexts = new ArrayList<FetchSourceContext>();
}
public int shardId() {
return this.shardId;
}
/**
* Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
* <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
* a custom value, which guarantees that the same order will be used across different requests.
*/
public MultiGetShardRequest preference(String preference) {
this.preference = preference;
return this;
}
public String preference() {
return this.preference;
}
public boolean realtime() {
return this.realtime == null ? true : this.realtime;
}
public MultiGetShardRequest realtime(Boolean realtime) {
this.realtime = realtime;
return this;
}
public boolean refresh() {
return this.refresh;
}
public MultiGetShardRequest refresh(boolean refresh) {
this.refresh = refresh;
return this;
}
public void add(int location, @Nullable String type, String id, String[] fields, long version, VersionType versionType, FetchSourceContext fetchSourceContext) {
this.locations.add(location);
this.types.add(type);
this.ids.add(id);
this.fields.add(fields);
this.versions.add(version);
this.versionTypes.add(versionType);
this.fetchSourceContexts.add(fetchSourceContext);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
locations = new IntArrayList(size);
types = new ArrayList<String>(size);
ids = new ArrayList<String>(size);
fields = new ArrayList<String[]>(size);
versions = new LongArrayList(size);
versionTypes = new ArrayList<VersionType>(size);
fetchSourceContexts = new ArrayList<FetchSourceContext>(size);
for (int i = 0; i < size; i++) {
locations.add(in.readVInt());
if (in.readBoolean()) {
types.add(in.readSharedString());
} else {
types.add(null);
}
ids.add(in.readString());
int size1 = in.readVInt();
if (size1 > 0) {
String[] requestedFields = new String[size1];
for (int j = 0; j < size1; j++) {
requestedFields[j] = in.readString();
}
fields.add(requestedFields); // no shadowing, so the field list is used directly
} else {
fields.add(null);
}
versions.add(in.readVLong());
versionTypes.add(VersionType.fromValue(in.readByte()));
fetchSourceContexts.add(FetchSourceContext.optionalReadFromStream(in));
}
preference = in.readOptionalString();
refresh = in.readBoolean();
byte realtime = in.readByte();
if (realtime == 0) {
this.realtime = false;
} else if (realtime == 1) {
this.realtime = true;
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(types.size());
for (int i = 0; i < types.size(); i++) {
out.writeVInt(locations.get(i));
if (types.get(i) == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeSharedString(types.get(i));
}
out.writeString(ids.get(i));
if (fields.get(i) == null) {
out.writeVInt(0);
} else {
out.writeVInt(fields.get(i).length);
for (String field : fields.get(i)) {
out.writeString(field);
}
}
out.writeVLong(versions.get(i));
out.writeByte(versionTypes.get(i).getValue());
FetchSourceContext fetchSourceContext = fetchSourceContexts.get(i);
FetchSourceContext.optionalWriteToStream(fetchSourceContext, out);
}
out.writeOptionalString(preference);
out.writeBoolean(refresh);
if (realtime == null) {
out.writeByte((byte) -1);
} else if (!realtime) {
out.writeByte((byte) 0);
} else {
out.writeByte((byte) 1);
}
}
} | 1no label
| src_main_java_org_elasticsearch_action_get_MultiGetShardRequest.java |
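One detail worth calling out in writeTo/readFrom above is the three-state encoding of the nullable realtime flag as a single byte (-1 for null, 0 for false, 1 for true). A stripped-down sketch of that tri-state pattern over plain Java streams follows; DataOutputStream/DataInputStream stand in for StreamOutput/StreamInput.

import java.io.*;

// Tri-state Boolean over one byte: -1 = null, 0 = false, 1 = true.
// Mirrors the realtime encoding above.
public class TriStateBooleanDemo {
    static void write(DataOutputStream out, Boolean b) throws IOException {
        out.writeByte(b == null ? -1 : (b ? 1 : 0));
    }

    static Boolean read(DataInputStream in) throws IOException {
        byte v = in.readByte();
        return v == -1 ? null : (v == 1);
    }

    public static void main(String[] args) throws IOException {
        for (Boolean b : new Boolean[]{null, Boolean.FALSE, Boolean.TRUE}) {
            ByteArrayOutputStream buf = new ByteArrayOutputStream();
            write(new DataOutputStream(buf), b);
            Boolean back = read(new DataInputStream(
                    new ByteArrayInputStream(buf.toByteArray())));
            System.out.println(b + " -> " + back);
        }
    }
}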
72 | public final class AuthenticationRequest extends CallableClientRequest {
private Credentials credentials;
private ClientPrincipal principal;
private boolean reAuth;
private boolean firstConnection;
public AuthenticationRequest() {
}
public AuthenticationRequest(Credentials credentials) {
this.credentials = credentials;
}
public AuthenticationRequest(Credentials credentials, ClientPrincipal principal) {
this.credentials = credentials;
this.principal = principal;
}
public Object call() throws Exception {
boolean authenticated = authenticate();
if (authenticated) {
return handleAuthenticated();
} else {
return handleUnauthenticated();
}
}
private boolean authenticate() {
ClientEngineImpl clientEngine = getService();
Connection connection = endpoint.getConnection();
ILogger logger = clientEngine.getLogger(getClass());
boolean authenticated;
if (credentials == null) {
authenticated = false;
logger.severe("Could not retrieve Credentials object!");
} else if (clientEngine.getSecurityContext() != null) {
authenticated = authenticate(clientEngine.getSecurityContext());
} else if (credentials instanceof UsernamePasswordCredentials) {
UsernamePasswordCredentials usernamePasswordCredentials = (UsernamePasswordCredentials) credentials;
authenticated = authenticate(usernamePasswordCredentials);
} else {
authenticated = false;
logger.severe("Hazelcast security is disabled.\nUsernamePasswordCredentials or cluster "
+ "group-name and group-password should be used for authentication!\n"
+ "Current credentials type is: " + credentials.getClass().getName());
}
logger.log((authenticated ? Level.INFO : Level.WARNING), "Received auth from " + connection
+ ", " + (authenticated ? "successfully authenticated" : "authentication failed"));
return authenticated;
}
private boolean authenticate(UsernamePasswordCredentials credentials) {
ClientEngineImpl clientEngine = getService();
GroupConfig groupConfig = clientEngine.getConfig().getGroupConfig();
String nodeGroupName = groupConfig.getName();
String nodeGroupPassword = groupConfig.getPassword();
boolean usernameMatch = nodeGroupName.equals(credentials.getUsername());
boolean passwordMatch = nodeGroupPassword.equals(credentials.getPassword());
return usernameMatch && passwordMatch;
}
private boolean authenticate(SecurityContext securityContext) {
Connection connection = endpoint.getConnection();
credentials.setEndpoint(connection.getInetAddress().getHostAddress());
try {
LoginContext lc = securityContext.createClientLoginContext(credentials);
lc.login();
endpoint.setLoginContext(lc);
return true;
} catch (LoginException e) {
ClientEngineImpl clientEngine = getService();
ILogger logger = clientEngine.getLogger(getClass());
logger.warning(e);
return false;
}
}
private Object handleUnauthenticated() {
ClientEngineImpl clientEngine = getService();
clientEngine.removeEndpoint(endpoint.getConnection());
return new AuthenticationException("Invalid credentials!");
}
private Object handleAuthenticated() {
ClientEngineImpl clientEngine = getService();
if (principal != null && reAuth) {
principal = new ClientPrincipal(principal.getUuid(), clientEngine.getLocalMember().getUuid());
reAuthLocal();
Collection<MemberImpl> members = clientEngine.getClusterService().getMemberList();
for (MemberImpl member : members) {
if (!member.localMember()) {
ClientReAuthOperation op = new ClientReAuthOperation(principal.getUuid(), firstConnection);
clientEngine.sendOperation(op, member.getAddress());
}
}
}
if (principal == null) {
principal = new ClientPrincipal(endpoint.getUuid(), clientEngine.getLocalMember().getUuid());
}
endpoint.authenticated(principal, firstConnection);
clientEngine.bind(endpoint);
return new SerializableCollection(clientEngine.toData(clientEngine.getThisAddress()), clientEngine.toData(principal));
}
private void reAuthLocal() {
ClientEngineImpl clientEngine = getService();
final Set<ClientEndpoint> endpoints = clientEngine.getEndpoints(principal.getUuid());
for (ClientEndpoint endpoint : endpoints) {
endpoint.authenticated(principal, firstConnection);
}
}
public String getServiceName() {
return ClientEngineImpl.SERVICE_NAME;
}
@Override
public int getFactoryId() {
return ClientPortableHook.ID;
}
@Override
public int getClassId() {
return ClientPortableHook.AUTH;
}
public void setReAuth(boolean reAuth) {
this.reAuth = reAuth;
}
public boolean isFirstConnection() {
return firstConnection;
}
public void setFirstConnection(boolean firstConnection) {
this.firstConnection = firstConnection;
}
@Override
public void write(PortableWriter writer) throws IOException {
writer.writePortable("credentials", (Portable) credentials);
if (principal != null) {
writer.writePortable("principal", principal);
} else {
writer.writeNullPortable("principal", ClientPortableHook.ID, ClientPortableHook.PRINCIPAL);
}
writer.writeBoolean("reAuth", reAuth);
writer.writeBoolean("firstConnection", firstConnection);
}
@Override
public void read(PortableReader reader) throws IOException {
credentials = (Credentials) reader.readPortable("credentials");
principal = reader.readPortable("principal");
reAuth = reader.readBoolean("reAuth");
firstConnection = reader.readBoolean("firstConnection");
}
@Override
public Permission getRequiredPermission() {
return null;
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_client_AuthenticationRequest.java |
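When security is disabled, the fallback path above reduces to comparing the client's credentials with the cluster's GroupConfig name/password pair. A minimal stand-alone sketch of that check follows; the class and method names are stand-ins for illustration, not the Hazelcast API.

import java.util.Objects;

// Stand-in for the UsernamePasswordCredentials fallback check above.
public class GroupAuthDemo {
    static boolean authenticate(String groupName, String groupPassword,
                                String username, String password) {
        // Both the group name and the group password must match.
        return Objects.equals(groupName, username)
                && Objects.equals(groupPassword, password);
    }

    public static void main(String[] args) {
        System.out.println(authenticate("dev", "dev-pass", "dev", "dev-pass")); // true
        System.out.println(authenticate("dev", "dev-pass", "dev", "wrong"));    // false
    }
}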
1,363 | @Deprecated
public interface CodeTypeService {
public List<CodeType> findAllCodeTypes();
public CodeType save(CodeType codeType);
public List<CodeType> lookupCodeTypeByKey(String key);
public void deleteCodeType(CodeType codeTypeId);
public CodeType lookupCodeTypeById(Long codeTypeId);
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_util_service_CodeTypeService.java |
1,810 | class ConstructorBindingImpl<T> extends BindingImpl<T> implements ConstructorBinding<T> {
private final Factory<T> factory;
private ConstructorBindingImpl(Injector injector, Key<T> key, Object source,
InternalFactory<? extends T> scopedFactory, Scoping scoping, Factory<T> factory) {
super(injector, key, source, scopedFactory, scoping);
this.factory = factory;
}
static <T> ConstructorBindingImpl<T> create(
InjectorImpl injector, Key<T> key, Object source, Scoping scoping) {
Factory<T> factoryFactory = new Factory<T>();
InternalFactory<? extends T> scopedFactory
= Scopes.scope(key, injector, factoryFactory, scoping);
return new ConstructorBindingImpl<T>(
injector, key, source, scopedFactory, scoping, factoryFactory);
}
public void initialize(InjectorImpl injector, Errors errors) throws ErrorsException {
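// Resolve (and cache) the constructor injector for the bound type; until this
// runs, the binding is "not ready" and the accessor methods below fail their
// checkState guards.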
factory.constructorInjector = injector.constructors.get(getKey().getTypeLiteral(), errors);
}
public <V> V acceptTargetVisitor(BindingTargetVisitor<? super T, V> visitor) {
checkState(factory.constructorInjector != null, "not initialized");
return visitor.visit(this);
}
public InjectionPoint getConstructor() {
checkState(factory.constructorInjector != null, "Binding is not ready");
return factory.constructorInjector.getConstructionProxy().getInjectionPoint();
}
public Set<InjectionPoint> getInjectableMembers() {
checkState(factory.constructorInjector != null, "Binding is not ready");
return factory.constructorInjector.getInjectableMembers();
}
public Set<Dependency<?>> getDependencies() {
return Dependency.forInjectionPoints(new ImmutableSet.Builder<InjectionPoint>()
.add(getConstructor())
.addAll(getInjectableMembers())
.build());
}
public void applyTo(Binder binder) {
throw new UnsupportedOperationException("This element represents a synthetic binding.");
}
@Override
public String toString() {
return new ToStringBuilder(ConstructorBinding.class)
.add("key", getKey())
.add("source", getSource())
.add("scope", getScoping())
.toString();
}
private static class Factory<T> implements InternalFactory<T> {
private ConstructorInjector<T> constructorInjector;
@SuppressWarnings("unchecked")
public T get(Errors errors, InternalContext context, Dependency<?> dependency)
throws ErrorsException {
checkState(constructorInjector != null, "Constructor not ready");
// This may not actually be safe because it could return a super type of T (if that's all the
// client needs), but it should be OK in practice thanks to the wonders of erasure.
return (T) constructorInjector.construct(errors, context, dependency.getKey().getRawType());
}
}
} | 0true
| src_main_java_org_elasticsearch_common_inject_ConstructorBindingImpl.java |
17 | private class ResponseThreadRunnable implements Runnable {
private final BlockingQueue<TextCommand> blockingQueue = new ArrayBlockingQueue<TextCommand>(200);
private final Object stopObject = new Object();
@edu.umd.cs.findbugs.annotations.SuppressWarnings("RV_RETURN_VALUE_IGNORED_BAD_PRACTICE")
public void sendResponse(TextCommand textCommand) {
blockingQueue.offer(textCommand);
}
@Override
public void run() {
while (running) {
try {
TextCommand textCommand = blockingQueue.take();
if (TextCommandConstants.TextCommandType.STOP == textCommand.getType()) {
synchronized (stopObject) {
stopObject.notify();
}
} else {
SocketTextWriter socketTextWriter = textCommand.getSocketTextWriter();
socketTextWriter.enqueue(textCommand);
}
} catch (InterruptedException e) {
return;
} catch (OutOfMemoryError e) {
OutOfMemoryErrorDispatcher.onOutOfMemory(e);
throw e;
}
}
}
@edu.umd.cs.findbugs.annotations.SuppressWarnings("RV_RETURN_VALUE_IGNORED_BAD_PRACTICE")
void stop() {
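// Wake the response thread by enqueueing a no-op STOP sentinel command, then
// wait (bounded to one second) for the thread to acknowledge via stopObject.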
running = false;
synchronized (stopObject) {
try {
blockingQueue.offer(new AbstractTextCommand(TextCommandConstants.TextCommandType.STOP) {
@Override
public boolean readFrom(ByteBuffer cb) {
return true;
}
@Override
public boolean writeTo(ByteBuffer bb) {
return true;
}
});
//noinspection WaitNotInLoop
stopObject.wait(1000);
} catch (Exception ignored) {
}
}
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_ascii_TextCommandServiceImpl.java |
1,482 | public class Hibernate3CacheKeySerializerHook
implements SerializerHook {
private static final String SKIP_INIT_MSG = "Hibernate3 not available, skipping serializer initialization";
private final Class<?> cacheKeyClass;
public Hibernate3CacheKeySerializerHook() {
Class<?> cacheKeyClass = null;
if (UnsafeHelper.UNSAFE_AVAILABLE) {
try {
cacheKeyClass = Class.forName("org.hibernate.cache.CacheKey");
} catch (Exception e) {
Logger.getLogger(Hibernate3CacheKeySerializerHook.class).finest(SKIP_INIT_MSG);
}
}
this.cacheKeyClass = cacheKeyClass;
}
@Override
public Class getSerializationType() {
return cacheKeyClass;
}
@Override
public Serializer createSerializer() {
if (cacheKeyClass != null) {
return new Hibernate3CacheKeySerializer();
}
return null;
}
@Override
public boolean isOverwritable() {
return true;
}
} | 0true
| hazelcast-hibernate_hazelcast-hibernate3_src_main_java_com_hazelcast_hibernate_serialization_Hibernate3CacheKeySerializerHook.java |
2,057 | public abstract class LockAwareOperation extends KeyBasedMapOperation implements WaitSupport {
protected LockAwareOperation(String name, Data dataKey) {
super(name, dataKey);
}
protected LockAwareOperation(String name, Data dataKey, long ttl) {
super(name, dataKey, ttl);
}
protected LockAwareOperation(String name, Data dataKey, Data dataValue, long ttl) {
super(name, dataKey, dataValue, ttl);
}
protected LockAwareOperation() {
}
public boolean shouldWait() {
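// The operation is parked on its wait-notify key (see getWaitKey below) until
// the calling thread can acquire the lock for this key.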
return !recordStore.canAcquireLock(dataKey, getCallerUuid(), getThreadId());
}
public abstract void onWaitExpire();
public final WaitNotifyKey getWaitKey() {
return new LockWaitNotifyKey(new DefaultObjectNamespace(MapService.SERVICE_NAME, name), dataKey);
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_map_operation_LockAwareOperation.java |
6,271 | public class IsTrueAssertion extends Assertion {
private static final ESLogger logger = Loggers.getLogger(IsTrueAssertion.class);
public IsTrueAssertion(String field) {
super(field, true);
}
@Override
protected void doAssert(Object actualValue, Object expectedValue) {
logger.trace("assert that [{}] has a true value", actualValue);
String errorMessage = errorMessage();
assertThat(errorMessage, actualValue, notNullValue());
String actualString = actualValue.toString();
assertThat(errorMessage, actualString, not(equalTo("")));
assertThat(errorMessage, actualString, not(equalToIgnoringCase(Boolean.FALSE.toString())));
assertThat(errorMessage, actualString, not(equalTo("0")));
}
private String errorMessage() {
return "field [" + getField() + "] doesn't have a true value";
}
} | 1no label
| src_test_java_org_elasticsearch_test_rest_section_IsTrueAssertion.java |
334 | public static enum STATUS {
OPEN, CLOSED, IMPORTING
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_db_ODatabase.java |
2,040 | Factory<Method> METHODS = new Factory<Method>() {
public Method[] getMembers(Class<?> type) {
return type.getDeclaredMethods();
}
public InjectionPoint create(TypeLiteral<?> typeLiteral, Method member, Errors errors) {
checkForMisplacedBindingAnnotations(member, errors);
return new InjectionPoint(typeLiteral, member);
}
}; | 0true
| src_main_java_org_elasticsearch_common_inject_spi_InjectionPoint.java |
227 | XPostingsHighlighter highlighter = new XPostingsHighlighter(10000) {
@Override
protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
assertThat(fields.length, equalTo(1));
assertThat(docids.length, equalTo(1));
String[][] contents = new String[1][1];
contents[0][0] = text;
return contents;
}
@Override
protected BreakIterator getBreakIterator(String field) {
return new WholeBreakIterator();
}
}; | 0true
| src_test_java_org_apache_lucene_search_postingshighlight_XPostingsHighlighterTests.java |
169 | @Service("blURLHandlerService")
public class URLHandlerServiceImpl implements URLHandlerService {
private static final Log LOG = LogFactory.getLog(URLHandlerServiceImpl.class);
private final NullURLHandler NULL_URL_HANDLER = new NullURLHandler();
@Resource(name="blURLHandlerDao")
protected URLHandlerDao urlHandlerDao;
protected Cache urlHandlerCache;
/**
* Checks the passed in URL to determine if there is a matching URLHandler.
* Returns null if no handler was found.
*
     * @param uri the request URI to match against registered handlers
     * @return the matching URLHandler, or null if none was found
*/
@Override
public URLHandler findURLHandlerByURI(String uri) {
URLHandler urlHandler = lookupHandlerFromCache(uri);
if (urlHandler instanceof NullURLHandler) {
return null;
} else {
return urlHandler;
}
}
private String buildKey(String requestUri) {
BroadleafRequestContext context = BroadleafRequestContext.getBroadleafRequestContext();
String key = requestUri;
if (context != null && context.getSandbox() != null) {
key = context.getSandbox().getId() + "_" + key;
}
return key;
}
private String buildKey(URLHandler urlHandler) {
BroadleafRequestContext context = BroadleafRequestContext.getBroadleafRequestContext();
String key = urlHandler.getIncomingURL();
        if (context != null && context.getSandbox() != null) {
key = context.getSandbox().getId() + "_" + key;
}
return key;
}
private URLHandler lookupHandlerFromCache(String requestURI) {
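// Misses are cached too: findURLHandlerByURIInternal returns a NullURLHandler
// marker instead of null, so repeated lookups of unknown URIs skip the database.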
        String key = buildKey(requestURI);
URLHandler handler = getUrlHandlerFromCache(key);
if (handler == null) {
handler = findURLHandlerByURIInternal(requestURI);
getUrlHandlerCache().put(new Element(key, handler));
}
if (handler == null || handler instanceof NullURLHandler) {
return null;
} else {
return handler;
}
}
private Cache getUrlHandlerCache() {
if (urlHandlerCache == null) {
urlHandlerCache = CacheManager.getInstance().getCache("cmsUrlHandlerCache");
}
return urlHandlerCache;
}
private URLHandler getUrlHandlerFromCache(String key) {
Element cacheElement = getUrlHandlerCache().get(key);
if (cacheElement != null) {
return (URLHandler) cacheElement.getValue();
}
return null;
}
/**
* Call to evict an item from the cache.
     * @param urlhandler the handler whose cached entry should be evicted
*/
public void removeURLHandlerFromCache(URLHandler urlhandler) {
getUrlHandlerCache().remove(buildKey(urlhandler));
}
protected URLHandler findURLHandlerByURIInternal(String uri) {
URLHandler urlHandler = urlHandlerDao.findURLHandlerByURI(uri);
if (urlHandler != null) {
return urlHandler;
} else {
return NULL_URL_HANDLER;
}
}
@Override
public List<URLHandler> findAllURLHandlers() {
return urlHandlerDao.findAllURLHandlers();
}
@Override
@Transactional("blTransactionManager")
public URLHandler saveURLHandler(URLHandler handler) {
return urlHandlerDao.saveURLHandler(handler);
}
} | 1no label
| admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_url_service_URLHandlerServiceImpl.java |
570 | @RunWith(HazelcastSerialClassRunner.class)
@Category(NightlyTest.class)
//TODO:
public class ClusterJoinTest {
@Before
@After
public void killAllHazelcastInstances() throws IOException {
Hazelcast.shutdownAll();
}
@Test(timeout = 120000)
@Category(ProblematicTest.class)
public void testTcpIp1() throws Exception {
Config c = new Config();
c.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
c.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true);
c.getNetworkConfig().getInterfaces().setEnabled(true);
c.getNetworkConfig().getJoin().getTcpIpConfig().addMember("127.0.0.1");
c.getNetworkConfig().getInterfaces().addInterface("127.0.0.1");
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c);
assertEquals(1, h1.getCluster().getMembers().size());
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c);
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
h1.shutdown();
h1 = Hazelcast.newHazelcastInstance(c);
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
}
@Test(timeout = 120000)
@Category(ProblematicTest.class)
public void testTcpIp2() throws Exception {
Config c = new Config();
c.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
c.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true);
c.getNetworkConfig().getInterfaces().setEnabled(true);
c.getNetworkConfig().getJoin().getTcpIpConfig()
.addMember("127.0.0.1:5701")
.addMember("127.0.0.1:5702");
c.getNetworkConfig().getInterfaces().addInterface("127.0.0.1");
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c);
assertEquals(1, h1.getCluster().getMembers().size());
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c);
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
h1.shutdown();
h1 = Hazelcast.newHazelcastInstance(c);
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
}
@Test(timeout = 120000)
@Category(ProblematicTest.class)
public void testTcpIp3() throws Exception {
Config c = new Config();
c.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
c.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true);
c.getNetworkConfig().getInterfaces().setEnabled(true);
c.getNetworkConfig().getJoin().getTcpIpConfig()
.addMember("127.0.0.1:5701")
.addMember("127.0.0.1:5702");
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c);
assertEquals(1, h1.getCluster().getMembers().size());
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c);
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
h1.getLifecycleService().shutdown();
h1 = Hazelcast.newHazelcastInstance(c);
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
}
@Test(timeout = 120000)
@Category(ProblematicTest.class)
public void testMulticast() throws Exception {
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(new Config());
assertEquals(1, h1.getCluster().getMembers().size());
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(new Config());
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
}
@Test(timeout = 120000)
@Category(ProblematicTest.class)
public void testTcpIpWithDifferentBuildNumber() throws Exception {
System.setProperty("hazelcast.build", "1");
try {
Config c = new Config();
c.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
c.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true);
c.getNetworkConfig().getInterfaces().setEnabled(true);
c.getNetworkConfig().getJoin().getTcpIpConfig().addMember("127.0.0.1:5701");
c.getNetworkConfig().getInterfaces().addInterface("127.0.0.1");
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c);
assertEquals(1, h1.getCluster().getMembers().size());
System.setProperty("hazelcast.build", "2");
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c);
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
} finally {
System.clearProperty("hazelcast.build");
}
}
@Test(timeout = 120000)
@Category(ProblematicTest.class)
public void testMulticastWithDifferentBuildNumber() throws Exception {
System.setProperty("hazelcast.build", "1");
try {
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(new Config());
assertEquals(1, h1.getCluster().getMembers().size());
System.setProperty("hazelcast.build", "2");
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(new Config());
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
} finally {
System.clearProperty("hazelcast.build");
}
}
/**
* Test for the issue 184
* <p/>
     * Hazelcast.newHazelcastInstance(new Config()) doesn't join the cluster.
* new Config() should be enough as the default config.
*/
@Test(timeout = 240000)
@Category(ProblematicTest.class)
public void testDefaultConfigCluster() {
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(new Config());
assertEquals(1, h1.getCluster().getMembers().size());
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(new Config());
assertEquals(2, h1.getCluster().getMembers().size());
assertEquals(2, h2.getCluster().getMembers().size());
}
@Test
public void unresolvableHostName() {
Config config = new Config();
config.getGroupConfig().setName("abc");
config.getGroupConfig().setPassword("def");
JoinConfig join = config.getNetworkConfig().getJoin();
join.getMulticastConfig().setEnabled(false);
join.getTcpIpConfig().setEnabled(true);
join.getTcpIpConfig().setMembers(Arrays.asList(new String[]{"localhost", "nonexistinghost"}));
HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
assertEquals(1, hz.getCluster().getMembers().size());
}
@Test
public void testNewInstanceByName() {
Config config = new Config();
config.setInstanceName("test");
HazelcastInstance hc1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance hc2 = Hazelcast.getHazelcastInstanceByName("test");
HazelcastInstance hc3 = Hazelcast.getHazelcastInstanceByName(hc1.getName());
assertTrue(hc1 == hc2);
assertTrue(hc1 == hc3);
}
@Test(expected = DuplicateInstanceNameException.class)
public void testNewInstanceByNameFail() {
Config config = new Config();
config.setInstanceName("test");
Hazelcast.newHazelcastInstance(config);
Hazelcast.newHazelcastInstance(config);
}
@Test
public void testMulticastJoinWithIncompatibleGroups() throws Exception {
Config config1 = new Config();
config1.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(true);
config1.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(false);
config1.getGroupConfig().setName("group1");
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config1);
Config config2 = new Config();
config2.getGroupConfig().setName("group2");
config2.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(true);
config2.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(false);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config2);
final int s1 = h1.getCluster().getMembers().size();
final int s2 = h2.getCluster().getMembers().size();
assertEquals(1, s1);
assertEquals(1, s2);
}
@Test
public void testTcpIpJoinWithIncompatibleGroups() throws Exception {
Config config1 = new Config();
config1.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
config1.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true).addMember("127.0.0.1");
config1.getGroupConfig().setName("group1");
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config1);
Config config2 = new Config();
config2.getGroupConfig().setName("group2");
config2.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
config2.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true).addMember("127.0.0.1");
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config2);
final int s1 = h1.getCluster().getMembers().size();
final int s2 = h2.getCluster().getMembers().size();
assertEquals(1, s1);
assertEquals(1, s2);
}
@Test
public void testMulticastJoinWithIncompatiblePasswords() throws Exception {
Config config1 = new Config();
config1.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(true);
config1.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(false);
config1.getGroupConfig().setPassword("pass1");
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config1);
Config config2 = new Config();
config2.getGroupConfig().setPassword("pass2");
config2.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(true);
config2.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(false);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config2);
final int s1 = h1.getCluster().getMembers().size();
final int s2 = h2.getCluster().getMembers().size();
assertEquals(1, s1);
assertEquals(1, s2);
}
@Test
public void testTcpIpJoinWithIncompatiblePasswords() throws Exception {
Config config1 = new Config();
config1.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
config1.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true).addMember("127.0.0.1");
config1.getGroupConfig().setPassword("pass1");
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config1);
Config config2 = new Config();
config2.getGroupConfig().setPassword("pass2");
config2.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
config2.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true).addMember("127.0.0.1");
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config2);
final int s1 = h1.getCluster().getMembers().size();
final int s2 = h2.getCluster().getMembers().size();
assertEquals(1, s1);
assertEquals(1, s2);
}
@Test
@Category(ProblematicTest.class)
public void testJoinWithIncompatibleJoiners() throws Exception {
Config config1 = new Config();
config1.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(false);
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config1);
Config config2 = new Config();
config2.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
config2.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true).addMember("127.0.0.1");
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config2);
final int s1 = h1.getCluster().getMembers().size();
final int s2 = h2.getCluster().getMembers().size();
assertEquals(1, s1);
assertEquals(1, s2);
}
@Test
@Category(ProblematicTest.class)
public void testMulticastJoinWithIncompatiblePartitionGroups() throws Exception {
Config config1 = new Config();
config1.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(true);
config1.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(false);
config1.getPartitionGroupConfig().setEnabled(true).setGroupType(PartitionGroupConfig.MemberGroupType.HOST_AWARE);
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config1);
Config config2 = new Config();
config2.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(true);
config2.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(false);
config2.getPartitionGroupConfig().setEnabled(false);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config2);
final int s1 = h1.getCluster().getMembers().size();
final int s2 = h2.getCluster().getMembers().size();
assertEquals(1, s1);
assertEquals(1, s2);
}
@Test
public void testTcpIpJoinWithIncompatiblePartitionGroups() throws Exception {
Config config1 = new Config();
config1.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
config1.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true).addMember("127.0.0.1");
config1.getPartitionGroupConfig().setEnabled(true).setGroupType(PartitionGroupConfig.MemberGroupType.CUSTOM);
HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config1);
Config config2 = new Config();
config2.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
config2.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true).addMember("127.0.0.1");
config2.getPartitionGroupConfig().setEnabled(true).setGroupType(PartitionGroupConfig.MemberGroupType.HOST_AWARE);
HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config2);
final int s1 = h1.getCluster().getMembers().size();
final int s2 = h2.getCluster().getMembers().size();
assertEquals(1, s1);
assertEquals(1, s2);
}
@Test
public void testMulticastJoinDuringSplitBrainHandlerRunning() throws InterruptedException {
Properties props = new Properties();
props.setProperty(GroupProperties.PROP_WAIT_SECONDS_BEFORE_JOIN, "5");
props.setProperty(GroupProperties.PROP_MERGE_FIRST_RUN_DELAY_SECONDS, "0");
props.setProperty(GroupProperties.PROP_MERGE_NEXT_RUN_DELAY_SECONDS, "0");
final CountDownLatch latch = new CountDownLatch(1);
Config config1 = new Config();
        config1.getNetworkConfig().setPort(5901); // bigger port to make sure the address.hashCode() check passes during merge!
config1.setProperties(props);
config1.addListenerConfig(new ListenerConfig(new LifecycleListener() {
public void stateChanged(final LifecycleEvent event) {
switch (event.getState()) {
case MERGING:
case MERGED:
latch.countDown();
default:
break;
}
}
}));
Hazelcast.newHazelcastInstance(config1);
Thread.sleep(5000);
Config config2 = new Config();
        config2.getNetworkConfig().setPort(5701);
config2.setProperties(props);
Hazelcast.newHazelcastInstance(config2);
assertFalse("Latch should not be countdown!", latch.await(3, TimeUnit.SECONDS));
}
} | 0true
| hazelcast_src_test_java_com_hazelcast_cluster_ClusterJoinTest.java |
1,431 | public class OChannelText extends OChannel {
public OChannelText(final Socket iSocket, final OContextConfiguration iConfig) throws IOException {
super(iSocket, iConfig);
}
/**
*
* @param iBuffer
* byte[] to fill
* @param iStartingPosition
   *          Offset at which to start filling the buffer
   * @param iContentLength
   *          Length of the content expected to be read
   * @return the total number of bytes read
* @throws IOException
*/
public int read(final byte[] iBuffer, final int iStartingPosition, final int iContentLength) throws IOException {
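    // InputStream.read may deliver fewer bytes than requested, so keep reading
    // until the full iContentLength has been consumed.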
    int pos = iStartingPosition;
    for (int required = iContentLength; required > 0; ) {
      final int read = inStream.read(iBuffer, pos, required);
      if (read < 0)
        throw new IOException("Unexpected end of stream while reading " + iContentLength + " bytes");
      pos += read;
      required -= read;
    }
    updateMetricReceivedBytes(pos - iStartingPosition);
    return pos - iStartingPosition;
}
public byte read() throws IOException {
updateMetricReceivedBytes(1);
return (byte) inStream.read();
}
public byte[] readBytes(final int iTotal) throws IOException {
    final byte[] buffer = new byte[iTotal];
    // a single InputStream.read() may return fewer than iTotal bytes, so
    // delegate to read(), which loops until the buffer is completely filled
    // (and updates the received-bytes metric itself)
    read(buffer, 0, iTotal);
    return buffer;
}
public OChannelText writeBytes(final byte[] iContent) throws IOException {
outStream.write(iContent);
updateMetricTransmittedBytes(iContent.length);
return this;
}
} | 0true
| enterprise_src_main_java_com_orientechnologies_orient_enterprise_channel_text_OChannelText.java |
868 | public interface OProcessor {
public Object process(OProcessorBlock iParent, Object iInput, OCommandContext iContext, final ODocument iOutput,
final boolean iReadOnly);
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_processor_OProcessor.java |
2,962 | public class TurkishAnalyzerProvider extends AbstractIndexAnalyzerProvider<TurkishAnalyzer> {
private final TurkishAnalyzer analyzer;
@Inject
public TurkishAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
super(index, indexSettings, name, settings);
analyzer = new TurkishAnalyzer(version,
Analysis.parseStopWords(env, settings, TurkishAnalyzer.getDefaultStopSet(), version),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
}
@Override
public TurkishAnalyzer get() {
return this.analyzer;
}
} | 0true
| src_main_java_org_elasticsearch_index_analysis_TurkishAnalyzerProvider.java |
1,055 | public abstract class OCommandExecutorSQLSetAware extends OCommandExecutorSQLAbstract {
protected static final String KEYWORD_SET = "SET";
protected static final String KEYWORD_CONTENT = "CONTENT";
protected ODocument content = null;
protected int parameterCounter = 0;
protected void parseContent() {
if (!parserIsEnded() && !parserGetLastWord().equals(KEYWORD_WHERE)) {
final String contentAsString = parserRequiredWord(false, "Content expected").trim();
content = new ODocument().fromJSON(contentAsString);
parserSkipWhiteSpaces();
}
if (content == null)
throwSyntaxErrorException("Content not provided. Example: CONTENT { \"name\": \"Jay\" }");
}
protected void parseSetFields(final Map<String, Object> fields) {
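    // Parses comma-separated "<field> = <value>" pairs until the statement ends
    // or a WHERE keyword is found (in which case the parser is rewound one word).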
String fieldName;
String fieldValue;
while (!parserIsEnded() && (fields.size() == 0 || parserGetLastSeparator() == ',' || parserGetCurrentChar() == ',')) {
fieldName = parserRequiredWord(false, "Field name expected");
if (fieldName.equalsIgnoreCase(KEYWORD_WHERE)) {
parserGoBack();
break;
}
parserNextChars(false, true, "=");
fieldValue = parserRequiredWord(false, "Value expected", " =><,\r\n");
// INSERT TRANSFORMED FIELD VALUE
fields.put(fieldName, getFieldValueCountingParameters(fieldValue));
parserSkipWhiteSpaces();
}
if (fields.size() == 0)
throwParsingException("Entries to set <field> = <value> are missed. Example: name = 'Bill', salary = 300.2");
}
protected Object getFieldValueCountingParameters(String fieldValue) {
if (fieldValue.trim().equals("?"))
parameterCounter++;
return OSQLHelper.parseValue(this, fieldValue, context);
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_sql_OCommandExecutorSQLSetAware.java |
1,391 | Collection<IndexWarmersMetaData.Entry> filteredWarmers = Collections2.filter(indexWarmersMetaData.entries(), new Predicate<IndexWarmersMetaData.Entry>() {
@Override
public boolean apply(IndexWarmersMetaData.Entry warmer) {
if (warmers.length != 0 && types.length != 0) {
return Regex.simpleMatch(warmers, warmer.name()) && Regex.simpleMatch(types, warmer.types());
} else if (warmers.length != 0) {
return Regex.simpleMatch(warmers, warmer.name());
} else if (types.length != 0) {
return Regex.simpleMatch(types, warmer.types());
} else {
return true;
}
}
}); | 0true
| src_main_java_org_elasticsearch_cluster_metadata_MetaData.java |
1,328 | @ClusterScope(scope=Scope.TEST, numNodes=0)
public class SimpleDataNodesTests extends ElasticsearchIntegrationTest {
@Test
public void testDataNodes() throws Exception {
cluster().startNode(settingsBuilder().put("node.data", false).build());
client().admin().indices().create(createIndexRequest("test")).actionGet();
try {
client().index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test")).timeout(timeValueSeconds(1))).actionGet();
fail("no allocation should happen");
} catch (UnavailableShardsException e) {
// all is well
}
cluster().startNode(settingsBuilder().put("node.data", false).build());
assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").setLocal(true).execute().actionGet().isTimedOut(), equalTo(false));
// still no shard should be allocated
try {
client().index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test")).timeout(timeValueSeconds(1))).actionGet();
fail("no allocation should happen");
} catch (UnavailableShardsException e) {
// all is well
}
        // now, start a data node, and see that shards get allocated to it
cluster().startNode(settingsBuilder().put("node.data", true).build());
assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setLocal(true).execute().actionGet().isTimedOut(), equalTo(false));
IndexResponse indexResponse = client().index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();
assertThat(indexResponse.getId(), equalTo("1"));
assertThat(indexResponse.getType(), equalTo("type1"));
}
private String source(String id, String nameValue) {
return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }";
}
} | 0true
| src_test_java_org_elasticsearch_cluster_SimpleDataNodesTests.java |
418 | runConflictingTx(new TxJob() {
@Override
public void run(IndexTransaction tx) {
tx.add(defStore, defDoc, TEXT, revisedText, false);
}
}, new TxJob() { | 0true
| titan-test_src_main_java_com_thinkaurelius_titan_diskstorage_indexing_IndexProviderTest.java |
1,634 | public static final Validator TIME = new Validator() {
@Override
public String validate(String setting, String value) {
try {
if (TimeValue.parseTimeValue(value, null) == null) {
return "cannot parse value [" + value + "] as time";
}
} catch (ElasticsearchParseException ex) {
return "cannot parse value [" + value + "] as time";
}
return null;
}
}; | 0true
| src_main_java_org_elasticsearch_cluster_settings_Validator.java |
145 | public class AtomicDouble extends Number implements java.io.Serializable {
private static final long serialVersionUID = -8405198993435143622L;
private transient volatile long value;
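    // The double is stored as its raw long bit pattern so that atomic updates
    // can be done with Unsafe.compareAndSwapLong (there is no CAS for double).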
/**
* Creates a new {@code AtomicDouble} with the given initial value.
*
* @param initialValue the initial value
*/
public AtomicDouble(double initialValue) {
value = doubleToRawLongBits(initialValue);
}
/**
* Creates a new {@code AtomicDouble} with initial value {@code 0.0}.
*/
public AtomicDouble() {
// assert doubleToRawLongBits(0.0) == 0L;
}
/**
* Gets the current value.
*
* @return the current value
*/
public final double get() {
return longBitsToDouble(value);
}
/**
* Sets to the given value.
*
* @param newValue the new value
*/
public final void set(double newValue) {
long next = doubleToRawLongBits(newValue);
value = next;
}
/**
* Eventually sets to the given value.
*
* @param newValue the new value
*/
public final void lazySet(double newValue) {
long next = doubleToRawLongBits(newValue);
unsafe.putOrderedLong(this, valueOffset, next);
}
/**
* Atomically sets to the given value and returns the old value.
*
* @param newValue the new value
* @return the previous value
*/
public final double getAndSet(double newValue) {
long next = doubleToRawLongBits(newValue);
while (true) {
long current = value;
if (unsafe.compareAndSwapLong(this, valueOffset, current, next))
return longBitsToDouble(current);
}
}
/**
* Atomically sets the value to the given updated value
* if the current value is <a href="#bitEquals">bitwise equal</a>
* to the expected value.
*
* @param expect the expected value
* @param update the new value
* @return {@code true} if successful. False return indicates that
* the actual value was not bitwise equal to the expected value.
*/
public final boolean compareAndSet(double expect, double update) {
return unsafe.compareAndSwapLong(this, valueOffset,
doubleToRawLongBits(expect),
doubleToRawLongBits(update));
}
/**
* Atomically sets the value to the given updated value
* if the current value is <a href="#bitEquals">bitwise equal</a>
* to the expected value.
*
* <p><a
* href="http://download.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/package-summary.html#Spurious">
* May fail spuriously and does not provide ordering guarantees</a>,
* so is only rarely an appropriate alternative to {@code compareAndSet}.
*
* @param expect the expected value
* @param update the new value
* @return {@code true} if successful
*/
public final boolean weakCompareAndSet(double expect, double update) {
return compareAndSet(expect, update);
}
/**
* Atomically adds the given value to the current value.
*
* @param delta the value to add
* @return the previous value
*/
public final double getAndAdd(double delta) {
while (true) {
long current = value;
double currentVal = longBitsToDouble(current);
double nextVal = currentVal + delta;
long next = doubleToRawLongBits(nextVal);
if (unsafe.compareAndSwapLong(this, valueOffset, current, next))
return currentVal;
}
}
/**
* Atomically adds the given value to the current value.
*
* @param delta the value to add
* @return the updated value
*/
public final double addAndGet(double delta) {
while (true) {
long current = value;
double currentVal = longBitsToDouble(current);
double nextVal = currentVal + delta;
long next = doubleToRawLongBits(nextVal);
if (unsafe.compareAndSwapLong(this, valueOffset, current, next))
return nextVal;
}
}
/**
* Returns the String representation of the current value.
* @return the String representation of the current value
*/
public String toString() {
return Double.toString(get());
}
/**
* Returns the value of this {@code AtomicDouble} as an {@code int}
* after a narrowing primitive conversion.
*/
public int intValue() {
return (int) get();
}
/**
* Returns the value of this {@code AtomicDouble} as a {@code long}
* after a narrowing primitive conversion.
*/
public long longValue() {
return (long) get();
}
/**
* Returns the value of this {@code AtomicDouble} as a {@code float}
* after a narrowing primitive conversion.
*/
public float floatValue() {
return (float) get();
}
/**
* Returns the value of this {@code AtomicDouble} as a {@code double}.
*/
public double doubleValue() {
return get();
}
/**
* Saves the state to a stream (that is, serializes it).
*
* @param s the stream
* @throws java.io.IOException if an I/O error occurs
* @serialData The current value is emitted (a {@code double}).
*/
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
s.defaultWriteObject();
s.writeDouble(get());
}
/**
* Reconstitutes the instance from a stream (that is, deserializes it).
* @param s the stream
* @throws ClassNotFoundException if the class of a serialized object
* could not be found
* @throws java.io.IOException if an I/O error occurs
*/
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
set(s.readDouble());
}
// Unsafe mechanics
private static final sun.misc.Unsafe unsafe = getUnsafe();
private static final long valueOffset;
static {
try {
valueOffset = unsafe.objectFieldOffset
(AtomicDouble.class.getDeclaredField("value"));
} catch (Exception ex) { throw new Error(ex); }
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
private static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException tryReflectionInstead) {}
try {
return java.security.AccessController.doPrivileged
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
} | 0true
| src_main_java_jsr166e_extra_AtomicDouble.java |
801 | public static class CriteriaOfferXrefPK implements Serializable {
/** The Constant serialVersionUID. */
private static final long serialVersionUID = 1L;
@ManyToOne(targetEntity = OfferImpl.class, optional=false)
@JoinColumn(name = "OFFER_ID")
protected Offer offer = new OfferImpl();
@ManyToOne(targetEntity = OfferItemCriteriaImpl.class, optional=false)
@JoinColumn(name = "OFFER_ITEM_CRITERIA_ID")
protected OfferItemCriteria offerCriteria = new OfferItemCriteriaImpl();
public Offer getOffer() {
return offer;
}
public void setOffer(Offer offer) {
this.offer = offer;
}
public OfferItemCriteria getOfferCriteria() {
return offerCriteria;
}
public void setOfferCriteria(OfferItemCriteria offerCriteria) {
this.offerCriteria = offerCriteria;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((offer == null) ? 0 : offer.hashCode());
result = prime * result + ((offerCriteria == null) ? 0 : offerCriteria.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
CriteriaOfferXrefPK other = (CriteriaOfferXrefPK) obj;
if (offer == null) {
if (other.offer != null)
return false;
} else if (!offer.equals(other.offer))
return false;
if (offerCriteria == null) {
if (other.offerCriteria != null)
return false;
} else if (!offerCriteria.equals(other.offerCriteria))
return false;
return true;
}
} | 1no label
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_domain_CriteriaOfferXref.java |
701 | constructors[TXN_LIST_ADD] = new ConstructorFunction<Integer, Portable>() {
public Portable createNew(Integer arg) {
return new TxnListAddRequest();
}
}; | 0true
| hazelcast_src_main_java_com_hazelcast_collection_CollectionPortableHook.java |
710 | .newSingleThreadScheduledExecutor(new ThreadFactory() {
@Override
public Thread newThread(Runnable r) {
Thread thread = new Thread(r);
thread.setDaemon(true);
thread.setName("Write Cache Flush Task");
return thread;
}
}); | 0true
| core_src_main_java_com_orientechnologies_orient_core_index_hashindex_local_cache_OWOWCache.java |
764 | @Test(enabled = false)
public class OSBTreeBonsaiWAL extends OSBTreeBonsaiTest {
private String buildDirectory;
private String actualStorageDir;
private String expectedStorageDir;
private OWriteAheadLog writeAheadLog;
private ODiskCache actualDiskCache;
private ODiskCache expectedDiskCache;
private OLocalPaginatedStorage actualStorage;
private OSBTreeBonsai<Integer, OIdentifiable> expectedSBTree;
@BeforeClass
@Override
public void beforeClass() {
}
@AfterClass
@Override
public void afterClass() {
}
@BeforeMethod
public void beforeMethod() throws IOException {
buildDirectory = System.getProperty("buildDirectory", ".");
buildDirectory += "/sbtreeWithWALTest";
createExpectedSBTree();
createActualSBTree();
}
@AfterMethod
@Override
public void afterMethod() throws Exception {
sbTree.delete();
expectedSBTree.delete();
actualDiskCache.delete();
expectedDiskCache.delete();
writeAheadLog.delete();
Assert.assertTrue(new File(actualStorageDir).delete());
Assert.assertTrue(new File(expectedStorageDir).delete());
Assert.assertTrue(new File(buildDirectory).delete());
}
private void createActualSBTree() throws IOException {
actualStorage = mock(OLocalPaginatedStorage.class);
OStorageConfiguration storageConfiguration = mock(OStorageConfiguration.class);
storageConfiguration.clusters = new ArrayList<OStorageClusterConfiguration>();
storageConfiguration.fileTemplate = new OStorageSegmentConfiguration();
actualStorageDir = buildDirectory + "/sbtreeWithWALTestActual";
when(actualStorage.getStoragePath()).thenReturn(actualStorageDir);
    when(actualStorage.getName()).thenReturn("sbtreeWithWALTestActual");
File buildDir = new File(buildDirectory);
if (!buildDir.exists())
buildDir.mkdirs();
File actualStorageDirFile = new File(actualStorageDir);
if (!actualStorageDirFile.exists())
actualStorageDirFile.mkdirs();
writeAheadLog = new OWriteAheadLog(6000, -1, 10 * 1024L * OWALPage.PAGE_SIZE, 100L * 1024 * 1024 * 1024, actualStorage);
actualDiskCache = new OReadWriteDiskCache(400L * 1024 * 1024 * 1024, 1648L * 1024 * 1024,
OGlobalConfiguration.DISK_CACHE_PAGE_SIZE.getValueAsInteger() * 1024, 1000000, 100, actualStorage, null, false, false);
OStorageVariableParser variableParser = new OStorageVariableParser(actualStorageDir);
when(actualStorage.getStorageTransaction()).thenReturn(null);
when(actualStorage.getDiskCache()).thenReturn(actualDiskCache);
when(actualStorage.getWALInstance()).thenReturn(writeAheadLog);
when(actualStorage.getVariableParser()).thenReturn(variableParser);
when(actualStorage.getConfiguration()).thenReturn(storageConfiguration);
when(actualStorage.getMode()).thenReturn("rw");
when(storageConfiguration.getDirectory()).thenReturn(actualStorageDir);
sbTree = new OSBTreeBonsai<Integer, OIdentifiable>(".sbt", 1, false);
sbTree.create("actualSBTree", OIntegerSerializer.INSTANCE, OLinkSerializer.INSTANCE, actualStorage);
}
private void createExpectedSBTree() {
final OLocalPaginatedStorage expectedStorage = mock(OLocalPaginatedStorage.class);
OStorageConfiguration storageConfiguration = mock(OStorageConfiguration.class);
storageConfiguration.clusters = new ArrayList<OStorageClusterConfiguration>();
storageConfiguration.fileTemplate = new OStorageSegmentConfiguration();
expectedStorageDir = buildDirectory + "/sbtreeWithWALTestExpected";
when(expectedStorage.getStoragePath()).thenReturn(expectedStorageDir);
    when(expectedStorage.getName()).thenReturn("sbtreeWithWALTestExpected");
File buildDir = new File(buildDirectory);
if (!buildDir.exists())
buildDir.mkdirs();
File expectedStorageDirFile = new File(expectedStorageDir);
if (!expectedStorageDirFile.exists())
expectedStorageDirFile.mkdirs();
expectedDiskCache = new OReadWriteDiskCache(400L * 1024 * 1024 * 1024, 1648L * 1024 * 1024,
OGlobalConfiguration.DISK_CACHE_PAGE_SIZE.getValueAsInteger() * 1024, 1000000, 100, expectedStorage, null, false, false);
OStorageVariableParser variableParser = new OStorageVariableParser(expectedStorageDir);
when(expectedStorage.getStorageTransaction()).thenReturn(null);
when(expectedStorage.getDiskCache()).thenReturn(expectedDiskCache);
when(expectedStorage.getWALInstance()).thenReturn(null);
when(expectedStorage.getVariableParser()).thenReturn(variableParser);
when(expectedStorage.getConfiguration()).thenReturn(storageConfiguration);
when(expectedStorage.getMode()).thenReturn("rw");
when(storageConfiguration.getDirectory()).thenReturn(expectedStorageDir);
expectedSBTree = new OSBTreeBonsai<Integer, OIdentifiable>(".sbt", 1, false);
expectedSBTree.create("expectedSBTree", OIntegerSerializer.INSTANCE, OLinkSerializer.INSTANCE, expectedStorage);
}
@Override
public void testKeyPut() throws Exception {
super.testKeyPut();
assertFileRestoreFromWAL();
}
@Override
public void testKeyPutRandomUniform() throws Exception {
super.testKeyPutRandomUniform();
assertFileRestoreFromWAL();
}
@Override
public void testKeyPutRandomGaussian() throws Exception {
super.testKeyPutRandomGaussian();
assertFileRestoreFromWAL();
}
@Override
public void testKeyDeleteRandomUniform() throws Exception {
super.testKeyDeleteRandomUniform();
assertFileRestoreFromWAL();
}
@Override
public void testKeyDeleteRandomGaussian() throws Exception {
super.testKeyDeleteRandomGaussian();
assertFileRestoreFromWAL();
}
@Override
public void testKeyDelete() throws Exception {
super.testKeyDelete();
assertFileRestoreFromWAL();
}
@Override
public void testKeyAddDelete() throws Exception {
super.testKeyAddDelete();
assertFileRestoreFromWAL();
}
@Override
public void testAddKeyValuesInTwoBucketsAndMakeFirstEmpty() throws Exception {
super.testAddKeyValuesInTwoBucketsAndMakeFirstEmpty();
assertFileRestoreFromWAL();
}
@Override
public void testAddKeyValuesInTwoBucketsAndMakeLastEmpty() throws Exception {
super.testAddKeyValuesInTwoBucketsAndMakeLastEmpty();
assertFileRestoreFromWAL();
}
@Override
public void testAddKeyValuesAndRemoveFirstMiddleAndLastPages() throws Exception {
super.testAddKeyValuesAndRemoveFirstMiddleAndLastPages();
assertFileRestoreFromWAL();
}
@Test(enabled = false)
@Override
public void testValuesMajor() {
super.testValuesMajor();
}
@Test(enabled = false)
@Override
public void testValuesMinor() {
super.testValuesMinor();
}
@Test(enabled = false)
@Override
public void testValuesBetween() {
super.testValuesBetween();
}
private void assertFileRestoreFromWAL() throws IOException {
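    // Close both trees, replay the write-ahead log into the "expected" storage's
    // disk cache, and then verify the two tree files match byte for byte.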
sbTree.close();
writeAheadLog.close();
expectedSBTree.close();
actualDiskCache.clear();
restoreDataFromWAL();
expectedDiskCache.clear();
assertFileContentIsTheSame(expectedSBTree.getName(), sbTree.getName());
}
private void restoreDataFromWAL() throws IOException {
OWriteAheadLog log = new OWriteAheadLog(4, -1, 10 * 1024L * OWALPage.PAGE_SIZE, 100L * 1024 * 1024 * 1024, actualStorage);
OLogSequenceNumber lsn = log.begin();
List<OWALRecord> atomicUnit = new ArrayList<OWALRecord>();
boolean atomicChangeIsProcessed = false;
while (lsn != null) {
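      // Records are accumulated per atomic unit; once the matching end record
      // is read, every page update in the unit is applied to the expected cache.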
OWALRecord walRecord = log.read(lsn);
atomicUnit.add(walRecord);
if (!atomicChangeIsProcessed) {
Assert.assertTrue(walRecord instanceof OAtomicUnitStartRecord);
atomicChangeIsProcessed = true;
} else if (walRecord instanceof OAtomicUnitEndRecord) {
atomicChangeIsProcessed = false;
for (OWALRecord restoreRecord : atomicUnit) {
if (restoreRecord instanceof OAtomicUnitStartRecord || restoreRecord instanceof OAtomicUnitEndRecord)
continue;
final OUpdatePageRecord updatePageRecord = (OUpdatePageRecord) restoreRecord;
final long fileId = updatePageRecord.getFileId();
final long pageIndex = updatePageRecord.getPageIndex();
if (!expectedDiskCache.isOpen(fileId))
expectedDiskCache.openFile(fileId);
final OCacheEntry cacheEntry = expectedDiskCache.load(fileId, pageIndex, true);
final OCachePointer cachePointer = cacheEntry.getCachePointer();
cachePointer.acquireExclusiveLock();
try {
ODurablePage durablePage = new ODurablePage(cachePointer.getDataPointer(), ODurablePage.TrackMode.NONE);
durablePage.restoreChanges(updatePageRecord.getChanges());
durablePage.setLsn(updatePageRecord.getLsn());
cacheEntry.markDirty();
} finally {
cachePointer.releaseExclusiveLock();
expectedDiskCache.release(cacheEntry);
}
}
atomicUnit.clear();
} else {
Assert.assertTrue(walRecord instanceof OUpdatePageRecord);
}
lsn = log.next(lsn);
}
Assert.assertTrue(atomicUnit.isEmpty());
log.close();
}
private void assertFileContentIsTheSame(String expectedBTree, String actualBTree) throws IOException {
File expectedFile = new File(expectedStorageDir, expectedBTree + ".sbt");
RandomAccessFile fileOne = new RandomAccessFile(expectedFile, "r");
RandomAccessFile fileTwo = new RandomAccessFile(new File(actualStorageDir, actualBTree + ".sbt"), "r");
Assert.assertEquals(fileOne.length(), fileTwo.length());
byte[] expectedContent = new byte[OClusterPage.PAGE_SIZE];
byte[] actualContent = new byte[OClusterPage.PAGE_SIZE];
fileOne.seek(OAbstractFile.HEADER_SIZE);
fileTwo.seek(OAbstractFile.HEADER_SIZE);
int bytesRead = fileOne.read(expectedContent);
while (bytesRead >= 0) {
fileTwo.readFully(actualContent, 0, bytesRead);
Assert.assertEquals(expectedContent, actualContent);
expectedContent = new byte[OClusterPage.PAGE_SIZE];
actualContent = new byte[OClusterPage.PAGE_SIZE];
bytesRead = fileOne.read(expectedContent);
}
fileOne.close();
fileTwo.close();
}
} | 1no label
| core_src_test_java_com_orientechnologies_orient_core_index_sbtreebonsai_local_OSBTreeBonsaiWAL.java |
1,689 | runnable = new Runnable() { public void run() { map.putIfAbsent(null, "value"); } }; | 0true
| hazelcast_src_test_java_com_hazelcast_map_BasicMapTest.java |
1,157 | public class SimpleTimeBenchmark {
private static boolean USE_NANO_TIME = false;
private static long NUMBER_OF_ITERATIONS = 1000000;
private static int NUMBER_OF_THREADS = 100;
public static void main(String[] args) throws Exception {
StopWatch stopWatch = new StopWatch().start();
System.out.println("Running " + NUMBER_OF_ITERATIONS);
for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
System.currentTimeMillis();
}
System.out.println("Took " + stopWatch.stop().totalTime() + " TP Millis " + (NUMBER_OF_ITERATIONS / stopWatch.totalTime().millisFrac()));
System.out.println("Running using " + NUMBER_OF_THREADS + " threads with " + NUMBER_OF_ITERATIONS + " iterations");
final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
Thread[] threads = new Thread[NUMBER_OF_THREADS];
for (int i = 0; i < threads.length; i++) {
threads[i] = new Thread(new Runnable() {
@Override
public void run() {
if (USE_NANO_TIME) {
for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
System.nanoTime();
}
} else {
for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
System.currentTimeMillis();
}
}
latch.countDown();
}
});
}
stopWatch = new StopWatch().start();
for (Thread thread : threads) {
thread.start();
}
latch.await();
stopWatch.stop();
System.out.println("Took " + stopWatch.totalTime() + " TP Millis " + ((NUMBER_OF_ITERATIONS * NUMBER_OF_THREADS) / stopWatch.totalTime().millisFrac()));
}
} | 0true
| src_test_java_org_elasticsearch_benchmark_time_SimpleTimeBenchmark.java |
3,039 | public static final class Defaults {
public static final float MAX_SATURATION = 0.1f;
public static final float SATURATION_LIMIT = 0.9f;
} | 0true
| src_main_java_org_elasticsearch_index_codec_postingsformat_BloomFilterLucenePostingsFormatProvider.java |
3,009 | public static class StubIndexService implements IndexService {
private final MapperService mapperService;
public StubIndexService(MapperService mapperService) {
this.mapperService = mapperService;
}
@Override
public Injector injector() {
return null;
}
@Override
public IndexGateway gateway() {
return null;
}
@Override
public IndexCache cache() {
return null;
}
@Override
public IndexFieldDataService fieldData() {
return null;
}
@Override
public IndexSettingsService settingsService() {
return null;
}
@Override
public AnalysisService analysisService() {
return null;
}
@Override
public MapperService mapperService() {
return mapperService;
}
@Override
public IndexQueryParserService queryParserService() {
return null;
}
@Override
public SimilarityService similarityService() {
return null;
}
@Override
public IndexAliasesService aliasesService() {
return null;
}
@Override
public IndexEngine engine() {
return null;
}
@Override
public IndexStore store() {
return null;
}
@Override
public IndexShard createShard(int sShardId) throws ElasticsearchException {
return null;
}
@Override
public void removeShard(int shardId, String reason) throws ElasticsearchException {
}
@Override
public int numberOfShards() {
return 0;
}
@Override
public ImmutableSet<Integer> shardIds() {
return null;
}
@Override
public boolean hasShard(int shardId) {
return false;
}
@Override
public IndexShard shard(int shardId) {
return null;
}
@Override
public IndexShard shardSafe(int shardId) throws IndexShardMissingException {
return null;
}
@Override
public Injector shardInjector(int shardId) {
return null;
}
@Override
public Injector shardInjectorSafe(int shardId) throws IndexShardMissingException {
return null;
}
@Override
public String indexUUID() {
return IndexMetaData.INDEX_UUID_NA_VALUE;
}
@Override
public Index index() {
return null;
}
@Override
public Iterator<IndexShard> iterator() {
return null;
}
} | 0true
| src_test_java_org_elasticsearch_index_cache_id_SimpleIdCacheTests.java |
3,263 | MIN {
/**
* Equivalent to {@link Math#min(double, double)}
*/
@Override
public double apply(double a, double b) {
return Math.min(a, b);
}
/**
* Equivalent to {@link Math#min(long, long)}
*/
@Override
public long apply(long a, long b) {
return Math.min(a, b);
}
/**
* Returns {@link Double#POSITIVE_INFINITY}
*/
@Override
public double startDouble() {
return Double.POSITIVE_INFINITY;
}
/**
* Returns {@link Long#MAX_VALUE}
*/
@Override
public long startLong() {
return Long.MAX_VALUE;
}
/**
* Returns the first value returned for the given <tt>docId</tt> or the <tt>defaultValue</tt> if the document
* has no values.
*/
@Override
public double getRelevantValue(DoubleValues values, int docId, double defaultValue) {
assert values.getOrder() != AtomicFieldData.Order.NONE;
if (values.setDocument(docId) > 0) {
return values.nextValue();
}
return defaultValue;
}
/**
* Returns the first value returned for the given <tt>docId</tt> or the <tt>defaultValue</tt> if the document
* has no values.
*/
@Override
public long getRelevantValue(LongValues values, int docId, long defaultValue) {
assert values.getOrder() != AtomicFieldData.Order.NONE;
if (values.setDocument(docId) > 0) {
return values.nextValue();
}
return defaultValue;
}
/**
* Returns the first value returned for the given <tt>docId</tt> or the <tt>defaultValue</tt> if the document
* has no values.
*/
@Override
public BytesRef getRelevantValue(BytesValues values, int docId, BytesRef defaultValue) {
assert values.getOrder() != AtomicFieldData.Order.NONE;
if (values.setDocument(docId) > 0) {
return values.nextValue();
}
return defaultValue;
}
}, | 0true
| src_main_java_org_elasticsearch_index_fielddata_fieldcomparator_SortMode.java |
3,188 | interface Builder {
IndexFieldData build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper, IndexFieldDataCache cache,
CircuitBreakerService breakerService);
} | 0true
| src_main_java_org_elasticsearch_index_fielddata_IndexFieldData.java |
1,261 | addOperation(operations, new Runnable() {
public void run() {
IMap map = hazelcast.getMap("myMap");
final CountDownLatch latch = new CountDownLatch(1);
EntryListener listener = new EntryAdapter() {
@Override
public void onEntryEvent(EntryEvent event) {
latch.countDown();
}
};
String id = map.addEntryListener(listener, true);
try {
latch.await();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
map.removeEntryListener(id);
}
}, 1); | 0true
| hazelcast_src_main_java_com_hazelcast_examples_AllTest.java |
24 | public class NetworkBufferConstants {
public final static String HTTP_PROTOCOL = "http://";
public final static char DELIMITER = '/';
public final static char PORT_DELIMITER = ':';
public final static String GET_DATA_COMMAND = "requestData";
public final static String FEED_ID_PARAMETER = "feeds";
public final static String START_TIME_PARAMETER = "startTime";
public final static String END_TIME_PARAMETER = "endTime";
public final static char PARAMETER_DELIMITER = ',';
public final static String constructURL(String host, int port, String command) {
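        // Sketch of the produced form (illustrative values):
        // constructURL("localhost", 8080, GET_DATA_COMMAND) -> "http://localhost:8080/requestData/"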
StringBuilder sb = new StringBuilder(HTTP_PROTOCOL);
sb.append(host);
sb.append(PORT_DELIMITER);
sb.append(port);
sb.append(DELIMITER);
sb.append(command);
sb.append(DELIMITER);
return sb.toString();
}
} | 0true
| timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_config_NetworkBufferConstants.java |
630 | public class TcpIpJoiner extends AbstractJoiner {
private static final int MAX_PORT_TRIES = 3;
private volatile boolean claimingMaster = false;
public TcpIpJoiner(Node node) {
super(node);
}
private void joinViaTargetMember(AtomicBoolean joined, Address targetAddress, long maxJoinMillis) {
try {
if (targetAddress == null) {
throw new IllegalArgumentException("Invalid target address -> NULL");
}
if (logger.isFinestEnabled()) {
logger.finest("Joining over target member " + targetAddress);
}
if (targetAddress.equals(node.getThisAddress()) || isLocalAddress(targetAddress)) {
node.setAsMaster();
return;
}
long joinStartTime = Clock.currentTimeMillis();
Connection connection = null;
while (node.isActive() && !joined.get() && (Clock.currentTimeMillis() - joinStartTime < maxJoinMillis)) {
connection = node.connectionManager.getOrConnect(targetAddress);
if (connection == null) {
//noinspection BusyWait
Thread.sleep(2000L);
continue;
}
if (logger.isFinestEnabled()) {
logger.finest("Sending joinRequest " + targetAddress);
}
node.clusterService.sendJoinRequest(targetAddress, true);
//noinspection BusyWait
Thread.sleep(3000L);
}
} catch (final Exception e) {
logger.warning(e);
}
}
public static class MasterClaim extends AbstractOperation implements JoinOperation {
private transient boolean approvedAsMaster = false;
@Override
public void run() {
final NodeEngineImpl nodeEngine = (NodeEngineImpl) getNodeEngine();
Node node = nodeEngine.getNode();
Joiner joiner = node.getJoiner();
final ILogger logger = node.getLogger(getClass().getName());
if (joiner instanceof TcpIpJoiner) {
TcpIpJoiner tcpIpJoiner = (TcpIpJoiner) joiner;
final Address endpoint = getCallerAddress();
final Address masterAddress = node.getMasterAddress();
approvedAsMaster = !tcpIpJoiner.claimingMaster && !node.isMaster()
&& (masterAddress == null || masterAddress.equals(endpoint));
} else {
approvedAsMaster = false;
logger.warning("This node requires MulticastJoin strategy!");
}
if (logger.isFinestEnabled()) {
logger.finest("Sending '" + approvedAsMaster + "' for master claim of node: " + getCallerAddress());
}
}
@Override
public boolean returnsResponse() {
return true;
}
@Override
public Object getResponse() {
return approvedAsMaster;
}
}
//todo: why are we passing argument if not used?
private void joinViaPossibleMembers(AtomicBoolean joined) {
try {
node.getFailedConnections().clear();
final Collection<Address> colPossibleAddresses = getPossibleAddresses();
colPossibleAddresses.remove(node.getThisAddress());
for (final Address possibleAddress : colPossibleAddresses) {
logger.info("Connecting to possible member: " + possibleAddress);
node.connectionManager.getOrConnect(possibleAddress);
}
boolean foundConnection = false;
int numberOfSeconds = 0;
final int connectionTimeoutSeconds = getConnTimeoutSeconds();
while (!foundConnection && numberOfSeconds < connectionTimeoutSeconds) {
if (logger.isFinestEnabled()) {
logger.finest("Removing failedConnections: " + node.getFailedConnections());
}
colPossibleAddresses.removeAll(node.getFailedConnections());
if (colPossibleAddresses.size() == 0) {
break;
}
if (logger.isFinestEnabled()) {
logger.finest("We are going to try to connect to each address" + colPossibleAddresses);
}
for (Address possibleAddress : colPossibleAddresses) {
final Connection conn = node.connectionManager.getOrConnect(possibleAddress);
if (conn != null) {
foundConnection = true;
if (logger.isFinestEnabled()) {
logger.finest("Found a connection and sending join request to " + possibleAddress);
}
node.clusterService.sendJoinRequest(possibleAddress, true);
}
}
if (!foundConnection) {
Thread.sleep(1000L);
numberOfSeconds++;
}
}
if (logger.isFinestEnabled()) {
logger.finest("FOUND " + foundConnection);
}
if (!foundConnection) {
logger.finest("This node will assume master role since no possible member where connected to.");
node.setAsMaster();
} else {
if (!node.joined()) {
final int totalSleep = connectionTimeoutSeconds - numberOfSeconds;
for (int i = 0; i < totalSleep * 2 && !node.joined(); i++) {
logger.finest("Waiting for join request answer, sleeping for 500 ms...");
Thread.sleep(500L);
Address masterAddress = node.getMasterAddress();
if (masterAddress != null) {
if (logger.isFinestEnabled()) {
logger.finest("Sending join request to " + masterAddress);
}
node.clusterService.sendJoinRequest(masterAddress, true);
}
}
colPossibleAddresses.removeAll(node.getFailedConnections());
if (colPossibleAddresses.size() == 0) {
logger.finest("This node will assume master role since none of the possible members accepted join request.");
node.setAsMaster();
} else if (!node.joined()) {
boolean masterCandidate = true;
for (Address address : colPossibleAddresses) {
if (node.connectionManager.getConnection(address) != null) {
if (node.getThisAddress().hashCode() > address.hashCode()) {
masterCandidate = false;
}
}
}
if (masterCandidate) {
// ask others...
claimingMaster = true;
Collection<Future<Boolean>> responses = new LinkedList<Future<Boolean>>();
for (Address address : colPossibleAddresses) {
if (node.getConnectionManager().getConnection(address) != null) {
logger.finest("Claiming myself as master node!");
Future future = node.nodeEngine.getOperationService().createInvocationBuilder(
ClusterServiceImpl.SERVICE_NAME, new MasterClaim(), address)
.setTryCount(1).invoke();
responses.add(future);
}
}
final long maxWait = TimeUnit.SECONDS.toMillis(10);
long waitTime = 0L;
boolean allApprovedAsMaster = true;
for (Future<Boolean> response : responses) {
if (!allApprovedAsMaster || waitTime > maxWait) {
allApprovedAsMaster = false;
break;
}
long t = Clock.currentTimeMillis();
try {
allApprovedAsMaster &= response.get(1, TimeUnit.SECONDS);
} catch (Exception e) {
logger.finest(e);
allApprovedAsMaster = false;
} finally {
waitTime += (Clock.currentTimeMillis() - t);
}
}
if (allApprovedAsMaster) {
if (logger.isFinestEnabled()) {
logger.finest(node.getThisAddress() + " Setting myself as master! group "
+ node.getConfig().getGroupConfig().getName() + " possible addresses "
+ colPossibleAddresses.size() + " " + colPossibleAddresses);
}
node.setAsMaster();
return;
} else {
lookForMaster(colPossibleAddresses);
}
} else {
lookForMaster(colPossibleAddresses);
}
}
}
}
colPossibleAddresses.clear();
node.getFailedConnections().clear();
} catch (Throwable t) {
logger.severe(t);
}
}
protected int getConnTimeoutSeconds() {
return config.getNetworkConfig().getJoin().getTcpIpConfig().getConnectionTimeoutSeconds();
}
private void lookForMaster(Collection<Address> colPossibleAddresses) throws InterruptedException {
int tryCount = 0;
claimingMaster = false;
while (!node.joined() && tryCount++ < 20 && (node.getMasterAddress() == null)) {
connectAndSendJoinRequest(colPossibleAddresses);
//noinspection BusyWait
Thread.sleep(1000L);
}
int requestCount = 0;
colPossibleAddresses.removeAll(node.getFailedConnections());
if (colPossibleAddresses.size() == 0) {
node.setAsMaster();
if (logger.isFinestEnabled()) {
logger.finest(node.getThisAddress() + " Setting myself as master! group " + node.getConfig().getGroupConfig().getName()
+ " no possible addresses without failed connection");
}
return;
}
if (logger.isFinestEnabled()) {
logger.finest(node.getThisAddress() + " joining to master " + node.getMasterAddress() + ", group " + node.getConfig().getGroupConfig().getName());
}
while (node.isActive() && !node.joined()) {
//noinspection BusyWait
Thread.sleep(1000L);
final Address master = node.getMasterAddress();
if (master != null) {
node.clusterService.sendJoinRequest(master, true);
if (requestCount++ > node.getGroupProperties().MAX_WAIT_SECONDS_BEFORE_JOIN.getInteger() + 10) {
logger.warning("Couldn't join to the master : " + master);
return;
}
} else {
if (logger.isFinestEnabled()) {
logger.finest(node.getThisAddress() + " couldn't find a master! but there was connections available: " + colPossibleAddresses);
}
return;
}
}
}
private Address getRequiredMemberAddress() {
final TcpIpConfig tcpIpConfig = config.getNetworkConfig().getJoin().getTcpIpConfig();
final String host = tcpIpConfig.getRequiredMember();
try {
final AddressHolder addressHolder = AddressUtil.getAddressHolder(host, config.getNetworkConfig().getPort());
if (AddressUtil.isIpAddress(addressHolder.getAddress())) {
return new Address(addressHolder.getAddress(), addressHolder.getPort());
} else {
final InterfacesConfig interfaces = config.getNetworkConfig().getInterfaces();
if (interfaces.isEnabled()) {
final InetAddress[] inetAddresses = InetAddress.getAllByName(addressHolder.getAddress());
if (inetAddresses.length > 1) {
for (InetAddress inetAddress : inetAddresses) {
if (AddressUtil.matchAnyInterface(inetAddress.getHostAddress(),
interfaces.getInterfaces())) {
return new Address(inetAddress, addressHolder.getPort());
}
}
} else {
final InetAddress inetAddress = inetAddresses[0];
if (AddressUtil.matchAnyInterface(inetAddress.getHostAddress(),
interfaces.getInterfaces())) {
return new Address(addressHolder.getAddress(), addressHolder.getPort());
}
}
} else {
return new Address(addressHolder.getAddress(), addressHolder.getPort());
}
}
} catch (final Exception e) {
logger.warning(e);
}
return null;
}
public void doJoin(AtomicBoolean joined) {
final Address targetAddress = getTargetAddress();
if (targetAddress != null) {
long maxJoinMergeTargetMillis = node.getGroupProperties().MAX_JOIN_MERGE_TARGET_SECONDS.getInteger() * 1000;
joinViaTargetMember(joined, targetAddress, maxJoinMergeTargetMillis);
if (!joined.get()) {
joinViaPossibleMembers(joined);
}
} else if (config.getNetworkConfig().getJoin().getTcpIpConfig().getRequiredMember() != null) {
Address requiredMember = getRequiredMemberAddress();
long maxJoinMillis = node.getGroupProperties().MAX_JOIN_SECONDS.getInteger() * 1000;
joinViaTargetMember(joined, requiredMember, maxJoinMillis);
} else {
joinViaPossibleMembers(joined);
}
}
private Collection<Address> getPossibleAddresses() {
final Collection<String> possibleMembers = getMembers();
final Set<Address> possibleAddresses = new HashSet<Address>();
final NetworkConfig networkConfig = config.getNetworkConfig();
for (String possibleMember : possibleMembers) {
try {
final AddressHolder addressHolder = AddressUtil.getAddressHolder(possibleMember);
final boolean portIsDefined = addressHolder.getPort() != -1 || !networkConfig.isPortAutoIncrement();
final int count = portIsDefined ? 1 : MAX_PORT_TRIES;
final int port = addressHolder.getPort() != -1 ? addressHolder.getPort() : networkConfig.getPort();
AddressMatcher addressMatcher = null;
try {
addressMatcher = AddressUtil.getAddressMatcher(addressHolder.getAddress());
} catch (InvalidAddressException ignore) {
}
if (addressMatcher != null) {
final Collection<String> matchedAddresses;
if (addressMatcher.isIPv4()) {
matchedAddresses = AddressUtil.getMatchingIpv4Addresses(addressMatcher);
} else {
// for IPv6 we are not doing wildcard matching
matchedAddresses = Collections.singleton(addressHolder.getAddress());
}
for (String matchedAddress : matchedAddresses) {
addPossibleAddresses(possibleAddresses, null, InetAddress.getByName(matchedAddress), port, count);
}
} else {
final String host = addressHolder.getAddress();
final InterfacesConfig interfaces = networkConfig.getInterfaces();
if (interfaces.isEnabled()) {
final InetAddress[] inetAddresses = InetAddress.getAllByName(host);
if (inetAddresses.length > 1) {
for (InetAddress inetAddress : inetAddresses) {
if (AddressUtil.matchAnyInterface(inetAddress.getHostAddress(),
interfaces.getInterfaces())) {
addPossibleAddresses(possibleAddresses, null, inetAddress, port, count);
}
}
} else {
final InetAddress inetAddress = inetAddresses[0];
if (AddressUtil.matchAnyInterface(inetAddress.getHostAddress(),
interfaces.getInterfaces())) {
addPossibleAddresses(possibleAddresses, host, null, port, count);
}
}
} else {
addPossibleAddresses(possibleAddresses, host, null, port, count);
}
}
} catch (UnknownHostException e) {
logger.warning(e);
}
}
return possibleAddresses;
}
private void addPossibleAddresses(final Set<Address> possibleAddresses,
final String host, final InetAddress inetAddress,
final int port, final int count) throws UnknownHostException {
for (int i = 0; i < count; i++) {
final int currentPort = port + i;
final Address address = host != null ? new Address(host, currentPort) : new Address(inetAddress, currentPort);
if (!isLocalAddress(address)) {
possibleAddresses.add(address);
}
}
}
private boolean isLocalAddress(final Address address) throws UnknownHostException {
final Address thisAddress = node.getThisAddress();
final boolean local = thisAddress.getInetSocketAddress().equals(address.getInetSocketAddress());
if (logger.isFinestEnabled()) {
logger.finest(address + " is local? " + local);
}
return local;
}
protected Collection<String> getMembers() {
return getConfigurationMembers(config);
}
public static Collection<String> getConfigurationMembers(Config config) {
final TcpIpConfig tcpIpConfig = config.getNetworkConfig().getJoin().getTcpIpConfig();
final Collection<String> configMembers = tcpIpConfig.getMembers();
final Set<String> possibleMembers = new HashSet<String>();
for (String member : configMembers) {
// split members defined in tcp-ip configuration by comma(,) semi-colon(;) space( ).
String[] members = member.split("[,; ]");
Collections.addAll(possibleMembers, members);
}
return possibleMembers;
}
public void searchForOtherClusters() {
final Collection<Address> colPossibleAddresses;
try {
colPossibleAddresses = getPossibleAddresses();
} catch (Throwable e) {
logger.severe(e);
return;
}
colPossibleAddresses.remove(node.getThisAddress());
for (Member member : node.getClusterService().getMembers()) {
colPossibleAddresses.remove(((MemberImpl) member).getAddress());
}
if (colPossibleAddresses.isEmpty()) {
return;
}
for (Address possibleAddress : colPossibleAddresses) {
if (logger.isFinestEnabled()) {
logger.finest(node.getThisAddress() + " is connecting to " + possibleAddress);
}
node.connectionManager.getOrConnect(possibleAddress, true);
try {
//noinspection BusyWait
Thread.sleep(1500);
} catch (InterruptedException e) {
return;
}
final Connection conn = node.connectionManager.getConnection(possibleAddress);
if (conn != null) {
final JoinRequest response = node.clusterService.checkJoinInfo(possibleAddress);
if (response != null && shouldMerge(response)) {
logger.warning(node.getThisAddress() + " is merging [tcp/ip] to " + possibleAddress);
setTargetAddress(possibleAddress);
startClusterMerge(possibleAddress);
return;
}
}
}
}
@Override
public String getType() {
return "tcp-ip";
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_cluster_TcpIpJoiner.java |
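The master-claim round above sends a MasterClaim operation to every connected candidate and only takes mastership if every response arrives approving within a fixed time budget; any rejection, timeout, or error aborts the claim and the node falls back to lookForMaster. A simplified sketch of that all-must-approve aggregation, using plain java.util.concurrent futures rather than the Hazelcast invocation API; names and the time budget are illustrative:
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
public class MasterClaimSketch {
    // Returns true only if every peer approves within the overall time budget.
    static boolean claimMaster(List<Future<Boolean>> responses, long maxWaitMillis) {
        long waited = 0L;
        for (Future<Boolean> response : responses) {
            if (waited > maxWaitMillis) {
                return false; // budget exhausted: abandon the claim
            }
            long start = System.currentTimeMillis();
            try {
                if (!response.get(1, TimeUnit.SECONDS)) {
                    return false; // any rejection vetoes the claim
                }
            } catch (Exception e) {
                return false; // timeouts and errors count as rejection
            } finally {
                waited += System.currentTimeMillis() - start;
            }
        }
        return true;
    }
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        List<Future<Boolean>> votes = List.of(
                pool.submit(() -> true), pool.submit(() -> true));
        System.out.println(claimMaster(votes, 10_000L)); // true: all approved
        pool.shutdown();
    }
}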
3,304 | public static class Builder implements IndexFieldData.Builder {
@Override
public IndexFieldData<AtomicFieldData<?>> build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper,
IndexFieldDataCache cache, CircuitBreakerService breakerService) {
// Ignore Circuit Breaker
return new DisabledIndexFieldData(index, indexSettings, mapper.names(), mapper.fieldDataType(), cache);
}
} | 0true
| src_main_java_org_elasticsearch_index_fielddata_plain_DisabledIndexFieldData.java |
242 | public class XAnalyzingSuggester extends Lookup {
/**
* FST<Weight,Surface>:
* input is the analyzed form, with a null byte between terms
* weights are encoded as costs: (Integer.MAX_VALUE-weight)
* surface is the original, unanalyzed form.
*/
private FST<Pair<Long,BytesRef>> fst = null;
/**
* Analyzer that will be used for analyzing suggestions at
* index time.
*/
private final Analyzer indexAnalyzer;
/**
* Analyzer that will be used for analyzing suggestions at
* query time.
*/
private final Analyzer queryAnalyzer;
/**
* True if exact match suggestions should always be returned first.
*/
private final boolean exactFirst;
/**
* True if separator between tokens should be preserved.
*/
private final boolean preserveSep;
/** Include this flag in the options parameter to {@link
* #XAnalyzingSuggester(Analyzer,Analyzer,int,int,int,boolean,FST,boolean,int,int,int,int,int)} to always
* return the exact match first, regardless of score. This
* has no performance impact but could result in
* low-quality suggestions. */
public static final int EXACT_FIRST = 1;
/** Include this flag in the options parameter to {@link
* #XAnalyzingSuggester(Analyzer,Analyzer,int,int,int,boolean,FST,boolean,int,int,int,int,int)} to preserve
* token separators when matching. */
public static final int PRESERVE_SEP = 2;
/** Represents the separation between tokens, if
* PRESERVE_SEP was specified */
public static final int SEP_LABEL = '\u001F';
/** Marks end of the analyzed input and start of dedup
* byte. */
public static final int END_BYTE = 0x0;
/** Maximum number of dup surface forms (different surface
* forms for the same analyzed form). */
private final int maxSurfaceFormsPerAnalyzedForm;
/** Maximum graph paths to index for a single analyzed
* surface form. This only matters if your analyzer
* makes lots of alternate paths (e.g. contains
* SynonymFilter). */
private final int maxGraphExpansions;
/** Highest number of analyzed paths we saw for any single
* input surface form. For analyzers that never create
* graphs this will always be 1. */
private int maxAnalyzedPathsForOneInput;
private boolean hasPayloads;
private final int sepLabel;
private final int payloadSep;
private final int endByte;
private final int holeCharacter;
public static final int PAYLOAD_SEP = '\u001F';
public static final int HOLE_CHARACTER = '\u001E';
/** Whether position holes should appear in the automaton. */
private boolean preservePositionIncrements;
/**
* Calls {@link #XAnalyzingSuggester(Analyzer,Analyzer,int,int,int,boolean,FST,boolean,int,int,int,int,int)
* AnalyzingSuggester(analyzer, analyzer, EXACT_FIRST |
* PRESERVE_SEP, 256, -1)}
*/
public XAnalyzingSuggester(Analyzer analyzer) {
this(analyzer, analyzer, EXACT_FIRST | PRESERVE_SEP, 256, -1, true, null, false, 0, SEP_LABEL, PAYLOAD_SEP, END_BYTE, HOLE_CHARACTER);
}
/**
* Calls {@link #XAnalyzingSuggester(Analyzer,Analyzer,int,int,int,boolean,FST,boolean,int,int,int,int,int)
* AnalyzingSuggester(indexAnalyzer, queryAnalyzer, EXACT_FIRST |
* PRESERVE_SEP, 256, -1)}
*/
public XAnalyzingSuggester(Analyzer indexAnalyzer, Analyzer queryAnalyzer) {
this(indexAnalyzer, queryAnalyzer, EXACT_FIRST | PRESERVE_SEP, 256, -1, true, null, false, 0, SEP_LABEL, PAYLOAD_SEP, END_BYTE, HOLE_CHARACTER);
}
/**
* Creates a new suggester.
*
* @param indexAnalyzer Analyzer that will be used for
* analyzing suggestions while building the index.
* @param queryAnalyzer Analyzer that will be used for
* analyzing query text during lookup
* @param options see {@link #EXACT_FIRST}, {@link #PRESERVE_SEP}
* @param maxSurfaceFormsPerAnalyzedForm Maximum number of
* surface forms to keep for a single analyzed form.
* When there are too many surface forms we discard the
* lowest weighted ones.
* @param maxGraphExpansions Maximum number of graph paths
* to expand from the analyzed form. Set this to -1 for
* no limit.
*/
public XAnalyzingSuggester(Analyzer indexAnalyzer, Analyzer queryAnalyzer, int options, int maxSurfaceFormsPerAnalyzedForm, int maxGraphExpansions,
boolean preservePositionIncrements, FST<Pair<Long, BytesRef>> fst, boolean hasPayloads, int maxAnalyzedPathsForOneInput,
int sepLabel, int payloadSep, int endByte, int holeCharacter) {
// SIMON EDIT: I added fst, hasPayloads and maxAnalyzedPathsForOneInput
this.indexAnalyzer = indexAnalyzer;
this.queryAnalyzer = queryAnalyzer;
this.fst = fst;
this.hasPayloads = hasPayloads;
if ((options & ~(EXACT_FIRST | PRESERVE_SEP)) != 0) {
throw new IllegalArgumentException("options should only contain EXACT_FIRST and PRESERVE_SEP; got " + options);
}
this.exactFirst = (options & EXACT_FIRST) != 0;
this.preserveSep = (options & PRESERVE_SEP) != 0;
// NOTE: this is just an implementation limitation; if
// somehow this is a problem we could fix it by using
// more than one byte to disambiguate ... but 256 seems
// like it should be way more than enough.
if (maxSurfaceFormsPerAnalyzedForm <= 0 || maxSurfaceFormsPerAnalyzedForm > 256) {
throw new IllegalArgumentException("maxSurfaceFormsPerAnalyzedForm must be > 0 and < 256 (got: " + maxSurfaceFormsPerAnalyzedForm + ")");
}
this.maxSurfaceFormsPerAnalyzedForm = maxSurfaceFormsPerAnalyzedForm;
if (maxGraphExpansions < 1 && maxGraphExpansions != -1) {
throw new IllegalArgumentException("maxGraphExpansions must -1 (no limit) or > 0 (got: " + maxGraphExpansions + ")");
}
this.maxGraphExpansions = maxGraphExpansions;
this.maxAnalyzedPathsForOneInput = maxAnalyzedPathsForOneInput;
this.preservePositionIncrements = preservePositionIncrements;
this.sepLabel = sepLabel;
this.payloadSep = payloadSep;
this.endByte = endByte;
this.holeCharacter = holeCharacter;
}
/** Returns byte size of the underlying FST. */
public long sizeInBytes() {
return fst == null ? 0 : fst.sizeInBytes();
}
private static void copyDestTransitions(State from, State to, List<Transition> transitions) {
if (to.isAccept()) {
from.setAccept(true);
}
for(Transition t : to.getTransitions()) {
transitions.add(t);
}
}
// Replaces SEP with epsilon or remaps them if
// we were asked to preserve them:
private static void replaceSep(Automaton a, boolean preserveSep, int replaceSep) {
State[] states = a.getNumberedStates();
// Go in reverse topo sort so we know we only have to
// make one pass:
for(int stateNumber=states.length-1;stateNumber >=0;stateNumber--) {
final State state = states[stateNumber];
List<Transition> newTransitions = new ArrayList<Transition>();
for(Transition t : state.getTransitions()) {
assert t.getMin() == t.getMax();
if (t.getMin() == TokenStreamToAutomaton.POS_SEP) {
if (preserveSep) {
// Remap to SEP_LABEL:
newTransitions.add(new Transition(replaceSep, t.getDest()));
} else {
copyDestTransitions(state, t.getDest(), newTransitions);
a.setDeterministic(false);
}
} else if (t.getMin() == TokenStreamToAutomaton.HOLE) {
// Just remove the hole: there will then be two
// SEP tokens next to each other, which will only
// match another hole at search time. Note that
// it will also match an empty-string token ... if
// that's somehow a problem we can always map HOLE
// to a dedicated byte (and escape it in the
// input).
copyDestTransitions(state, t.getDest(), newTransitions);
a.setDeterministic(false);
} else {
newTransitions.add(t);
}
}
state.setTransitions(newTransitions.toArray(new Transition[newTransitions.size()]));
}
}
protected Automaton convertAutomaton(Automaton a) {
return a;
}
/** Just escapes the 0xff byte (which we still use for SEP). */
private static final class EscapingTokenStreamToAutomaton extends TokenStreamToAutomaton {
final BytesRef spare = new BytesRef();
private char sepLabel;
public EscapingTokenStreamToAutomaton(char sepLabel) {
this.sepLabel = sepLabel;
}
@Override
protected BytesRef changeToken(BytesRef in) {
int upto = 0;
for(int i=0;i<in.length;i++) {
byte b = in.bytes[in.offset+i];
if (b == (byte) sepLabel) {
if (spare.bytes.length == upto) {
spare.grow(upto+2);
}
spare.bytes[upto++] = (byte) sepLabel;
spare.bytes[upto++] = b;
} else {
if (spare.bytes.length == upto) {
spare.grow(upto+1);
}
spare.bytes[upto++] = b;
}
}
spare.offset = 0;
spare.length = upto;
return spare;
}
}
public TokenStreamToAutomaton getTokenStreamToAutomaton() {
final TokenStreamToAutomaton tsta;
if (preserveSep) {
tsta = new EscapingTokenStreamToAutomaton((char) sepLabel);
} else {
// When we're not preserving sep, we don't steal 0xff
// byte, so we don't need to do any escaping:
tsta = new TokenStreamToAutomaton();
}
tsta.setPreservePositionIncrements(preservePositionIncrements);
return tsta;
}
private static class AnalyzingComparator implements Comparator<BytesRef> {
private final boolean hasPayloads;
public AnalyzingComparator(boolean hasPayloads) {
this.hasPayloads = hasPayloads;
}
private final ByteArrayDataInput readerA = new ByteArrayDataInput();
private final ByteArrayDataInput readerB = new ByteArrayDataInput();
private final BytesRef scratchA = new BytesRef();
private final BytesRef scratchB = new BytesRef();
@Override
public int compare(BytesRef a, BytesRef b) {
// First by analyzed form:
readerA.reset(a.bytes, a.offset, a.length);
scratchA.length = readerA.readShort();
scratchA.bytes = a.bytes;
scratchA.offset = readerA.getPosition();
readerB.reset(b.bytes, b.offset, b.length);
scratchB.bytes = b.bytes;
scratchB.length = readerB.readShort();
scratchB.offset = readerB.getPosition();
int cmp = scratchA.compareTo(scratchB);
if (cmp != 0) {
return cmp;
}
readerA.skipBytes(scratchA.length);
readerB.skipBytes(scratchB.length);
// Next by cost:
long aCost = readerA.readInt();
long bCost = readerB.readInt();
if (aCost < bCost) {
return -1;
} else if (aCost > bCost) {
return 1;
}
// Finally by surface form:
if (hasPayloads) {
scratchA.length = readerA.readShort();
scratchA.offset = readerA.getPosition();
scratchB.length = readerB.readShort();
scratchB.offset = readerB.getPosition();
} else {
scratchA.offset = readerA.getPosition();
scratchA.length = a.length - scratchA.offset;
scratchB.offset = readerB.getPosition();
scratchB.length = b.length - scratchB.offset;
}
return scratchA.compareTo(scratchB);
}
}
@Override
public void build(InputIterator iterator) throws IOException {
String prefix = getClass().getSimpleName();
File directory = Sort.defaultTempDir();
File tempInput = File.createTempFile(prefix, ".input", directory);
File tempSorted = File.createTempFile(prefix, ".sorted", directory);
hasPayloads = iterator.hasPayloads();
Sort.ByteSequencesWriter writer = new Sort.ByteSequencesWriter(tempInput);
Sort.ByteSequencesReader reader = null;
BytesRef scratch = new BytesRef();
TokenStreamToAutomaton ts2a = getTokenStreamToAutomaton();
boolean success = false;
byte buffer[] = new byte[8];
try {
ByteArrayDataOutput output = new ByteArrayDataOutput(buffer);
BytesRef surfaceForm;
while ((surfaceForm = iterator.next()) != null) {
Set<IntsRef> paths = toFiniteStrings(surfaceForm, ts2a);
maxAnalyzedPathsForOneInput = Math.max(maxAnalyzedPathsForOneInput, paths.size());
for (IntsRef path : paths) {
Util.toBytesRef(path, scratch);
// length of the analyzed text (FST input)
if (scratch.length > Short.MAX_VALUE-2) {
throw new IllegalArgumentException("cannot handle analyzed forms > " + (Short.MAX_VALUE-2) + " in length (got " + scratch.length + ")");
}
short analyzedLength = (short) scratch.length;
// compute the required length:
// analyzed sequence + weight (4) + surface + analyzedLength (short)
int requiredLength = analyzedLength + 4 + surfaceForm.length + 2;
BytesRef payload;
if (hasPayloads) {
if (surfaceForm.length > (Short.MAX_VALUE-2)) {
throw new IllegalArgumentException("cannot handle surface form > " + (Short.MAX_VALUE-2) + " in length (got " + surfaceForm.length + ")");
}
payload = iterator.payload();
// payload + surfaceLength (short)
requiredLength += payload.length + 2;
} else {
payload = null;
}
buffer = ArrayUtil.grow(buffer, requiredLength);
output.reset(buffer);
output.writeShort(analyzedLength);
output.writeBytes(scratch.bytes, scratch.offset, scratch.length);
output.writeInt(encodeWeight(iterator.weight()));
if (hasPayloads) {
for(int i=0;i<surfaceForm.length;i++) {
if (surfaceForm.bytes[i] == payloadSep) {
throw new IllegalArgumentException("surface form cannot contain unit separator character U+001F; this character is reserved");
}
}
output.writeShort((short) surfaceForm.length);
output.writeBytes(surfaceForm.bytes, surfaceForm.offset, surfaceForm.length);
output.writeBytes(payload.bytes, payload.offset, payload.length);
} else {
output.writeBytes(surfaceForm.bytes, surfaceForm.offset, surfaceForm.length);
}
assert output.getPosition() == requiredLength: output.getPosition() + " vs " + requiredLength;
writer.write(buffer, 0, output.getPosition());
}
}
writer.close();
// Sort all input/output pairs (required by FST.Builder):
new Sort(new AnalyzingComparator(hasPayloads)).sort(tempInput, tempSorted);
// Free disk space:
tempInput.delete();
reader = new Sort.ByteSequencesReader(tempSorted);
PairOutputs<Long,BytesRef> outputs = new PairOutputs<Long,BytesRef>(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton());
Builder<Pair<Long,BytesRef>> builder = new Builder<Pair<Long,BytesRef>>(FST.INPUT_TYPE.BYTE1, outputs);
// Build FST:
BytesRef previousAnalyzed = null;
BytesRef analyzed = new BytesRef();
BytesRef surface = new BytesRef();
IntsRef scratchInts = new IntsRef();
ByteArrayDataInput input = new ByteArrayDataInput();
// Used to remove duplicate surface forms (but we
// still index the highest-weight one). We clear
// this when we see a new analyzed form, so it cannot
// grow unbounded (at most 256 entries):
Set<BytesRef> seenSurfaceForms = new HashSet<BytesRef>();
int dedup = 0;
while (reader.read(scratch)) {
input.reset(scratch.bytes, scratch.offset, scratch.length);
short analyzedLength = input.readShort();
analyzed.grow(analyzedLength+2);
input.readBytes(analyzed.bytes, 0, analyzedLength);
analyzed.length = analyzedLength;
long cost = input.readInt();
surface.bytes = scratch.bytes;
if (hasPayloads) {
surface.length = input.readShort();
surface.offset = input.getPosition();
} else {
surface.offset = input.getPosition();
surface.length = scratch.length - surface.offset;
}
if (previousAnalyzed == null) {
previousAnalyzed = new BytesRef();
previousAnalyzed.copyBytes(analyzed);
seenSurfaceForms.add(BytesRef.deepCopyOf(surface));
} else if (analyzed.equals(previousAnalyzed)) {
dedup++;
if (dedup >= maxSurfaceFormsPerAnalyzedForm) {
// More than maxSurfaceFormsPerAnalyzedForm
// dups: skip the rest:
continue;
}
if (seenSurfaceForms.contains(surface)) {
continue;
}
seenSurfaceForms.add(BytesRef.deepCopyOf(surface));
} else {
dedup = 0;
previousAnalyzed.copyBytes(analyzed);
seenSurfaceForms.clear();
seenSurfaceForms.add(BytesRef.deepCopyOf(surface));
}
// TODO: I think we can avoid the extra 2 bytes when
// there is no dup (dedup==0), but we'd have to fix
// the exactFirst logic ... which would be sort of
// hairy because we'd need to special case the two
// (dup/not dup)...
// NOTE: must be byte 0 so we sort before whatever
// is next
analyzed.bytes[analyzed.offset+analyzed.length] = 0;
analyzed.bytes[analyzed.offset+analyzed.length+1] = (byte) dedup;
analyzed.length += 2;
Util.toIntsRef(analyzed, scratchInts);
//System.out.println("ADD: " + scratchInts + " -> " + cost + ": " + surface.utf8ToString());
if (!hasPayloads) {
builder.add(scratchInts, outputs.newPair(cost, BytesRef.deepCopyOf(surface)));
} else {
int payloadOffset = input.getPosition() + surface.length;
int payloadLength = scratch.length - payloadOffset;
BytesRef br = new BytesRef(surface.length + 1 + payloadLength);
System.arraycopy(surface.bytes, surface.offset, br.bytes, 0, surface.length);
br.bytes[surface.length] = (byte) payloadSep;
System.arraycopy(scratch.bytes, payloadOffset, br.bytes, surface.length+1, payloadLength);
br.length = br.bytes.length;
builder.add(scratchInts, outputs.newPair(cost, br));
}
}
fst = builder.finish();
//PrintWriter pw = new PrintWriter("/tmp/out.dot");
//Util.toDot(fst, pw, true, true);
//pw.close();
success = true;
} finally {
if (success) {
IOUtils.close(reader, writer);
} else {
IOUtils.closeWhileHandlingException(reader, writer);
}
tempInput.delete();
tempSorted.delete();
}
}
@Override
public boolean store(OutputStream output) throws IOException {
DataOutput dataOut = new OutputStreamDataOutput(output);
try {
if (fst == null) {
return false;
}
fst.save(dataOut);
dataOut.writeVInt(maxAnalyzedPathsForOneInput);
dataOut.writeByte((byte) (hasPayloads ? 1 : 0));
} finally {
IOUtils.close(output);
}
return true;
}
@Override
public boolean load(InputStream input) throws IOException {
DataInput dataIn = new InputStreamDataInput(input);
try {
this.fst = new FST<Pair<Long,BytesRef>>(dataIn, new PairOutputs<Long,BytesRef>(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton()));
maxAnalyzedPathsForOneInput = dataIn.readVInt();
hasPayloads = dataIn.readByte() == 1;
} finally {
IOUtils.close(input);
}
return true;
}
private LookupResult getLookupResult(Long output1, BytesRef output2, CharsRef spare) {
LookupResult result;
if (hasPayloads) {
int sepIndex = -1;
for(int i=0;i<output2.length;i++) {
if (output2.bytes[output2.offset+i] == payloadSep) {
sepIndex = i;
break;
}
}
assert sepIndex != -1;
spare.grow(sepIndex);
final int payloadLen = output2.length - sepIndex - 1;
UnicodeUtil.UTF8toUTF16(output2.bytes, output2.offset, sepIndex, spare);
BytesRef payload = new BytesRef(payloadLen);
System.arraycopy(output2.bytes, sepIndex+1, payload.bytes, 0, payloadLen);
payload.length = payloadLen;
result = new LookupResult(spare.toString(), decodeWeight(output1), payload);
} else {
spare.grow(output2.length);
UnicodeUtil.UTF8toUTF16(output2, spare);
result = new LookupResult(spare.toString(), decodeWeight(output1));
}
return result;
}
private boolean sameSurfaceForm(BytesRef key, BytesRef output2) {
if (hasPayloads) {
// output2 has at least PAYLOAD_SEP byte:
if (key.length >= output2.length) {
return false;
}
for(int i=0;i<key.length;i++) {
if (key.bytes[key.offset+i] != output2.bytes[output2.offset+i]) {
return false;
}
}
return output2.bytes[output2.offset + key.length] == payloadSep;
} else {
return key.bytesEquals(output2);
}
}
@Override
public List<LookupResult> lookup(final CharSequence key, boolean onlyMorePopular, int num) {
assert num > 0;
if (onlyMorePopular) {
throw new IllegalArgumentException("this suggester only works with onlyMorePopular=false");
}
if (fst == null) {
return Collections.emptyList();
}
//System.out.println("lookup key=" + key + " num=" + num);
for (int i = 0; i < key.length(); i++) {
if (key.charAt(i) == holeCharacter) {
throw new IllegalArgumentException("lookup key cannot contain HOLE character U+001E; this character is reserved");
}
if (key.charAt(i) == sepLabel) {
throw new IllegalArgumentException("lookup key cannot contain unit separator character U+001F; this character is reserved");
}
}
final BytesRef utf8Key = new BytesRef(key);
try {
Automaton lookupAutomaton = toLookupAutomaton(key);
final CharsRef spare = new CharsRef();
//System.out.println(" now intersect exactFirst=" + exactFirst);
// Intersect automaton w/ suggest wFST and get all
// prefix starting nodes & their outputs:
//final PathIntersector intersector = getPathIntersector(lookupAutomaton, fst);
//System.out.println(" prefixPaths: " + prefixPaths.size());
BytesReader bytesReader = fst.getBytesReader();
FST.Arc<Pair<Long,BytesRef>> scratchArc = new FST.Arc<Pair<Long,BytesRef>>();
final List<LookupResult> results = new ArrayList<LookupResult>();
List<FSTUtil.Path<Pair<Long,BytesRef>>> prefixPaths = FSTUtil.intersectPrefixPaths(convertAutomaton(lookupAutomaton), fst);
if (exactFirst) {
int count = 0;
for (FSTUtil.Path<Pair<Long,BytesRef>> path : prefixPaths) {
if (fst.findTargetArc(endByte, path.fstNode, scratchArc, bytesReader) != null) {
// This node has END_BYTE arc leaving, meaning it's an
// "exact" match:
count++;
}
}
// Searcher just to find the single exact only
// match, if present:
Util.TopNSearcher<Pair<Long,BytesRef>> searcher;
searcher = new Util.TopNSearcher<Pair<Long,BytesRef>>(fst, count * maxSurfaceFormsPerAnalyzedForm, count * maxSurfaceFormsPerAnalyzedForm, weightComparator);
// NOTE: we could almost get away with only using
// the first start node. The only catch is if
// maxSurfaceFormsPerAnalyzedForm had kicked in and
// pruned our exact match from one of these nodes
// ...:
for (FSTUtil.Path<Pair<Long,BytesRef>> path : prefixPaths) {
if (fst.findTargetArc(endByte, path.fstNode, scratchArc, bytesReader) != null) {
// This node has END_BYTE arc leaving, meaning it's an
// "exact" match:
searcher.addStartPaths(scratchArc, fst.outputs.add(path.output, scratchArc.output), false, path.input);
}
}
MinResult<Pair<Long,BytesRef>> completions[] = searcher.search();
// NOTE: this is rather inefficient: we enumerate
// every matching "exactly the same analyzed form"
// path, and then do linear scan to see if one of
// these exactly matches the input. It should be
// possible (though hairy) to do something similar
// to getByOutput, since the surface form is encoded
// into the FST output, so we more efficiently hone
// in on the exact surface-form match. Still, I
// suspect very little time is spent in this linear
// search: it's bounded by how many prefix start
// nodes we have and the
// maxSurfaceFormsPerAnalyzedForm:
for(MinResult<Pair<Long,BytesRef>> completion : completions) {
BytesRef output2 = completion.output.output2;
if (sameSurfaceForm(utf8Key, output2)) {
results.add(getLookupResult(completion.output.output1, output2, spare));
break;
}
}
if (results.size() == num) {
// That was quick:
return results;
}
}
Util.TopNSearcher<Pair<Long,BytesRef>> searcher;
searcher = new Util.TopNSearcher<Pair<Long,BytesRef>>(fst,
num - results.size(),
num * maxAnalyzedPathsForOneInput,
weightComparator) {
private final Set<BytesRef> seen = new HashSet<BytesRef>();
@Override
protected boolean acceptResult(IntsRef input, Pair<Long,BytesRef> output) {
// Dedup: when the input analyzes to a graph we
// can get duplicate surface forms:
if (seen.contains(output.output2)) {
return false;
}
seen.add(output.output2);
if (!exactFirst) {
return true;
} else {
// In exactFirst mode, don't accept any paths
// matching the surface form since that will
// create duplicate results:
if (sameSurfaceForm(utf8Key, output.output2)) {
// We found exact match, which means we should
// have already found it in the first search:
assert results.size() == 1;
return false;
} else {
return true;
}
}
}
};
prefixPaths = getFullPrefixPaths(prefixPaths, lookupAutomaton, fst);
for (FSTUtil.Path<Pair<Long,BytesRef>> path : prefixPaths) {
searcher.addStartPaths(path.fstNode, path.output, true, path.input);
}
MinResult<Pair<Long,BytesRef>> completions[] = searcher.search();
for(MinResult<Pair<Long,BytesRef>> completion : completions) {
LookupResult result = getLookupResult(completion.output.output1, completion.output.output2, spare);
// TODO: for fuzzy case would be nice to return
// how many edits were required
//System.out.println(" result=" + result);
results.add(result);
if (results.size() == num) {
// In the exactFirst=true case the search may
// produce one extra path
break;
}
}
return results;
} catch (IOException bogus) {
throw new RuntimeException(bogus);
}
}
/** Returns all completion paths to initialize the search. */
protected List<FSTUtil.Path<Pair<Long,BytesRef>>> getFullPrefixPaths(List<FSTUtil.Path<Pair<Long,BytesRef>>> prefixPaths,
Automaton lookupAutomaton,
FST<Pair<Long,BytesRef>> fst)
throws IOException {
return prefixPaths;
}
public final Set<IntsRef> toFiniteStrings(final BytesRef surfaceForm, final TokenStreamToAutomaton ts2a) throws IOException {
// Analyze surface form:
TokenStream ts = indexAnalyzer.tokenStream("", surfaceForm.utf8ToString());
return toFiniteStrings(ts2a, ts);
}
public final Set<IntsRef> toFiniteStrings(final TokenStreamToAutomaton ts2a, TokenStream ts) throws IOException {
// Analyze surface form:
// Create corresponding automaton: labels are bytes
// from each analyzed token, with byte 0 used as
// separator between tokens:
Automaton automaton = ts2a.toAutomaton(ts);
ts.close();
replaceSep(automaton, preserveSep, sepLabel);
assert SpecialOperations.isFinite(automaton);
// Get all paths from the automaton (there can be
// more than one path, eg if the analyzer created a
// graph using SynFilter or WDF):
// TODO: we could walk & add simultaneously, so we
// don't have to alloc [possibly biggish]
// intermediate HashSet in RAM:
return SpecialOperations.getFiniteStrings(automaton, maxGraphExpansions);
}
final Automaton toLookupAutomaton(final CharSequence key) throws IOException {
// Turn tokenstream into automaton:
TokenStream ts = queryAnalyzer.tokenStream("", key.toString());
Automaton automaton = (getTokenStreamToAutomaton()).toAutomaton(ts);
ts.close();
// TODO: we could use the end offset to "guess"
// whether the final token was a partial token; this
// would only be a heuristic ... but maybe an OK one.
// This way we could eg differentiate "net" from "net ",
// which we can't today...
replaceSep(automaton, preserveSep, sepLabel);
// TODO: we can optimize this somewhat by determinizing
// while we convert
BasicOperations.determinize(automaton);
return automaton;
}
/**
* Returns the weight associated with an input string,
* or null if it does not exist.
*/
public Object get(CharSequence key) {
throw new UnsupportedOperationException();
}
/** cost -> weight */
public static int decodeWeight(long encoded) {
return (int)(Integer.MAX_VALUE - encoded);
}
/** weight -> cost */
public static int encodeWeight(long value) {
if (value < 0 || value > Integer.MAX_VALUE) {
throw new UnsupportedOperationException("cannot encode value: " + value);
}
return Integer.MAX_VALUE - (int)value;
}
static final Comparator<Pair<Long,BytesRef>> weightComparator = new Comparator<Pair<Long,BytesRef>> () {
@Override
public int compare(Pair<Long,BytesRef> left, Pair<Long,BytesRef> right) {
return left.output1.compareTo(right.output1);
}
};
public static class XBuilder {
private Builder<Pair<Long, BytesRef>> builder;
private int maxSurfaceFormsPerAnalyzedForm;
private IntsRef scratchInts = new IntsRef();
private final PairOutputs<Long, BytesRef> outputs;
private boolean hasPayloads;
private BytesRef analyzed = new BytesRef();
private final SurfaceFormAndPayload[] surfaceFormsAndPayload;
private int count;
private ObjectIntOpenHashMap<BytesRef> seenSurfaceForms = HppcMaps.Object.Integer.ensureNoNullKeys(256, 0.75f);
private int payloadSep;
public XBuilder(int maxSurfaceFormsPerAnalyzedForm, boolean hasPayloads, int payloadSep) {
this.payloadSep = payloadSep;
this.outputs = new PairOutputs<Long, BytesRef>(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton());
this.builder = new Builder<Pair<Long, BytesRef>>(FST.INPUT_TYPE.BYTE1, outputs);
this.maxSurfaceFormsPerAnalyzedForm = maxSurfaceFormsPerAnalyzedForm;
this.hasPayloads = hasPayloads;
surfaceFormsAndPayload = new SurfaceFormAndPayload[maxSurfaceFormsPerAnalyzedForm];
}
public void startTerm(BytesRef analyzed) {
this.analyzed.copyBytes(analyzed);
this.analyzed.grow(analyzed.length+2);
}
private final static class SurfaceFormAndPayload implements Comparable<SurfaceFormAndPayload> {
BytesRef payload;
long weight;
public SurfaceFormAndPayload(BytesRef payload, long cost) {
super();
this.payload = payload;
this.weight = cost;
}
@Override
public int compareTo(SurfaceFormAndPayload o) {
int res = compare(weight, o.weight);
if (res == 0 ){
return payload.compareTo(o.payload);
}
return res;
}
public static int compare(long x, long y) {
return (x < y) ? -1 : ((x == y) ? 0 : 1);
}
}
public void addSurface(BytesRef surface, BytesRef payload, long cost) throws IOException {
int surfaceIndex = -1;
long encodedWeight = cost == -1 ? cost : encodeWeight(cost);
/*
* we need to check if we have seen this surface form; if so, only use the
* surface form with the highest weight and drop the rest, no matter if
* the payload differs.
*/
if (count >= maxSurfaceFormsPerAnalyzedForm) {
// More than maxSurfaceFormsPerAnalyzedForm
// dups: skip the rest:
return;
}
BytesRef surfaceCopy;
if (count > 0 && seenSurfaceForms.containsKey(surface)) {
surfaceIndex = seenSurfaceForms.lget();
SurfaceFormAndPayload surfaceFormAndPayload = surfaceFormsAndPayload[surfaceIndex];
if (encodedWeight >= surfaceFormAndPayload.weight) {
return;
}
surfaceCopy = BytesRef.deepCopyOf(surface);
} else {
surfaceIndex = count++;
surfaceCopy = BytesRef.deepCopyOf(surface);
seenSurfaceForms.put(surfaceCopy, surfaceIndex);
}
BytesRef payloadRef;
if (!hasPayloads) {
payloadRef = surfaceCopy;
} else {
int len = surface.length + 1 + payload.length;
final BytesRef br = new BytesRef(len);
System.arraycopy(surface.bytes, surface.offset, br.bytes, 0, surface.length);
br.bytes[surface.length] = (byte) payloadSep;
System.arraycopy(payload.bytes, payload.offset, br.bytes, surface.length + 1, payload.length);
br.length = len;
payloadRef = br;
}
if (surfaceFormsAndPayload[surfaceIndex] == null) {
surfaceFormsAndPayload[surfaceIndex] = new SurfaceFormAndPayload(payloadRef, encodedWeight);
} else {
surfaceFormsAndPayload[surfaceIndex].payload = payloadRef;
surfaceFormsAndPayload[surfaceIndex].weight = encodedWeight;
}
}
public void finishTerm(long defaultWeight) throws IOException {
ArrayUtil.timSort(surfaceFormsAndPayload, 0, count);
int deduplicator = 0;
analyzed.bytes[analyzed.offset + analyzed.length] = 0;
analyzed.length += 2;
for (int i = 0; i < count; i++) {
analyzed.bytes[analyzed.offset + analyzed.length - 1 ] = (byte) deduplicator++;
Util.toIntsRef(analyzed, scratchInts);
SurfaceFormAndPayload candidate = surfaceFormsAndPayload[i];
long cost = candidate.weight == -1 ? encodeWeight(Math.min(Integer.MAX_VALUE, defaultWeight)) : candidate.weight;
builder.add(scratchInts, outputs.newPair(cost, candiate.payload));
}
seenSurfaceForms.clear();
count = 0;
}
public FST<Pair<Long, BytesRef>> build() throws IOException {
return builder.finish();
}
public boolean hasPayloads() {
return hasPayloads;
}
public int maxSurfaceFormsPerAnalyzedForm() {
return maxSurfaceFormsPerAnalyzedForm;
}
}
} | 1no label
| src_main_java_org_apache_lucene_search_suggest_analyzing_XAnalyzingSuggester.java |
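The suggester stores weights as costs (Integer.MAX_VALUE - weight) so that the FST's min-cost top-N search yields the highest-weight suggestions first. A standalone round-trip sketch of that encoding, copied out of the class above for illustration:
public class WeightEncodingDemo {
    static int encodeWeight(long value) {
        if (value < 0 || value > Integer.MAX_VALUE) {
            throw new IllegalArgumentException("cannot encode value: " + value);
        }
        return Integer.MAX_VALUE - (int) value; // high weight -> low cost
    }
    static int decodeWeight(long encoded) {
        return (int) (Integer.MAX_VALUE - encoded); // inverse mapping
    }
    public static void main(String[] args) {
        int costHigh = encodeWeight(1000); // popular suggestion -> small cost
        int costLow = encodeWeight(10);    // rare suggestion -> large cost
        System.out.println(costHigh < costLow);     // true: popular sorts first
        System.out.println(decodeWeight(costHigh)); // 1000: the encoding round-trips
    }
}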
791 | @RunWith(HazelcastParallelClassRunner.class)
@Category(QuickTest.class)
public class AtomicLongTest extends HazelcastTestSupport {
@Test
@ClientCompatibleTest
public void testSimpleAtomicLong() {
HazelcastInstance hazelcastInstance = createHazelcastInstance();
IAtomicLong an = hazelcastInstance.getAtomicLong("testAtomicLong");
assertEquals(0, an.get());
assertEquals(-1, an.decrementAndGet());
assertEquals(0, an.incrementAndGet());
assertEquals(1, an.incrementAndGet());
assertEquals(2, an.incrementAndGet());
assertEquals(1, an.decrementAndGet());
assertEquals(1, an.getAndSet(23));
assertEquals(28, an.addAndGet(5));
assertEquals(28, an.get());
assertEquals(28, an.getAndAdd(-3));
assertEquals(24, an.decrementAndGet());
Assert.assertFalse(an.compareAndSet(23, 50));
assertTrue(an.compareAndSet(24, 50));
assertTrue(an.compareAndSet(50, 0));
}
@Test
@ClientCompatibleTest
public void testMultipleThreadAtomicLong() throws InterruptedException {
final HazelcastInstance instance = createHazelcastInstance();
final int k = 10;
final CountDownLatch countDownLatch = new CountDownLatch(k);
final IAtomicLong atomicLong = instance.getAtomicLong("testMultipleThreadAtomicLong");
for (int i = 0; i < k; i++) {
new Thread() {
public void run() {
long delta = (long) (Math.random() * 1000);
for (int j = 0; j < 10000; j++) {
atomicLong.addAndGet(delta);
}
for (int j = 0; j < 10000; j++) {
atomicLong.addAndGet(-1 * delta);
}
countDownLatch.countDown();
}
}.start();
}
assertOpenEventually(countDownLatch, 50);
assertEquals(0, atomicLong.get());
}
@Test
@ClientCompatibleTest
public void testAtomicLongFailure() {
int k = 4;
TestHazelcastInstanceFactory nodeFactory = createHazelcastInstanceFactory(k + 1);
HazelcastInstance instance = nodeFactory.newHazelcastInstance();
String name = "testAtomicLongFailure";
IAtomicLong atomicLong = instance.getAtomicLong(name);
atomicLong.set(100);
for (int i = 0; i < k; i++) {
HazelcastInstance newInstance = nodeFactory.newHazelcastInstance();
IAtomicLong newAtomicLong = newInstance.getAtomicLong(name);
assertEquals((long) 100 + i, newAtomicLong.get());
newAtomicLong.incrementAndGet();
instance.shutdown();
instance = newInstance;
}
}
@Test
@ClientCompatibleTest
public void testAtomicLongSpawnNodeInParallel() throws InterruptedException {
int total = 6;
int parallel = 2;
final TestHazelcastInstanceFactory nodeFactory = createHazelcastInstanceFactory(total + 1);
HazelcastInstance instance = nodeFactory.newHazelcastInstance();
final String name = "testAtomicLongSpawnNodeInParallel";
IAtomicLong atomicLong = instance.getAtomicLong(name);
atomicLong.set(100);
final ExecutorService ex = Executors.newFixedThreadPool(parallel);
try {
for (int i = 0; i < total / parallel; i++) {
final HazelcastInstance[] instances = new HazelcastInstance[parallel];
final CountDownLatch countDownLatch = new CountDownLatch(parallel);
for (int j = 0; j < parallel; j++) {
final int id = j;
ex.execute(new Runnable() {
public void run() {
instances[id] = nodeFactory.newHazelcastInstance();
instances[id].getAtomicLong(name).incrementAndGet();
countDownLatch.countDown();
}
});
}
assertTrue(countDownLatch.await(1, TimeUnit.MINUTES));
IAtomicLong newAtomicLong = instance.getAtomicLong(name);
assertEquals((long) 100 + (i + 1) * parallel, newAtomicLong.get());
instance.shutdown();
instance = instances[0];
}
} finally {
ex.shutdownNow();
}
}
@Test(expected = IllegalArgumentException.class)
@ClientCompatibleTest
public void apply_whenCalledWithNullFunction() {
HazelcastInstance hazelcastInstance = createHazelcastInstance();
IAtomicLong ref = hazelcastInstance.getAtomicLong("apply_whenCalledWithNullFunction");
ref.apply(null);
}
@Test
@ClientCompatibleTest
public void apply() {
HazelcastInstance hazelcastInstance = createHazelcastInstance();
IAtomicLong ref = hazelcastInstance.getAtomicLong("apply");
assertEquals(new Long(1), ref.apply(new AddOneFunction()));
assertEquals(0, ref.get());
}
@Test
@ClientCompatibleTest
public void apply_whenException() {
HazelcastInstance hazelcastInstance = createHazelcastInstance();
IAtomicLong ref = hazelcastInstance.getAtomicLong("apply");
ref.set(1);
try {
ref.apply(new FailingFunction());
fail();
} catch (WoohaaException expected) {
}
assertEquals(1, ref.get());
}
@Test(expected = IllegalArgumentException.class)
@ClientCompatibleTest
public void alter_whenCalledWithNullFunction() {
HazelcastInstance hazelcastInstance = createHazelcastInstance();
IAtomicLong ref = hazelcastInstance.getAtomicLong("alter_whenCalledWithNullFunction");
ref.alter(null);
}
@Test
@ClientCompatibleTest
public void alter_whenException() {
HazelcastInstance hazelcastInstance = createHazelcastInstance();
IAtomicLong ref = hazelcastInstance.getAtomicLong("alter_whenException");
ref.set(10);
try {
ref.alter(new FailingFunction());
fail();
} catch (WoohaaException expected) {
}
assertEquals(10, ref.get());
}
@Test
@ClientCompatibleTest
public void alter() {
HazelcastInstance hazelcastInstance = createHazelcastInstance();
IAtomicLong ref = hazelcastInstance.getAtomicLong("alter");
ref.set(10);
ref.alter(new AddOneFunction());
assertEquals(11, ref.get());
}
@Test(expected = IllegalArgumentException.class)
@ClientCompatibleTest
public void alterAndGet_whenCalledWithNullFunction() {
HazelcastInstance hazelcastInstance = createHazelcastInstance();
IAtomicLong ref = hazelcastInstance.getAtomicLong("alterAndGet_whenCalledWithNullFunction");
ref.alterAndGet(null);
}
@Test
@ClientCompatibleTest
public void alterAndGet_whenException() {
HazelcastInstance hazelcastInstance = createHazelcastInstance();
IAtomicLong ref = hazelcastInstance.getAtomicLong("alterAndGet_whenException");
ref.set(10);
try {
ref.alterAndGet(new FailingFunction());
fail();
} catch (WoohaaException expected) {
}
assertEquals(10, ref.get());
}
@Test
@ClientCompatibleTest
public void alterAndGet() {
HazelcastInstance hazelcastInstance = createHazelcastInstance();
IAtomicLong ref = hazelcastInstance.getAtomicLong("alterAndGet");
ref.set(10);
assertEquals(11, ref.alterAndGet(new AddOneFunction()));
assertEquals(11, ref.get());
}
@Test(expected = IllegalArgumentException.class)
@ClientCompatibleTest
public void getAndAlter_whenCalledWithNullFunction() {
HazelcastInstance hazelcastInstance = createHazelcastInstance();
IAtomicLong ref = hazelcastInstance.getAtomicLong("getAndAlter_whenCalledWithNullFunction");
ref.getAndAlter(null);
}
@Test
@ClientCompatibleTest
public void getAndAlter_whenException() {
HazelcastInstance hazelcastInstance = createHazelcastInstance();
IAtomicLong ref = hazelcastInstance.getAtomicLong("getAndAlter_whenException");
ref.set(10);
try {
ref.getAndAlter(new FailingFunction());
fail();
} catch (WoohaaException expected) {
}
assertEquals(10, ref.get());
}
@Test
@ClientCompatibleTest
public void getAndAlter() {
HazelcastInstance hazelcastInstance = createHazelcastInstance();
IAtomicLong ref = hazelcastInstance.getAtomicLong("getAndAlter");
ref.set(10);
assertEquals(10, ref.getAndAlter(new AddOneFunction()));
assertEquals(11, ref.get());
}
private static class AddOneFunction implements IFunction<Long, Long> {
@Override
public Long apply(Long input) {
return input+1;
}
}
private static class FailingFunction implements IFunction<Long, Long> {
@Override
public Long apply(Long input) {
throw new WoohaaException();
}
}
private static class WoohaaException extends RuntimeException {
}
} | 0true
| hazelcast_src_test_java_com_hazelcast_concurrent_atomiclong_AtomicLongTest.java |
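The tests above pin down the split between apply and alter: apply evaluates the function against the current value and returns the result without mutating the IAtomicLong, while alter (and its AndGet variants) writes the result back. A sketch of the same semantics mirrored on a plain java.util.concurrent.atomic.AtomicLong, for illustration only; it is not the Hazelcast implementation:
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.LongUnaryOperator;
public class ApplyAlterDemo {
    static long apply(AtomicLong ref, LongUnaryOperator fn) {
        return fn.applyAsLong(ref.get()); // read-only: ref is untouched
    }
    static void alter(AtomicLong ref, LongUnaryOperator fn) {
        ref.updateAndGet(fn); // mutates in place, like IAtomicLong.alter
    }
    public static void main(String[] args) {
        AtomicLong ref = new AtomicLong(10);
        System.out.println(apply(ref, v -> v + 1)); // 11
        System.out.println(ref.get());              // 10: unchanged by apply
        alter(ref, v -> v + 1);
        System.out.println(ref.get());              // 11: changed by alter
    }
}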
2,460 | public class LoggingRunnable implements Runnable {
private final Runnable runnable;
private final ESLogger logger;
public LoggingRunnable(ESLogger logger, Runnable runnable) {
this.runnable = runnable;
this.logger = logger;
}
@Override
public void run() {
try {
runnable.run();
} catch (Exception e) {
logger.warn("failed to execute [{}]", e, runnable.toString());
}
}
} | 0true
| src_main_java_org_elasticsearch_common_util_concurrent_LoggingRunnable.java |
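LoggingRunnable exists because exceptions thrown from tasks submitted via execute() would otherwise vanish into the pool's default uncaught-exception handling. A usage sketch, assuming the LoggingRunnable above and ES 1.x's Loggers.getLogger are available; the class name is illustrative:
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.util.concurrent.LoggingRunnable;
public class LoggingRunnableDemo {
    public static void main(String[] args) {
        ESLogger logger = Loggers.getLogger(LoggingRunnableDemo.class);
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Runnable failing = () -> { throw new IllegalStateException("boom"); };
        // Wrapped, the failure is caught and logged as "failed to execute [...]"
        // instead of silently killing the task.
        pool.execute(new LoggingRunnable(logger, failing));
        pool.shutdown();
    }
}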
2,967 | public class WordDelimiterTokenFilterFactory extends AbstractTokenFilterFactory {
private final byte[] charTypeTable;
private final int flags;
private final CharArraySet protoWords;
@Inject
public WordDelimiterTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
super(index, indexSettings, name, settings);
// Sample Format for the type table:
// $ => DIGIT
// % => DIGIT
// . => DIGIT
// \u002C => DIGIT
// \u200D => ALPHANUM
List<String> charTypeTableValues = Analysis.getWordList(env, settings, "type_table");
if (charTypeTableValues == null) {
this.charTypeTable = WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE;
} else {
this.charTypeTable = parseTypes(charTypeTableValues);
}
int flags = 0;
// If set, causes parts of words to be generated: "PowerShot" => "Power" "Shot"
flags |= getFlag(GENERATE_WORD_PARTS, settings, "generate_word_parts", true);
// If set, causes number subwords to be generated: "500-42" => "500" "42"
flags |= getFlag(GENERATE_NUMBER_PARTS, settings, "generate_number_parts", true);
// If set, causes maximum runs of word parts to be catenated: "wi-fi" => "wifi"
flags |= getFlag(CATENATE_WORDS, settings, "catenate_words", false);
// If set, causes maximum runs of number parts to be catenated: "500-42" => "50042"
flags |= getFlag(CATENATE_NUMBERS, settings, "catenate_numbers", false);
// If set, causes all subword parts to be catenated: "wi-fi-4000" => "wifi4000"
flags |= getFlag(CATENATE_ALL, settings, "catenate_all", false);
// 1, causes "PowerShot" to be two tokens; ("Power-Shot" remains two parts regards)
flags |= getFlag(SPLIT_ON_CASE_CHANGE, settings, "split_on_case_change", true);
// If set, includes original words in subwords: "500-42" => "500" "42" "500-42"
flags |= getFlag(PRESERVE_ORIGINAL, settings, "preserve_original", false);
// 1, causes "j2se" to be three tokens; "j" "2" "se"
flags |= getFlag(SPLIT_ON_NUMERICS, settings, "split_on_numerics", true);
// If set, causes trailing "'s" to be removed for each subword: "O'Neil's" => "O", "Neil"
flags |= getFlag(STEM_ENGLISH_POSSESSIVE, settings, "stem_english_possessive", true);
// If not null, the set of tokens to protect from being delimited
Set<?> protectedWords = Analysis.getWordSet(env, settings, "protected_words", version);
this.protoWords = protectedWords == null ? null : CharArraySet.copy(Lucene.VERSION, protectedWords);
this.flags = flags;
}
@Override
public TokenStream create(TokenStream tokenStream) {
return new WordDelimiterFilter(tokenStream,
charTypeTable,
flags,
protoWords);
}
public int getFlag(int flag, Settings settings, String key, boolean defaultValue) {
if (settings.getAsBoolean(key, defaultValue)) {
return flag;
}
return 0;
}
// source => type
private static Pattern typePattern = Pattern.compile("(.*)\\s*=>\\s*(.*)\\s*$");
/**
* parses a list of MappingCharFilter style rules into a custom byte[] type table
*/
private byte[] parseTypes(Collection<String> rules) {
SortedMap<Character, Byte> typeMap = new TreeMap<Character, Byte>();
for (String rule : rules) {
Matcher m = typePattern.matcher(rule);
if (!m.find())
throw new RuntimeException("Invalid Mapping Rule : [" + rule + "]");
String lhs = parseString(m.group(1).trim());
Byte rhs = parseType(m.group(2).trim());
if (lhs.length() != 1)
throw new RuntimeException("Invalid Mapping Rule : [" + rule + "]. Only a single character is allowed.");
if (rhs == null)
throw new RuntimeException("Invalid Mapping Rule : [" + rule + "]. Illegal type.");
typeMap.put(lhs.charAt(0), rhs);
}
// ensure the table is always at least as big as DEFAULT_WORD_DELIM_TABLE for performance
byte types[] = new byte[Math.max(typeMap.lastKey() + 1, WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE.length)];
for (int i = 0; i < types.length; i++)
types[i] = WordDelimiterIterator.getType(i);
for (Map.Entry<Character, Byte> mapping : typeMap.entrySet())
types[mapping.getKey()] = mapping.getValue();
return types;
}
private Byte parseType(String s) {
if (s.equals("LOWER"))
return WordDelimiterFilter.LOWER;
else if (s.equals("UPPER"))
return WordDelimiterFilter.UPPER;
else if (s.equals("ALPHA"))
return WordDelimiterFilter.ALPHA;
else if (s.equals("DIGIT"))
return WordDelimiterFilter.DIGIT;
else if (s.equals("ALPHANUM"))
return WordDelimiterFilter.ALPHANUM;
else if (s.equals("SUBWORD_DELIM"))
return WordDelimiterFilter.SUBWORD_DELIM;
else
return null;
}
char[] out = new char[256];
private String parseString(String s) {
int readPos = 0;
int len = s.length();
int writePos = 0;
while (readPos < len) {
char c = s.charAt(readPos++);
if (c == '\\') {
if (readPos >= len)
throw new RuntimeException("Invalid escaped char in [" + s + "]");
c = s.charAt(readPos++);
switch (c) {
case '\\':
c = '\\';
break;
case 'n':
c = '\n';
break;
case 't':
c = '\t';
break;
case 'r':
c = '\r';
break;
case 'b':
c = '\b';
break;
case 'f':
c = '\f';
break;
case 'u':
if (readPos + 3 >= len)
throw new RuntimeException("Invalid escaped char in [" + s + "]");
c = (char) Integer.parseInt(s.substring(readPos, readPos + 4), 16);
readPos += 4;
break;
}
}
out[writePos++] = c;
}
return new String(out, 0, writePos);
}
} | 0true
| src_main_java_org_elasticsearch_index_analysis_WordDelimiterTokenFilterFactory.java |
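The parseTypes/parseString pair above accepts MappingCharFilter-style rules of the form "source => type", where the left-hand side must resolve to a single (possibly escaped) character. A minimal, self-contained sketch of that rule shape, using the same regex (the rule strings here are illustrative, not from the source):
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class TypeRuleDemo {
    // Same "source => type" shape as the factory's typePattern above
    private static final Pattern TYPE_PATTERN = Pattern.compile("(.*)\\s*=>\\s*(.*)\\s*$");
    public static void main(String[] args) {
        // One (possibly escaped) character on the left, a type name on the right
        String[] rules = {"$ => DIGIT", "\\u002C => DIGIT", "% => ALPHANUM"};
        for (String rule : rules) {
            Matcher m = TYPE_PATTERN.matcher(rule);
            if (m.find()) {
                System.out.println("char=[" + m.group(1).trim() + "] type=[" + m.group(2).trim() + "]");
            }
        }
    }
}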
787 | public class MultiPercolateRequestBuilder extends ActionRequestBuilder<MultiPercolateRequest, MultiPercolateResponse, MultiPercolateRequestBuilder> {
public MultiPercolateRequestBuilder(Client client) {
super((InternalClient) client, new MultiPercolateRequest());
}
/**
* Bundles the specified percolate request to the multi percolate request.
*/
public MultiPercolateRequestBuilder add(PercolateRequest percolateRequest) {
request.add(percolateRequest);
return this;
}
/**
* Bundles the specified percolate request build to the multi percolate request.
*/
public MultiPercolateRequestBuilder add(PercolateRequestBuilder percolateRequestBuilder) {
request.add(percolateRequestBuilder);
return this;
}
/**
* Specifies how to globally ignore indices that are not available and how to deal with wildcard indices expressions.
*
* Invoke this method before invoking {@link #add(PercolateRequestBuilder)}.
*/
public MultiPercolateRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
request.indicesOptions(indicesOptions);
return this;
}
@Override
protected void doExecute(ActionListener<MultiPercolateResponse> listener) {
((Client) client).multiPercolate(request, listener);
}
} | 0true
| src_main_java_org_elasticsearch_action_percolate_MultiPercolateRequestBuilder.java |
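Assuming the Elasticsearch 1.x client API this builder targets, usage looks roughly like the sketch below; per the javadoc above, setIndicesOptions must be called before add(...). The client and the two percolate request builders are assumed to exist elsewhere:
MultiPercolateRequestBuilder builder = new MultiPercolateRequestBuilder(client)
        .setIndicesOptions(IndicesOptions.strict())
        .add(firstPercolateRequestBuilder)
        .add(secondPercolateRequestBuilder);
builder.execute(new ActionListener<MultiPercolateResponse>() {
    @Override
    public void onResponse(MultiPercolateResponse response) { /* inspect per-item responses */ }
    @Override
    public void onFailure(Throwable e) { /* handle failure */ }
});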
1,032 | @SuppressWarnings("unchecked")
public class OCommandExecutorSQLCreateLink extends OCommandExecutorSQLAbstract {
public static final String KEYWORD_CREATE = "CREATE";
public static final String KEYWORD_LINK = "LINK";
private static final String KEYWORD_FROM = "FROM";
private static final String KEYWORD_TO = "TO";
private static final String KEYWORD_TYPE = "TYPE";
private String destClassName;
private String destField;
private String sourceClassName;
private String sourceField;
private String linkName;
private OType linkType;
private boolean inverse = false;
public OCommandExecutorSQLCreateLink parse(final OCommandRequest iRequest) {
init((OCommandRequestText) iRequest);
StringBuilder word = new StringBuilder();
int oldPos = 0;
int pos = nextWord(parserText, parserTextUpperCase, oldPos, word, true);
if (pos == -1 || !word.toString().equals(KEYWORD_CREATE))
throw new OCommandSQLParsingException("Keyword " + KEYWORD_CREATE + " not found. Use " + getSyntax(), parserText, oldPos);
oldPos = pos;
pos = nextWord(parserText, parserTextUpperCase, oldPos, word, true);
if (pos == -1 || !word.toString().equals(KEYWORD_LINK))
throw new OCommandSQLParsingException("Keyword " + KEYWORD_LINK + " not found. Use " + getSyntax(), parserText, oldPos);
oldPos = pos;
pos = nextWord(parserText, parserTextUpperCase, oldPos, word, false);
if (pos == -1)
throw new OCommandSQLParsingException("Keyword " + KEYWORD_FROM + " not found. Use " + getSyntax(), parserText, oldPos);
if (!word.toString().equalsIgnoreCase(KEYWORD_FROM)) {
// GET THE LINK NAME
linkName = word.toString();
if (OStringSerializerHelper.contains(linkName, ' '))
throw new OCommandSQLParsingException("Link name '" + linkName + "' contains not valid characters", parserText, oldPos);
oldPos = pos;
pos = nextWord(parserText, parserTextUpperCase, oldPos, word, true);
}
if (word.toString().equalsIgnoreCase(KEYWORD_TYPE)) {
oldPos = pos;
pos = nextWord(parserText, parserTextUpperCase, pos, word, true);
if (pos == -1)
throw new OCommandSQLParsingException("Link type missed. Use " + getSyntax(), parserText, oldPos);
linkType = OType.valueOf(word.toString().toUpperCase(Locale.ENGLISH));
oldPos = pos;
pos = nextWord(parserText, parserTextUpperCase, pos, word, true);
}
if (pos == -1 || !word.toString().equals(KEYWORD_FROM))
throw new OCommandSQLParsingException("Keyword " + KEYWORD_FROM + " not found. Use " + getSyntax(), parserText, oldPos);
pos = nextWord(parserText, parserTextUpperCase, pos, word, false);
if (pos == -1)
throw new OCommandSQLParsingException("Expected <class>.<property>. Use " + getSyntax(), parserText, pos);
String[] parts = word.toString().split("\\.");
if (parts.length != 2)
throw new OCommandSQLParsingException("Expected <class>.<property>. Use " + getSyntax(), parserText, pos);
sourceClassName = parts[0];
if (sourceClassName == null)
throw new OCommandSQLParsingException("Class not found", parserText, pos);
sourceField = parts[1];
pos = nextWord(parserText, parserTextUpperCase, pos, word, true);
if (pos == -1 || !word.toString().equals(KEYWORD_TO))
throw new OCommandSQLParsingException("Keyword " + KEYWORD_TO + " not found. Use " + getSyntax(), parserText, oldPos);
pos = nextWord(parserText, parserTextUpperCase, pos, word, false);
if (pos == -1)
throw new OCommandSQLParsingException("Expected <class>.<property>. Use " + getSyntax(), parserText, pos);
parts = word.toString().split("\\.");
if (parts.length != 2)
throw new OCommandSQLParsingException("Expected <class>.<property>. Use " + getSyntax(), parserText, pos);
destClassName = parts[0];
if (destClassName == null)
throw new OCommandSQLParsingException("Class not found", parserText, pos);
destField = parts[1];
pos = nextWord(parserText, parserTextUpperCase, pos, word, true);
if (pos == -1)
return this;
if (!word.toString().equalsIgnoreCase("INVERSE"))
throw new OCommandSQLParsingException("Missed 'INVERSE'. Use " + getSyntax(), parserText, pos);
inverse = true;
return this;
}
/**
* Execute the CREATE LINK.
*/
public Object execute(final Map<Object, Object> iArgs) {
if (destField == null)
throw new OCommandExecutionException("Cannot execute the command because it has not been parsed yet");
final ODatabaseRecord database = getDatabase();
if (!(database.getDatabaseOwner() instanceof ODatabaseDocumentTx))
throw new OCommandSQLParsingException("This command supports only the database type ODatabaseDocumentTx and type '"
+ database.getClass() + "' was found");
final ODatabaseDocumentTx db = (ODatabaseDocumentTx) database.getDatabaseOwner();
final OClass sourceClass = database.getMetadata().getSchema().getClass(sourceClassName);
if (sourceClass == null)
throw new OCommandExecutionException("Source class '" + sourceClassName + "' not found");
final OClass destClass = database.getMetadata().getSchema().getClass(destClassName);
if (destClass == null)
throw new OCommandExecutionException("Destination class '" + destClassName + "' not found");
Object value;
String cmd = "select from ";
if (!ODocumentHelper.ATTRIBUTE_RID.equals(destField)) {
cmd = "select from " + destClassName + " where " + destField + " = ";
}
List<ODocument> result;
ODocument target;
Object oldValue;
long total = 0;
if (linkName == null)
// NO LINK NAME EXPRESSED: OVERWRITE THE SOURCE FIELD
linkName = sourceField;
boolean multipleRelationship;
if (linkType != null)
// DETERMINE BASED ON FORCED TYPE
multipleRelationship = linkType == OType.LINKSET || linkType == OType.LINKLIST;
else
multipleRelationship = false;
long totRecords = db.countClass(sourceClass.getName());
long currRecord = 0;
if (progressListener != null)
progressListener.onBegin(this, totRecords);
database.declareIntent(new OIntentMassiveInsert());
try {
// BROWSE ALL THE RECORDS OF THE SOURCE CLASS
for (ODocument doc : db.browseClass(sourceClass.getName())) {
doc.unpin();
value = doc.field(sourceField);
if (value != null) {
if (value instanceof ODocument || value instanceof ORID) {
// ALREADY CONVERTED
} else if (value instanceof Collection<?>) {
// TODO
} else {
// SEARCH THE DESTINATION RECORD
target = null;
if (!ODocumentHelper.ATTRIBUTE_RID.equals(destField) && value instanceof String)
if (((String) value).length() == 0)
value = null;
else
value = "'" + value + "'";
result = database.<OCommandRequest> command(new OSQLSynchQuery<ODocument>(cmd + value)).execute();
if (result == null || result.size() == 0)
value = null;
else if (result.size() > 1)
throw new OCommandExecutionException("Cannot create link because multiple records was found in class '"
+ destClass.getName() + "' with value " + value + " in field '" + destField + "'");
else {
target = result.get(0);
value = target;
}
if (target != null && inverse) {
// INVERSE RELATIONSHIP
oldValue = target.field(linkName);
if (oldValue != null) {
if (!multipleRelationship)
multipleRelationship = true;
Collection<ODocument> coll;
if (oldValue instanceof Collection) {
// ADD IT IN THE EXISTENT COLLECTION
coll = (Collection<ODocument>) oldValue;
target.setDirty();
} else {
// CREATE A NEW COLLECTION FOR BOTH
coll = new ArrayList<ODocument>(2);
target.field(linkName, coll);
coll.add((ODocument) oldValue);
}
coll.add(doc);
} else {
if (linkType != null)
if (linkType == OType.LINKSET) {
value = new OMVRBTreeRIDSet(target);
((OMVRBTreeRIDSet) value).add(doc);
} else if (linkType == OType.LINKLIST) {
value = new ORecordLazyList(target);
((ORecordLazyList) value).add(doc);
} else
// IGNORE THE TYPE, SET IT AS LINK
value = doc;
else
value = doc;
target.field(linkName, value);
}
target.save();
} else {
// SET THE REFERENCE
doc.field(linkName, value);
doc.save();
}
total++;
}
}
if (progressListener != null)
progressListener.onProgress(this, currRecord, currRecord * 100f / totRecords);
}
if (total > 0) {
if (inverse) {
// REMOVE THE OLD PROPERTY IF ANY
OProperty prop = destClass.getProperty(linkName);
if (prop != null)
destClass.dropProperty(linkName);
if (linkType == null)
linkType = multipleRelationship ? OType.LINKSET : OType.LINK;
// CREATE THE PROPERTY
destClass.createProperty(linkName, linkType, sourceClass);
} else {
// REMOVE THE OLD PROPERTY IF ANY
OProperty prop = sourceClass.getProperty(linkName);
if (prop != null)
sourceClass.dropProperty(linkName);
// CREATE THE PROPERTY
sourceClass.createProperty(linkName, OType.LINK, destClass);
}
}
if (progressListener != null)
progressListener.onCompletition(this, true);
} catch (Exception e) {
if (progressListener != null)
progressListener.onCompletition(this, false);
throw new OCommandExecutionException("Error on creation of links", e);
} finally {
database.declareIntent(null);
}
return total;
}
@Override
public String getSyntax() {
return "CREATE LINK <link-name> [TYPE <link-type>] FROM <source-class>.<source-property> TO <destination-class>.<destination-property> [INVERSE]";
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_sql_OCommandExecutorSQLCreateLink.java |
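Based on getSyntax() above, a hypothetical invocation (class and property names are illustrative) that converts a scalar join field into a real link, with the inverse collection stored on the destination class:
ODatabaseDocumentTx db = new ODatabaseDocumentTx("memory:demo").open("admin", "admin");
// INVERSE stores the relationship on Post as a LINKSET, per the execute() logic above
db.command(new OCommandSQL(
    "CREATE LINK comments TYPE LINKSET FROM Comment.postId TO Post.id INVERSE")).execute();
db.close();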
255 | public class EmailTargetImpl implements EmailTarget {
private static final long serialVersionUID = 1L;
protected String[] bccAddresses;
protected String[] ccAddresses;
protected String emailAddress;
/*
* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTarget#getBCCAddresses()
*/
public String[] getBCCAddresses() {
return bccAddresses;
}
/*
* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTarget#getCCAddresses()
*/
public String[] getCCAddresses() {
return ccAddresses;
}
/*
* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTarget#getEmailAddress()
*/
public String getEmailAddress() {
return emailAddress;
}
/*
* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTarget#setBCCAddresses(java.lang.String[])
*/
public void setBCCAddresses(String[] bccAddresses) {
this.bccAddresses = bccAddresses;
}
/*
* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTarget#setCCAddresses(java.lang.String[])
*/
public void setCCAddresses(String[] ccAddresses) {
this.ccAddresses = ccAddresses;
}
/*
* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTarget#setEmailAddress(java.lang.String)
*/
public void setEmailAddress(String emailAddress) {
this.emailAddress = emailAddress;
}
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + Arrays.hashCode(bccAddresses);
result = prime * result + Arrays.hashCode(ccAddresses);
result = prime * result + ((emailAddress == null) ? 0 : emailAddress.hashCode());
return result;
}
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
EmailTargetImpl other = (EmailTargetImpl) obj;
if (!Arrays.equals(bccAddresses, other.bccAddresses))
return false;
if (!Arrays.equals(ccAddresses, other.ccAddresses))
return false;
if (emailAddress == null) {
if (other.emailAddress != null)
return false;
} else if (!emailAddress.equals(other.emailAddress))
return false;
return true;
}
} | 1no label
| common_src_main_java_org_broadleafcommerce_common_email_domain_EmailTargetImpl.java |
758 | public class ListIndexOfOperation extends CollectionOperation {
private boolean last;
private Data value;
public ListIndexOfOperation() {
}
public ListIndexOfOperation(String name, boolean last, Data value) {
super(name);
this.last = last;
this.value = value;
}
@Override
public int getId() {
return CollectionDataSerializerHook.LIST_INDEX_OF;
}
@Override
public void beforeRun() throws Exception {
}
@Override
public void run() throws Exception {
response = getOrCreateListContainer().indexOf(last, value);
}
@Override
public void afterRun() throws Exception {
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeBoolean(last);
value.writeData(out);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
last = in.readBoolean();
value = new Data();
value.readData(in);
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_collection_list_ListIndexOfOperation.java |
166 | return executeRead(new Callable<List<String>>() {
@Override
public List<String> call() throws Exception {
return indexTx.query(query);
}
@Override
public String toString() {
return "IndexQuery";
}
}); | 0true
| titan-core_src_main_java_com_thinkaurelius_titan_diskstorage_BackendTransaction.java |
180 | public class OByteBufferUtilsTest {
private ByteBuffer buffer1;
private ByteBuffer buffer2;
@BeforeMethod
public void setUp() throws Exception {
buffer1 = ByteBuffer.allocate(10);
buffer2 = ByteBuffer.allocate(10);
}
@Test
public void testSplitShort() throws Exception {
short var = 42;
buffer1.position(9);
buffer2.position(0);
OByteBufferUtils.splitShortToBuffers(buffer1, buffer2, var);
buffer1.position(9);
buffer2.position(0);
short storedVar = OByteBufferUtils.mergeShortFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
var = 251;
buffer1.position(9);
buffer2.position(0);
OByteBufferUtils.splitShortToBuffers(buffer1, buffer2, var);
buffer1.position(9);
buffer2.position(0);
storedVar = OByteBufferUtils.mergeShortFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
}
@Test
public void testSplitLong() throws Exception {
long var = 42;
buffer1.position(3);
buffer2.position(0);
OByteBufferUtils.splitLongToBuffers(buffer1, buffer2, var);
buffer1.position(3);
buffer2.position(0);
long storedVar = OByteBufferUtils.mergeLongFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
buffer1.position(4);
buffer2.position(0);
OByteBufferUtils.splitLongToBuffers(buffer1, buffer2, var);
buffer1.position(4);
buffer2.position(0);
storedVar = OByteBufferUtils.mergeLongFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
buffer1.position(5);
buffer2.position(0);
OByteBufferUtils.splitLongToBuffers(buffer1, buffer2, var);
buffer1.position(5);
buffer2.position(0);
storedVar = OByteBufferUtils.mergeLongFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
buffer1.position(6);
buffer2.position(0);
OByteBufferUtils.splitLongToBuffers(buffer1, buffer2, var);
buffer1.position(6);
buffer2.position(0);
storedVar = OByteBufferUtils.mergeLongFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
buffer1.position(7);
buffer2.position(0);
OByteBufferUtils.splitLongToBuffers(buffer1, buffer2, var);
buffer1.position(7);
buffer2.position(0);
storedVar = OByteBufferUtils.mergeLongFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
buffer1.position(8);
buffer2.position(0);
OByteBufferUtils.splitLongToBuffers(buffer1, buffer2, var);
buffer1.position(8);
buffer2.position(0);
storedVar = OByteBufferUtils.mergeLongFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
buffer1.position(9);
buffer2.position(0);
OByteBufferUtils.splitLongToBuffers(buffer1, buffer2, var);
buffer1.position(9);
buffer2.position(0);
storedVar = OByteBufferUtils.mergeLongFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
var = 2512513332512512344L;
buffer1.position(3);
buffer2.position(0);
OByteBufferUtils.splitLongToBuffers(buffer1, buffer2, var);
buffer1.position(3);
buffer2.position(0);
storedVar = OByteBufferUtils.mergeLongFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
buffer1.position(4);
buffer2.position(0);
OByteBufferUtils.splitLongToBuffers(buffer1, buffer2, var);
buffer1.position(4);
buffer2.position(0);
storedVar = OByteBufferUtils.mergeLongFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
buffer1.position(5);
buffer2.position(0);
OByteBufferUtils.splitLongToBuffers(buffer1, buffer2, var);
buffer1.position(5);
buffer2.position(0);
storedVar = OByteBufferUtils.mergeLongFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
buffer1.position(6);
buffer2.position(0);
OByteBufferUtils.splitLongToBuffers(buffer1, buffer2, var);
buffer1.position(6);
buffer2.position(0);
storedVar = OByteBufferUtils.mergeLongFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
buffer1.position(7);
buffer2.position(0);
OByteBufferUtils.splitLongToBuffers(buffer1, buffer2, var);
buffer1.position(7);
buffer2.position(0);
storedVar = OByteBufferUtils.mergeLongFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
buffer1.position(8);
buffer2.position(0);
OByteBufferUtils.splitLongToBuffers(buffer1, buffer2, var);
buffer1.position(8);
buffer2.position(0);
storedVar = OByteBufferUtils.mergeLongFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
buffer1.position(9);
buffer2.position(0);
OByteBufferUtils.splitLongToBuffers(buffer1, buffer2, var);
buffer1.position(9);
buffer2.position(0);
storedVar = OByteBufferUtils.mergeLongFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
}
@Test
public void testSplitInt() throws Exception {
int var = 42;
buffer1.position(7);
buffer2.position(0);
OByteBufferUtils.splitIntToBuffers(buffer1, buffer2, var);
buffer1.position(7);
buffer2.position(0);
int storedVar = OByteBufferUtils.mergeIntFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
buffer1.position(8);
buffer2.position(0);
OByteBufferUtils.splitIntToBuffers(buffer1, buffer2, var);
buffer1.position(8);
buffer2.position(0);
storedVar = OByteBufferUtils.mergeIntFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
buffer1.position(9);
buffer2.position(0);
OByteBufferUtils.splitIntToBuffers(buffer1, buffer2, var);
buffer1.position(9);
buffer2.position(0);
storedVar = OByteBufferUtils.mergeIntFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
var = 251251333;
buffer1.position(7);
buffer2.position(0);
OByteBufferUtils.splitIntToBuffers(buffer1, buffer2, var);
buffer1.position(7);
buffer2.position(0);
storedVar = OByteBufferUtils.mergeIntFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
buffer1.position(8);
buffer2.position(0);
OByteBufferUtils.splitIntToBuffers(buffer1, buffer2, var);
buffer1.position(8);
buffer2.position(0);
storedVar = OByteBufferUtils.mergeIntFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
buffer1.position(9);
buffer2.position(0);
OByteBufferUtils.splitIntToBuffers(buffer1, buffer2, var);
buffer1.position(9);
buffer2.position(0);
storedVar = OByteBufferUtils.mergeIntFromBuffers(buffer1, buffer2);
assertEquals(storedVar, var);
}
@Test
public void testSpecialSplitShort() throws Exception {
byte[] array = new byte[10];
ByteBuffer part1 = ByteBuffer.wrap(array, 0, 1);
ByteBuffer part2 = ByteBuffer.wrap(array, 1, 1);
ByteBuffer all = ByteBuffer.wrap(array, 0, 2);
short value = Short.MAX_VALUE;
OByteBufferUtils.splitShortToBuffers(part1, part2, value);
all.position(0);
short storedValue = all.getShort();
assertEquals(value, storedValue);
}
@Test
public void testSpecialSplitInteger() throws Exception {
byte[] array = new byte[10];
ByteBuffer part1 = ByteBuffer.wrap(array, 0, 2);
ByteBuffer part2 = ByteBuffer.wrap(array, 2, 2);
ByteBuffer all = ByteBuffer.wrap(array, 0, 4);
int value = Integer.MAX_VALUE;
OByteBufferUtils.splitIntToBuffers(part1, part2, value);
all.position(0);
int storedValue = all.getInt();
assertEquals(value, storedValue);
}
@Test
public void testSpecialSplitLong() throws Exception {
byte[] array = new byte[10];
ByteBuffer part1 = ByteBuffer.wrap(array, 0, 4);
ByteBuffer part2 = ByteBuffer.wrap(array, 4, 4);
ByteBuffer all = ByteBuffer.wrap(array, 0, 8);
long value = Long.MAX_VALUE;
OByteBufferUtils.splitLongToBuffers(part1, part2, value);
all.position(0);
long storedValue = all.getLong();
assertEquals(value, storedValue);
}
} | 0true
| core_src_test_java_com_orientechnologies_common_util_OByteBufferUtilsTest.java |
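A minimal sketch of the split/merge contract these tests exercise, assuming big-endian byte order as used by ByteBuffer (this is an illustration, not the actual OByteBufferUtils implementation):
import java.nio.ByteBuffer;
public class SplitShortSketch {
    // High byte goes to the tail of the first buffer, low byte to the head of the second
    static void splitShort(ByteBuffer first, ByteBuffer second, short value) {
        first.put((byte) (value >>> 8));
        second.put((byte) value);
    }
    static short mergeShort(ByteBuffer first, ByteBuffer second) {
        return (short) (((first.get() & 0xFF) << 8) | (second.get() & 0xFF));
    }
    public static void main(String[] args) {
        ByteBuffer b1 = ByteBuffer.allocate(10);
        ByteBuffer b2 = ByteBuffer.allocate(10);
        b1.position(9);
        splitShort(b1, b2, (short) 251);
        b1.position(9);
        b2.position(0);
        System.out.println(mergeShort(b1, b2)); // prints 251
    }
}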
476 | public class GetAliasesRequest extends MasterNodeReadOperationRequest<GetAliasesRequest> {
private String[] indices = Strings.EMPTY_ARRAY;
private String[] aliases = Strings.EMPTY_ARRAY;
private IndicesOptions indicesOptions = IndicesOptions.strict();
public GetAliasesRequest(String[] aliases) {
this.aliases = aliases;
}
public GetAliasesRequest(String alias) {
this.aliases = new String[]{alias};
}
public GetAliasesRequest() {
}
public GetAliasesRequest indices(String... indices) {
this.indices = indices;
return this;
}
public GetAliasesRequest aliases(String... aliases) {
this.aliases = aliases;
return this;
}
public GetAliasesRequest indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
public String[] indices() {
return indices;
}
public String[] aliases() {
return aliases;
}
public IndicesOptions indicesOptions() {
return indicesOptions;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
indices = in.readStringArray();
aliases = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
readLocal(in, Version.V_1_0_0_RC2);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArray(indices);
out.writeStringArray(aliases);
indicesOptions.writeIndicesOptions(out);
writeLocal(out, Version.V_1_0_0_RC2);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_indices_alias_get_GetAliasesRequest.java |
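Typical construction, with illustrative index and alias names; strict() mirrors the field default above:
GetAliasesRequest request = new GetAliasesRequest("logs-alias")
        .indices("logs-2014-01", "logs-2014-02")
        .indicesOptions(IndicesOptions.strict());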
1,857 | instance.executeTransaction(new TransactionalTask<Boolean>() {
public Boolean execute(TransactionalTaskContext context) throws TransactionException {
final TransactionalMap<String, Integer> txMap = context.getMap("default");
Integer value = txMap.getForUpdate(key);
txMap.put(key, value + 1);
return true;
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_map_MapTransactionStressTest.java |
212 | public final class CustomPostingsHighlighter extends XPostingsHighlighter {
private static final Snippet[] EMPTY_SNIPPET = new Snippet[0];
private static final Passage[] EMPTY_PASSAGE = new Passage[0];
private final CustomPassageFormatter passageFormatter;
private final int noMatchSize;
private final int totalContentLength;
private final String[] fieldValues;
private final int[] fieldValuesOffsets;
private int currentValueIndex = 0;
private BreakIterator breakIterator;
public CustomPostingsHighlighter(CustomPassageFormatter passageFormatter, List<Object> fieldValues, boolean mergeValues, int maxLength, int noMatchSize) {
super(maxLength);
this.passageFormatter = passageFormatter;
this.noMatchSize = noMatchSize;
if (mergeValues) {
String rawValue = Strings.collectionToDelimitedString(fieldValues, String.valueOf(getMultiValuedSeparator("")));
String fieldValue = rawValue.substring(0, Math.min(rawValue.length(), maxLength));
this.fieldValues = new String[]{fieldValue};
this.fieldValuesOffsets = new int[]{0};
this.totalContentLength = fieldValue.length();
} else {
this.fieldValues = new String[fieldValues.size()];
this.fieldValuesOffsets = new int[fieldValues.size()];
int contentLength = 0;
int offset = 0;
int previousLength = -1;
for (int i = 0; i < fieldValues.size(); i++) {
String rawValue = fieldValues.get(i).toString();
String fieldValue = rawValue.substring(0, Math.min(rawValue.length(), maxLength));
this.fieldValues[i] = fieldValue;
contentLength += fieldValue.length();
offset += previousLength + 1;
this.fieldValuesOffsets[i] = offset;
previousLength = fieldValue.length();
}
this.totalContentLength = contentLength;
}
}
/*
Our own api to highlight a single document field, passing in the query terms, and get back our own Snippet object
*/
public Snippet[] highlightDoc(String field, BytesRef[] terms, IndexSearcher searcher, int docId, int maxPassages) throws IOException {
IndexReader reader = searcher.getIndexReader();
IndexReaderContext readerContext = reader.getContext();
List<AtomicReaderContext> leaves = readerContext.leaves();
String[] contents = new String[]{loadCurrentFieldValue()};
Map<Integer, Object> snippetsMap = highlightField(field, contents, getBreakIterator(field), terms, new int[]{docId}, leaves, maxPassages);
//increment the current value index so that next time we'll highlight the next value if available
currentValueIndex++;
Object snippetObject = snippetsMap.get(docId);
if (snippetObject != null && snippetObject instanceof Snippet[]) {
return (Snippet[]) snippetObject;
}
return EMPTY_SNIPPET;
}
/*
Method provided through our own fork: allows to do proper scoring when doing per value discrete highlighting.
Used to provide the total length of the field (all values) for proper scoring.
*/
@Override
protected int getContentLength(String field, int docId) {
return totalContentLength;
}
/*
Method provided through our own fork: allows to perform proper per value discrete highlighting.
Used to provide the offset for the current value.
*/
@Override
protected int getOffsetForCurrentValue(String field, int docId) {
if (currentValueIndex < fieldValuesOffsets.length) {
return fieldValuesOffsets[currentValueIndex];
}
throw new IllegalArgumentException("No more values offsets to return");
}
public void setBreakIterator(BreakIterator breakIterator) {
this.breakIterator = breakIterator;
}
@Override
protected PassageFormatter getFormatter(String field) {
return passageFormatter;
}
@Override
protected BreakIterator getBreakIterator(String field) {
if (breakIterator == null) {
return super.getBreakIterator(field);
}
return breakIterator;
}
@Override
protected char getMultiValuedSeparator(String field) {
//U+2029 PARAGRAPH SEPARATOR (PS): each value holds a discrete passage for highlighting
return HighlightUtils.PARAGRAPH_SEPARATOR;
}
/*
By default the postings highlighter returns non highlighted snippet when there are no matches.
We want to return no snippets by default, unless no_match_size is greater than 0
*/
@Override
protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
if (noMatchSize > 0) {
//we want to return the first sentence of the first snippet only
return super.getEmptyHighlight(fieldName, bi, 1);
}
return EMPTY_PASSAGE;
}
/*
Not needed since we call our own loadCurrentFieldValue explicitly, but we override it anyway for consistency.
*/
@Override
protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
return new String[][]{new String[]{loadCurrentFieldValue()}};
}
/*
Our own method that returns the field values, which relies on the content that was provided when creating the highlighter.
Supports per value discrete highlighting calling the highlightDoc method multiple times, one per value.
*/
protected String loadCurrentFieldValue() {
if (currentValueIndex < fieldValues.length) {
return fieldValues[currentValueIndex];
}
throw new IllegalArgumentException("No more values to return");
}
} | 0true
| src_main_java_org_apache_lucene_search_postingshighlight_CustomPostingsHighlighter.java |
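A sketch of the per-value discrete highlighting flow this class supports when mergeValues is false: highlightDoc is invoked once per stored value, and the highlighter advances currentValueIndex itself. Names such as highlighter, terms, searcher and docId are assumed from the caller's context:
List<Snippet> collected = new ArrayList<Snippet>();
for (int i = 0; i < numberOfFieldValues; i++) {
    Snippet[] snippets = highlighter.highlightDoc("body", terms, searcher, docId, 1);
    collected.addAll(Arrays.asList(snippets));
}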
1,480 | public class OSQLFunctionOutE extends OSQLFunctionMove {
public static final String NAME = "outE";
public OSQLFunctionOutE() {
super(NAME, 0, 1);
}
@Override
protected Object move(final OrientBaseGraph graph, final OIdentifiable iRecord, final String[] iLabels) {
return v2e(graph, iRecord, Direction.OUT, iLabels);
}
} | 1no label
| graphdb_src_main_java_com_orientechnologies_orient_graph_sql_functions_OSQLFunctionOutE.java |
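An illustrative query exercising the function above ("knows" is a hypothetical edge label; expand() unwraps the returned edge collection):
List<ODocument> outgoingEdges = db.command(
    new OSQLSynchQuery<ODocument>("SELECT expand(outE('knows')) FROM V")).execute();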
1,804 | return new Predicate<String, String>() {
@Override
public boolean apply(Map.Entry<String, String> mapEntry) {
return false;
}
}; | 0true
| hazelcast_src_test_java_com_hazelcast_map_ListenerTest.java |
1,082 | public interface OrderItemService {
public OrderItem readOrderItemById(Long orderItemId);
public OrderItem saveOrderItem(OrderItem orderItem);
public void delete(OrderItem item);
public PersonalMessage createPersonalMessage();
public DiscreteOrderItem createDiscreteOrderItem(DiscreteOrderItemRequest itemRequest);
public DiscreteOrderItem createDynamicPriceDiscreteOrderItem(final DiscreteOrderItemRequest itemRequest, @SuppressWarnings("rawtypes") HashMap skuPricingConsiderations);
public GiftWrapOrderItem createGiftWrapOrderItem(GiftWrapOrderItemRequest itemRequest);
/**
* Used to create "manual" product bundles. Manual product bundles are primarily designed
* for grouping items in the cart display. Typically ProductBundle will be used to
* achieve non programmer related bundles.
*
*
* @param itemRequest
* @return
*/
public BundleOrderItem createBundleOrderItem(BundleOrderItemRequest itemRequest);
public BundleOrderItem createBundleOrderItem(ProductBundleOrderItemRequest itemRequest);
/**
* Creates an OrderItemRequestDTO object that most closely resembles the given OrderItem.
* That is, it will copy the SKU and quantity and attempt to copy the product and category
* if they exist.
*
* @param item the item to copy
* @return the OrderItemRequestDTO that mirrors the item
*/
public OrderItemRequestDTO buildOrderItemRequestDTOFromOrderItem(OrderItem item);
public OrderItem createOrderItem(OrderItemRequest itemRequest);
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_service_OrderItemService.java |
1,807 | class ConstantFactory<T> implements InternalFactory<T> {
private final Initializable<T> initializable;
public ConstantFactory(Initializable<T> initializable) {
this.initializable = initializable;
}
public T get(Errors errors, InternalContext context, Dependency dependency)
throws ErrorsException {
return initializable.get(errors);
}
public String toString() {
return new ToStringBuilder(ConstantFactory.class)
.add("value", initializable)
.toString();
}
} | 0true
| src_main_java_org_elasticsearch_common_inject_ConstantFactory.java |
540 | public class CreateTransactionRequest extends BaseTransactionRequest implements SecureRequest {
private TransactionOptions options;
private SerializableXID sXid;
public CreateTransactionRequest() {
}
public CreateTransactionRequest(TransactionOptions options, SerializableXID sXid) {
this.options = options;
this.sXid = sXid;
}
@Override
public Object innerCall() throws Exception {
ClientEngineImpl clientEngine = getService();
ClientEndpoint endpoint = getEndpoint();
TransactionManagerServiceImpl transactionManager =
(TransactionManagerServiceImpl) clientEngine.getTransactionManagerService();
TransactionContext context = transactionManager.newClientTransactionContext(options, endpoint.getUuid());
if (sXid != null) {
Transaction transaction = TransactionAccessor.getTransaction(context);
transactionManager.addManagedTransaction(sXid, transaction);
}
context.beginTransaction();
endpoint.setTransactionContext(context);
return context.getTxnId();
}
@Override
public String getServiceName() {
return ClientEngineImpl.SERVICE_NAME;
}
@Override
public int getFactoryId() {
return ClientTxnPortableHook.F_ID;
}
@Override
public int getClassId() {
return ClientTxnPortableHook.CREATE;
}
@Override
public void write(PortableWriter writer) throws IOException {
super.write(writer);
ObjectDataOutput out = writer.getRawDataOutput();
options.writeData(out);
out.writeBoolean(sXid != null);
if (sXid != null) {
sXid.writeData(out);
}
}
@Override
public void read(PortableReader reader) throws IOException {
super.read(reader);
ObjectDataInput in = reader.getRawDataInput();
options = new TransactionOptions();
options.readData(in);
boolean sXidNotNull = in.readBoolean();
if (sXidNotNull) {
sXid = new SerializableXID();
sXid.readData(in);
}
}
@Override
public Permission getRequiredPermission() {
return new TransactionPermission();
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_client_txn_CreateTransactionRequest.java |
146 | class FileChannelWithChoppyDisk extends StoreFileChannel
{
ByteBuffer buff = ByteBuffer.allocate(1024);
private int chunkSize;
public FileChannelWithChoppyDisk(int writeThisMuchAtATime)
{
super( (FileChannel) null );
this.chunkSize = writeThisMuchAtATime;
}
@Override
public int write( ByteBuffer byteBuffer, long l ) throws IOException
{
int bytesToWrite = Math.min(chunkSize, byteBuffer.remaining());
buff.position( (int)l );
// Remember original limit
int originalLimit = byteBuffer.limit();
// Set limit to not be bigger than chunk size
byteBuffer.limit(byteBuffer.position() + bytesToWrite);
// Write
buff.put( byteBuffer );
// Restore limit
byteBuffer.limit(originalLimit);
return bytesToWrite;
}
@Override
public long position() throws IOException
{
return buff.position();
}
@Override
public StoreFileChannel position( long l ) throws IOException
{
buff.position( (int) l );
return this;
}
@Override
public long size() throws IOException
{
return buff.capacity();
}
@Override
public StoreFileChannel truncate( long l ) throws IOException
{
throw new UnsupportedOperationException();
}
@Override
public void force( boolean b ) throws IOException { }
} | 0true
| community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_xaframework_TestDirectMappedLogBuffer.java |
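The choppy channel above deliberately returns short writes, so correct callers must loop until the buffer drains; a minimal helper showing the pattern such tests rely on:
static void writeFully(FileChannelWithChoppyDisk channel, ByteBuffer buffer, long position)
        throws IOException {
    while (buffer.hasRemaining()) {
        position += channel.write(buffer, position);
    }
}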
449 | public class ClientReplicatedMapTest
extends HazelcastTestSupport {
@After
public void cleanup() {
HazelcastClient.shutdownAll();
Hazelcast.shutdownAll();
}
@Test
public void testAddObjectDelay0()
throws Exception {
testAdd(buildConfig(InMemoryFormat.OBJECT, 0));
}
@Test
public void testAddObjectDelayDefault()
throws Exception {
testAdd(buildConfig(InMemoryFormat.OBJECT, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
@Test
public void testAddBinaryDelay0()
throws Exception {
testAdd(buildConfig(InMemoryFormat.BINARY, 0));
}
@Test
public void testAddBinaryDelayDefault()
throws Exception {
testAdd(buildConfig(InMemoryFormat.BINARY, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
private void testAdd(Config config)
throws Exception {
HazelcastInstance instance1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance instance2 = HazelcastClient.newHazelcastClient();
final ReplicatedMap<String, String> map1 = instance1.getReplicatedMap("default");
final ReplicatedMap<String, String> map2 = instance2.getReplicatedMap("default");
final int operations = 100;
WatchedOperationExecutor executor = new WatchedOperationExecutor();
executor.execute(new Runnable() {
@Override
public void run() {
for (int i = 0; i < operations; i++) {
map1.put("foo-" + i, "bar");
}
}
}, 60, EntryEventType.ADDED, operations, 0.75, map1, map2);
for (Map.Entry<String, String> entry : map2.entrySet()) {
assertStartsWith("foo-", entry.getKey());
assertEquals("bar", entry.getValue());
}
for (Map.Entry<String, String> entry : map1.entrySet()) {
assertStartsWith("foo-", entry.getKey());
assertEquals("bar", entry.getValue());
}
}
@Test
public void testClearObjectDelay0()
throws Exception {
testClear(buildConfig(InMemoryFormat.OBJECT, 0));
}
@Test
public void testClearObjectDelayDefault()
throws Exception {
testClear(buildConfig(InMemoryFormat.OBJECT, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
@Test
public void testClearBinaryDelay0()
throws Exception {
testClear(buildConfig(InMemoryFormat.BINARY, 0));
}
@Test
public void testClearBinaryDelayDefault()
throws Exception {
testClear(buildConfig(InMemoryFormat.BINARY, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
private void testClear(Config config)
throws Exception {
HazelcastInstance instance1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance instance2 = HazelcastClient.newHazelcastClient();
final ReplicatedMap<String, String> map1 = instance1.getReplicatedMap("default");
final ReplicatedMap<String, String> map2 = instance2.getReplicatedMap("default");
final int operations = 100;
WatchedOperationExecutor executor = new WatchedOperationExecutor();
executor.execute(new Runnable() {
@Override
public void run() {
for (int i = 0; i < operations; i++) {
map1.put("foo-" + i, "bar");
}
}
}, 60, EntryEventType.ADDED, operations, 0.75, map1, map2);
for (Map.Entry<String, String> entry : map2.entrySet()) {
assertStartsWith("foo-", entry.getKey());
assertEquals("bar", entry.getValue());
}
for (Map.Entry<String, String> entry : map1.entrySet()) {
assertStartsWith("foo-", entry.getKey());
assertEquals("bar", entry.getValue());
}
// TODO Should clear be a synchronous operation? What happens on a lost clear message?
final AtomicBoolean happened = new AtomicBoolean(false);
for (int i = 0; i < 10; i++) {
map1.clear();
Thread.sleep(1000);
try {
assertEquals(0, map1.size());
assertEquals(0, map2.size());
happened.set(true);
} catch (AssertionError ignore) {
// ignore and retry
}
if (happened.get()) {
break;
}
}
}
@Test
public void testUpdateObjectDelay0()
throws Exception {
testUpdate(buildConfig(InMemoryFormat.OBJECT, 0));
}
@Test
public void testUpdateObjectDelayDefault()
throws Exception {
testUpdate(buildConfig(InMemoryFormat.OBJECT, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
@Test
public void testUpdateBinaryDelay0()
throws Exception {
testUpdate(buildConfig(InMemoryFormat.BINARY, 0));
}
@Test
public void testUpdateBinaryDelayDefault()
throws Exception {
testUpdate(buildConfig(InMemoryFormat.BINARY, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
private void testUpdate(Config config)
throws Exception {
HazelcastInstance instance1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance instance2 = HazelcastClient.newHazelcastClient();
final ReplicatedMap<String, String> map1 = instance1.getReplicatedMap("default");
final ReplicatedMap<String, String> map2 = instance2.getReplicatedMap("default");
final int operations = 100;
WatchedOperationExecutor executor = new WatchedOperationExecutor();
executor.execute(new Runnable() {
@Override
public void run() {
for (int i = 0; i < operations; i++) {
map1.put("foo-" + i, "bar");
}
}
}, 60, EntryEventType.ADDED, operations, 0.75, map1, map2);
for (Map.Entry<String, String> entry : map2.entrySet()) {
assertStartsWith("foo-", entry.getKey());
assertEquals("bar", entry.getValue());
}
for (Map.Entry<String, String> entry : map1.entrySet()) {
assertStartsWith("foo-", entry.getKey());
assertEquals("bar", entry.getValue());
}
executor.execute(new Runnable() {
@Override
public void run() {
for (int i = 0; i < operations; i++) {
map2.put("foo-" + i, "bar2");
}
}
}, 60, EntryEventType.UPDATED, operations, 0.75, map1, map2);
int map2Updated = 0;
for (Map.Entry<String, String> entry : map2.entrySet()) {
if ("bar2".equals(entry.getValue())) {
map2Updated++;
}
}
int map1Updated = 0;
for (Map.Entry<String, String> entry : map1.entrySet()) {
if ("bar2".equals(entry.getValue())) {
map1Updated++;
}
}
assertMatchSuccessfulOperationQuota(0.75, operations, map1Updated, map2Updated);
}
@Test
public void testRemoveObjectDelay0()
throws Exception {
testRemove(buildConfig(InMemoryFormat.OBJECT, 0));
}
@Test
public void testRemoveObjectDelayDefault()
throws Exception {
testRemove(buildConfig(InMemoryFormat.OBJECT, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
@Test
public void testRemoveBinaryDelay0()
throws Exception {
testRemove(buildConfig(InMemoryFormat.BINARY, 0));
}
@Test
public void testRemoveBinaryDelayDefault()
throws Exception {
testRemove(buildConfig(InMemoryFormat.BINARY, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
private void testRemove(Config config)
throws Exception {
HazelcastInstance instance1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance instance2 = HazelcastClient.newHazelcastClient();
final ReplicatedMap<String, String> map1 = instance1.getReplicatedMap("default");
final ReplicatedMap<String, String> map2 = instance2.getReplicatedMap("default");
final int operations = 100;
WatchedOperationExecutor executor = new WatchedOperationExecutor();
executor.execute(new Runnable() {
@Override
public void run() {
for (int i = 0; i < operations; i++) {
map1.put("foo-" + i, "bar");
}
}
}, 60, EntryEventType.ADDED, operations, 0.75, map1, map2);
for (Map.Entry<String, String> entry : map2.entrySet()) {
assertStartsWith("foo-", entry.getKey());
assertEquals("bar", entry.getValue());
}
for (Map.Entry<String, String> entry : map1.entrySet()) {
assertStartsWith("foo-", entry.getKey());
assertEquals("bar", entry.getValue());
}
executor.execute(new Runnable() {
@Override
public void run() {
for (int i = 0; i < operations; i++) {
map2.remove("foo-" + i);
}
}
}, 60, EntryEventType.REMOVED, operations, 0.75, map1, map2);
int map2Updated = 0;
for (int i = 0; i < operations; i++) {
Object value = map2.get("foo-" + i);
if (value == null) {
map2Updated++;
}
}
int map1Updated = 0;
for (int i = 0; i < operations; i++) {
Object value = map1.get("foo-" + i);
if (value == null) {
map1Updated++;
}
}
assertMatchSuccessfulOperationQuota(0.75, operations, map1Updated, map2Updated);
}
@Test
public void testSizeObjectDelay0()
throws Exception {
testSize(buildConfig(InMemoryFormat.OBJECT, 0));
}
@Test
public void testSizeObjectDelayDefault()
throws Exception {
testSize(buildConfig(InMemoryFormat.OBJECT, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
@Test
public void testSizeBinaryDelay0()
throws Exception {
testSize(buildConfig(InMemoryFormat.BINARY, 0));
}
@Test
public void testSizeBinaryDelayDefault()
throws Exception {
testSize(buildConfig(InMemoryFormat.BINARY, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
private void testSize(Config config)
throws Exception {
HazelcastInstance instance1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance instance2 = HazelcastClient.newHazelcastClient();
final ReplicatedMap<Integer, Integer> map1 = instance1.getReplicatedMap("default");
final ReplicatedMap<Integer, Integer> map2 = instance2.getReplicatedMap("default");
final AbstractMap.SimpleEntry<Integer, Integer>[] testValues = buildTestValues();
WatchedOperationExecutor executor = new WatchedOperationExecutor();
executor.execute(new Runnable() {
@Override
public void run() {
int half = testValues.length / 2;
for (int i = 0; i < testValues.length; i++) {
final ReplicatedMap map = i < half ? map1 : map2;
final AbstractMap.SimpleEntry<Integer, Integer> entry = testValues[i];
map.put(entry.getKey(), entry.getValue());
}
}
}, 2, EntryEventType.ADDED, 100, 0.75, map1, map2);
assertMatchSuccessfulOperationQuota(0.75, map1.size(), map2.size());
}
@Test
public void testContainsKeyObjectDelay0()
throws Exception {
testContainsKey(buildConfig(InMemoryFormat.OBJECT, 0));
}
@Test
public void testContainsKeyObjectDelayDefault()
throws Exception {
testContainsKey(buildConfig(InMemoryFormat.OBJECT, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
@Test
public void testContainsKeyBinaryDelay0()
throws Exception {
testContainsKey(buildConfig(InMemoryFormat.BINARY, 0));
}
@Test
public void testContainsKeyBinaryDelayDefault()
throws Exception {
testContainsKey(buildConfig(InMemoryFormat.BINARY, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
private void testContainsKey(Config config)
throws Exception {
HazelcastInstance instance1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance instance2 = HazelcastClient.newHazelcastClient();
final ReplicatedMap<String, String> map1 = instance1.getReplicatedMap("default");
final ReplicatedMap<String, String> map2 = instance2.getReplicatedMap("default");
final int operations = 100;
WatchedOperationExecutor executor = new WatchedOperationExecutor();
executor.execute(new Runnable() {
@Override
public void run() {
for (int i = 0; i < operations; i++) {
map1.put("foo-" + i, "bar");
}
}
}, 60, EntryEventType.ADDED, operations, 0.75, map1, map2);
int map2Contains = 0;
for (int i = 0; i < operations; i++) {
if (map2.containsKey("foo-" + i)) {
map2Contains++;
}
}
int map1Contains = 0;
for (int i = 0; i < operations; i++) {
if (map1.containsKey("foo-" + i)) {
map1Contains++;
}
}
assertMatchSuccessfulOperationQuota(0.75, operations, map1Contains, map2Contains);
}
@Test
public void testContainsValueObjectDelay0()
throws Exception {
testContainsValue(buildConfig(InMemoryFormat.OBJECT, 0));
}
@Test
public void testContainsValueObjectDelayDefault()
throws Exception {
testContainsValue(buildConfig(InMemoryFormat.OBJECT, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
@Test
public void testContainsValueBinaryDelay0()
throws Exception {
testContainsValue(buildConfig(InMemoryFormat.BINARY, 0));
}
@Test
public void testContainsValueBinaryDelayDefault()
throws Exception {
testContainsValue(buildConfig(InMemoryFormat.BINARY, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
private void testContainsValue(Config config)
throws Exception {
HazelcastInstance instance1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance instance2 = HazelcastClient.newHazelcastClient();
final ReplicatedMap<Integer, Integer> map1 = instance1.getReplicatedMap("default");
final ReplicatedMap<Integer, Integer> map2 = instance2.getReplicatedMap("default");
final AbstractMap.SimpleEntry<Integer, Integer>[] testValues = buildTestValues();
WatchedOperationExecutor executor = new WatchedOperationExecutor();
executor.execute(new Runnable() {
@Override
public void run() {
int half = testValues.length / 2;
for (int i = 0; i < testValues.length; i++) {
final ReplicatedMap map = i < half ? map1 : map2;
final AbstractMap.SimpleEntry<Integer, Integer> entry = testValues[i];
map.put(entry.getKey(), entry.getValue());
}
}
}, 2, EntryEventType.ADDED, testValues.length, 0.75, map1, map2);
int map2Contains = 0;
for (int i = 0; i < testValues.length; i++) {
if (map2.containsValue(testValues[i].getValue())) {
map2Contains++;
}
}
int map1Contains = 0;
for (int i = 0; i < testValues.length; i++) {
if (map1.containsValue(testValues[i].getValue())) {
map1Contains++;
}
}
assertMatchSuccessfulOperationQuota(0.75, testValues.length, map1Contains, map2Contains);
}
@Test
public void testValuesObjectDelay0()
throws Exception {
testValues(buildConfig(InMemoryFormat.OBJECT, 0));
}
@Test
public void testValuesObjectDelayDefault()
throws Exception {
testValues(buildConfig(InMemoryFormat.OBJECT, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
@Test
public void testValuesBinaryDelay0()
throws Exception {
testValues(buildConfig(InMemoryFormat.BINARY, 0));
}
@Test
public void testValuesBinaryDefault()
throws Exception {
testValues(buildConfig(InMemoryFormat.BINARY, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
private void testValues(Config config)
throws Exception {
HazelcastInstance instance1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance instance2 = HazelcastClient.newHazelcastClient();
final ReplicatedMap<Integer, Integer> map1 = instance1.getReplicatedMap("default");
final ReplicatedMap<Integer, Integer> map2 = instance2.getReplicatedMap("default");
final AbstractMap.SimpleEntry<Integer, Integer>[] testValues = buildTestValues();
final List<Integer> valuesTestValues = new ArrayList<Integer>(testValues.length);
WatchedOperationExecutor executor = new WatchedOperationExecutor();
executor.execute(new Runnable() {
@Override
public void run() {
int half = testValues.length / 2;
for (int i = 0; i < testValues.length; i++) {
final ReplicatedMap map = i < half ? map1 : map2;
final AbstractMap.SimpleEntry<Integer, Integer> entry = testValues[i];
map.put(entry.getKey(), entry.getValue());
valuesTestValues.add(entry.getValue());
}
}
}, 2, EntryEventType.ADDED, 100, 0.75, map1, map2);
List<Integer> values1 = new ArrayList<Integer>(map1.values());
List<Integer> values2 = new ArrayList<Integer>(map2.values());
int map1Contains = 0;
int map2Contains = 0;
for (Integer value : valuesTestValues) {
if (values2.contains(value)) {
map2Contains++;
}
if (values1.contains(value)) {
map1Contains++;
}
}
assertMatchSuccessfulOperationQuota(0.75, testValues.length, map1Contains, map2Contains);
}
@Test
public void testKeySetObjectDelay0()
throws Exception {
testKeySet(buildConfig(InMemoryFormat.OBJECT, 0));
}
@Test
public void testKeySetObjectDelayDefault()
throws Exception {
testKeySet(buildConfig(InMemoryFormat.OBJECT, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
@Test
public void testKeySetBinaryDelay0()
throws Exception {
testKeySet(buildConfig(InMemoryFormat.BINARY, 0));
}
@Test
public void testKeySetBinaryDelayDefault()
throws Exception {
testKeySet(buildConfig(InMemoryFormat.BINARY, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
private void testKeySet(Config config)
throws Exception {
HazelcastInstance instance1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance instance2 = HazelcastClient.newHazelcastClient();
final ReplicatedMap<Integer, Integer> map1 = instance1.getReplicatedMap("default");
final ReplicatedMap<Integer, Integer> map2 = instance2.getReplicatedMap("default");
final AbstractMap.SimpleEntry<Integer, Integer>[] testValues = buildTestValues();
final List<Integer> keySetTestValues = new ArrayList<Integer>(testValues.length);
WatchedOperationExecutor executor = new WatchedOperationExecutor();
executor.execute(new Runnable() {
@Override
public void run() {
int half = testValues.length / 2;
for (int i = 0; i < testValues.length; i++) {
final ReplicatedMap map = i < half ? map1 : map2;
final AbstractMap.SimpleEntry<Integer, Integer> entry = testValues[i];
map.put(entry.getKey(), entry.getValue());
keySetTestValues.add(entry.getKey());
}
}
}, 2, EntryEventType.ADDED, 100, 0.75, map1, map2);
List<Integer> keySet1 = new ArrayList<Integer>(map1.keySet());
List<Integer> keySet2 = new ArrayList<Integer>(map2.keySet());
int map1Contains = 0;
int map2Contains = 0;
for (Integer value : keySetTestValues) {
if (keySet2.contains(value)) {
map2Contains++;
}
if (keySet1.contains(value)) {
map1Contains++;
}
}
assertMatchSuccessfulOperationQuota(0.75, testValues.length, map1Contains, map2Contains);
}
@Test
public void testEntrySetObjectDelay0()
throws Exception {
testEntrySet(buildConfig(InMemoryFormat.OBJECT, 0));
}
@Test
public void testEntrySetObjectDelayDefault()
throws Exception {
testEntrySet(buildConfig(InMemoryFormat.OBJECT, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
@Test
public void testEntrySetBinaryDelay0()
throws Exception {
testEntrySet(buildConfig(InMemoryFormat.BINARY, 0));
}
@Test
public void testEntrySetBinaryDelayDefault()
throws Exception {
testEntrySet(buildConfig(InMemoryFormat.BINARY, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
private void testEntrySet(Config config)
throws Exception {
HazelcastInstance instance1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance instance2 = HazelcastClient.newHazelcastClient();
final ReplicatedMap<Integer, Integer> map1 = instance1.getReplicatedMap("default");
final ReplicatedMap<Integer, Integer> map2 = instance2.getReplicatedMap("default");
final AbstractMap.SimpleEntry<Integer, Integer>[] testValues = buildTestValues();
final List<AbstractMap.SimpleEntry<Integer, Integer>> entrySetTestValues = Arrays.asList(testValues);
WatchedOperationExecutor executor = new WatchedOperationExecutor();
executor.execute(new Runnable() {
@Override
public void run() {
int half = testValues.length / 2;
for (int i = 0; i < testValues.length; i++) {
final ReplicatedMap map = i < half ? map1 : map2;
final AbstractMap.SimpleEntry<Integer, Integer> entry = testValues[i];
map.put(entry.getKey(), entry.getValue());
}
}
}, 2, EntryEventType.ADDED, 100, 0.75, map1, map2);
List<Entry<Integer, Integer>> entrySet1 = new ArrayList<Entry<Integer, Integer>>(map1.entrySet());
List<Entry<Integer, Integer>> entrySet2 = new ArrayList<Entry<Integer, Integer>>(map2.entrySet());
int map2Contains = 0;
for (Entry<Integer, Integer> entry : entrySet2) {
Integer value = findValue(entry.getKey(), testValues);
if (value.equals(entry.getValue())) {
map2Contains++;
}
}
int map1Contains = 0;
for (Entry<Integer, Integer> entry : entrySet1) {
Integer value = findValue(entry.getKey(), testValues);
if (value.equals(entry.getValue())) {
map1Contains++;
}
}
assertMatchSuccessfulOperationQuota(0.75, testValues.length, map1Contains, map2Contains);
}
@Test
public void testRetrieveUnknownValueObjectDelay0()
throws Exception {
testRetrieveUnknownValue(buildConfig(InMemoryFormat.OBJECT, 0));
}
@Test
public void testRetrieveUnknownValueObjectDelayDefault()
throws Exception {
testRetrieveUnknownValue(buildConfig(InMemoryFormat.OBJECT, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
@Test
public void testRetrieveUnknownValueBinaryDelay0()
throws Exception {
testRetrieveUnknownValue(buildConfig(InMemoryFormat.BINARY, 0));
}
@Test
public void testRetrieveUnknownValueBinaryDelayDefault()
throws Exception {
testRetrieveUnknownValue(buildConfig(InMemoryFormat.BINARY, ReplicatedMapConfig.DEFAULT_REPLICATION_DELAY_MILLIS));
}
private void testRetrieveUnknownValue(Config config)
throws Exception {
HazelcastInstance instance1 = Hazelcast.newHazelcastInstance(config);
HazelcastInstance instance2 = HazelcastClient.newHazelcastClient();
ReplicatedMap<String, String> map = instance2.getReplicatedMap("default");
String value = map.get("foo");
assertNull(value);
}
private Config buildConfig(InMemoryFormat inMemoryFormat, long replicationDelay) {
Config config = new Config();
ReplicatedMapConfig replicatedMapConfig = config.getReplicatedMapConfig("default");
replicatedMapConfig.setReplicationDelayMillis(replicationDelay);
replicatedMapConfig.setInMemoryFormat(inMemoryFormat);
return config;
}
private Integer findValue(int key, AbstractMap.SimpleEntry<Integer, Integer>[] values) {
for (int i = 0; i < values.length; i++) {
if (values[i].getKey().equals(key)) {
return values[i].getValue();
}
}
return null;
}
private void assertMatchSuccessfulOperationQuota(double quota, int completeOps, int... values) {
float[] quotas = new float[values.length];
Object[] args = new Object[values.length + 1];
args[0] = quota;
for (int i = 0; i < values.length; i++) {
quotas[i] = (float) values[i] / completeOps;
args[i + 1] = new Float(quotas[i]);
}
boolean success = true;
for (int i = 0; i < values.length; i++) {
if (quotas[i] < quota) {
success = false;
break;
}
}
if (!success) {
StringBuilder sb = new StringBuilder("Quote (%s) for updates not reached,");
for (int i = 0; i < values.length; i++) {
sb.append(" map").append(i + 1).append(": %s,");
}
sb.deleteCharAt(sb.length() - 1);
fail(String.format(sb.toString(), args));
}
}
private AbstractMap.SimpleEntry<Integer, Integer>[] buildTestValues() {
Random random = new Random();
AbstractMap.SimpleEntry<Integer, Integer>[] testValues = new AbstractMap.SimpleEntry[100];
for (int i = 0; i < testValues.length; i++) {
testValues[i] = new AbstractMap.SimpleEntry<Integer, Integer>(random.nextInt(), random.nextInt());
}
return testValues;
}
} | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_replicatedmap_ClientReplicatedMapTest.java |
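For reference, the quota check in assertMatchSuccessfulOperationQuota above passes only when each map observed at least quota * completeOps successful operations; a worked instance of the arithmetic:
int completeOps = 100;
double quota = 0.75;
int map1Updated = 80;                                    // observed successes
boolean ok = (float) map1Updated / completeOps >= quota; // 0.80 >= 0.75 -> passes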
5 | public class BrowserInformationControl extends AbstractInformationControl
implements IInformationControlExtension2, IDelayedInputChangeProvider {
/**
* Tells whether the SWT Browser widget and hence this information
* control is available.
*
* @param parent the parent component used for checking or <code>null</code> if none
* @return <code>true</code> if this control is available
*/
public static boolean isAvailable(Composite parent) {
if (!fgAvailabilityChecked) {
try {
Browser browser= new Browser(parent, SWT.NONE);
browser.dispose();
fgIsAvailable= true;
Slider sliderV= new Slider(parent, SWT.VERTICAL);
Slider sliderH= new Slider(parent, SWT.HORIZONTAL);
int width= sliderV.computeSize(SWT.DEFAULT, SWT.DEFAULT).x;
int height= sliderH.computeSize(SWT.DEFAULT, SWT.DEFAULT).y;
fgScrollBarSize= new Point(width, height);
sliderV.dispose();
sliderH.dispose();
} catch (SWTError er) {
fgIsAvailable= false;
} finally {
fgAvailabilityChecked= true;
}
}
return fgIsAvailable;
}
/**
* Minimal size constraints.
* @since 3.2
*/
private static final int MIN_WIDTH= 80;
private static final int MIN_HEIGHT= 50;
/**
* Availability checking cache.
*/
private static boolean fgIsAvailable= false;
private static boolean fgAvailabilityChecked= false;
/**
* Cached scroll bar width and height
* @since 3.4
*/
private static Point fgScrollBarSize;
/** The control's browser widget */
private Browser fBrowser;
/** Tells whether the browser has content */
private boolean fBrowserHasContent;
/** Text layout used to approximate size of content when rendered in browser */
private TextLayout fTextLayout;
/** Bold text style */
private TextStyle fBoldStyle;
private BrowserInput fInput;
/**
* <code>true</code> iff the browser has completed loading of the last
* input set via {@link #setInformation(String)}.
* @since 3.4
*/
private boolean fCompleted= false;
/**
* The listener to be notified when a delayed location changing event happened.
* @since 3.4
*/
private IInputChangedListener fDelayedInputChangeListener;
/**
* The listeners to be notified when the input changed.
* @since 3.4
*/
private ListenerList/*<IInputChangedListener>*/fInputChangeListeners= new ListenerList(ListenerList.IDENTITY);
/**
* The symbolic name of the font used for size computations, or <code>null</code> to use dialog font.
* @since 3.4
*/
private final String fSymbolicFontName;
/**
* Creates a browser information control with the given shell as parent.
*
* @param parent the parent shell
* @param symbolicFontName the symbolic name of the font used for size computations
* @param resizable <code>true</code> if the control should be resizable
* @since 3.4
*/
public BrowserInformationControl(Shell parent, String symbolicFontName,
boolean resizable) {
super(parent, resizable);
fSymbolicFontName= symbolicFontName;
create();
}
/**
* Creates a browser information control with the given shell as parent.
*
* @param parent the parent shell
* @param symbolicFontName the symbolic name of the font used for size computations
* @param statusFieldText the text to be used in the optional status field
* or <code>null</code> if the status field should be hidden
* @since 3.4
*/
public BrowserInformationControl(Shell parent, String symbolicFontName,
String statusFieldText) {
super(parent, statusFieldText);
fSymbolicFontName= symbolicFontName;
create();
}
/**
* Creates a browser information control with the given shell as parent.
*
* @param parent the parent shell
* @param symbolicFontName the symbolic name of the font used for size computations
* @param toolBarManager the manager or <code>null</code> if toolbar is not desired
* @since 3.4
*/
public BrowserInformationControl(Shell parent, String symbolicFontName,
ToolBarManager toolBarManager) {
super(parent, toolBarManager);
fSymbolicFontName= symbolicFontName;
create();
}
@Override
protected void createContent(Composite parent) {
fBrowser= new Browser(parent, SWT.NONE);
fBrowser.setJavascriptEnabled(false);
Display display= getShell().getDisplay();
fBrowser.setForeground(display.getSystemColor(SWT.COLOR_INFO_FOREGROUND));
fBrowser.setBackground(display.getSystemColor(SWT.COLOR_INFO_BACKGROUND));
//fBrowser.setBackground(color);
fBrowser.addProgressListener(new ProgressAdapter() {
@Override
public void completed(ProgressEvent event) {
fCompleted= true;
}
});
fBrowser.addOpenWindowListener(new OpenWindowListener() {
@Override
public void open(WindowEvent event) {
event.required= true; // Cancel opening of new windows
}
});
// Replace browser's built-in context menu with none
fBrowser.setMenu(new Menu(getShell(), SWT.NONE));
createTextLayout();
}
/**
* {@inheritDoc}
* @deprecated use {@link #setInput(Object)}
*/
@Override
public void setInformation(final String content) {
setInput(new BrowserInput(null) {
@Override
public String getHtml() {
return content;
}
@Override
public String getInputName() {
return "";
}
});
}
/**
* {@inheritDoc} This control can handle {@link String} and
* {@link BrowserInput}.
*/
@Override
public void setInput(Object input) {
Assert.isLegal(input == null ||
input instanceof String ||
input instanceof BrowserInput);
if (input instanceof String) {
setInformation((String)input);
return;
}
fInput= (BrowserInput) input;
String content= null;
if (fInput != null)
content= fInput.getHtml();
fBrowserHasContent= content != null && content.length() > 0;
if (!fBrowserHasContent)
content= "<html><body ></html>"; //$NON-NLS-1$
boolean RTL= (getShell().getStyle() & SWT.RIGHT_TO_LEFT) != 0;
boolean resizable= isResizable();
// The default "overflow:auto" would not result in a predictable width for the client area
// and the re-wrapping would cause visual noise
String[] styles= null;
if (RTL && resizable)
styles= new String[] { "direction:rtl;", "overflow:scroll;", "word-wrap:break-word;" }; //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
else if (RTL && !resizable)
styles= new String[] { "direction:rtl;", "overflow:hidden;", "word-wrap:break-word;" }; //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
else if (!resizable)
//XXX: In IE, "word-wrap: break-word;" causes bogus wrapping even in non-broken words :-(see e.g. Javadoc of String).
// Re-check whether we really still need this now that the Javadoc Hover header already sets this style.
styles= new String[] { "overflow:hidden;"/*, "word-wrap: break-word;"*/}; //$NON-NLS-1$
else
styles= new String[] { "overflow:scroll;" }; //$NON-NLS-1$
StringBuilder buffer= new StringBuilder(content);
HTMLPrinter.insertStyles(buffer, styles);
content= buffer.toString();
/*
* XXX: Should add some JavaScript here that shows something like
* "(continued...)" or "..." at the end of the visible area when the page overflowed
* with "overflow:hidden;".
*/
fCompleted= false;
fBrowser.setText(content);
Object[] listeners= fInputChangeListeners.getListeners();
for (int i= 0; i < listeners.length; i++)
((IInputChangedListener)listeners[i]).inputChanged(fInput);
}
@Override
public void setVisible(boolean visible) {
Shell shell= getShell();
if (shell.isVisible() == visible)
return;
if (!visible) {
super.setVisible(false);
setInput(null);
return;
}
/*
* The Browser widget flickers when made visible while it is not completely loaded.
* The fix is to delay the call to setVisible until either loading is completed
* (see ProgressListener in constructor), or a timeout has been reached.
*/
final Display display= shell.getDisplay();
// Make sure the display wakes from sleep after timeout:
display.timerExec(100, new Runnable() {
@Override
public void run() {
fCompleted= true;
}
});
while (!fCompleted) {
// Drive the event loop to process the events required to load the browser widget's contents:
if (!display.readAndDispatch()) {
display.sleep();
}
}
shell= getShell();
if (shell == null || shell.isDisposed())
return;
/*
* Avoids flickering when replacing hovers, especially on Vista in ON_CLICK mode.
* Causes flickering on GTK. Carbon does not care.
*/
if ("win32".equals(SWT.getPlatform())) //$NON-NLS-1$
shell.moveAbove(null);
super.setVisible(true);
}
@Override
public void setSize(int width, int height) {
fBrowser.setRedraw(false); // avoid flickering
try {
super.setSize(width, height);
} finally {
fBrowser.setRedraw(true);
}
}
/**
* Creates and initializes the text layout used
* to compute the size hint.
*
* @since 3.2
*/
private void createTextLayout() {
fTextLayout= new TextLayout(fBrowser.getDisplay());
// Initialize fonts
String symbolicFontName= fSymbolicFontName == null ? JFaceResources.DIALOG_FONT : fSymbolicFontName;
Font font= JFaceResources.getFont(symbolicFontName);
fTextLayout.setFont(font);
fTextLayout.setWidth(-1);
font= JFaceResources.getFontRegistry().getBold(symbolicFontName);
fBoldStyle= new TextStyle(font, null, null);
// Compute and set tab width
fTextLayout.setText(" "); //$NON-NLS-1$
int tabWidth= fTextLayout.getBounds().width;
fTextLayout.setTabs(new int[] { tabWidth });
fTextLayout.setText(""); //$NON-NLS-1$
}
@Override
protected void handleDispose() {
if (fTextLayout != null) {
fTextLayout.dispose();
fTextLayout= null;
}
fBrowser= null;
super.handleDispose();
}
@Override
public Point computeSizeHint() {
Point sizeConstraints = getSizeConstraints();
Rectangle trim = computeTrim();
//FIXME: The HTML2TextReader does not render <p> like a browser.
// Instead of inserting an empty line, it just adds a single line break.
// Furthermore, the indentation of <dl><dd> elements is too small (e.g with a long @see line)
TextPresentation presentation= new TextPresentation();
HTML2TextReader reader= new HTML2TextReader(new StringReader(fInput.getHtml()), presentation);
String text;
try {
text= reader.getString();
} catch (IOException e) {
text= "";
}
finally {
try {
reader.close();
} catch (IOException e) {
e.printStackTrace();
}
}
fTextLayout.setText(text);
fTextLayout.setWidth(sizeConstraints==null ? SWT.DEFAULT : sizeConstraints.x-trim.width);
@SuppressWarnings("unchecked")
Iterator<StyleRange> iter = presentation.getAllStyleRangeIterator();
while (iter.hasNext()) {
StyleRange sr = iter.next();
if (sr.fontStyle == SWT.BOLD)
fTextLayout.setStyle(fBoldStyle, sr.start, sr.start + sr.length);
}
Rectangle bounds = fTextLayout.getBounds(); // does not return minimum width, see https://bugs.eclipse.org/bugs/show_bug.cgi?id=217446
int lineCount = fTextLayout.getLineCount();
int textWidth = 0;
for (int i=0; i<lineCount; i++) {
Rectangle rect = fTextLayout.getLineBounds(i);
int lineWidth = rect.x + rect.width;
if (i==0) {
lineWidth *= 1.25; // to accommodate the first line, which is not only bold but also monospace
lineWidth += 20;
}
textWidth = Math.max(textWidth, lineWidth);
}
bounds.width = textWidth;
fTextLayout.setText("");
int minWidth = textWidth;
int minHeight = trim.height + bounds.height;
// Add some air to accommodate different browser renderings
minWidth += 30;
minHeight += 60;
// Apply max size constraints
if (sizeConstraints!=null) {
if (sizeConstraints.x!=SWT.DEFAULT)
minWidth = Math.min(sizeConstraints.x, minWidth + trim.width);
if (sizeConstraints.y!=SWT.DEFAULT)
minHeight = Math.min(sizeConstraints.y, minHeight);
}
// Ensure minimal size
int width = Math.max(MIN_WIDTH, minWidth);
int height = Math.max(MIN_HEIGHT, minHeight);
return new Point(width, height);
}
@Override
public Rectangle computeTrim() {
Rectangle trim= super.computeTrim();
if (isResizable() && fgScrollBarSize!=null) {
boolean RTL= (getShell().getStyle() & SWT.RIGHT_TO_LEFT) != 0;
if (RTL) {
trim.x-= fgScrollBarSize.x;
}
trim.width+= fgScrollBarSize.x;
trim.height+= fgScrollBarSize.y;
}
return trim;
}
/**
* Adds the listener to the collection of listeners who will be
* notified when the current location has changed or is about to change.
*
* @param listener the location listener
* @since 3.4
*/
public void addLocationListener(LocationListener listener) {
fBrowser.addLocationListener(listener);
}
@Override
public void setForegroundColor(Color foreground) {
super.setForegroundColor(foreground);
fBrowser.setForeground(foreground);
}
@Override
public void setBackgroundColor(Color background) {
super.setBackgroundColor(background);
fBrowser.setBackground(background);
}
@Override
public boolean hasContents() {
return fBrowserHasContent;
}
/**
* Adds a listener for input changes to this input change provider.
* Has no effect if an identical listener is already registered.
*
* @param inputChangeListener the listener to add
* @since 3.4
*/
public void addInputChangeListener(IInputChangedListener inputChangeListener) {
Assert.isNotNull(inputChangeListener);
fInputChangeListeners.add(inputChangeListener);
}
/**
* Removes the given input change listener from this input change provider.
* Has no effect if an identical listener is not registered.
*
* @param inputChangeListener the listener to remove
* @since 3.4
*/
public void removeInputChangeListener(IInputChangedListener inputChangeListener) {
fInputChangeListeners.remove(inputChangeListener);
}
@Override
public void setDelayedInputChangeListener(IInputChangedListener inputChangeListener) {
fDelayedInputChangeListener= inputChangeListener;
}
/**
* Tells whether a delayed input change listener is registered.
*
* @return <code>true</code> iff a delayed input change
* listener is currently registered
* @since 3.4
*/
public boolean hasDelayedInputChangeListener() {
return fDelayedInputChangeListener != null;
}
/**
* Notifies listeners of a delayed input change.
*
* @param newInput the new input, or <code>null</code> to request cancellation
* @since 3.4
*/
public void notifyDelayedInputChange(Object newInput) {
if (fDelayedInputChangeListener != null)
fDelayedInputChangeListener.inputChanged(newInput);
}
@Override
public String toString() {
String style= (getShell().getStyle() & SWT.RESIZE) == 0 ? "fixed" : "resizeable"; //$NON-NLS-1$ //$NON-NLS-2$
return super.toString() + " - style: " + style; //$NON-NLS-1$
}
/**
* @return the current browser input or <code>null</code>
*/
public BrowserInput getInput() {
return fInput;
}
@Override
public Point computeSizeConstraints(int widthInChars, int heightInChars) {
if (fSymbolicFontName == null)
return null;
GC gc= new GC(fBrowser);
Font font= JFaceResources.getFont(fSymbolicFontName);
gc.setFont(font);
int width= gc.getFontMetrics().getAverageCharWidth();
int height= gc.getFontMetrics().getHeight();
gc.dispose();
return new Point(widthInChars * width, heightInChars * height);
}
} | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_browser_BrowserInformationControl.java |
89 | private static class DeadlockProneTransactionState extends WritableTransactionState
{
private final DoubleLatch latch;
public DeadlockProneTransactionState( LockManager lockManager, NodeManager nodeManager,
Logging logging, javax.transaction.Transaction tx, RemoteTxHook txHook, TxIdGenerator txIdGenerator, DoubleLatch latch )
{
super( lockManager, nodeManager, logging, tx, txHook, txIdGenerator );
this.latch = latch;
}
@Override
public void commitCows()
{
latch.startAndAwaitFinish();
super.commitCows();
}
} | 0true
| community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestCacheUpdateDeadlock.java |
778 | public class CollectionTxnAddBackupOperation extends CollectionOperation implements BackupOperation {
private long itemId;
private Data value;
public CollectionTxnAddBackupOperation() {
}
public CollectionTxnAddBackupOperation(String name, long itemId, Data value) {
super(name);
this.itemId = itemId;
this.value = value;
}
@Override
public int getId() {
return CollectionDataSerializerHook.COLLECTION_TXN_ADD_BACKUP;
}
@Override
public void beforeRun() throws Exception {
}
@Override
public void run() throws Exception {
getOrCreateContainer().commitAddBackup(itemId, value);
response = true;
}
@Override
public void afterRun() throws Exception {
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeLong(itemId);
value.writeData(out);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
itemId = in.readLong();
value = new Data();
value.readData(in);
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_collection_txn_CollectionTxnAddBackupOperation.java |
3,231 | public class ReplicationPublisher<K, V>
implements ReplicationChannel {
private static final String SERVICE_NAME = ReplicatedMapService.SERVICE_NAME;
private static final String EVENT_TOPIC_NAME = ReplicatedMapService.EVENT_TOPIC_NAME;
private static final String EXECUTOR_NAME = "hz:replicated-map";
private static final int MAX_MESSAGE_CACHE_SIZE = 1000;
private static final int MAX_CLEAR_EXECUTION_RETRY = 5;
private final List<ReplicationMessage> replicationMessageCache = new ArrayList<ReplicationMessage>();
private final Lock replicationMessageCacheLock = new ReentrantLock();
private final Random memberRandomizer = new Random();
private final ScheduledExecutorService executorService;
private final ExecutionService executionService;
private final OperationService operationService;
private final ClusterService clusterService;
private final EventService eventService;
private final NodeEngine nodeEngine;
private final AbstractBaseReplicatedRecordStore<K, V> replicatedRecordStore;
private final InternalReplicatedMapStorage<K, V> storage;
private final ReplicatedMapConfig replicatedMapConfig;
private final LocalReplicatedMapStatsImpl mapStats;
private final Member localMember;
private final String name;
private final boolean allowReplicationHooks;
private volatile PreReplicationHook preReplicationHook;
ReplicationPublisher(AbstractBaseReplicatedRecordStore<K, V> replicatedRecordStore, NodeEngine nodeEngine) {
this.replicatedRecordStore = replicatedRecordStore;
this.nodeEngine = nodeEngine;
this.name = replicatedRecordStore.getName();
this.storage = replicatedRecordStore.storage;
this.mapStats = replicatedRecordStore.mapStats;
this.eventService = nodeEngine.getEventService();
this.localMember = replicatedRecordStore.localMember;
this.clusterService = nodeEngine.getClusterService();
this.executionService = nodeEngine.getExecutionService();
this.operationService = nodeEngine.getOperationService();
this.replicatedMapConfig = replicatedRecordStore.replicatedMapConfig;
this.executorService = getExecutorService(nodeEngine, replicatedMapConfig);
this.allowReplicationHooks = Boolean.parseBoolean(System.getProperty("hazelcast.repmap.hooks.allowed", "false"));
}
@Override
public void replicate(MultiReplicationMessage message) {
distributeReplicationMessage(message, true);
}
@Override
public void replicate(ReplicationMessage message) {
distributeReplicationMessage(message, true);
}
public void setPreReplicationHook(PreReplicationHook preReplicationHook) {
this.preReplicationHook = preReplicationHook;
}
public void publishReplicatedMessage(ReplicationMessage message) {
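// With no configured delay the message is replicated immediately; otherwise it is
// cached and flushed either by the scheduled sender task or once the cache
// exceeds MAX_MESSAGE_CACHE_SIZE.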
if (replicatedMapConfig.getReplicationDelayMillis() == 0) {
distributeReplicationMessage(message, false);
} else {
replicationMessageCacheLock.lock();
try {
replicationMessageCache.add(message);
if (replicationMessageCache.size() == 1) {
ReplicationCachedSenderTask task = new ReplicationCachedSenderTask(this);
long replicationDelayMillis = replicatedMapConfig.getReplicationDelayMillis();
executorService.schedule(task, replicationDelayMillis, TimeUnit.MILLISECONDS);
} else {
if (replicationMessageCache.size() > MAX_MESSAGE_CACHE_SIZE) {
processMessageCache();
}
}
} finally {
replicationMessageCacheLock.unlock();
}
}
}
public void queueUpdateMessage(final ReplicationMessage update) {
executorService.execute(new Runnable() {
@Override
public void run() {
processUpdateMessage(update);
}
});
}
public void queueUpdateMessages(final MultiReplicationMessage updates) {
executorService.execute(new Runnable() {
@Override
public void run() {
for (ReplicationMessage update : updates.getReplicationMessages()) {
processUpdateMessage(update);
}
}
});
}
void destroy() {
executorService.shutdownNow();
}
void processMessageCache() {
ReplicationMessage[] replicationMessages = null;
replicationMessageCacheLock.lock();
try {
final int size = replicationMessageCache.size();
if (size > 0) {
replicationMessages = replicationMessageCache.toArray(new ReplicationMessage[size]);
replicationMessageCache.clear();
}
} finally {
replicationMessageCacheLock.unlock();
}
if (replicationMessages != null) {
MultiReplicationMessage message = new MultiReplicationMessage(name, replicationMessages);
distributeReplicationMessage(message, false);
}
}
void distributeReplicationMessage(final Object message, final boolean forceSend) {
final PreReplicationHook preReplicationHook = getPreReplicationHook();
if (forceSend || preReplicationHook == null) {
Collection<EventRegistration> eventRegistrations = eventService.getRegistrations(SERVICE_NAME, EVENT_TOPIC_NAME);
Collection<EventRegistration> registrations = filterEventRegistrations(eventRegistrations);
eventService.publishEvent(ReplicatedMapService.SERVICE_NAME, registrations, message, name.hashCode());
} else {
executionService.execute(EXECUTOR_NAME, new Runnable() {
@Override
public void run() {
if (message instanceof MultiReplicationMessage) {
preReplicationHook.preReplicateMultiMessage((MultiReplicationMessage) message, ReplicationPublisher.this);
} else {
preReplicationHook.preReplicateMessage((ReplicationMessage) message, ReplicationPublisher.this);
}
}
});
}
}
public void queuePreProvision(Address callerAddress, int chunkSize) {
RemoteProvisionTask task = new RemoteProvisionTask(replicatedRecordStore, nodeEngine, callerAddress, chunkSize);
executionService.execute(EXECUTOR_NAME, task);
}
public void retryWithDifferentReplicationNode(Member member) {
List<MemberImpl> members = new ArrayList<MemberImpl>(nodeEngine.getClusterService().getMemberList());
members.remove(member);
// If there are fewer than two members there is no other possible candidate to replicate from
if (members.size() < 2) {
return;
}
sendPreProvisionRequest(members);
}
public void distributeClear(boolean emptyReplicationQueue) {
executeRemoteClear(emptyReplicationQueue);
}
public void emptyReplicationQueue() {
replicationMessageCacheLock.lock();
try {
replicationMessageCache.clear();
} finally {
replicationMessageCacheLock.unlock();
}
}
void sendPreProvisionRequest(List<MemberImpl> members) {
if (members.size() == 0) {
return;
}
int randomMember = memberRandomizer.nextInt(members.size());
MemberImpl newMember = members.get(randomMember);
ReplicatedMapPostJoinOperation.MemberMapPair[] memberMapPairs = new ReplicatedMapPostJoinOperation.MemberMapPair[1];
memberMapPairs[0] = new ReplicatedMapPostJoinOperation.MemberMapPair(newMember, name);
OperationService operationService = nodeEngine.getOperationService();
int defaultChunkSize = ReplicatedMapPostJoinOperation.DEFAULT_CHUNK_SIZE;
ReplicatedMapPostJoinOperation op = new ReplicatedMapPostJoinOperation(memberMapPairs, defaultChunkSize);
operationService.send(op, newMember.getAddress());
}
private void executeRemoteClear(boolean emptyReplicationQueue) {
List<MemberImpl> failedMembers = new ArrayList<MemberImpl>(clusterService.getMemberList());
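// Retry the distributed clear up to MAX_CLEAR_EXECUTION_RETRY times, each round
// targeting only the members that failed in the previous one.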
for (int i = 0; i < MAX_CLEAR_EXECUTION_RETRY; i++) {
Map<MemberImpl, InternalCompletableFuture> futures = executeClearOnMembers(failedMembers, emptyReplicationQueue);
// Clear to collect new failing members
failedMembers.clear();
for (Map.Entry<MemberImpl, InternalCompletableFuture> future : futures.entrySet()) {
try {
future.getValue().get();
} catch (Exception e) {
nodeEngine.getLogger(ReplicationPublisher.class).finest(e);
failedMembers.add(future.getKey());
}
}
if (failedMembers.size() == 0) {
return;
}
}
// If we get here we do not seem to have finished the operation
throw new CallTimeoutException("ReplicatedMap::clear couldn't be finished, failed nodes: " + failedMembers);
}
private Map executeClearOnMembers(Collection<MemberImpl> members, boolean emptyReplicationQueue) {
Address thisAddress = clusterService.getThisAddress();
Map<MemberImpl, InternalCompletableFuture> futures = new HashMap<MemberImpl, InternalCompletableFuture>(members.size());
for (MemberImpl member : members) {
Address address = member.getAddress();
if (!thisAddress.equals(address)) {
Operation operation = new ReplicatedMapClearOperation(name, emptyReplicationQueue);
InvocationBuilder ib = operationService.createInvocationBuilder(SERVICE_NAME, operation, address);
futures.put(member, ib.invoke());
}
}
return futures;
}
private void processUpdateMessage(ReplicationMessage update) {
if (localMember.equals(update.getOrigin())) {
return;
}
mapStats.incrementReceivedReplicationEvents();
if (update.getKey() instanceof String) {
String key = (String) update.getKey();
if (AbstractReplicatedRecordStore.CLEAR_REPLICATION_MAGIC_KEY.equals(key)) {
storage.clear();
return;
}
}
K marshalledKey = (K) replicatedRecordStore.marshallKey(update.getKey());
synchronized (replicatedRecordStore.getMutex(marshalledKey)) {
final ReplicatedRecord<K, V> localEntry = storage.get(marshalledKey);
if (localEntry == null) {
if (!update.isRemove()) {
V marshalledValue = (V) replicatedRecordStore.marshallValue(update.getValue());
VectorClock vectorClock = update.getVectorClock();
int updateHash = update.getUpdateHash();
long ttlMillis = update.getTtlMillis();
storage.put(marshalledKey,
new ReplicatedRecord<K, V>(marshalledKey, marshalledValue, vectorClock, updateHash, ttlMillis));
if (ttlMillis > 0) {
replicatedRecordStore.scheduleTtlEntry(ttlMillis, marshalledKey, null);
} else {
replicatedRecordStore.cancelTtlEntry(marshalledKey);
}
replicatedRecordStore.fireEntryListenerEvent(update.getKey(), null, update.getValue());
}
} else {
final VectorClock currentVectorClock = localEntry.getVectorClock();
final VectorClock updateVectorClock = update.getVectorClock();
if (VectorClock.happenedBefore(updateVectorClock, currentVectorClock)) {
// ignore the update. This is an old update
return;
} else if (VectorClock.happenedBefore(currentVectorClock, updateVectorClock)) {
// A new update happened
applyTheUpdate(update, localEntry);
} else {
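// Neither clock happened before the other, so the updates are concurrent.
// Use the latest update hash as a deterministic tie-breaker; otherwise merge the
// clocks and re-publish the local value so the replicas can converge.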
if (localEntry.getLatestUpdateHash() >= update.getUpdateHash()) {
applyTheUpdate(update, localEntry);
} else {
currentVectorClock.applyVector(updateVectorClock);
currentVectorClock.incrementClock(localMember);
Object key = update.getKey();
V value = localEntry.getValue();
long ttlMillis = update.getTtlMillis();
int latestUpdateHash = localEntry.getLatestUpdateHash();
ReplicationMessage message = new ReplicationMessage(name, key, value, currentVectorClock, localMember,
latestUpdateHash, ttlMillis);
distributeReplicationMessage(message, true);
}
}
}
}
}
private void applyTheUpdate(ReplicationMessage<K, V> update, ReplicatedRecord<K, V> localEntry) {
VectorClock localVectorClock = localEntry.getVectorClock();
VectorClock remoteVectorClock = update.getVectorClock();
K marshalledKey = (K) replicatedRecordStore.marshallKey(update.getKey());
V marshalledValue = (V) replicatedRecordStore.marshallValue(update.getValue());
long ttlMillis = update.getTtlMillis();
long oldTtlMillis = localEntry.getTtlMillis();
Object oldValue = localEntry.setValue(marshalledValue, update.getUpdateHash(), ttlMillis);
if (update.isRemove()) {
// Force removal of the underlying stored entry
storage.remove(marshalledKey, localEntry);
}
localVectorClock.applyVector(remoteVectorClock);
if (ttlMillis > 0) {
replicatedRecordStore.scheduleTtlEntry(ttlMillis, marshalledKey, null);
} else {
replicatedRecordStore.cancelTtlEntry(marshalledKey);
}
V unmarshalledOldValue = (V) replicatedRecordStore.unmarshallValue(oldValue);
if (unmarshalledOldValue == null || !unmarshalledOldValue.equals(update.getValue())
|| update.getTtlMillis() != oldTtlMillis) {
replicatedRecordStore.fireEntryListenerEvent(update.getKey(), unmarshalledOldValue, update.getValue());
}
}
private Collection<EventRegistration> filterEventRegistrations(Collection<EventRegistration> eventRegistrations) {
Address address = ((MemberImpl) localMember).getAddress();
List<EventRegistration> registrations = new ArrayList<EventRegistration>(eventRegistrations);
Iterator<EventRegistration> iterator = registrations.iterator();
while (iterator.hasNext()) {
EventRegistration registration = iterator.next();
if (address.equals(registration.getSubscriber())) {
iterator.remove();
}
}
return registrations;
}
private PreReplicationHook getPreReplicationHook() {
if (!allowReplicationHooks) {
return null;
}
return preReplicationHook;
}
private ScheduledExecutorService getExecutorService(NodeEngine nodeEngine, ReplicatedMapConfig replicatedMapConfig) {
ScheduledExecutorService es = replicatedMapConfig.getReplicatorExecutorService();
if (es == null) {
es = nodeEngine.getExecutionService().getDefaultScheduledExecutor();
}
return new WrappedExecutorService(es);
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_replicatedmap_record_ReplicationPublisher.java |
2,890 | return new NGramTokenizer(version, reader, minGram, maxGram) {
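// Delegate token-character classification to the configured CharMatcher.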
@Override
protected boolean isTokenChar(int chr) {
return matcher.isTokenChar(chr);
}
}; | 0true
| src_main_java_org_elasticsearch_index_analysis_NGramTokenizerFactory.java |
807 | execute(request, new ActionListener<MultiPercolateResponse>() {
@Override
public void onResponse(MultiPercolateResponse response) {
try {
channel.sendResponse(response);
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(e);
} catch (Exception e1) {
logger.warn("Failed to send error response for action [mpercolate] and request [" + request + "]", e1);
}
}
}); | 0true
| src_main_java_org_elasticsearch_action_percolate_TransportMultiPercolateAction.java |
3,142 | engine.recover(new Engine.RecoveryHandler() {
@Override
public void phase1(SnapshotIndexCommit snapshot) throws EngineException {
try {
engine.flush(new Engine.Flush());
assertThat("flush is not allowed in phase 3", false, equalTo(true));
} catch (FlushNotAllowedEngineException e) {
// all is well
}
}
@Override
public void phase2(Translog.Snapshot snapshot) throws EngineException {
MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
try {
engine.flush(new Engine.Flush());
assertThat("flush is not allowed in phase 3", false, equalTo(true));
} catch (FlushNotAllowedEngineException e) {
// all is well
}
}
@Override
public void phase3(Translog.Snapshot snapshot) throws EngineException {
MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
try {
// we can do this here since we are on the same thread
engine.flush(new Engine.Flush());
assertThat("flush is not allowed in phase 3", false, equalTo(true));
} catch (FlushNotAllowedEngineException e) {
// all is well
}
}
}); | 0true
| src_test_java_org_elasticsearch_index_engine_internal_InternalEngineTests.java |
6,420 | public class LocalTransportChannel implements TransportChannel {
private final LocalTransport sourceTransport;
// the transport we will *send to*
private final LocalTransport targetTransport;
private final String action;
private final long requestId;
private final Version version;
public LocalTransportChannel(LocalTransport sourceTransport, LocalTransport targetTransport, String action, long requestId, Version version) {
this.sourceTransport = sourceTransport;
this.targetTransport = targetTransport;
this.action = action;
this.requestId = requestId;
this.version = version;
}
@Override
public String action() {
return action;
}
@Override
public void sendResponse(TransportResponse response) throws IOException {
sendResponse(response, TransportResponseOptions.EMPTY);
}
@Override
public void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException {
BytesStreamOutput bStream = new BytesStreamOutput();
StreamOutput stream = new HandlesStreamOutput(bStream);
stream.setVersion(version);
stream.writeLong(requestId);
byte status = 0;
status = TransportStatus.setResponse(status);
stream.writeByte(status); // 0 for request, 1 for response.
response.writeTo(stream);
stream.close();
final byte[] data = bStream.bytes().toBytes();
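// Deliver the serialized response asynchronously on the target transport's
// generic thread pool.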
targetTransport.threadPool().generic().execute(new Runnable() {
@Override
public void run() {
targetTransport.messageReceived(data, action, sourceTransport, version, null);
}
});
}
@Override
public void sendResponse(Throwable error) throws IOException {
BytesStreamOutput stream = new BytesStreamOutput();
try {
writeResponseExceptionHeader(stream);
RemoteTransportException tx = new RemoteTransportException(targetTransport.nodeName(), targetTransport.boundAddress().boundAddress(), action, error);
ThrowableObjectOutputStream too = new ThrowableObjectOutputStream(stream);
too.writeObject(tx);
too.close();
} catch (NotSerializableException e) {
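// The original error is not serializable; wrap it in a
// NotSerializableTransportException instead.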
stream.reset();
writeResponseExceptionHeader(stream);
RemoteTransportException tx = new RemoteTransportException(targetTransport.nodeName(), targetTransport.boundAddress().boundAddress(), action, new NotSerializableTransportException(error));
ThrowableObjectOutputStream too = new ThrowableObjectOutputStream(stream);
too.writeObject(tx);
too.close();
}
final byte[] data = stream.bytes().toBytes();
targetTransport.threadPool().generic().execute(new Runnable() {
@Override
public void run() {
targetTransport.messageReceived(data, action, sourceTransport, version, null);
}
});
}
private void writeResponseExceptionHeader(BytesStreamOutput stream) throws IOException {
stream.writeLong(requestId);
byte status = 0;
status = TransportStatus.setResponse(status);
status = TransportStatus.setError(status);
stream.writeByte(status);
}
} | 1no label
| src_main_java_org_elasticsearch_transport_local_LocalTransportChannel.java |
774 | public class SkuAvailabilityTest extends BaseTest {
protected final Long[] skuIDs = {1L, 2L, 3L, 4L, 5L};
protected final List<Long> skuIdList = Arrays.asList(skuIDs);
@Resource
private AvailabilityService availabilityService;
@Test(groups = { "createSkuAvailability" }, dataProvider = "setupSkuAvailability", dataProviderClass = SkuAvailabilityDataProvider.class)
@Rollback(false)
public void createSkuAvailability(SkuAvailability skuAvailability) {
availabilityService.save(skuAvailability);
}
@Test(dependsOnGroups = { "createSkuAvailability" })
public void readSKUAvailabilityEntries() {
List<SkuAvailability> skuAvailabilityList = availabilityService.lookupSKUAvailability(skuIdList, false);
assert(skuAvailabilityList.size() == 5);
int backorderCount=0;
int availableCount=0;
for (SkuAvailability skuAvailability : skuAvailabilityList) {
if (skuAvailability.getAvailabilityStatus() != null && skuAvailability.getAvailabilityStatus().equals(AvailabilityStatusType.BACKORDERED)) {
backorderCount++;
}
if (skuAvailability.getAvailabilityStatus() != null && skuAvailability.getAvailabilityStatus().equals(AvailabilityStatusType.AVAILABLE)) {
availableCount++;
}
}
assert(backorderCount == 1);
assert(availableCount == 1);
}
@Test(dependsOnGroups = { "createSkuAvailability" })
public void readAvailableSkusForUnknownLocation() {
List<SkuAvailability> skuAvailabilityList = availabilityService.lookupSKUAvailabilityForLocation(skuIdList, 100L, false);
assert(skuAvailabilityList.size() == 0);
}
@Test(dependsOnGroups = { "createSkuAvailability" })
public void readAvailableSkusForLocation() {
List<SkuAvailability> skuAvailabilityList = availabilityService.lookupSKUAvailabilityForLocation(skuIdList, 1L, false);
assert(skuAvailabilityList.size() == 5);
}
@Test(dependsOnGroups = { "createSkuAvailability" })
public void checkAvailableQuantityWithReserveAndQOH() {
SkuAvailability skuAvailability = availabilityService.lookupSKUAvailabilityForLocation(2L, 1L, false);
assert(skuAvailability.getReserveQuantity() == 1 && skuAvailability.getQuantityOnHand() == 5);
assert(skuAvailability.getAvailableQuantity() == 4);
}
@Test(dependsOnGroups = { "createSkuAvailability" })
public void checkAvailableQuantityWithNullReserveQty() {
SkuAvailability skuAvailability = availabilityService.lookupSKUAvailabilityForLocation(5L, 1L, false);
assert(skuAvailability.getReserveQuantity() == null && skuAvailability.getQuantityOnHand() == 5);
assert(skuAvailability.getAvailableQuantity() == 5);
}
@Test(dependsOnGroups = { "createSkuAvailability" })
public void checkAvailableQuantityWithNullQuantityOnHand() {
SkuAvailability skuAvailability = availabilityService.lookupSKUAvailabilityForLocation(1L, 1L, false);
assert(skuAvailability.getReserveQuantity() == 1 && skuAvailability.getQuantityOnHand() == null);
assert(skuAvailability.getAvailableQuantity() == null);
}
} | 0true
| integration_src_test_java_org_broadleafcommerce_core_inventory_service_SkuAvailabilityTest.java |
2,159 | public class TxnSetOperation extends BasePutOperation implements MapTxnOperation {
private long version;
private transient boolean shouldBackup;
private String ownerUuid;
public TxnSetOperation() {
}
public TxnSetOperation(String name, Data dataKey, Data value, long version) {
super(name, dataKey, value);
this.version = version;
}
public TxnSetOperation(String name, Data dataKey, Data value, long version, long ttl) {
super(name, dataKey, value);
this.version = version;
this.ttl = ttl;
}
@Override
public boolean shouldWait() {
return !recordStore.canAcquireLock(dataKey, ownerUuid, getThreadId());
}
@Override
public void run() {
recordStore.unlock(dataKey, ownerUuid, getThreadId());
Record record = recordStore.getRecord(dataKey);
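// Optimistic check: apply the set only if the record is new or its version still
// matches the version observed inside the transaction.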
if (record == null || version == record.getVersion()) {
recordStore.set(dataKey, dataValue, ttl);
shouldBackup = true;
}
}
public long getVersion() {
return version;
}
public void setVersion(long version) {
this.version = version;
}
@Override
public void setOwnerUuid(String ownerUuid) {
this.ownerUuid = ownerUuid;
}
@Override
public Object getResponse() {
return Boolean.TRUE;
}
public boolean shouldNotify() {
return true;
}
public Operation getBackupOperation() {
RecordInfo replicationInfo = mapService.createRecordInfo(recordStore.getRecord(dataKey));
return new PutBackupOperation(name, dataKey, dataValue, replicationInfo, true);
}
public void onWaitExpire() {
final ResponseHandler responseHandler = getResponseHandler();
responseHandler.sendResponse(false);
}
@Override
public boolean shouldBackup() {
return shouldBackup;
}
public WaitNotifyKey getNotifiedKey() {
return getWaitKey();
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeLong(version);
out.writeUTF(ownerUuid);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
version = in.readLong();
ownerUuid = in.readUTF();
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_map_tx_TxnSetOperation.java |
400 | @XmlAccessorType(XmlAccessType.FIELD)
public class Money implements Serializable, Cloneable, Comparable<Money>, Externalizable {
private static final long serialVersionUID = 1L;
@XmlElement
@XmlJavaTypeAdapter(value = BigDecimalRoundingAdapter.class)
private BigDecimal amount;
@XmlElement
@XmlJavaTypeAdapter(CurrencyAdapter.class)
private final Currency currency;
public static final Money ZERO = new Money(BigDecimal.ZERO);
protected static String getCurrencyCode(BroadleafCurrency blCurrency) {
if (blCurrency != null) {
return blCurrency.getCurrencyCode();
} else {
return defaultCurrency().getCurrencyCode();
}
}
public Money(Currency currency) {
this(BankersRounding.zeroAmount(), currency);
}
public Money(BroadleafCurrency blCurrency) {
this(0, Currency.getInstance(getCurrencyCode(blCurrency)));
}
public Money(BigDecimal amount, BroadleafCurrency blCurrency) {
this(amount, Currency.getInstance(getCurrencyCode(blCurrency)));
}
public Money(BigDecimal amount, BroadleafCurrency blCurrency, int scale) {
this(amount, Currency.getInstance(getCurrencyCode(blCurrency)), scale);
}
public Money() {
this(BankersRounding.zeroAmount(), defaultCurrency());
}
public Money(BigDecimal amount) {
this(amount, defaultCurrency());
}
public Money(double amount) {
this(valueOf(amount), defaultCurrency());
}
public Money(int amount) {
this(BigDecimal.valueOf(amount).setScale(BankersRounding.getScaleForCurrency(defaultCurrency()),
RoundingMode.HALF_EVEN), defaultCurrency());
}
public Money(long amount) {
this(BigDecimal.valueOf(amount).setScale(BankersRounding.getScaleForCurrency(defaultCurrency()),
RoundingMode.HALF_EVEN), defaultCurrency());
}
public Money(String amount) {
this(valueOf(amount), defaultCurrency());
}
public Money(BigDecimal amount, String currencyCode) {
this(amount, Currency.getInstance(currencyCode));
}
public Money(double amount, Currency currency) {
this(valueOf(amount), currency);
}
public Money(double amount, String currencyCode) {
this(valueOf(amount), Currency.getInstance(currencyCode));
}
public Money(int amount, Currency currency) {
this(BigDecimal.valueOf(amount).setScale(BankersRounding.getScaleForCurrency(currency), RoundingMode.HALF_EVEN), currency);
}
public Money(int amount, String currencyCode) {
this(BigDecimal.valueOf(amount).setScale(BankersRounding.getScaleForCurrency(Currency.getInstance(currencyCode)), RoundingMode.HALF_EVEN), Currency.getInstance(currencyCode));
}
public Money(long amount, Currency currency) {
this(BigDecimal.valueOf(amount).setScale(BankersRounding.getScaleForCurrency(currency), RoundingMode.HALF_EVEN), currency);
}
public Money(long amount, String currencyCode) {
this(BigDecimal.valueOf(amount).setScale(BankersRounding.getScaleForCurrency(Currency.getInstance(currencyCode)), RoundingMode.HALF_EVEN), Currency.getInstance(currencyCode));
}
public Money(String amount, Currency currency) {
this(valueOf(amount), currency);
}
public Money(String amount, String currencyCode) {
this(valueOf(amount), Currency.getInstance(currencyCode));
}
public Money(BigDecimal amount, Currency currency) {
if (currency == null) {
throw new IllegalArgumentException("currency cannot be null");
}
this.currency = currency;
this.amount = BankersRounding.setScale(BankersRounding.getScaleForCurrency(currency), amount);
}
public Money(BigDecimal amount, Currency currency, int scale) {
if (currency == null) {
throw new IllegalArgumentException("currency cannot be null");
}
this.currency = currency;
this.amount = BankersRounding.setScale(amount, scale);
}
public BigDecimal getAmount() {
return amount;
}
public Currency getCurrency() {
return currency;
}
public Money add(Money other) {
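// Amounts in a different currency are converted when a conversion service is
// registered; otherwise mixed-currency addition is rejected below.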
if (!other.getCurrency().equals(getCurrency())) {
if (
CurrencyConversionContext.getCurrencyConversionContext() != null &&
CurrencyConversionContext.getCurrencyConversionContext().size() > 0 &&
CurrencyConversionContext.getCurrencyConversionService() != null
) {
other = CurrencyConversionContext.getCurrencyConversionService().convertCurrency(other, getCurrency(), amount.scale());
} else {
throw new UnsupportedOperationException("No currency conversion service is registered, cannot add different currency " +
"types together (" + getCurrency().getCurrencyCode() + " " + other.getCurrency().getCurrencyCode() + ")");
}
}
return new Money(amount.add(other.amount), currency, amount.scale() == 0 ? BankersRounding.getScaleForCurrency(currency) : amount.scale());
}
public Money subtract(Money other) {
if (!other.getCurrency().equals(getCurrency())) {
if (
CurrencyConversionContext.getCurrencyConversionContext() != null &&
CurrencyConversionContext.getCurrencyConversionContext().size() > 0 &&
CurrencyConversionContext.getCurrencyConversionService() != null
) {
other = CurrencyConversionContext.getCurrencyConversionService().convertCurrency(other, getCurrency(), amount.scale());
} else {
throw new UnsupportedOperationException("No currency conversion service is registered, cannot subtract different currency " +
"types (" + getCurrency().getCurrencyCode() + ", " + other.getCurrency().getCurrencyCode() + ")");
}
}
return new Money(amount.subtract(other.amount), currency, amount.scale() == 0 ? BankersRounding.getScaleForCurrency(currency) : amount.scale());
}
public Money multiply(double amount) {
return multiply(valueOf(amount));
}
public Money multiply(int amount) {
BigDecimal value = BigDecimal.valueOf(amount);
value = value.setScale(BankersRounding.getScaleForCurrency(currency), RoundingMode.HALF_EVEN);
return multiply(value);
}
public Money multiply(BigDecimal multiplier) {
return new Money(amount.multiply(multiplier), currency, amount.scale() == 0 ? BankersRounding.getScaleForCurrency(currency) : amount.scale());
}
public Money divide(double amount) {
return this.divide(amount, RoundingMode.HALF_EVEN);
}
public Money divide(double amount, RoundingMode roundingMode) {
return divide(valueOf(amount), roundingMode);
}
public Money divide(int amount) {
return this.divide(amount, RoundingMode.HALF_EVEN);
}
public Money divide(int amount, RoundingMode roundingMode) {
BigDecimal value = BigDecimal.valueOf(amount);
value = value.setScale(BankersRounding.getScaleForCurrency(currency), RoundingMode.HALF_EVEN);
return divide(value, roundingMode);
}
public Money divide(BigDecimal divisor) {
return this.divide(divisor, RoundingMode.HALF_EVEN);
}
public Money divide(BigDecimal divisor, RoundingMode roundingMode) {
return new Money(amount.divide(divisor, amount.scale(), roundingMode), currency, amount.scale() == 0 ? BankersRounding.getScaleForCurrency(currency) : amount.scale());
}
public Money abs() {
return new Money(amount.abs(), currency);
}
public Money min(Money other) {
if (other == null) { return this; }
return lessThan(other) ? this : other;
}
public Money max(Money other) {
if (other == null) { return this; }
return greaterThan(other) ? this : other;
}
public Money negate() {
return new Money(amount.negate(), currency);
}
public boolean isZero() {
return amount.compareTo(BankersRounding.zeroAmount()) == 0;
}
public Money zero() {
return Money.zero(currency);
}
public boolean lessThan(Money other) {
return compareTo(other) < 0;
}
public boolean lessThan(BigDecimal value) {
return amount.compareTo(value) < 0;
}
public boolean lessThanOrEqual(Money other) {
return compareTo(other) <= 0;
}
public boolean lessThanOrEqual(BigDecimal value) {
return amount.compareTo(value) <= 0;
}
public boolean greaterThan(Money other) {
return compareTo(other) > 0;
}
public boolean greaterThan(BigDecimal value) {
return amount.compareTo(value) > 0;
}
public boolean greaterThanOrEqual(Money other) {
return compareTo(other) >= 0;
}
public boolean greaterThanOrEqual(BigDecimal value) {
return amount.compareTo(value) >= 0;
}
@Override
public int compareTo(Money other) {
return amount.compareTo(other.amount);
}
public int compareTo(BigDecimal value) {
return amount.compareTo(value);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Money)) {
return false;
}
Money money = (Money) o;
if (amount != null ? !amount.equals(money.amount) : money.amount != null) {
return false;
}
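// Zero amounts compare equal regardless of currency.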
if (isZero()) {
return true;
}
if (currency != null ? !currency.equals(money.currency) : money.currency != null) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = amount != null ? amount.hashCode() : 0;
result = 31 * result + (currency != null ? currency.hashCode() : 0);
return result;
}
@Override
public Object clone() {
return new Money(amount, currency);
}
@Override
public String toString() {
return amount.toString();
}
public double doubleValue() {
try {
return amount.doubleValue();
} catch (NumberFormatException e) {
// HotSpot bug in JVM < 1.4.2_06.
if (e.getMessage().equals("For input string: \"0.00null\"")) {
return amount.doubleValue();
} else {
throw e;
}
}
}
public String stringValue() {
return amount.toString() + " " + currency.getCurrencyCode();
}
public static Money zero(String currencyCode) {
return zero(Currency.getInstance(currencyCode));
}
public static Money zero(Currency currency) {
return new Money(BankersRounding.zeroAmount(), currency);
}
public static Money abs(Money money) {
return new Money(money.amount.abs(), money.currency);
}
public static Money min(Money left, Money right) {
return left.min(right);
}
public static Money max(Money left, Money right) {
return left.max(right);
}
public static BigDecimal toAmount(Money money) {
return ((money == null) ? null : money.amount);
}
public static Currency toCurrency(Money money) {
return ((money == null) ? null : money.currency);
}
/**
* Ensures predictable results by converting the double into a string then calling the BigDecimal string constructor.
* @param amount The amount
* @return BigDecimal a big decimal with a predictable value
*/
private static BigDecimal valueOf(double amount) {
return valueOf(String.valueOf(amount));
}
private static BigDecimal valueOf(String amount) {
BigDecimal value = new BigDecimal(amount);
if (value.scale() < 2) {
value = value.setScale(BankersRounding.getScaleForCurrency(defaultCurrency()), RoundingMode.HALF_EVEN);
}
return value;
}
/**
* Attempts to load a default currency by using the default locale. {@link Currency#getInstance(Locale)} uses the country component of the locale to resolve the currency. In some instances, the locale may not have a country component, in which case the default currency can be controlled with a
* system property.
* @return The default currency to use when none is specified
*/
public static Currency defaultCurrency() {
if (
CurrencyConsiderationContext.getCurrencyConsiderationContext() != null &&
CurrencyConsiderationContext.getCurrencyConsiderationContext().size() > 0 &&
CurrencyConsiderationContext.getCurrencyDeterminationService() != null
) {
return Currency.getInstance(CurrencyConsiderationContext.getCurrencyDeterminationService().getCurrencyCode(CurrencyConsiderationContext.getCurrencyConsiderationContext()));
}
// Check the BLC Thread
BroadleafRequestContext brc = BroadleafRequestContext.getBroadleafRequestContext();
if (brc != null && brc.getBroadleafCurrency() != null) {
assert brc.getBroadleafCurrency().getCurrencyCode()!=null;
return Currency.getInstance(brc.getBroadleafCurrency().getCurrencyCode());
}
if (System.getProperty("currency.default") != null) {
return Currency.getInstance(System.getProperty("currency.default"));
}
Locale locale = Locale.getDefault();
if (locale.getCountry() != null && locale.getCountry().length() == 2) {
return Currency.getInstance(locale);
}
return Currency.getInstance("USD");
}
@Override
public void readExternal(ObjectInput in) throws IOException,ClassNotFoundException {
// Read in the server properties from the client representation.
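// Note: round-tripping the amount through a float can lose precision for large
// or high-scale values.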
amount = new BigDecimal(in.readFloat());
}
@Override
public void writeExternal(ObjectOutput out) throws IOException {
// Write out the client properties from the server representation.
out.writeFloat(amount.floatValue());
// out.writeObject(currency);
}
} | 1no label
| common_src_main_java_org_broadleafcommerce_common_money_Money.java |
69 | class AssignToIfExistsProposal extends LocalProposal {
protected DocumentChange createChange(IDocument document, Node expanse,
Integer stopIndex) {
DocumentChange change =
new DocumentChange("Assign to If Exists", document);
change.setEdit(new MultiTextEdit());
change.addEdit(new InsertEdit(offset, "if (exists " + initialName + " = "));
String terminal = expanse.getEndToken().getText();
if (!terminal.equals(";")) {
change.addEdit(new InsertEdit(stopIndex+1, ") {}"));
exitPos = stopIndex+9;
}
else {
change.addEdit(new ReplaceEdit(stopIndex, 1, ") {}"));
exitPos = stopIndex+8;
}
return change;
}
public AssignToIfExistsProposal(Tree.CompilationUnit cu,
Node node, int currentOffset) {
super(cu, node, currentOffset);
}
protected void addLinkedPositions(IDocument document, Unit unit)
throws BadLocationException {
// ProposalPosition typePosition =
// new ProposalPosition(document, offset, 5, 1,
// getSupertypeProposals(offset, unit,
// type, true, "value"));
ProposalPosition namePosition =
new ProposalPosition(document, offset+11, initialName.length(), 0,
getNameProposals(offset+11, 0, nameProposals));
// LinkedMode.addLinkedPosition(linkedModeModel, typePosition);
LinkedMode.addLinkedPosition(linkedModeModel, namePosition);
}
@Override
String[] computeNameProposals(Node expression) {
return super.computeNameProposals(expression);
}
@Override
public String getDisplayString() {
return "Assign expression to 'if (exists)' condition";
}
@Override
boolean isEnabled(ProducedType resultType) {
return resultType!=null &&
rootNode.getUnit().isOptionalType(resultType);
}
static void addAssignToIfExistsProposal(Tree.CompilationUnit cu,
Collection<ICompletionProposal> proposals,
Node node, int currentOffset) {
AssignToIfExistsProposal prop =
new AssignToIfExistsProposal(cu, node, currentOffset);
if (prop.isEnabled()) {
proposals.add(prop);
}
}
} | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_AssignToIfExistsProposal.java |
1,485 | public static class Builder {
private long version;
private final Map<String, IndexRoutingTable> indicesRouting = newHashMap();
public Builder() {
}
public Builder(RoutingTable routingTable) {
version = routingTable.version;
for (IndexRoutingTable indexRoutingTable : routingTable) {
indicesRouting.put(indexRoutingTable.index(), indexRoutingTable);
}
}
public Builder updateNodes(RoutingNodes routingNodes) {
// this is being called without pre-initializing the routing table, so we must copy over the version as well
this.version = routingNodes.routingTable().version();
Map<String, IndexRoutingTable.Builder> indexRoutingTableBuilders = newHashMap();
for (RoutingNode routingNode : routingNodes) {
for (MutableShardRouting shardRoutingEntry : routingNode) {
// every relocating shard has a double entry, ignore the target one.
if (shardRoutingEntry.state() == ShardRoutingState.INITIALIZING && shardRoutingEntry.relocatingNodeId() != null)
continue;
String index = shardRoutingEntry.index();
IndexRoutingTable.Builder indexBuilder = indexRoutingTableBuilders.get(index);
if (indexBuilder == null) {
indexBuilder = new IndexRoutingTable.Builder(index);
indexRoutingTableBuilders.put(index, indexBuilder);
}
IndexShardRoutingTable refData = routingNodes.routingTable().index(shardRoutingEntry.index()).shard(shardRoutingEntry.id());
indexBuilder.addShard(refData, shardRoutingEntry);
}
}
for (MutableShardRouting shardRoutingEntry : Iterables.concat(routingNodes.unassigned(), routingNodes.ignoredUnassigned())) {
String index = shardRoutingEntry.index();
IndexRoutingTable.Builder indexBuilder = indexRoutingTableBuilders.get(index);
if (indexBuilder == null) {
indexBuilder = new IndexRoutingTable.Builder(index);
indexRoutingTableBuilders.put(index, indexBuilder);
}
IndexShardRoutingTable refData = routingNodes.routingTable().index(shardRoutingEntry.index()).shard(shardRoutingEntry.id());
indexBuilder.addShard(refData, shardRoutingEntry);
}
for (ShardId shardId : routingNodes.getShardsToClearPostAllocationFlag()) {
IndexRoutingTable.Builder indexRoutingBuilder = indexRoutingTableBuilders.get(shardId.index().name());
if (indexRoutingBuilder != null) {
indexRoutingBuilder.clearPostAllocationFlag(shardId);
}
}
for (IndexRoutingTable.Builder indexBuilder : indexRoutingTableBuilders.values()) {
add(indexBuilder);
}
return this;
}
public Builder updateNumberOfReplicas(int numberOfReplicas, String... indices) throws IndexMissingException {
if (indices == null || indices.length == 0) {
indices = indicesRouting.keySet().toArray(new String[indicesRouting.keySet().size()]);
}
for (String index : indices) {
IndexRoutingTable indexRoutingTable = indicesRouting.get(index);
if (indexRoutingTable == null) {
// ignore index missing failure, it's closed...
continue;
}
int currentNumberOfReplicas = indexRoutingTable.shards().get(0).size() - 1; // remove the required primary
IndexRoutingTable.Builder builder = new IndexRoutingTable.Builder(index);
// re-add all the shards
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
builder.addIndexShard(indexShardRoutingTable);
}
if (currentNumberOfReplicas < numberOfReplicas) {
// now, add "empty" ones
for (int i = 0; i < (numberOfReplicas - currentNumberOfReplicas); i++) {
builder.addReplica();
}
} else if (currentNumberOfReplicas > numberOfReplicas) {
int delta = currentNumberOfReplicas - numberOfReplicas;
if (delta <= 0) {
// ignore, can't remove below the current one...
} else {
for (int i = 0; i < delta; i++) {
builder.removeReplica();
}
}
}
indicesRouting.put(index, builder.build());
}
return this;
}
public Builder addAsNew(IndexMetaData indexMetaData) {
if (indexMetaData.state() == IndexMetaData.State.OPEN) {
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.index())
.initializeAsNew(indexMetaData);
add(indexRoutingBuilder);
}
return this;
}
public Builder addAsRecovery(IndexMetaData indexMetaData) {
if (indexMetaData.state() == IndexMetaData.State.OPEN) {
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.index())
.initializeAsRecovery(indexMetaData);
add(indexRoutingBuilder);
}
return this;
}
public Builder addAsRestore(IndexMetaData indexMetaData, RestoreSource restoreSource) {
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.index())
.initializeAsRestore(indexMetaData, restoreSource);
add(indexRoutingBuilder);
return this;
}
public Builder addAsNewRestore(IndexMetaData indexMetaData, RestoreSource restoreSource) {
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.index())
.initializeAsNewRestore(indexMetaData, restoreSource);
add(indexRoutingBuilder);
return this;
}
public Builder add(IndexRoutingTable indexRoutingTable) {
indexRoutingTable.validate();
indicesRouting.put(indexRoutingTable.index(), indexRoutingTable);
return this;
}
public Builder add(IndexRoutingTable.Builder indexRoutingTableBuilder) {
add(indexRoutingTableBuilder.build());
return this;
}
public Builder remove(String index) {
indicesRouting.remove(index);
return this;
}
public Builder version(long version) {
this.version = version;
return this;
}
public RoutingTable build() {
// normalize the versions right before we build it...
for (IndexRoutingTable indexRoutingTable : indicesRouting.values()) {
indicesRouting.put(indexRoutingTable.index(), indexRoutingTable.normalizeVersions());
}
return new RoutingTable(version, indicesRouting);
}
public static RoutingTable readFrom(StreamInput in) throws IOException {
Builder builder = new Builder();
builder.version = in.readLong();
int size = in.readVInt();
for (int i = 0; i < size; i++) {
IndexRoutingTable index = IndexRoutingTable.Builder.readFrom(in);
builder.add(index);
}
return builder.build();
}
public static void writeTo(RoutingTable table, StreamOutput out) throws IOException {
out.writeLong(table.version);
out.writeVInt(table.indicesRouting.size());
for (IndexRoutingTable index : table.indicesRouting.values()) {
IndexRoutingTable.Builder.writeTo(index, out);
}
}
} | 0true
| src_main_java_org_elasticsearch_cluster_routing_RoutingTable.java |
624 | indexEngine.getEntriesMajor(iRangeFrom, isInclusive, null, new OIndexEngine.EntriesResultListener() {
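// Forward each matching index entry to the caller-supplied result listener.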
@Override
public boolean addResult(ODocument entry) {
return entriesResultListener.addResult(entry);
}
}); | 1no label
| core_src_main_java_com_orientechnologies_orient_core_index_OIndexOneValue.java |