Unnamed: 0 (int64, 0 to 6.45k) | func (string, lengths 37 to 143k) | target (class label, 2 classes) | project (string, lengths 33 to 157)
---|---|---|---|
51 |
public class InstanceId implements Externalizable, Comparable<InstanceId>
{
private int serverId;
public InstanceId()
{}
public InstanceId( int serverId )
{
this.serverId = serverId;
}
@Override
public int compareTo( InstanceId o )
{
return serverId - o.serverId;
}
@Override
public int hashCode()
{
return serverId;
}
@Override
public String toString()
{
return Integer.toString( serverId );
}
@Override
public boolean equals( Object o )
{
if ( this == o )
{
return true;
}
if ( o == null || getClass() != o.getClass() )
{
return false;
}
InstanceId instanceId1 = (InstanceId) o;
if ( serverId != instanceId1.serverId )
{
return false;
}
return true;
}
@Override
public void writeExternal( ObjectOutput out ) throws IOException
{
out.writeInt( serverId );
}
@Override
public void readExternal( ObjectInput in ) throws IOException, ClassNotFoundException
{
serverId = in.readInt();
}
public int toIntegerIndex()
{
return serverId;
}
}
| 1 (no label)
|
enterprise_cluster_src_main_java_org_neo4j_cluster_InstanceId.java
|
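Added illustration (not a dataset row): a minimal sketch of round-tripping the InstanceId class above through Java serialization, which is what invokes its writeExternal/readExternal pair. It assumes the class is available on the classpath; the demo class name is hypothetical.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

public class InstanceIdRoundTrip {
    public static void main(String[] args) throws Exception {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
            out.writeObject(new InstanceId(42)); // drives writeExternal
        }
        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            InstanceId copy = (InstanceId) in.readObject(); // public no-arg constructor, then readExternal
            System.out.println(copy.toIntegerIndex()); // prints 42
        }
    }
}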
795 |
public class ODatabaseFunction implements OSQLFunction {
private final OFunction f;
public ODatabaseFunction(final OFunction f) {
this.f = f;
}
@Override
public Object execute(final OIdentifiable iCurrentRecord, Object iCurrentResult, final Object[] iFuncParams, final OCommandContext iContext) {
return f.executeInContext(iContext, iFuncParams);
}
@Override
public boolean aggregateResults() {
return false;
}
@Override
public boolean filterResult() {
return false;
}
@Override
public String getName() {
return f.getName();
}
@Override
public int getMinParams() {
return 0;
}
@Override
public int getMaxParams() {
return f.getParameters().size();
}
@Override
public String getSyntax() {
final StringBuilder buffer = new StringBuilder();
buffer.append(f.getName());
buffer.append('(');
final List<String> params = f.getParameters();
for (int p = 0; p < params.size(); ++p) {
if (p > 0)
buffer.append(',');
buffer.append(params.get(p));
}
buffer.append(')');
return buffer.toString();
}
@Override
public Object getResult() {
return null;
}
@Override
public void setResult(final Object iResult) {
}
@Override
public void config(final Object[] configuredParameters) {
}
@Override
public boolean shouldMergeDistributedResult() {
return false;
}
@Override
public Object mergeDistributedResult(List<Object> resultsToMerge) {
return null;
}
}
| 1 (no label)
|
core_src_main_java_com_orientechnologies_orient_core_metadata_function_ODatabaseFunction.java
|
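Added illustration (hypothetical class and parameter names): a self-contained sketch of the syntax-string construction that getSyntax() above performs, joining the function name with its parameter list.
import java.util.Arrays;
import java.util.List;

public class SyntaxDemo {
    static String syntax(String name, List<String> params) {
        StringBuilder buffer = new StringBuilder(name).append('(');
        for (int p = 0; p < params.size(); ++p) {
            if (p > 0) {
                buffer.append(',');
            }
            buffer.append(params.get(p));
        }
        return buffer.append(')').toString();
    }

    public static void main(String[] args) {
        // prints: distance(x1,y1,x2,y2)
        System.out.println(syntax("distance", Arrays.asList("x1", "y1", "x2", "y2")));
    }
}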
380 |
public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteRequest> {
AllocationCommands commands = new AllocationCommands();
boolean dryRun;
public ClusterRerouteRequest() {
}
/**
* Adds allocation commands to be applied to the cluster. Note that the list can be empty, in which case
* a simple "reroute" is run.
*/
public ClusterRerouteRequest add(AllocationCommand... commands) {
this.commands.add(commands);
return this;
}
/**
* Sets a dry run flag (defaults to <tt>false</tt>) that allows the commands to be run without
* actually applying them to the cluster state, while still returning the resulting cluster state.
*/
public ClusterRerouteRequest dryRun(boolean dryRun) {
this.dryRun = dryRun;
return this;
}
/**
* Returns the current dry run flag, which allows the commands to be run without actually applying them,
* just to get the resulting cluster state back.
*/
public boolean dryRun() {
return this.dryRun;
}
/**
* Sets the source for the request.
*/
public ClusterRerouteRequest source(BytesReference source) throws Exception {
XContentParser parser = XContentHelper.createParser(source);
try {
XContentParser.Token token;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if ("commands".equals(currentFieldName)) {
this.commands = AllocationCommands.fromXContent(parser);
} else {
throw new ElasticsearchParseException("failed to parse reroute request, got start array with wrong field name [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if ("dry_run".equals(currentFieldName) || "dryRun".equals(currentFieldName)) {
dryRun = parser.booleanValue();
} else {
throw new ElasticsearchParseException("failed to parse reroute request, got value with wrong field name [" + currentFieldName + "]");
}
}
}
} finally {
parser.close();
}
return this;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
commands = AllocationCommands.readFrom(in);
dryRun = in.readBoolean();
readTimeout(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
AllocationCommands.writeTo(commands, out);
out.writeBoolean(dryRun);
writeTimeout(out);
}
}
| 0 (true)
|
src_main_java_org_elasticsearch_action_admin_cluster_reroute_ClusterRerouteRequest.java
|
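Added illustration (hypothetical names): a sketch of the JSON body shape that source() above parses, a "dry_run" flag plus a "commands" array; as the javadoc notes, an empty commands array simply triggers a plain reroute.
public class RerouteBodyExample {
    public static void main(String[] args) {
        // Hypothetical request body; the allocation commands themselves are omitted,
        // and an empty array is explicitly allowed by the add(...) javadoc above.
        String rerouteBody = "{\n"
                + "  \"dry_run\": true,\n"
                + "  \"commands\": []\n"
                + "}";
        System.out.println(rerouteBody);
    }
}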
270 |
public interface OCommandPredicate {
/**
* Evaluates the predicate.
*
* @param iRecord
* Target record
* @param iCurrentResult TODO
* @param iContext
* Context of execution
* @return The result of predicate
*/
public Object evaluate(final ORecord<?> iRecord, ODocument iCurrentResult, final OCommandContext iContext);
}
| 0 (true)
|
core_src_main_java_com_orientechnologies_orient_core_command_OCommandPredicate.java
|
3,793 |
public class PercolateStats implements Streamable, ToXContent {
private long percolateCount;
private long percolateTimeInMillis;
private long current;
private long memorySizeInBytes;
private long numQueries;
/**
* No-op constructor for serialization purposes.
*/
public PercolateStats() {
}
PercolateStats(long percolateCount, long percolateTimeInMillis, long current, long memorySizeInBytes, long numQueries) {
this.percolateCount = percolateCount;
this.percolateTimeInMillis = percolateTimeInMillis;
this.current = current;
this.memorySizeInBytes = memorySizeInBytes;
this.numQueries = numQueries;
}
/**
* @return The number of times the percolate api has been invoked.
*/
public long getCount() {
return percolateCount;
}
/**
* @return The total amount of time spent in the percolate api
*/
public long getTimeInMillis() {
return percolateTimeInMillis;
}
/**
* @return The total amount of time spent in the percolate api
*/
public TimeValue getTime() {
return new TimeValue(getTimeInMillis());
}
/**
* @return The total amount of active percolate api invocations.
*/
public long getCurrent() {
return current;
}
/**
* @return The total number of loaded percolate queries.
*/
public long getNumQueries() {
return numQueries;
}
/**
* @return The total size the loaded queries take in memory.
*/
public long getMemorySizeInBytes() {
return memorySizeInBytes;
}
/**
* @return The total size the loaded queries take in memory.
*/
public ByteSizeValue getMemorySize() {
return new ByteSizeValue(memorySizeInBytes);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.PERCOLATE);
builder.field(Fields.TOTAL, percolateCount);
builder.timeValueField(Fields.TIME_IN_MILLIS, Fields.TIME, percolateTimeInMillis);
builder.field(Fields.CURRENT, current);
builder.field(Fields.MEMORY_SIZE_IN_BYTES, memorySizeInBytes);
builder.field(Fields.MEMORY_SIZE, getMemorySize());
builder.field(Fields.QUERIES, getNumQueries());
builder.endObject();
return builder;
}
public void add(PercolateStats percolate) {
if (percolate == null) {
return;
}
percolateCount += percolate.getCount();
percolateTimeInMillis += percolate.getTimeInMillis();
current += percolate.getCurrent();
memorySizeInBytes += percolate.getMemorySizeInBytes();
numQueries += percolate.getNumQueries();
}
static final class Fields {
static final XContentBuilderString PERCOLATE = new XContentBuilderString("percolate");
static final XContentBuilderString TOTAL = new XContentBuilderString("total");
static final XContentBuilderString TIME = new XContentBuilderString("getTime");
static final XContentBuilderString TIME_IN_MILLIS = new XContentBuilderString("time_in_millis");
static final XContentBuilderString CURRENT = new XContentBuilderString("current");
static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes");
static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size");
static final XContentBuilderString QUERIES = new XContentBuilderString("queries");
}
public static PercolateStats readPercolateStats(StreamInput in) throws IOException {
PercolateStats stats = new PercolateStats();
stats.readFrom(in);
return stats;
}
@Override
public void readFrom(StreamInput in) throws IOException {
percolateCount = in.readVLong();
percolateTimeInMillis = in.readVLong();
current = in.readVLong();
memorySizeInBytes = in.readVLong();
numQueries = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(percolateCount);
out.writeVLong(percolateTimeInMillis);
out.writeVLong(current);
out.writeVLong(memorySizeInBytes);
out.writeVLong(numQueries);
}
}
| 1 (no label)
|
src_main_java_org_elasticsearch_index_percolator_stats_PercolateStats.java
|
16 |
final class DescendingEntrySetView extends EntrySetView {
@Override
public Iterator<Map.Entry<K, V>> iterator() {
return new DescendingSubMapEntryIterator(absHighest(), absLowFence());
}
}
| 0 (true)
|
commons_src_main_java_com_orientechnologies_common_collection_OMVRBTree.java
|
687 |
@Entity
@Polymorphism(type = PolymorphismType.EXPLICIT)
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_CATEGORY_XREF")
@AdminPresentationClass(excludeFromPolymorphism = false)
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
public class CategoryXrefImpl implements CategoryXref {
/** The Constant serialVersionUID. */
private static final long serialVersionUID = 1L;
/** The category id. */
@EmbeddedId
CategoryXrefPK categoryXrefPK = new CategoryXrefPK();
public CategoryXrefPK getCategoryXrefPK() {
return categoryXrefPK;
}
public void setCategoryXrefPK(final CategoryXrefPK categoryXrefPK) {
this.categoryXrefPK = categoryXrefPK;
}
@Column(name = "DISPLAY_ORDER")
@AdminPresentation(visibility = VisibilityEnum.HIDDEN_ALL)
protected Long displayOrder;
public Long getDisplayOrder() {
return displayOrder;
}
public void setDisplayOrder(final Long displayOrder) {
this.displayOrder = displayOrder;
}
/**
* @return
* @see org.broadleafcommerce.core.catalog.domain.CategoryXrefImpl.CategoryXrefPK#getCategory()
*/
public Category getCategory() {
return categoryXrefPK.getCategory();
}
/**
* @param category
* @see org.broadleafcommerce.core.catalog.domain.CategoryXrefImpl.CategoryXrefPK#setCategory(org.broadleafcommerce.core.catalog.domain.Category)
*/
public void setCategory(Category category) {
categoryXrefPK.setCategory(category);
}
/**
* @return
* @see org.broadleafcommerce.core.catalog.domain.CategoryXrefImpl.CategoryXrefPK#getSubCategory()
*/
public Category getSubCategory() {
return categoryXrefPK.getSubCategory();
}
/**
* @param subCategory
* @see org.broadleafcommerce.core.catalog.domain.CategoryXrefImpl.CategoryXrefPK#setSubCategory(org.broadleafcommerce.core.catalog.domain.Category)
*/
public void setSubCategory(Category subCategory) {
categoryXrefPK.setSubCategory(subCategory);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof CategoryXrefImpl)) return false;
CategoryXrefImpl that = (CategoryXrefImpl) o;
if (categoryXrefPK != null ? !categoryXrefPK.equals(that.categoryXrefPK) : that.categoryXrefPK != null)
return false;
if (displayOrder != null ? !displayOrder.equals(that.displayOrder) : that.displayOrder != null) return false;
return true;
}
@Override
public int hashCode() {
int result = categoryXrefPK != null ? categoryXrefPK.hashCode() : 0;
result = 31 * result + (displayOrder != null ? displayOrder.hashCode() : 0);
return result;
}
}
| 1 (no label)
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_catalog_domain_CategoryXrefImpl.java
|
12 |
static final class AsyncCombine<T,U,V> extends Async {
final T arg1;
final U arg2;
final BiFun<? super T,? super U,? extends V> fn;
final CompletableFuture<V> dst;
AsyncCombine(T arg1, U arg2,
BiFun<? super T,? super U,? extends V> fn,
CompletableFuture<V> dst) {
this.arg1 = arg1; this.arg2 = arg2; this.fn = fn; this.dst = dst;
}
public final boolean exec() {
CompletableFuture<V> d; V v; Throwable ex;
if ((d = this.dst) != null && d.result == null) {
try {
v = fn.apply(arg1, arg2);
ex = null;
} catch (Throwable rex) {
ex = rex;
v = null;
}
d.internalComplete(v, ex);
}
return true;
}
private static final long serialVersionUID = 5232453952276885070L;
}
| 0 (true)
|
src_main_java_jsr166e_CompletableFuture.java
|
2,368 |
public class TrackableJobFuture<V>
extends AbstractCompletableFuture<V>
implements TrackableJob<V>, JobCompletableFuture<V> {
private final String name;
private final String jobId;
private final JobTracker jobTracker;
private final Collator collator;
private final CountDownLatch latch;
private final MapReduceService mapReduceService;
private volatile boolean cancelled;
public TrackableJobFuture(String name, String jobId, JobTracker jobTracker, NodeEngine nodeEngine, Collator collator) {
super(nodeEngine, nodeEngine.getLogger(TrackableJobFuture.class));
this.name = name;
this.jobId = jobId;
this.jobTracker = jobTracker;
this.collator = collator;
this.latch = new CountDownLatch(1);
this.mapReduceService = ((NodeEngineImpl) nodeEngine).getService(MapReduceService.SERVICE_NAME);
}
@Override
public void setResult(Object result) {
Object finalResult = result;
// If collator is available we need to execute it now
if (collator != null) {
finalResult = collator.collate(((Map) finalResult).entrySet());
}
if (finalResult instanceof Throwable && !(finalResult instanceof CancellationException)) {
finalResult = new ExecutionException((Throwable) finalResult);
}
super.setResult(finalResult);
latch.countDown();
}
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
Address jobOwner = mapReduceService.getLocalAddress();
if (!mapReduceService.registerJobSupervisorCancellation(name, jobId, jobOwner)) {
return false;
}
JobSupervisor supervisor = mapReduceService.getJobSupervisor(name, jobId);
if (supervisor == null || !supervisor.isOwnerNode()) {
return false;
}
Exception exception = new CancellationException("Operation was cancelled by the user");
cancelled = supervisor.cancelAndNotify(exception);
return cancelled;
}
@Override
public boolean isCancelled() {
return cancelled;
}
@Override
public V get(long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
ValidationUtil.isNotNull(unit, "unit");
if (!latch.await(timeout, unit) || !isDone()) {
throw new TimeoutException("timeout reached");
}
return getResult();
}
@Override
public JobTracker getJobTracker() {
return jobTracker;
}
@Override
public String getName() {
return name;
}
@Override
public String getJobId() {
return jobId;
}
@Override
public ICompletableFuture<V> getCompletableFuture() {
JobSupervisor supervisor = mapReduceService.getJobSupervisor(name, jobId);
if (supervisor == null || !supervisor.isOwnerNode()) {
return null;
}
return this;
}
@Override
public JobProcessInformation getJobProcessInformation() {
JobSupervisor supervisor = mapReduceService.getJobSupervisor(name, jobId);
if (supervisor == null || !supervisor.isOwnerNode()) {
return null;
}
return new JobProcessInformationAdapter(supervisor.getJobProcessInformation());
}
/**
* This class is just an adapter that exposes the job process information
* to user code without exposing the internal array.
*/
private static final class JobProcessInformationAdapter
implements JobProcessInformation {
private final JobProcessInformation processInformation;
private JobProcessInformationAdapter(JobProcessInformation processInformation) {
this.processInformation = processInformation;
}
@Override
public JobPartitionState[] getPartitionStates() {
JobPartitionState[] partitionStates = processInformation.getPartitionStates();
return Arrays.copyOf(partitionStates, partitionStates.length);
}
@Override
public int getProcessedRecords() {
return processInformation.getProcessedRecords();
}
}
}
| 1 (no label)
|
hazelcast_src_main_java_com_hazelcast_mapreduce_impl_task_TrackableJobFuture.java
|
3,327 |
public abstract class FloatArrayAtomicFieldData extends AbstractAtomicNumericFieldData {
public static FloatArrayAtomicFieldData empty(int numDocs) {
return new Empty(numDocs);
}
private final int numDocs;
protected long size = -1;
public FloatArrayAtomicFieldData(int numDocs) {
super(true);
this.numDocs = numDocs;
}
@Override
public void close() {
}
@Override
public int getNumDocs() {
return numDocs;
}
static class Empty extends FloatArrayAtomicFieldData {
Empty(int numDocs) {
super(numDocs);
}
@Override
public LongValues getLongValues() {
return LongValues.EMPTY;
}
@Override
public DoubleValues getDoubleValues() {
return DoubleValues.EMPTY;
}
@Override
public boolean isMultiValued() {
return false;
}
@Override
public long getNumberUniqueValues() {
return 0;
}
@Override
public boolean isValuesOrdered() {
return false;
}
@Override
public long getMemorySizeInBytes() {
return 0;
}
@Override
public BytesValues getBytesValues(boolean needsHashes) {
return BytesValues.EMPTY;
}
@Override
public ScriptDocValues getScriptValues() {
return ScriptDocValues.EMPTY;
}
}
public static class WithOrdinals extends FloatArrayAtomicFieldData {
private final Ordinals ordinals;
private final BigFloatArrayList values;
public WithOrdinals(BigFloatArrayList values, int numDocs, Ordinals ordinals) {
super(numDocs);
this.values = values;
this.ordinals = ordinals;
}
@Override
public boolean isMultiValued() {
return ordinals.isMultiValued();
}
@Override
public boolean isValuesOrdered() {
return true;
}
@Override
public long getNumberUniqueValues() {
return ordinals.getNumOrds();
}
@Override
public long getMemorySizeInBytes() {
if (size == -1) {
size = RamUsageEstimator.NUM_BYTES_INT/*size*/ + RamUsageEstimator.NUM_BYTES_INT/*numDocs*/ + values.sizeInBytes() + ordinals.getMemorySizeInBytes();
}
return size;
}
@Override
public LongValues getLongValues() {
return new LongValues(values, ordinals.ordinals());
}
@Override
public DoubleValues getDoubleValues() {
return new DoubleValues(values, ordinals.ordinals());
}
static class LongValues extends org.elasticsearch.index.fielddata.LongValues.WithOrdinals {
private final BigFloatArrayList values;
LongValues(BigFloatArrayList values, Ordinals.Docs ordinals) {
super(ordinals);
this.values = values;
}
@Override
public long getValueByOrd(long ord) {
assert ord != Ordinals.MISSING_ORDINAL;
return (long) values.get(ord);
}
}
static class DoubleValues extends org.elasticsearch.index.fielddata.DoubleValues.WithOrdinals {
private final BigFloatArrayList values;
DoubleValues(BigFloatArrayList values, Ordinals.Docs ordinals) {
super(ordinals);
this.values = values;
}
@Override
public double getValueByOrd(long ord) {
return values.get(ord);
}
}
}
/**
* A single valued case, where not all values are "set", so we have a FixedBitSet that
* indicates which values have an actual value.
*/
public static class SingleFixedSet extends FloatArrayAtomicFieldData {
private final BigFloatArrayList values;
private final FixedBitSet set;
private final long numOrd;
public SingleFixedSet(BigFloatArrayList values, int numDocs, FixedBitSet set, long numOrd) {
super(numDocs);
this.values = values;
this.set = set;
this.numOrd = numOrd;
}
@Override
public boolean isMultiValued() {
return false;
}
@Override
public boolean isValuesOrdered() {
return false;
}
@Override
public long getNumberUniqueValues() {
return numOrd;
}
@Override
public long getMemorySizeInBytes() {
if (size == -1) {
size = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + values.sizeInBytes() + RamUsageEstimator.sizeOf(set.getBits());
}
return size;
}
@Override
public LongValues getLongValues() {
return new LongValues(values, set);
}
@Override
public DoubleValues getDoubleValues() {
return new DoubleValues(values, set);
}
static class LongValues extends org.elasticsearch.index.fielddata.LongValues {
private final BigFloatArrayList values;
private final FixedBitSet set;
LongValues(BigFloatArrayList values, FixedBitSet set) {
super(false);
this.values = values;
this.set = set;
}
@Override
public int setDocument(int docId) {
this.docId = docId;
return set.get(docId) ? 1 : 0;
}
@Override
public long nextValue() {
return (long) values.get(docId);
}
}
static class DoubleValues extends org.elasticsearch.index.fielddata.DoubleValues {
private final BigFloatArrayList values;
private final FixedBitSet set;
DoubleValues(BigFloatArrayList values, FixedBitSet set) {
super(false);
this.values = values;
this.set = set;
}
@Override
public int setDocument(int docId) {
this.docId = docId;
return set.get(docId) ? 1 : 0;
}
@Override
public double nextValue() {
return values.get(docId);
}
}
}
/**
* Assumes all the values are "set", and docId is used as the index to the value array.
*/
public static class Single extends FloatArrayAtomicFieldData {
private final BigFloatArrayList values;
private final long numOrd;
/**
* Note, here, we assume that there is no offset by 1 from docId, so position 0
* is the value for docId 0.
*/
public Single(BigFloatArrayList values, int numDocs, long numOrd) {
super(numDocs);
this.values = values;
this.numOrd = numOrd;
}
@Override
public boolean isMultiValued() {
return false;
}
@Override
public boolean isValuesOrdered() {
return false;
}
@Override
public long getNumberUniqueValues() {
return numOrd;
}
@Override
public long getMemorySizeInBytes() {
if (size == -1) {
size = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + values.sizeInBytes();
}
return size;
}
@Override
public LongValues getLongValues() {
return new LongValues(values);
}
@Override
public DoubleValues getDoubleValues() {
return new DoubleValues(values);
}
static class LongValues extends DenseLongValues {
private final BigFloatArrayList values;
LongValues(BigFloatArrayList values) {
super(false);
this.values = values;
}
@Override
public long nextValue() {
return (long) values.get(docId);
}
}
static class DoubleValues extends DenseDoubleValues {
private final BigFloatArrayList values;
DoubleValues(BigFloatArrayList values) {
super(false);
this.values = values;
}
@Override
public double nextValue() {
return values.get(docId);
}
}
}
}
| 1 (no label)
|
src_main_java_org_elasticsearch_index_fielddata_plain_FloatArrayAtomicFieldData.java
|
397 |
context.getExecutionService().execute(new Runnable() {
public void run() {
try {
TreeSet<CacheRecord<K>> records = new TreeSet<CacheRecord<K>>(comparator);
records.addAll(cache.values());
int evictSize = cache.size() * EVICTION_PERCENTAGE / 100;
int i = 0;
for (CacheRecord<K> record : records) {
cache.remove(record.key);
if (++i > evictSize) {
break;
}
}
} finally {
canEvict.set(true);
}
}
});
| 0 (true)
|
hazelcast-client_src_main_java_com_hazelcast_client_nearcache_ClientNearCache.java
|
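Added illustration (a standalone sketch, not Hazelcast code; the names and the 20% figure are assumptions): the same eviction pass as above, ordering cache entries and removing a fixed percentage of them.
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;

public class EvictionDemo {
    public static void main(String[] args) {
        Map<String, Long> cache = new ConcurrentHashMap<>();
        for (int i = 0; i < 10; i++) {
            cache.put("key" + i, (long) i); // the value stands in for a last-access timestamp
        }
        // Order entries oldest-first, assuming a smaller value means an older entry.
        TreeMap<Long, String> byAge = new TreeMap<>();
        cache.forEach((k, v) -> byAge.put(v, k));
        int evictSize = cache.size() * 20 / 100; // 20% is an assumed eviction percentage
        int removed = 0;
        for (String key : byAge.values()) {
            cache.remove(key);
            if (++removed >= evictSize) {
                break;
            }
        }
        System.out.println(cache.size()); // prints 8
    }
}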
617 |
new OIndexEngine.EntriesResultListener() {
@Override
public boolean addResult(ODocument entry) {
return indexEntriesResultListener.addResult(entry);
}
});
| 1 (no label)
|
core_src_main_java_com_orientechnologies_orient_core_index_OIndexMultiValues.java
|
1,378 |
public static class Builder {
private String index;
private State state = State.OPEN;
private long version = 1;
private Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
private final ImmutableOpenMap.Builder<String, MappingMetaData> mappings;
private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases;
private final ImmutableOpenMap.Builder<String, Custom> customs;
public Builder(String index) {
this.index = index;
this.mappings = ImmutableOpenMap.builder();
this.aliases = ImmutableOpenMap.builder();
this.customs = ImmutableOpenMap.builder();
}
public Builder(IndexMetaData indexMetaData) {
this.index = indexMetaData.index();
this.state = indexMetaData.state;
this.version = indexMetaData.version;
this.settings = indexMetaData.settings();
this.mappings = ImmutableOpenMap.builder(indexMetaData.mappings);
this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases);
this.customs = ImmutableOpenMap.builder(indexMetaData.customs);
}
public String index() {
return index;
}
public Builder index(String index) {
this.index = index;
return this;
}
public Builder numberOfShards(int numberOfShards) {
settings = settingsBuilder().put(settings).put(SETTING_NUMBER_OF_SHARDS, numberOfShards).build();
return this;
}
public int numberOfShards() {
return settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1);
}
public Builder numberOfReplicas(int numberOfReplicas) {
settings = settingsBuilder().put(settings).put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas).build();
return this;
}
public int numberOfReplicas() {
return settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1);
}
public Builder settings(Settings.Builder settings) {
this.settings = settings.build();
return this;
}
public Builder settings(Settings settings) {
this.settings = settings;
return this;
}
public MappingMetaData mapping(String type) {
return mappings.get(type);
}
public Builder removeMapping(String mappingType) {
mappings.remove(mappingType);
return this;
}
public Builder putMapping(String type, String source) throws IOException {
XContentParser parser = XContentFactory.xContent(source).createParser(source);
try {
putMapping(new MappingMetaData(type, parser.mapOrdered()));
} finally {
parser.close();
}
return this;
}
public Builder putMapping(MappingMetaData mappingMd) {
mappings.put(mappingMd.type(), mappingMd);
return this;
}
public Builder state(State state) {
this.state = state;
return this;
}
public Builder putAlias(AliasMetaData aliasMetaData) {
aliases.put(aliasMetaData.alias(), aliasMetaData);
return this;
}
public Builder putAlias(AliasMetaData.Builder aliasMetaData) {
aliases.put(aliasMetaData.alias(), aliasMetaData.build());
return this;
}
public Builder removerAlias(String alias) {
aliases.remove(alias);
return this;
}
public Builder putCustom(String type, Custom customIndexMetaData) {
this.customs.put(type, customIndexMetaData);
return this;
}
public Builder removeCustom(String type) {
this.customs.remove(type);
return this;
}
public Custom getCustom(String type) {
return this.customs.get(type);
}
public long version() {
return this.version;
}
public Builder version(long version) {
this.version = version;
return this;
}
public IndexMetaData build() {
ImmutableOpenMap.Builder<String, AliasMetaData> tmpAliases = aliases;
Settings tmpSettings = settings;
// For backward compatibility
String[] legacyAliases = settings.getAsArray("index.aliases");
if (legacyAliases.length > 0) {
tmpAliases = ImmutableOpenMap.builder();
for (String alias : legacyAliases) {
AliasMetaData aliasMd = AliasMetaData.newAliasMetaDataBuilder(alias).build();
tmpAliases.put(alias, aliasMd);
}
tmpAliases.putAll(aliases);
// Remove index.aliases from settings once they are migrated to the new data structure
tmpSettings = ImmutableSettings.settingsBuilder().put(settings).putArray("index.aliases").build();
}
// update default mapping on the MappingMetaData
if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) {
MappingMetaData defaultMapping = mappings.get(MapperService.DEFAULT_MAPPING);
for (ObjectCursor<MappingMetaData> cursor : mappings.values()) {
cursor.value.updateDefaultMapping(defaultMapping);
}
}
return new IndexMetaData(index, version, state, tmpSettings, mappings.build(), tmpAliases.build(), customs.build());
}
public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject(indexMetaData.index(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("version", indexMetaData.version());
builder.field("state", indexMetaData.state().toString().toLowerCase(Locale.ENGLISH));
boolean binary = params.paramAsBoolean("binary", false);
builder.startObject("settings");
for (Map.Entry<String, String> entry : indexMetaData.settings().getAsMap().entrySet()) {
builder.field(entry.getKey(), entry.getValue());
}
builder.endObject();
builder.startArray("mappings");
for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.mappings()) {
if (binary) {
builder.value(cursor.value.source().compressed());
} else {
byte[] data = cursor.value.source().uncompressed();
XContentParser parser = XContentFactory.xContent(data).createParser(data);
Map<String, Object> mapping = parser.mapOrdered();
parser.close();
builder.map(mapping);
}
}
builder.endArray();
for (ObjectObjectCursor<String, Custom> cursor : indexMetaData.customs()) {
builder.startObject(cursor.key, XContentBuilder.FieldCaseConversion.NONE);
lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params);
builder.endObject();
}
builder.startObject("aliases");
for (ObjectCursor<AliasMetaData> cursor : indexMetaData.aliases().values()) {
AliasMetaData.Builder.toXContent(cursor.value, builder, params);
}
builder.endObject();
builder.endObject();
}
public static IndexMetaData fromXContent(XContentParser parser) throws IOException {
if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
parser.nextToken();
}
Builder builder = new Builder(parser.currentName());
String currentFieldName = null;
XContentParser.Token token = parser.nextToken();
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("settings".equals(currentFieldName)) {
builder.settings(ImmutableSettings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())));
} else if ("mappings".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
String mappingType = currentFieldName;
Map<String, Object> mappingSource = MapBuilder.<String, Object>newMapBuilder().put(mappingType, parser.mapOrdered()).map();
builder.putMapping(new MappingMetaData(mappingType, mappingSource));
}
}
} else if ("aliases".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
builder.putAlias(AliasMetaData.Builder.fromXContent(parser));
}
} else {
// check if it's a custom index metadata
Custom.Factory<Custom> factory = lookupFactory(currentFieldName);
if (factory == null) {
//TODO warn
parser.skipChildren();
} else {
builder.putCustom(factory.type(), factory.fromXContent(parser));
}
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("mappings".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
builder.putMapping(new MappingMetaData(new CompressedString(parser.binaryValue())));
} else {
Map<String, Object> mapping = parser.mapOrdered();
if (mapping.size() == 1) {
String mappingType = mapping.keySet().iterator().next();
builder.putMapping(new MappingMetaData(mappingType, mapping));
}
}
}
}
} else if (token.isValue()) {
if ("state".equals(currentFieldName)) {
builder.state(State.fromString(parser.text()));
} else if ("version".equals(currentFieldName)) {
builder.version(parser.longValue());
}
}
}
return builder.build();
}
public static IndexMetaData readFrom(StreamInput in) throws IOException {
Builder builder = new Builder(in.readString());
builder.version(in.readLong());
builder.state(State.fromId(in.readByte()));
builder.settings(readSettingsFromStream(in));
int mappingsSize = in.readVInt();
for (int i = 0; i < mappingsSize; i++) {
MappingMetaData mappingMd = MappingMetaData.readFrom(in);
builder.putMapping(mappingMd);
}
int aliasesSize = in.readVInt();
for (int i = 0; i < aliasesSize; i++) {
AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in);
builder.putAlias(aliasMd);
}
int customSize = in.readVInt();
for (int i = 0; i < customSize; i++) {
String type = in.readString();
Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in);
builder.putCustom(type, customIndexMetaData);
}
return builder.build();
}
public static void writeTo(IndexMetaData indexMetaData, StreamOutput out) throws IOException {
out.writeString(indexMetaData.index());
out.writeLong(indexMetaData.version());
out.writeByte(indexMetaData.state().id());
writeSettingsToStream(indexMetaData.settings(), out);
out.writeVInt(indexMetaData.mappings().size());
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.mappings().values()) {
MappingMetaData.writeTo(cursor.value, out);
}
out.writeVInt(indexMetaData.aliases().size());
for (ObjectCursor<AliasMetaData> cursor : indexMetaData.aliases().values()) {
AliasMetaData.Builder.writeTo(cursor.value, out);
}
out.writeVInt(indexMetaData.customs().size());
for (ObjectObjectCursor<String, Custom> cursor : indexMetaData.customs()) {
out.writeString(cursor.key);
lookupFactorySafe(cursor.key).writeTo(cursor.value, out);
}
}
}
| 1 (no label)
|
src_main_java_org_elasticsearch_cluster_metadata_IndexMetaData.java
|
5 |
public class NoOpCommand extends AbstractTextCommand {
final ByteBuffer response;
public NoOpCommand(byte[] response) {
super(TextCommandType.NO_OP);
this.response = ByteBuffer.wrap(response);
}
public boolean readFrom(ByteBuffer cb) {
return true;
}
public boolean writeTo(ByteBuffer bb) {
while (bb.hasRemaining() && response.hasRemaining()) {
bb.put(response.get());
}
return !response.hasRemaining();
}
@Override
public String toString() {
return "NoOpCommand {" + bytesToString(response.array()) + "}";
}
}
| 0 (true)
|
hazelcast_src_main_java_com_hazelcast_ascii_NoOpCommand.java
|
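Added illustration (a standalone sketch, not Hazelcast code): the partial-write pattern that writeTo() above relies on, draining a response buffer into a smaller destination buffer across several passes.
import java.nio.ByteBuffer;

public class PartialWriteDemo {
    public static void main(String[] args) {
        ByteBuffer response = ByteBuffer.wrap("END\r\n".getBytes());
        ByteBuffer destination = ByteBuffer.allocate(2); // deliberately smaller than the response
        while (response.hasRemaining()) {
            destination.clear();
            while (destination.hasRemaining() && response.hasRemaining()) {
                destination.put(response.get());
            }
            destination.flip();
            System.out.print(new String(destination.array(), 0, destination.limit()));
        }
        // The response is written in three passes of at most two bytes each.
    }
}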
4,197 |
public class BlobStoreIndexShardSnapshot {
/**
* Information about snapshotted file
*/
public static class FileInfo {
private final String name;
private final String physicalName;
private final long length;
private final String checksum;
private final ByteSizeValue partSize;
private final long partBytes;
private final long numberOfParts;
/**
* Constructs a new instance of file info
*
* @param name file name as stored in the blob store
* @param physicalName original file name
* @param length total length of the file
* @param partSize size of the single chunk
* @param checksum checksum for the file
*/
public FileInfo(String name, String physicalName, long length, ByteSizeValue partSize, String checksum) {
this.name = name;
this.physicalName = physicalName;
this.length = length;
this.checksum = checksum;
long partBytes = Long.MAX_VALUE;
if (partSize != null) {
partBytes = partSize.bytes();
}
long totalLength = length;
long numberOfParts = totalLength / partBytes;
if (totalLength % partBytes > 0) {
numberOfParts++;
}
if (numberOfParts == 0) {
numberOfParts++;
}
this.numberOfParts = numberOfParts;
this.partSize = partSize;
this.partBytes = partBytes;
}
/**
* Returns the base file name
*
* @return file name
*/
public String name() {
return name;
}
/**
* Returns part name if file is stored as multiple parts
*
* @param part part number
* @return part name
*/
public String partName(long part) {
if (numberOfParts > 1) {
return name + ".part" + part;
} else {
return name;
}
}
/**
* Returns base file name from part name
*
* @param blobName part name
* @return base file name
*/
public static String canonicalName(String blobName) {
if (blobName.contains(".part")) {
return blobName.substring(0, blobName.indexOf(".part"));
}
return blobName;
}
/**
* Returns original file name
*
* @return original file name
*/
public String physicalName() {
return this.physicalName;
}
/**
* File length
*
* @return file length
*/
public long length() {
return length;
}
/**
* Returns part size
*
* @return part size
*/
public ByteSizeValue partSize() {
return partSize;
}
/**
* Return maximum number of bytes in a part
*
* @return maximum number of bytes in a part
*/
public long partBytes() {
return partBytes;
}
/**
* Returns number of parts
*
* @return number of parts
*/
public long numberOfParts() {
return numberOfParts;
}
/**
* Returns file md5 checksum provided by {@link org.elasticsearch.index.store.Store}
*
* @return file checksum
*/
@Nullable
public String checksum() {
return checksum;
}
/**
* Checks if a file in a store is the same file
*
* @param md file in a store
* @return true if the file in the store and this file have the same checksum and length
*/
public boolean isSame(StoreFileMetaData md) {
if (checksum == null || md.checksum() == null) {
return false;
}
return length == md.length() && checksum.equals(md.checksum());
}
static final class Fields {
static final XContentBuilderString NAME = new XContentBuilderString("name");
static final XContentBuilderString PHYSICAL_NAME = new XContentBuilderString("physical_name");
static final XContentBuilderString LENGTH = new XContentBuilderString("length");
static final XContentBuilderString CHECKSUM = new XContentBuilderString("checksum");
static final XContentBuilderString PART_SIZE = new XContentBuilderString("part_size");
}
/**
* Serializes file info into JSON
*
* @param file file info
* @param builder XContent builder
* @param params parameters
* @throws IOException
*/
public static void toXContent(FileInfo file, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject();
builder.field(Fields.NAME, file.name);
builder.field(Fields.PHYSICAL_NAME, file.physicalName);
builder.field(Fields.LENGTH, file.length);
if (file.checksum != null) {
builder.field(Fields.CHECKSUM, file.checksum);
}
if (file.partSize != null) {
builder.field(Fields.PART_SIZE, file.partSize.bytes());
}
builder.endObject();
}
/**
* Parses JSON that represents file info
*
* @param parser parser
* @return file info
* @throws IOException
*/
public static FileInfo fromXContent(XContentParser parser) throws IOException {
XContentParser.Token token = parser.currentToken();
String name = null;
String physicalName = null;
long length = -1;
String checksum = null;
ByteSizeValue partSize = null;
if (token == XContentParser.Token.START_OBJECT) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
String currentFieldName = parser.currentName();
token = parser.nextToken();
if (token.isValue()) {
if ("name".equals(currentFieldName)) {
name = parser.text();
} else if ("physical_name".equals(currentFieldName)) {
physicalName = parser.text();
} else if ("length".equals(currentFieldName)) {
length = parser.longValue();
} else if ("checksum".equals(currentFieldName)) {
checksum = parser.text();
} else if ("part_size".equals(currentFieldName)) {
partSize = new ByteSizeValue(parser.longValue());
} else {
throw new ElasticsearchParseException("unknown parameter [" + currentFieldName + "]");
}
} else {
throw new ElasticsearchParseException("unexpected token [" + token + "]");
}
} else {
throw new ElasticsearchParseException("unexpected token [" + token + "]");
}
}
}
// TODO: Verify???
return new FileInfo(name, physicalName, length, partSize, checksum);
}
}
private final String snapshot;
private final long indexVersion;
private final ImmutableList<FileInfo> indexFiles;
/**
* Constructs new shard snapshot metadata from snapshot metadata
*
* @param snapshot snapshot id
* @param indexVersion index version
* @param indexFiles list of files in the shard
*/
public BlobStoreIndexShardSnapshot(String snapshot, long indexVersion, List<FileInfo> indexFiles) {
assert snapshot != null;
assert indexVersion >= 0;
this.snapshot = snapshot;
this.indexVersion = indexVersion;
this.indexFiles = ImmutableList.copyOf(indexFiles);
}
/**
* Returns index version
*
* @return index version
*/
public long indexVersion() {
return indexVersion;
}
/**
* Returns snapshot id
*
* @return snapshot id
*/
public String snapshot() {
return snapshot;
}
/**
* Returns list of files in the shard
*
* @return list of files
*/
public ImmutableList<FileInfo> indexFiles() {
return indexFiles;
}
/**
* Serializes shard snapshot metadata info into JSON
*
* @param snapshot shard snapshot metadata
* @param builder XContent builder
* @param params parameters
* @throws IOException
*/
public static void toXContent(BlobStoreIndexShardSnapshot snapshot, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject();
builder.field("name", snapshot.snapshot);
builder.field("index-version", snapshot.indexVersion);
builder.startArray("files");
for (FileInfo fileInfo : snapshot.indexFiles) {
FileInfo.toXContent(fileInfo, builder, params);
}
builder.endArray();
builder.endObject();
}
/**
* Parses shard snapshot metadata
*
* @param parser parser
* @return shard snapshot metadata
* @throws IOException
*/
public static BlobStoreIndexShardSnapshot fromXContent(XContentParser parser) throws IOException {
String snapshot = null;
long indexVersion = -1;
List<FileInfo> indexFiles = newArrayList();
XContentParser.Token token = parser.currentToken();
if (token == XContentParser.Token.START_OBJECT) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
String currentFieldName = parser.currentName();
token = parser.nextToken();
if (token.isValue()) {
if ("name".equals(currentFieldName)) {
snapshot = parser.text();
} else if ("index-version".equals(currentFieldName)) {
indexVersion = parser.longValue();
} else {
throw new ElasticsearchParseException("unknown parameter [" + currentFieldName + "]");
}
} else if (token == XContentParser.Token.START_ARRAY) {
while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) {
indexFiles.add(FileInfo.fromXContent(parser));
}
} else {
throw new ElasticsearchParseException("unexpected token [" + token + "]");
}
} else {
throw new ElasticsearchParseException("unexpected token [" + token + "]");
}
}
}
return new BlobStoreIndexShardSnapshot(snapshot, indexVersion, ImmutableList.<FileInfo>copyOf(indexFiles));
}
/**
* Returns true if this snapshot contains a file with a given original name
*
* @param physicalName original file name
* @return true if the file was found, false otherwise
*/
public boolean containPhysicalIndexFile(String physicalName) {
return findPhysicalIndexFile(physicalName) != null;
}
public FileInfo findPhysicalIndexFile(String physicalName) {
for (FileInfo file : indexFiles) {
if (file.physicalName().equals(physicalName)) {
return file;
}
}
return null;
}
/**
* Returns the file info for the file with a given name, if this snapshot contains it
*
* @param name file name
* @return the file info if the file was found, null otherwise
*/
public FileInfo findNameFile(String name) {
for (FileInfo file : indexFiles) {
if (file.name().equals(name)) {
return file;
}
}
return null;
}
}
| 1 (no label)
|
src_main_java_org_elasticsearch_index_snapshots_blobstore_BlobStoreIndexShardSnapshot.java
|
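Added illustration (standalone, hypothetical names): the part-count arithmetic from the FileInfo constructor above, which computes ceil(length / partBytes) with a minimum of one part.
public class PartCountDemo {
    static long numberOfParts(long length, long partBytes) {
        long parts = length / partBytes;
        if (length % partBytes > 0) {
            parts++;
        }
        return parts == 0 ? 1 : parts;
    }

    public static void main(String[] args) {
        System.out.println(numberOfParts(10, 4)); // 3 parts: 4 + 4 + 2 bytes
        System.out.println(numberOfParts(8, 4));  // 2 parts
        System.out.println(numberOfParts(0, 4));  // 1 part, never zero
    }
}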
73 |
public abstract class BaseClientRemoveListenerRequest extends CallableClientRequest {
protected String name;
protected String registrationId;
protected BaseClientRemoveListenerRequest() {
}
protected BaseClientRemoveListenerRequest(String name, String registrationId) {
this.name = name;
this.registrationId = registrationId;
}
public String getRegistrationId() {
return registrationId;
}
public void setRegistrationId(String registrationId) {
this.registrationId = registrationId;
}
public String getName() {
return name;
}
public void setName(final String name) {
this.name = name;
}
@Override
public void write(PortableWriter writer) throws IOException {
writer.writeUTF("n", name);
writer.writeUTF("r", registrationId);
}
@Override
public void read(PortableReader reader) throws IOException {
name = reader.readUTF("n");
registrationId = reader.readUTF("r");
}
}
| 0 (true)
|
hazelcast_src_main_java_com_hazelcast_client_BaseClientRemoveListenerRequest.java
|
358 |
@SuppressWarnings("unchecked")
public class ODatabaseDocumentTx extends ODatabaseRecordWrapperAbstract<ODatabaseRecordTx> implements ODatabaseDocument {
public ODatabaseDocumentTx(final String iURL) {
super(new ODatabaseRecordTx(iURL, ODocument.RECORD_TYPE));
}
public ODatabaseDocumentTx(final ODatabaseRecordTx iSource) {
super(iSource);
}
private void freezeIndexes(final List<OIndexAbstract<?>> indexesToFreeze, boolean throwException) {
if (indexesToFreeze != null) {
for (OIndexAbstract<?> indexToLock : indexesToFreeze) {
indexToLock.freeze(throwException);
}
}
}
private void flushIndexes(List<OIndexAbstract<?>> indexesToFlush) {
for (OIndexAbstract<?> index : indexesToFlush) {
index.flush();
}
}
private List<OIndexAbstract<?>> prepareIndexesToFreeze(Collection<? extends OIndex<?>> indexes) {
List<OIndexAbstract<?>> indexesToFreeze = null;
if (indexes != null && !indexes.isEmpty()) {
indexesToFreeze = new ArrayList<OIndexAbstract<?>>(indexes.size());
for (OIndex<?> index : indexes) {
indexesToFreeze.add((OIndexAbstract<?>) index.getInternal());
}
Collections.sort(indexesToFreeze, new Comparator<OIndex<?>>() {
public int compare(OIndex<?> o1, OIndex<?> o2) {
return o1.getName().compareTo(o2.getName());
}
});
}
return indexesToFreeze;
}
private void releaseIndexes(Collection<? extends OIndex<?>> indexesToRelease) {
if (indexesToRelease != null) {
Iterator<? extends OIndex<?>> it = indexesToRelease.iterator();
while (it.hasNext()) {
it.next().getInternal().release();
it.remove();
}
}
}
@Override
public void freeze(final boolean throwException) {
if (!(getStorage() instanceof OFreezableStorage)) {
OLogManager.instance().error(this,
"We can not freeze non local storage. " + "If you use remote client please use OServerAdmin instead.");
return;
}
final long startTime = Orient.instance().getProfiler().startChrono();
final Collection<? extends OIndex<?>> indexes = getMetadata().getIndexManager().getIndexes();
final List<OIndexAbstract<?>> indexesToLock = prepareIndexesToFreeze(indexes);
freezeIndexes(indexesToLock, true);
flushIndexes(indexesToLock);
super.freeze(throwException);
Orient.instance().getProfiler()
.stopChrono("db." + getName() + ".freeze", "Time to freeze the database", startTime, "db.*.freeze");
}
@Override
public void freeze() {
if (!(getStorage() instanceof OFreezableStorage)) {
OLogManager.instance().error(this,
"We can not freeze non local storage. " + "If you use remote client please use OServerAdmin instead.");
return;
}
final long startTime = Orient.instance().getProfiler().startChrono();
final Collection<? extends OIndex<?>> indexes = getMetadata().getIndexManager().getIndexes();
final List<OIndexAbstract<?>> indexesToLock = prepareIndexesToFreeze(indexes);
freezeIndexes(indexesToLock, false);
flushIndexes(indexesToLock);
super.freeze();
Orient.instance().getProfiler()
.stopChrono("db." + getName() + ".freeze", "Time to freeze the database", startTime, "db.*.freeze");
}
@Override
public void release() {
if (!(getStorage() instanceof OFreezableStorage)) {
OLogManager.instance().error(this,
"We can not release non local storage. " + "If you use remote client please use OServerAdmin instead.");
return;
}
final long startTime = Orient.instance().getProfiler().startChrono();
super.release();
Collection<? extends OIndex<?>> indexes = getMetadata().getIndexManager().getIndexes();
releaseIndexes(indexes);
Orient.instance().getProfiler()
.stopChrono("db." + getName() + ".release", "Time to release the database", startTime, "db.*.release");
}
/**
* Creates a new ODocument.
*/
@Override
public ODocument newInstance() {
return new ODocument();
}
public ODocument newInstance(final String iClassName) {
checkSecurity(ODatabaseSecurityResources.CLASS, ORole.PERMISSION_CREATE, iClassName);
return new ODocument(iClassName);
}
public ORecordIteratorClass<ODocument> browseClass(final String iClassName) {
return browseClass(iClassName, true);
}
public ORecordIteratorClass<ODocument> browseClass(final String iClassName, final boolean iPolymorphic) {
if (getMetadata().getSchema().getClass(iClassName) == null)
throw new IllegalArgumentException("Class '" + iClassName + "' not found in current database");
checkSecurity(ODatabaseSecurityResources.CLASS, ORole.PERMISSION_READ, iClassName);
return new ORecordIteratorClass<ODocument>(this, underlying, iClassName, iPolymorphic, true, false);
}
@Override
public ORecordIteratorCluster<ODocument> browseCluster(final String iClusterName) {
checkSecurity(ODatabaseSecurityResources.CLUSTER, ORole.PERMISSION_READ, iClusterName);
return new ORecordIteratorCluster<ODocument>(this, underlying, getClusterIdByName(iClusterName), true);
}
@Override
public ORecordIteratorCluster<ODocument> browseCluster(String iClusterName, OClusterPosition startClusterPosition,
OClusterPosition endClusterPosition, boolean loadTombstones) {
checkSecurity(ODatabaseSecurityResources.CLUSTER, ORole.PERMISSION_READ, iClusterName);
return new ORecordIteratorCluster<ODocument>(this, underlying, getClusterIdByName(iClusterName), startClusterPosition,
endClusterPosition, true, loadTombstones);
}
/**
* Saves a document to the database. Behavior depends on the currently running transaction, if any. If no transaction is running then
* changes apply immediately. If an Optimistic transaction is running then the record will be changed at commit time. The current
* transaction will continue to see the record as modified, while others will not. If a Pessimistic transaction is running, then an
* exclusive lock is acquired against the record. The current transaction will continue to see the record as modified, while others
* cannot access it since it's locked.
* <p/>
* If MVCC is enabled and the version of the document differs from the version stored in the database, then an
* {@link OConcurrentModificationException} exception is thrown. Before being saved, the document must be valid according to the
* constraints declared in the schema, if any (this also works in schema-less mode). To validate the document,
* {@link ODocument#validate()} is called.
*
* @param iRecord
* Record to save.
* @return The Database instance itself giving a "fluent interface". Useful to call multiple methods in chain.
* @throws OConcurrentModificationException
* if the version of the document differs from the version contained in the database.
* @throws OValidationException
* if the document breaks some validation constraints defined in the schema
* @see #setMVCC(boolean), {@link #isMVCC()}
*/
@Override
public <RET extends ORecordInternal<?>> RET save(final ORecordInternal<?> iRecord) {
return (RET) save(iRecord, OPERATION_MODE.SYNCHRONOUS, false, null, null);
}
/**
* Saves a document to the database. Behavior depends on the currently running transaction, if any. If no transaction is running then
* changes apply immediately. If an Optimistic transaction is running then the record will be changed at commit time. The current
* transaction will continue to see the record as modified, while others will not. If a Pessimistic transaction is running, then an
* exclusive lock is acquired against the record. The current transaction will continue to see the record as modified, while others
* cannot access it since it's locked.
* <p/>
* If MVCC is enabled and the version of the document differs from the version stored in the database, then an
* {@link OConcurrentModificationException} exception is thrown. Before being saved, the document must be valid according to the
* constraints declared in the schema, if any (this also works in schema-less mode). To validate the document,
* {@link ODocument#validate()} is called.
*
*
*
* @param iRecord
* Record to save.
* @param iForceCreate
* Flag that indicates that record should be created. If record with current rid already exists, exception is thrown
* @param iRecordCreatedCallback
* @param iRecordUpdatedCallback
* @return The Database instance itself giving a "fluent interface". Useful to call multiple methods in chain.
* @throws OConcurrentModificationException
* if the version of the document differs from the version contained in the database.
* @throws OValidationException
* if the document breaks some validation constraints defined in the schema
* @see #setMVCC(boolean), {@link #isMVCC()}
*/
@Override
public <RET extends ORecordInternal<?>> RET save(final ORecordInternal<?> iRecord, final OPERATION_MODE iMode,
boolean iForceCreate, final ORecordCallback<? extends Number> iRecordCreatedCallback,
ORecordCallback<ORecordVersion> iRecordUpdatedCallback) {
if (!(iRecord instanceof ODocument))
return (RET) super.save(iRecord, iMode, iForceCreate, iRecordCreatedCallback, iRecordUpdatedCallback);
ODocument doc = (ODocument) iRecord;
doc.validate();
doc.convertAllMultiValuesToTrackedVersions();
try {
if (iForceCreate || doc.getIdentity().isNew()) {
// NEW RECORD
if (doc.getClassName() != null)
checkSecurity(ODatabaseSecurityResources.CLASS, ORole.PERMISSION_CREATE, doc.getClassName());
if (doc.getSchemaClass() != null && doc.getIdentity().getClusterId() < 0) {
// CLASS FOUND: FORCE THE STORING IN THE CLUSTER CONFIGURED
String clusterName = getClusterNameById(doc.getSchemaClass().getDefaultClusterId());
return (RET) super.save(doc, clusterName, iMode, iForceCreate, iRecordCreatedCallback, iRecordUpdatedCallback);
}
} else {
// UPDATE: CHECK ACCESS ON SCHEMA CLASS NAME (IF ANY)
if (doc.getClassName() != null)
checkSecurity(ODatabaseSecurityResources.CLASS, ORole.PERMISSION_UPDATE, doc.getClassName());
}
doc = super.save(doc, iMode, iForceCreate, iRecordCreatedCallback, iRecordUpdatedCallback);
} catch (OException e) {
// PASS THROUGH
throw e;
} catch (Exception e) {
OLogManager.instance().exception("Error on saving record %s of class '%s'", e, ODatabaseException.class,
iRecord.getIdentity(), (doc.getClassName() != null ? doc.getClassName() : "?"));
}
return (RET) doc;
}
/**
* Saves a document, specifying the cluster in which to store the record. Behavior depends on the currently running transaction, if
* any. If no transaction is running then changes apply immediately. If an Optimistic transaction is running then the record will be
* changed at commit time. The current transaction will continue to see the record as modified, while others will not. If a Pessimistic
* transaction is running, then an exclusive lock is acquired against the record. The current transaction will continue to see the
* record as modified, while others cannot access it since it's locked.
* <p/>
* If MVCC is enabled and the version of the document differs from the version stored in the database, then an
* {@link OConcurrentModificationException} exception is thrown. Before being saved, the document must be valid according to the
* constraints declared in the schema, if any (this also works in schema-less mode). To validate the document,
* {@link ODocument#validate()} is called.
*
* @param iRecord
* Record to save
* @param iClusterName
* Cluster name where to save the record
* @return The Database instance itself giving a "fluent interface". Useful to call multiple methods in chain.
* @throws OConcurrentModificationException
* if the version of the document differs from the version contained in the database.
* @throws OValidationException
* if the document breaks some validation constraints defined in the schema
* @see #setMVCC(boolean), {@link #isMVCC()}, ORecordSchemaAware#validate()
*/
@Override
public <RET extends ORecordInternal<?>> RET save(final ORecordInternal<?> iRecord, final String iClusterName) {
return (RET) save(iRecord, iClusterName, OPERATION_MODE.SYNCHRONOUS, false, null, null);
}
/**
* Saves a document, specifying the cluster in which to store the record. Behavior depends on the currently running transaction, if
* any. If no transaction is running then changes apply immediately. If an Optimistic transaction is running then the record will be
* changed at commit time. The current transaction will continue to see the record as modified, while others will not. If a Pessimistic
* transaction is running, then an exclusive lock is acquired against the record. The current transaction will continue to see the
* record as modified, while others cannot access it since it's locked.
* <p/>
* If MVCC is enabled and the version of the document differs from the version stored in the database, then an
* {@link OConcurrentModificationException} exception is thrown. Before being saved, the document must be valid according to the
* constraints declared in the schema, if any (this also works in schema-less mode). To validate the document,
* {@link ODocument#validate()} is called.
*
*
* @param iRecord
* Record to save
* @param iClusterName
* Cluster name where to save the record
* @param iMode
* Mode of save: synchronous (default) or asynchronous
* @param iForceCreate
* Flag that indicates that record should be created. If record with current rid already exists, exception is thrown
* @param iRecordCreatedCallback
* @param iRecordUpdatedCallback
* @return The Database instance itself giving a "fluent interface". Useful to call multiple methods in chain.
* @throws OConcurrentModificationException
* if the version of the document is different by the version contained in the database.
* @throws OValidationException
* if the document breaks some validation constraints defined in the schema
* @see #setMVCC(boolean), {@link #isMVCC()}, ORecordSchemaAware#validate()
*/
@Override
public <RET extends ORecordInternal<?>> RET save(final ORecordInternal<?> iRecord, String iClusterName,
final OPERATION_MODE iMode, boolean iForceCreate, final ORecordCallback<? extends Number> iRecordCreatedCallback,
ORecordCallback<ORecordVersion> iRecordUpdatedCallback) {
if (!(iRecord instanceof ODocument))
return (RET) super.save(iRecord, iClusterName, iMode, iForceCreate, iRecordCreatedCallback, iRecordUpdatedCallback);
ODocument doc = (ODocument) iRecord;
if (iForceCreate || !doc.getIdentity().isValid()) {
if (doc.getClassName() != null)
checkSecurity(ODatabaseSecurityResources.CLASS, ORole.PERMISSION_CREATE, doc.getClassName());
if (iClusterName == null && doc.getSchemaClass() != null)
// FIND THE RIGHT CLUSTER AS CONFIGURED IN CLASS
iClusterName = getClusterNameById(doc.getSchemaClass().getDefaultClusterId());
int id = getClusterIdByName(iClusterName);
if (id == -1)
throw new IllegalArgumentException("Cluster name " + iClusterName + " is not configured");
final int[] clusterIds;
if (doc.getSchemaClass() != null) {
// CHECK IF THE CLUSTER IS PART OF THE CONFIGURED CLUSTERS
clusterIds = doc.getSchemaClass().getClusterIds();
int i = 0;
for (; i < clusterIds.length; ++i)
if (clusterIds[i] == id)
break;
if (i == clusterIds.length)
throw new IllegalArgumentException("Cluster name " + iClusterName + " is not configured to store the class "
+ doc.getClassName());
} else
clusterIds = new int[] { id };
} else {
// UPDATE: CHECK ACCESS ON SCHEMA CLASS NAME (IF ANY)
if (doc.getClassName() != null)
checkSecurity(ODatabaseSecurityResources.CLASS, ORole.PERMISSION_UPDATE, doc.getClassName());
}
doc.validate();
doc.convertAllMultiValuesToTrackedVersions();
doc = super.save(doc, iClusterName, iMode, iForceCreate, iRecordCreatedCallback, iRecordUpdatedCallback);
return (RET) doc;
}
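// Hedged sketch (an assumption, not part of the original class): the asynchronous variant of the same call,
// passing a creation callback. "createdCallback" is a placeholder for any ORecordCallback<? extends Number>
// implementation supplied by the caller, not an existing field.
//
// db.save(doc, "person_europe", OPERATION_MODE.ASYNCHRONOUS, false, createdCallback, null);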
/**
* Deletes a document. Behavior depends on the currently running transaction, if any. If no transaction is running then the
* record is deleted immediately. If an Optimistic transaction is running then the record will be deleted at commit time. The
* current transaction will continue to see the record as deleted, while other transactions will not. If a Pessimistic
* transaction is running, then an exclusive lock is acquired on the record. The current transaction will continue to see the
* record as deleted, while other transactions cannot access it since it is locked.
* <p/>
* If MVCC is enabled and the version of the document differs from the version stored in the database, then a
* {@link OConcurrentModificationException} exception is thrown.
*
* @param iRecord
*          Record to delete
* @return The Database instance itself, giving a "fluent interface". Useful to call multiple methods in a chain.
* @see #setMVCC(boolean), {@link #isMVCC()}
*/
public ODatabaseDocumentTx delete(final ORecordInternal<?> iRecord) {
if (iRecord == null)
throw new ODatabaseException("Cannot delete null document");
// CHECK ACCESS ON SCHEMA CLASS NAME (IF ANY)
if (iRecord instanceof ODocument && ((ODocument) iRecord).getClassName() != null)
checkSecurity(ODatabaseSecurityResources.CLASS, ORole.PERMISSION_DELETE, ((ODocument) iRecord).getClassName());
try {
underlying.delete(iRecord);
} catch (Exception e) {
if (iRecord instanceof ODocument)
OLogManager.instance().exception("Error on deleting record %s of class '%s'", e, ODatabaseException.class,
iRecord.getIdentity(), ((ODocument) iRecord).getClassName());
else
OLogManager.instance().exception("Error on deleting record %s", e, ODatabaseException.class, iRecord.getIdentity());
}
return this;
}
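// Illustrative sketch (an assumption, not part of the original class): deleting a document under MVCC; if
// another session updated the record in the meantime, an OConcurrentModificationException is raised and the
// caller can reload and retry.
//
// try {
//   db.delete(doc);
// } catch (OConcurrentModificationException e) {
//   doc.reload();
//   db.delete(doc);
// }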
/**
* Returns the number of records of the class iClassName.
*/
public long countClass(final String iClassName) {
final OClass cls = getMetadata().getSchema().getClass(iClassName);
if (cls == null)
throw new IllegalArgumentException("Class '" + iClassName + "' not found in database");
return cls.count();
}
public ODatabaseComplex<ORecordInternal<?>> commit() {
try {
return underlying.commit();
} finally {
getTransaction().close();
}
}
public ODatabaseComplex<ORecordInternal<?>> rollback() {
try {
return underlying.rollback();
} finally {
getTransaction().close();
}
}
public String getType() {
return TYPE;
}
@Override
public OSBTreeCollectionManager getSbTreeCollectionManager() {
return underlying.getSbTreeCollectionManager();
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_db_document_ODatabaseDocumentTx.java
|
164 |
@Repository("blURLHandlerDao")
public class URlHandlerDaoImpl implements URLHandlerDao {
@PersistenceContext(unitName = "blPU")
protected EntityManager em;
@Resource(name = "blEntityConfiguration")
protected EntityConfiguration entityConfiguration;
@Override
public URLHandler findURLHandlerByURI(String uri) {
Query query;
query = em.createNamedQuery("BC_READ_OUTGOING_URL");
query.setParameter("incomingURL", uri);
@SuppressWarnings("unchecked")
List<URLHandler> results = query.getResultList();
if (results != null && !results.isEmpty()) {
return results.get(0);
} else {
return null;
}
}
@Override
public List<URLHandler> findAllURLHandlers() {
CriteriaBuilder builder = em.getCriteriaBuilder();
CriteriaQuery<URLHandler> criteria = builder.createQuery(URLHandler.class);
Root<URLHandlerImpl> handler = criteria.from(URLHandlerImpl.class);
criteria.select(handler);
try {
return em.createQuery(criteria).getResultList();
} catch (NoResultException e) {
return new ArrayList<URLHandler>();
}
}
public URLHandler saveURLHandler(URLHandler handler) {
return em.merge(handler);
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_url_dao_URlHandlerDaoImpl.java
|
5,840 |
public class DefaultSearchContext extends SearchContext {
private final long id;
private final ShardSearchRequest request;
private final SearchShardTarget shardTarget;
private SearchType searchType;
private final Engine.Searcher engineSearcher;
private final ScriptService scriptService;
private final CacheRecycler cacheRecycler;
private final PageCacheRecycler pageCacheRecycler;
private final IndexShard indexShard;
private final IndexService indexService;
private final ContextIndexSearcher searcher;
private final DfsSearchResult dfsResult;
private final QuerySearchResult queryResult;
private final FetchSearchResult fetchResult;
// lazy initialized only if needed
private ScanContext scanContext;
private float queryBoost = 1.0f;
// timeout in millis
private long timeoutInMillis = -1;
private List<String> groupStats;
private Scroll scroll;
private boolean explain;
private boolean version = false; // by default, we don't return versions
private List<String> fieldNames;
private FieldDataFieldsContext fieldDataFields;
private ScriptFieldsContext scriptFields;
private PartialFieldsContext partialFields;
private FetchSourceContext fetchSourceContext;
private int from = -1;
private int size = -1;
private Sort sort;
private Float minimumScore;
private boolean trackScores = false; // when sorting, track scores as well...
private ParsedQuery originalQuery;
private Query query;
private ParsedFilter postFilter;
private Filter aliasFilter;
private int[] docIdsToLoad;
private int docsIdsToLoadFrom;
private int docsIdsToLoadSize;
private SearchContextAggregations aggregations;
private SearchContextFacets facets;
private SearchContextHighlight highlight;
private SuggestionSearchContext suggest;
private List<RescoreSearchContext> rescore;
private SearchLookup searchLookup;
private boolean queryRewritten;
private volatile long keepAlive;
private volatile long lastAccessTime;
private List<Releasable> clearables = null;
public DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget,
Engine.Searcher engineSearcher, IndexService indexService, IndexShard indexShard,
ScriptService scriptService, CacheRecycler cacheRecycler, PageCacheRecycler pageCacheRecycler) {
this.id = id;
this.request = request;
this.searchType = request.searchType();
this.shardTarget = shardTarget;
this.engineSearcher = engineSearcher;
this.scriptService = scriptService;
this.cacheRecycler = cacheRecycler;
this.pageCacheRecycler = pageCacheRecycler;
this.dfsResult = new DfsSearchResult(id, shardTarget);
this.queryResult = new QuerySearchResult(id, shardTarget);
this.fetchResult = new FetchSearchResult(id, shardTarget);
this.indexShard = indexShard;
this.indexService = indexService;
this.searcher = new ContextIndexSearcher(this, engineSearcher);
// initialize the filtering alias based on the provided filters
aliasFilter = indexService.aliasesService().aliasFilter(request.filteringAliases());
}
@Override
public boolean release() throws ElasticsearchException {
if (scanContext != null) {
scanContext.clear();
}
// clear and scope phase we have
searcher.release();
engineSearcher.release();
return true;
}
public boolean clearAndRelease() {
clearReleasables();
return release();
}
/**
* Should be called before executing the main query and after all other parameters have been set.
*/
public void preProcess() {
if (query() == null) {
parsedQuery(ParsedQuery.parsedMatchAllQuery());
}
if (queryBoost() != 1.0f) {
parsedQuery(new ParsedQuery(new FunctionScoreQuery(query(), new BoostScoreFunction(queryBoost)), parsedQuery()));
}
Filter searchFilter = searchFilter(types());
if (searchFilter != null) {
if (Queries.isConstantMatchAllQuery(query())) {
Query q = new XConstantScoreQuery(searchFilter);
q.setBoost(query().getBoost());
parsedQuery(new ParsedQuery(q, parsedQuery()));
} else {
parsedQuery(new ParsedQuery(new XFilteredQuery(query(), searchFilter), parsedQuery()));
}
}
}
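// Illustrative call-order sketch (an assumption, not part of the original class): parameters are set first,
// then preProcess() wraps the parsed query with the type/alias filter and the query boost.
//
// context.parsedQuery(parsedQuery);
// context.from(0).size(10);
// context.preProcess(); // afterwards context.query() may differ from parsedQuery.query()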
public Filter searchFilter(String[] types) {
Filter filter = mapperService().searchFilter(types);
if (filter == null) {
return aliasFilter;
} else {
filter = filterCache().cache(filter);
if (aliasFilter != null) {
return new AndFilter(ImmutableList.of(filter, aliasFilter));
}
return filter;
}
}
public long id() {
return this.id;
}
public String source() {
return engineSearcher.source();
}
public ShardSearchRequest request() {
return this.request;
}
public SearchType searchType() {
return this.searchType;
}
public SearchContext searchType(SearchType searchType) {
this.searchType = searchType;
return this;
}
public SearchShardTarget shardTarget() {
return this.shardTarget;
}
public int numberOfShards() {
return request.numberOfShards();
}
public boolean hasTypes() {
return request.types() != null && request.types().length > 0;
}
public String[] types() {
return request.types();
}
public float queryBoost() {
return queryBoost;
}
public SearchContext queryBoost(float queryBoost) {
this.queryBoost = queryBoost;
return this;
}
public long nowInMillis() {
return request.nowInMillis();
}
public Scroll scroll() {
return this.scroll;
}
public SearchContext scroll(Scroll scroll) {
this.scroll = scroll;
return this;
}
@Override
public SearchContextAggregations aggregations() {
return aggregations;
}
@Override
public SearchContext aggregations(SearchContextAggregations aggregations) {
this.aggregations = aggregations;
return this;
}
public SearchContextFacets facets() {
return facets;
}
public SearchContext facets(SearchContextFacets facets) {
this.facets = facets;
return this;
}
public SearchContextHighlight highlight() {
return highlight;
}
public void highlight(SearchContextHighlight highlight) {
this.highlight = highlight;
}
public SuggestionSearchContext suggest() {
return suggest;
}
public void suggest(SuggestionSearchContext suggest) {
this.suggest = suggest;
}
public List<RescoreSearchContext> rescore() {
if (rescore == null) {
return Collections.emptyList();
}
return rescore;
}
public void addRescore(RescoreSearchContext rescore) {
if (this.rescore == null) {
this.rescore = new ArrayList<RescoreSearchContext>();
}
this.rescore.add(rescore);
}
public boolean hasFieldDataFields() {
return fieldDataFields != null;
}
public FieldDataFieldsContext fieldDataFields() {
if (fieldDataFields == null) {
fieldDataFields = new FieldDataFieldsContext();
}
return this.fieldDataFields;
}
public boolean hasScriptFields() {
return scriptFields != null;
}
public ScriptFieldsContext scriptFields() {
if (scriptFields == null) {
scriptFields = new ScriptFieldsContext();
}
return this.scriptFields;
}
public boolean hasPartialFields() {
return partialFields != null;
}
public PartialFieldsContext partialFields() {
if (partialFields == null) {
partialFields = new PartialFieldsContext();
}
return this.partialFields;
}
/**
* A shortcut function to check whether a fetchSourceContext is present and requests the source.
*
* @return true if the fetch source context is set and the source has been requested
*/
public boolean sourceRequested() {
return fetchSourceContext != null && fetchSourceContext.fetchSource();
}
public boolean hasFetchSourceContext() {
return fetchSourceContext != null;
}
public FetchSourceContext fetchSourceContext() {
return this.fetchSourceContext;
}
public SearchContext fetchSourceContext(FetchSourceContext fetchSourceContext) {
this.fetchSourceContext = fetchSourceContext;
return this;
}
public ContextIndexSearcher searcher() {
return this.searcher;
}
public IndexShard indexShard() {
return this.indexShard;
}
public MapperService mapperService() {
return indexService.mapperService();
}
public AnalysisService analysisService() {
return indexService.analysisService();
}
public IndexQueryParserService queryParserService() {
return indexService.queryParserService();
}
public SimilarityService similarityService() {
return indexService.similarityService();
}
public ScriptService scriptService() {
return scriptService;
}
public CacheRecycler cacheRecycler() {
return cacheRecycler;
}
public PageCacheRecycler pageCacheRecycler() {
return pageCacheRecycler;
}
public FilterCache filterCache() {
return indexService.cache().filter();
}
public DocSetCache docSetCache() {
return indexService.cache().docSet();
}
public IndexFieldDataService fieldData() {
return indexService.fieldData();
}
public IdCache idCache() {
return indexService.cache().idCache();
}
public long timeoutInMillis() {
return timeoutInMillis;
}
public void timeoutInMillis(long timeoutInMillis) {
this.timeoutInMillis = timeoutInMillis;
}
public SearchContext minimumScore(float minimumScore) {
this.minimumScore = minimumScore;
return this;
}
public Float minimumScore() {
return this.minimumScore;
}
public SearchContext sort(Sort sort) {
this.sort = sort;
return this;
}
public Sort sort() {
return this.sort;
}
public SearchContext trackScores(boolean trackScores) {
this.trackScores = trackScores;
return this;
}
public boolean trackScores() {
return this.trackScores;
}
public SearchContext parsedPostFilter(ParsedFilter postFilter) {
this.postFilter = postFilter;
return this;
}
public ParsedFilter parsedPostFilter() {
return this.postFilter;
}
public Filter aliasFilter() {
return aliasFilter;
}
public SearchContext parsedQuery(ParsedQuery query) {
queryRewritten = false;
this.originalQuery = query;
this.query = query.query();
return this;
}
public ParsedQuery parsedQuery() {
return this.originalQuery;
}
/**
* The query to execute, might be rewritten.
*/
public Query query() {
return this.query;
}
/**
* Has the query been rewritten already?
*/
public boolean queryRewritten() {
return queryRewritten;
}
/**
* Rewrites the query and updates it. Only happens once.
*/
public SearchContext updateRewriteQuery(Query rewriteQuery) {
query = rewriteQuery;
queryRewritten = true;
return this;
}
public int from() {
return from;
}
public SearchContext from(int from) {
this.from = from;
return this;
}
public int size() {
return size;
}
public SearchContext size(int size) {
this.size = size;
return this;
}
public boolean hasFieldNames() {
return fieldNames != null;
}
public List<String> fieldNames() {
if (fieldNames == null) {
fieldNames = Lists.newArrayList();
}
return fieldNames;
}
public void emptyFieldNames() {
this.fieldNames = ImmutableList.of();
}
public boolean explain() {
return explain;
}
public void explain(boolean explain) {
this.explain = explain;
}
@Nullable
public List<String> groupStats() {
return this.groupStats;
}
public void groupStats(List<String> groupStats) {
this.groupStats = groupStats;
}
public boolean version() {
return version;
}
public void version(boolean version) {
this.version = version;
}
public int[] docIdsToLoad() {
return docIdsToLoad;
}
public int docIdsToLoadFrom() {
return docsIdsToLoadFrom;
}
public int docIdsToLoadSize() {
return docsIdsToLoadSize;
}
public SearchContext docIdsToLoad(int[] docIdsToLoad, int docsIdsToLoadFrom, int docsIdsToLoadSize) {
this.docIdsToLoad = docIdsToLoad;
this.docsIdsToLoadFrom = docsIdsToLoadFrom;
this.docsIdsToLoadSize = docsIdsToLoadSize;
return this;
}
public void accessed(long accessTime) {
this.lastAccessTime = accessTime;
}
public long lastAccessTime() {
return this.lastAccessTime;
}
public long keepAlive() {
return this.keepAlive;
}
public void keepAlive(long keepAlive) {
this.keepAlive = keepAlive;
}
public SearchLookup lookup() {
// TODO: The types should take into account the parsing context in QueryParserContext...
if (searchLookup == null) {
searchLookup = new SearchLookup(mapperService(), fieldData(), request.types());
}
return searchLookup;
}
public DfsSearchResult dfsResult() {
return dfsResult;
}
public QuerySearchResult queryResult() {
return queryResult;
}
public FetchSearchResult fetchResult() {
return fetchResult;
}
@Override
public void addReleasable(Releasable releasable) {
if (clearables == null) {
clearables = new ArrayList<Releasable>();
}
clearables.add(releasable);
}
@Override
public void clearReleasables() {
if (clearables != null) {
Throwable th = null;
for (Releasable releasable : clearables) {
try {
releasable.release();
} catch (Throwable t) {
if (th == null) {
th = t;
}
}
}
clearables.clear();
if (th != null) {
throw new RuntimeException(th);
}
}
}
public ScanContext scanContext() {
if (scanContext == null) {
scanContext = new ScanContext();
}
return scanContext;
}
public MapperService.SmartNameFieldMappers smartFieldMappers(String name) {
return mapperService().smartName(name, request.types());
}
public FieldMappers smartNameFieldMappers(String name) {
return mapperService().smartNameFieldMappers(name, request.types());
}
public FieldMapper smartNameFieldMapper(String name) {
return mapperService().smartNameFieldMapper(name, request.types());
}
public MapperService.SmartNameObjectMapper smartNameObjectMapper(String name) {
return mapperService().smartNameObjectMapper(name, request.types());
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_internal_DefaultSearchContext.java
|
57 |
public class HighAvailabilityConsoleLogger
implements ClusterMemberListener, ClusterListener, AvailabilityGuard.AvailabilityListener
{
private ConsoleLogger console;
private InstanceId myId;
private URI myUri;
public HighAvailabilityConsoleLogger( ConsoleLogger console, InstanceId myId )
{
this.console = console;
this.myId = myId;
}
// Cluster events
/**
* Logged when the instance itself joins or rejoins a cluster
*
* @param clusterConfiguration
*/
@Override
public void enteredCluster( ClusterConfiguration clusterConfiguration )
{
myUri = clusterConfiguration.getUriForId( myId );
console.log( String.format( "Instance %s joined the cluster", printId( myId, myUri )) );
}
/**
* Logged when the instance itself leaves the cluster
*/
@Override
public void leftCluster()
{
console.log( String.format( "Instance %s left the cluster", printId( myId, myUri ) ) );
}
/**
* Logged when another instance joins the cluster
*
* @param instanceId
* @param member
*/
@Override
public void joinedCluster( InstanceId instanceId, URI member )
{
console.log( "Instance " + printId(instanceId, member) + " joined the cluster" );
}
/**
* Logged when another instance leaves the cluster
*
* @param instanceId
*/
@Override
public void leftCluster( InstanceId instanceId )
{
console.log( "Instance " + instanceId + " has left the cluster" );
}
/**
* Logged when an instance is elected for a role, such as coordinator of a cluster.
*
* @param role
* @param instanceId
* @param electedMember
*/
@Override
public void elected( String role, InstanceId instanceId, URI electedMember )
{
console.log( "Instance " + printId( instanceId, electedMember ) + "was elected as " + role );
}
/**
* Logged when an instance is demoted from a role.
*
* @param role
* @param instanceId
* @param electedMember
*/
@Override
public void unelected( String role, InstanceId instanceId, URI electedMember )
{
console.log( "Instance " + printId( instanceId, electedMember ) + "was demoted as " + role );
}
// HA events
@Override
public void coordinatorIsElected( InstanceId coordinatorId )
{
}
/**
* Logged when a member becomes available as a role, such as MASTER or SLAVE.
*
* @param role the role the member is now available as
* @param availableId the id of the member that became available as that role
* @param atUri the URI at which the instance is available
*/
@Override
public void memberIsAvailable( String role, InstanceId availableId, URI atUri )
{
console.log( "Instance " + printId( availableId, atUri ) + "is available as " + role + " at " + atUri.toASCIIString() );
}
/**
* Logged when a member becomes unavailable as a role, such as MASTER or SLAVE.
*
* @param role The role for which the member is unavailable
* @param unavailableId The id of the member which became unavailable for that role
*/
@Override
public void memberIsUnavailable( String role, InstanceId unavailableId )
{
console.log( "Instance " + printId( unavailableId, null ) + "is unavailable as " + role );
}
/**
* Logged when another instance is detected as being failed.
*
* @param instanceId
*/
@Override
public void memberIsFailed( InstanceId instanceId )
{
console.log( "Instance " + printId( instanceId, null ) + "has failed" );
}
/**
* Logged when another instance is detected as being alive again.
*
* @param instanceId
*/
@Override
public void memberIsAlive( InstanceId instanceId )
{
console.log( "Instance " + printId( instanceId, null ) + "is alive" );
}
// InstanceAccessGuard events
/**
* Logged when users are allowed to access the database for transactions.
*/
@Override
public void available()
{
console.log( "Database available for write transactions" );
}
/**
* Logged when users are not allowed to access the database for transactions.
*/
@Override
public void unavailable()
{
console.log( "Write transactions to database disabled" );
}
private String printId( InstanceId id, URI member )
{
String memberName = member == null ? null : parameter( "memberName" ).apply( member );
String memberNameOrId = memberName == null ? id.toString() : memberName;
return memberNameOrId + (id.equals( myId ) ? " (this server) " : " ");
}
}
| 1no label
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_HighAvailabilityConsoleLogger.java
|
11 |
class BasicCompletionProposal extends CompletionProposal {
static void addImportProposal(int offset, String prefix,
CeylonParseController cpc, List<ICompletionProposal> result,
Declaration dec, Scope scope) {
result.add(new BasicCompletionProposal(offset, prefix,
dec.getName(), escapeName(dec), dec, cpc));
}
static void addDocLinkProposal(int offset, String prefix,
CeylonParseController cpc, List<ICompletionProposal> result,
Declaration dec, Scope scope) {
//for doc links, propose both aliases and unaliased qualified form
//we don't need to do this in code b/c there is no fully-qualified form
String name = dec.getName();
String aliasedName = dec.getName(cpc.getRootNode().getUnit());
if (!name.equals(aliasedName)) {
result.add(new BasicCompletionProposal(offset, prefix,
aliasedName, aliasedName, dec, cpc));
}
result.add(new BasicCompletionProposal(offset, prefix,
name, getTextForDocLink(cpc, dec), dec, cpc));
}
private final CeylonParseController cpc;
private final Declaration declaration;
private BasicCompletionProposal(int offset, String prefix,
String desc, String text, Declaration dec,
CeylonParseController cpc) {
super(offset, prefix, getImageForDeclaration(dec),
desc, text);
this.cpc = cpc;
this.declaration = dec;
}
public String getAdditionalProposalInfo() {
return getDocumentationFor(cpc, declaration);
}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_BasicCompletionProposal.java
|
714 |
public class CountResponse extends BroadcastOperationResponse {
private long count;
CountResponse() {
}
CountResponse(long count, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
super(totalShards, successfulShards, failedShards, shardFailures);
this.count = count;
}
/**
* The count of documents matching the query provided.
*/
public long getCount() {
return count;
}
public RestStatus status() {
if (getFailedShards() == 0) {
if (getSuccessfulShards() == 0 && getTotalShards() > 0) {
return RestStatus.SERVICE_UNAVAILABLE;
}
return RestStatus.OK;
}
// if total failure, bubble up the status code to the response level
if (getSuccessfulShards() == 0 && getTotalShards() > 0) {
RestStatus status = RestStatus.OK;
for (ShardOperationFailedException shardFailure : getShardFailures()) {
RestStatus shardStatus = shardFailure.status();
if (shardStatus.getStatus() >= status.getStatus()) {
status = shardStatus;
}
}
return status;
}
return RestStatus.OK;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
count = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVLong(count);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_count_CountResponse.java
|
230 |
@Repository("blSystemPropertiesDao")
public class SystemPropertiesDaoImpl implements SystemPropertiesDao{
@PersistenceContext(unitName="blPU")
protected EntityManager em;
@Resource(name="blEntityConfiguration")
protected EntityConfiguration entityConfiguration;
@Override
public SystemProperty saveSystemProperty(SystemProperty systemProperty) {
return em.merge(systemProperty);
}
@Override
public void deleteSystemProperty(SystemProperty systemProperty) {
em.remove(systemProperty);
}
@Override
public List<SystemProperty> readAllSystemProperties() {
Query query = em.createNamedQuery("BC_READ_ALL_SYSTEM_PROPERTIES");
query.setHint(QueryHints.HINT_CACHEABLE, true);
return query.getResultList();
}
@Override
public SystemProperty readSystemPropertyByName(String name) {
Query query = em.createNamedQuery("BC_READ_SYSTEM_PROPERTIES_BY_NAME");
query.setParameter("propertyName", name);
query.setHint(QueryHints.HINT_CACHEABLE, true);
List<SystemProperty> props = query.getResultList();
if (props != null && ! props.isEmpty()) {
return props.get(0);
}
return null;
}
@Override
public SystemProperty createNewSystemProperty() {
return (SystemProperty)entityConfiguration.createEntityInstance(SystemProperty.class.getName());
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_config_dao_SystemPropertiesDaoImpl.java
|
406 |
public class DeleteSnapshotAction extends ClusterAction<DeleteSnapshotRequest, DeleteSnapshotResponse, DeleteSnapshotRequestBuilder> {
public static final DeleteSnapshotAction INSTANCE = new DeleteSnapshotAction();
public static final String NAME = "cluster/snapshot/delete";
private DeleteSnapshotAction() {
super(NAME);
}
@Override
public DeleteSnapshotResponse newResponse() {
return new DeleteSnapshotResponse();
}
@Override
public DeleteSnapshotRequestBuilder newRequestBuilder(ClusterAdminClient client) {
return new DeleteSnapshotRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_snapshots_delete_DeleteSnapshotAction.java
|
1,530 |
public static class Map extends Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex> {
private boolean processVertices;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
this.processVertices = context.getConfiguration().getBoolean(PROCESS_VERTICES, true);
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex>.Context context) throws IOException, InterruptedException {
if (this.processVertices) {
value.clearPaths();
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_PROCESSED, 1L);
}
long edgesProcessed = 0;
for (final Edge edge : value.getEdges(Direction.IN)) {
((StandardFaunusEdge) edge).startPath();
edgesProcessed++;
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.IN_EDGES_PROCESSED, edgesProcessed);
edgesProcessed = 0;
for (final Edge edge : value.getEdges(Direction.OUT)) {
((StandardFaunusEdge) edge).startPath();
edgesProcessed++;
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.OUT_EDGES_PROCESSED, edgesProcessed);
context.write(NullWritable.get(), value);
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_transform_EdgesMap.java
|
659 |
new MetaDataIndexTemplateService.PutListener() {
@Override
public void onResponse(MetaDataIndexTemplateService.PutResponse response) {
listener.onResponse(new PutIndexTemplateResponse(response.acknowledged()));
}
@Override
public void onFailure(Throwable t) {
logger.debug("failed to delete template [{}]", t, request.name());
listener.onFailure(t);
}
});
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_template_put_TransportPutIndexTemplateAction.java
|
487 |
class ReSendTask implements Runnable {
public void run() {
try {
sleep();
invocationService.reSend(ClientCallFuture.this);
} catch (Exception e) {
if (handler != null) {
invocationService.registerFailedListener(ClientCallFuture.this);
} else {
setResponse(e);
}
}
}
private void sleep(){
try {
Thread.sleep(250);
} catch (InterruptedException ignored) {
}
}
}
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_spi_impl_ClientCallFuture.java
|
332 |
new Thread() {
public void run() {
boolean result = map.tryRemove("key2", 1, TimeUnit.SECONDS);
if (!result) {
latch.countDown();
}
}
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTest.java
|
821 |
public class MultiSearchRequestBuilder extends ActionRequestBuilder<MultiSearchRequest, MultiSearchResponse, MultiSearchRequestBuilder> {
public MultiSearchRequestBuilder(Client client) {
super((InternalClient) client, new MultiSearchRequest());
}
/**
* Add a search request to execute. Note, the order is important, the search response will be returned in the
* same order as the search requests.
* <p/>
* If indices options have been set explicitly on the search request, then the indices options of the multi search
* request will not be applied to it (if set).
*/
public MultiSearchRequestBuilder add(SearchRequest request) {
if (request.indicesOptions() == IndicesOptions.strict() && request().indicesOptions() != IndicesOptions.strict()) {
request.indicesOptions(request().indicesOptions());
}
super.request.add(request);
return this;
}
/**
* Add a search request to execute. Note, the order is important, the search response will be returned in the
* same order as the search requests.
*/
public MultiSearchRequestBuilder add(SearchRequestBuilder request) {
if (request.request().indicesOptions() == IndicesOptions.strict() && request().indicesOptions() != IndicesOptions.strict()) {
request.request().indicesOptions(request().indicesOptions());
}
super.request.add(request);
return this;
}
/**
* Specifies how to handle requested indices that are unavailable (for example, indices that don't exist) and how
* wildcard index expressions are resolved.
*
* Invoke this method before invoking {@link #add(SearchRequestBuilder)}.
*/
public MultiSearchRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
request().indicesOptions(indicesOptions);
return this;
}
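// Illustrative usage sketch (an assumption, not part of the original class): indices options are set on the
// multi-search builder before requests are added, so added requests still carrying the default (strict)
// options inherit them. The index name "logs" and the client variable are hypothetical, and the lenient()
// factory is assumed to exist in this version of IndicesOptions.
//
// MultiSearchResponse response = client.prepareMultiSearch()
//     .setIndicesOptions(IndicesOptions.lenient())
//     .add(client.prepareSearch("logs").setQuery(QueryBuilders.matchAllQuery()))
//     .add(client.prepareSearch("logs").setSize(0))
//     .execute().actionGet();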
@Override
protected void doExecute(ActionListener<MultiSearchResponse> listener) {
((Client) client).multiSearch(request, listener);
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_search_MultiSearchRequestBuilder.java
|
259 |
@TestMethodProviders({
LuceneJUnit3MethodProvider.class,
JUnit4MethodProvider.class
})
@Listeners({
ReproduceInfoPrinter.class
})
@RunWith(value = com.carrotsearch.randomizedtesting.RandomizedRunner.class)
@SuppressCodecs(value = "Lucene3x")
// NOTE: this class is in o.a.lucene.util since it uses some classes that are related
// to the test framework that didn't make sense to copy but are package private access
public abstract class AbstractRandomizedTest extends RandomizedTest {
/**
* Annotation for integration tests
*/
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
@TestGroup(enabled = true, sysProperty = SYSPROP_INTEGRATION)
public @interface IntegrationTests {
}
// --------------------------------------------------------------------
// Test groups, system properties and other annotations modifying tests
// --------------------------------------------------------------------
/**
* @see #ignoreAfterMaxFailures
*/
public static final String SYSPROP_MAXFAILURES = "tests.maxfailures";
/**
* @see #ignoreAfterMaxFailures
*/
public static final String SYSPROP_FAILFAST = "tests.failfast";
public static final String SYSPROP_INTEGRATION = "tests.integration";
// -----------------------------------------------------------------
// Truly immutable fields and constants, initialized once and valid
// for all suites ever since.
// -----------------------------------------------------------------
/**
* Use this constant when creating Analyzers and any other version-dependent stuff.
* <p><b>NOTE:</b> Change this when development starts for new Lucene version:
*/
public static final Version TEST_VERSION_CURRENT = Lucene.VERSION;
/**
* True if and only if tests are run in verbose mode. If this flag is false
* tests are not expected to print any messages.
*/
public static final boolean VERBOSE = systemPropertyAsBoolean("tests.verbose", false);
/**
* A random multiplier which you should use when writing random tests:
* multiply it by the number of iterations to scale your tests (for nightly builds).
*/
public static final int RANDOM_MULTIPLIER = systemPropertyAsInt("tests.multiplier", 1);
/**
* TODO: javadoc?
*/
public static final String DEFAULT_LINE_DOCS_FILE = "europarl.lines.txt.gz";
/**
* the line file used by LineFileDocs
*/
public static final String TEST_LINE_DOCS_FILE = System.getProperty("tests.linedocsfile", DEFAULT_LINE_DOCS_FILE);
/**
* Create indexes in this directory, optimally use a subdir, named after the test
*/
public static final File TEMP_DIR;
static {
String s = System.getProperty("tempDir", System.getProperty("java.io.tmpdir"));
if (s == null)
throw new RuntimeException("To run tests, you need to define system property 'tempDir' or 'java.io.tmpdir'.");
TEMP_DIR = new File(s);
TEMP_DIR.mkdirs();
}
/**
* These property keys will be ignored in verification of altered properties.
*
* @see SystemPropertiesInvariantRule
* @see #ruleChain
* @see #classRules
*/
private static final String[] IGNORED_INVARIANT_PROPERTIES = {
"user.timezone", "java.rmi.server.randomIDs", "sun.nio.ch.bugLevel"
};
// -----------------------------------------------------------------
// Fields initialized in class or instance rules.
// -----------------------------------------------------------------
// -----------------------------------------------------------------
// Class level (suite) rules.
// -----------------------------------------------------------------
/**
* Stores the currently class under test.
*/
private static final TestRuleStoreClassName classNameRule;
/**
* Class environment setup rule.
*/
static final TestRuleSetupAndRestoreClassEnv classEnvRule;
/**
* Suite failure marker (any error in the test or suite scope).
*/
public final static TestRuleMarkFailure suiteFailureMarker =
new TestRuleMarkFailure();
/**
* Ignore tests after hitting a designated number of initial failures. This
* is truly a "static" global singleton since it needs to span the lifetime of all
* test classes running inside this JVM (it cannot be part of a class rule).
* <p/>
* <p>This poses some problems for the test framework's tests because these sometimes
* trigger intentional failures which add up to the global count. This field contains
* a (possibly) changing reference to {@link TestRuleIgnoreAfterMaxFailures} and we
* dispatch to its current value from the {@link #classRules} chain using {@link TestRuleDelegate}.
*/
private static final AtomicReference<TestRuleIgnoreAfterMaxFailures> ignoreAfterMaxFailuresDelegate;
private static final TestRule ignoreAfterMaxFailures;
static {
int maxFailures = systemPropertyAsInt(SYSPROP_MAXFAILURES, Integer.MAX_VALUE);
boolean failFast = systemPropertyAsBoolean(SYSPROP_FAILFAST, false);
if (failFast) {
if (maxFailures == Integer.MAX_VALUE) {
maxFailures = 1;
} else {
Logger.getLogger(LuceneTestCase.class.getSimpleName()).warning(
"Property '" + SYSPROP_MAXFAILURES + "'=" + maxFailures + ", 'failfast' is" +
" ignored.");
}
}
ignoreAfterMaxFailuresDelegate =
new AtomicReference<TestRuleIgnoreAfterMaxFailures>(
new TestRuleIgnoreAfterMaxFailures(maxFailures));
ignoreAfterMaxFailures = TestRuleDelegate.of(ignoreAfterMaxFailuresDelegate);
}
/**
* Temporarily substitute the global {@link TestRuleIgnoreAfterMaxFailures}. See
* {@link #ignoreAfterMaxFailuresDelegate} for some explanation why this method
* is needed.
*/
public static TestRuleIgnoreAfterMaxFailures replaceMaxFailureRule(TestRuleIgnoreAfterMaxFailures newValue) {
return ignoreAfterMaxFailuresDelegate.getAndSet(newValue);
}
/**
* Max 10mb of static data stored in a test suite class after the suite is complete.
* Prevents static data structures leaking and causing OOMs in subsequent tests.
*/
private final static long STATIC_LEAK_THRESHOLD = 10 * 1024 * 1024;
/**
* By-name list of ignored types like loggers etc.
*/
private final static Set<String> STATIC_LEAK_IGNORED_TYPES =
Collections.unmodifiableSet(new HashSet<String>(Arrays.asList(
EnumSet.class.getName())));
private final static Set<Class<?>> TOP_LEVEL_CLASSES =
Collections.unmodifiableSet(new HashSet<Class<?>>(Arrays.asList(
AbstractRandomizedTest.class, LuceneTestCase.class,
ElasticsearchIntegrationTest.class, ElasticsearchTestCase.class)));
/**
* This controls how suite-level rules are nested. It is important that _all_ rules declared
* in {@link LuceneTestCase} are executed in proper order if they depend on each
* other.
*/
@ClassRule
public static TestRule classRules = RuleChain
.outerRule(new TestRuleIgnoreTestSuites())
.around(ignoreAfterMaxFailures)
.around(suiteFailureMarker)
.around(new TestRuleAssertionsRequired())
.around(new StaticFieldsInvariantRule(STATIC_LEAK_THRESHOLD, true) {
@Override
protected boolean accept(java.lang.reflect.Field field) {
// Don't count known classes that consume memory once.
if (STATIC_LEAK_IGNORED_TYPES.contains(field.getType().getName())) {
return false;
}
// Don't count references from ourselves, we're top-level.
if (TOP_LEVEL_CLASSES.contains(field.getDeclaringClass())) {
return false;
}
return super.accept(field);
}
})
.around(new NoClassHooksShadowingRule())
.around(new NoInstanceHooksOverridesRule() {
@Override
protected boolean verify(Method key) {
String name = key.getName();
return !(name.equals("setUp") || name.equals("tearDown"));
}
})
.around(new SystemPropertiesInvariantRule(IGNORED_INVARIANT_PROPERTIES))
.around(classNameRule = new TestRuleStoreClassName())
.around(classEnvRule = new TestRuleSetupAndRestoreClassEnv());
// -----------------------------------------------------------------
// Test level rules.
// -----------------------------------------------------------------
/**
* Enforces {@link #setUp()} and {@link #tearDown()} calls are chained.
*/
private TestRuleSetupTeardownChained parentChainCallRule = new TestRuleSetupTeardownChained();
/**
* Save test thread and name.
*/
private TestRuleThreadAndTestName threadAndTestNameRule = new TestRuleThreadAndTestName();
/**
* Taint suite result with individual test failures.
*/
private TestRuleMarkFailure testFailureMarker = new TestRuleMarkFailure(suiteFailureMarker);
/**
* This controls how individual test rules are nested. It is important that
* _all_ rules declared in {@link LuceneTestCase} are executed in proper order
* if they depend on each other.
*/
@Rule
public final TestRule ruleChain = RuleChain
.outerRule(testFailureMarker)
.around(ignoreAfterMaxFailures)
.around(threadAndTestNameRule)
.around(new SystemPropertiesInvariantRule(IGNORED_INVARIANT_PROPERTIES))
.around(new TestRuleSetupAndRestoreInstanceEnv())
.around(new TestRuleFieldCacheSanity())
.around(parentChainCallRule);
// -----------------------------------------------------------------
// Suite and test case setup/ cleanup.
// -----------------------------------------------------------------
/**
* For subclasses to override. Overrides must call {@code super.setUp()}.
*/
@Before
public void setUp() throws Exception {
parentChainCallRule.setupCalled = true;
}
/**
* For subclasses to override. Overrides must call {@code super.tearDown()}.
*/
@After
public void tearDown() throws Exception {
parentChainCallRule.teardownCalled = true;
}
// -----------------------------------------------------------------
// Test facilities and facades for subclasses.
// -----------------------------------------------------------------
/**
* Registers a {@link Closeable} resource that should be closed after the test
* completes.
*
* @return <code>resource</code> (for call chaining).
*/
public <T extends Closeable> T closeAfterTest(T resource) {
return RandomizedContext.current().closeAtEnd(resource, LifecycleScope.TEST);
}
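// Illustrative usage sketch (an assumption, not part of the original class): registering a resource so the
// framework closes it when the test finishes; the stream and the "tempFile" variable are hypothetical.
//
// InputStream in = closeAfterTest(new FileInputStream(tempFile));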
/**
* Registers a {@link Closeable} resource that should be closed after the suite
* completes.
*
* @return <code>resource</code> (for call chaining).
*/
public static <T extends Closeable> T closeAfterSuite(T resource) {
return RandomizedContext.current().closeAtEnd(resource, LifecycleScope.SUITE);
}
/**
* Return the current class being tested.
*/
public static Class<?> getTestClass() {
return classNameRule.getTestClass();
}
/**
* Return the name of the currently executing test case.
*/
public String getTestName() {
return threadAndTestNameRule.testMethodName;
}
}
| 0true
|
src_test_java_org_apache_lucene_util_AbstractRandomizedTest.java
|
57 |
public class AbstractContentService {
private static final Log LOG = LogFactory.getLog(AbstractContentService.class);
public <T, U> List<T> findItems(SandBox sandbox, Criteria c, Class<T> baseClass, Class<U> concreteClass, String originalIdProperty) {
c.add(Restrictions.eq("archivedFlag", false));
if (sandbox == null) {
// Query is hitting the production sandbox for a single site
c.add(Restrictions.isNull("sandbox"));
return (List<T>) c.list();
} else if (SandBoxType.PRODUCTION.equals(sandbox.getSandBoxType())) {
// Query is hitting the production sandbox for a multi-site
c.add(Restrictions.eq("sandbox", sandbox));
return (List<T>) c.list();
} else {
addSandboxCriteria(sandbox, c, concreteClass, originalIdProperty);
return (List<T>) c.list();
}
}
public <T> Long countItems(SandBox sandbox, Criteria c, Class<T> concreteClass, String originalIdProperty) {
c.add(Restrictions.eq("archivedFlag", false));
c.setProjection(Projections.rowCount());
if (sandbox == null) {
// Query is hitting the production sandbox for a single site
c.add(Restrictions.isNull("sandbox"));
return (Long) c.uniqueResult();
} else if (SandBoxType.PRODUCTION.equals(sandbox.getSandBoxType())) {
// Query is hitting the production sandbox for a multi-site
c.add(Restrictions.eq("sandbox", sandbox));
return (Long) c.uniqueResult();
} else {
addSandboxCriteria(sandbox, c, concreteClass, originalIdProperty);
return (Long) c.uniqueResult();
}
}
private <T> void addSandboxCriteria(SandBox sandbox, Criteria c, Class<T> type, String originalIdProperty) {
Criterion originalSandboxExpression = Restrictions.eq("originalSandBox", sandbox);
Criterion currentSandboxExpression = Restrictions.eq("sandbox", sandbox);
Criterion userSandboxExpression = Restrictions.or(currentSandboxExpression, originalSandboxExpression);
Criterion productionSandboxExpression = null;
if (sandbox.getSite() == null || sandbox.getSite().getProductionSandbox() == null) {
productionSandboxExpression = Restrictions.isNull("sandbox");
} else {
productionSandboxExpression = Restrictions.eq("sandbox", sandbox.getSite().getProductionSandbox());
}
if (productionSandboxExpression != null) {
c.add(Restrictions.or(userSandboxExpression, productionSandboxExpression));
} else {
c.add(userSandboxExpression);
}
// Build a sub-query to exclude items from production that are also in my sandbox.
// (e.g. my sandbox always wins even if the items in my sandbox don't match the
// current criteria.)
//
// This subquery prevents the following:
// 1. Duplicate items (one for sbox, one for prod)
// 2. Filter issues where the production item qualifies for the passed in criteria
// but has been modified so that the item in the sandbox no longer does.
// 3. Inverse of #2.
DetachedCriteria existsInSboxCriteria = DetachedCriteria.forClass(type, "sboxItem");
existsInSboxCriteria.add(userSandboxExpression);
existsInSboxCriteria.add(Restrictions.eq("archivedFlag", false));
String outerAlias = c.getAlias();
existsInSboxCriteria.add(Property.forName(outerAlias + ".id").eqProperty("sboxItem."+originalIdProperty));
existsInSboxCriteria.setProjection(Projections.id());
c.add(Subqueries.notExists(existsInSboxCriteria));
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_common_AbstractContentService.java
|
441 |
public static class JvmStats implements Streamable, ToXContent {
ObjectIntOpenHashMap<JvmVersion> versions;
long threads;
long maxUptime;
long heapUsed;
long heapMax;
JvmStats() {
versions = new ObjectIntOpenHashMap<JvmVersion>();
threads = 0;
maxUptime = 0;
heapMax = 0;
heapUsed = 0;
}
public ObjectIntOpenHashMap<JvmVersion> getVersions() {
return versions;
}
/**
* The total number of threads in the cluster
*/
public long getThreads() {
return threads;
}
/**
* The maximum uptime of a node in the cluster
*/
public TimeValue getMaxUpTime() {
return new TimeValue(maxUptime);
}
/**
* Total heap used in the cluster
*/
public ByteSizeValue getHeapUsed() {
return new ByteSizeValue(heapUsed);
}
/**
* Maximum total heap available to the cluster
*/
public ByteSizeValue getHeapMax() {
return new ByteSizeValue(heapMax);
}
public void addNodeInfoStats(NodeInfo nodeInfo, NodeStats nodeStats) {
versions.addTo(new JvmVersion(nodeInfo.getJvm()), 1);
org.elasticsearch.monitor.jvm.JvmStats js = nodeStats.getJvm();
if (js == null) {
return;
}
if (js.threads() != null) {
threads += js.threads().count();
}
maxUptime = Math.max(maxUptime, js.uptime().millis());
if (js.mem() != null) {
heapUsed += js.mem().getHeapUsed().bytes();
heapMax += js.mem().getHeapMax().bytes();
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
int size = in.readVInt();
versions = new ObjectIntOpenHashMap<JvmVersion>(size);
for (; size > 0; size--) {
versions.addTo(JvmVersion.readJvmVersion(in), in.readVInt());
}
threads = in.readVLong();
maxUptime = in.readVLong();
heapUsed = in.readVLong();
heapMax = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(versions.size());
for (ObjectIntCursor<JvmVersion> v : versions) {
v.key.writeTo(out);
out.writeVInt(v.value);
}
out.writeVLong(threads);
out.writeVLong(maxUptime);
out.writeVLong(heapUsed);
out.writeVLong(heapMax);
}
public static JvmStats readJvmStats(StreamInput in) throws IOException {
JvmStats jvmStats = new JvmStats();
jvmStats.readFrom(in);
return jvmStats;
}
static final class Fields {
static final XContentBuilderString VERSIONS = new XContentBuilderString("versions");
static final XContentBuilderString VERSION = new XContentBuilderString("version");
static final XContentBuilderString VM_NAME = new XContentBuilderString("vm_name");
static final XContentBuilderString VM_VERSION = new XContentBuilderString("vm_version");
static final XContentBuilderString VM_VENDOR = new XContentBuilderString("vm_vendor");
static final XContentBuilderString COUNT = new XContentBuilderString("count");
static final XContentBuilderString THREADS = new XContentBuilderString("threads");
static final XContentBuilderString MAX_UPTIME = new XContentBuilderString("max_uptime");
static final XContentBuilderString MAX_UPTIME_IN_MILLIS = new XContentBuilderString("max_uptime_in_millis");
static final XContentBuilderString MEM = new XContentBuilderString("mem");
static final XContentBuilderString HEAP_USED = new XContentBuilderString("heap_used");
static final XContentBuilderString HEAP_USED_IN_BYTES = new XContentBuilderString("heap_used_in_bytes");
static final XContentBuilderString HEAP_MAX = new XContentBuilderString("heap_max");
static final XContentBuilderString HEAP_MAX_IN_BYTES = new XContentBuilderString("heap_max_in_bytes");
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.timeValueField(Fields.MAX_UPTIME_IN_MILLIS, Fields.MAX_UPTIME, maxUptime);
builder.startArray(Fields.VERSIONS);
for (ObjectIntCursor<JvmVersion> v : versions) {
builder.startObject();
builder.field(Fields.VERSION, v.key.version);
builder.field(Fields.VM_NAME, v.key.vmName);
builder.field(Fields.VM_VERSION, v.key.vmVersion);
builder.field(Fields.VM_VENDOR, v.key.vmVendor);
builder.field(Fields.COUNT, v.value);
builder.endObject();
}
builder.endArray();
builder.startObject(Fields.MEM);
builder.byteSizeField(Fields.HEAP_USED_IN_BYTES, Fields.HEAP_USED, heapUsed);
builder.byteSizeField(Fields.HEAP_MAX_IN_BYTES, Fields.HEAP_MAX, heapMax);
builder.endObject();
builder.field(Fields.THREADS, threads);
return builder;
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_stats_ClusterStatsNodes.java
|
379 |
public class ClusterRerouteAction extends ClusterAction<ClusterRerouteRequest, ClusterRerouteResponse, ClusterRerouteRequestBuilder> {
public static final ClusterRerouteAction INSTANCE = new ClusterRerouteAction();
public static final String NAME = "cluster/reroute";
private ClusterRerouteAction() {
super(NAME);
}
@Override
public ClusterRerouteResponse newResponse() {
return new ClusterRerouteResponse();
}
@Override
public ClusterRerouteRequestBuilder newRequestBuilder(ClusterAdminClient client) {
return new ClusterRerouteRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_reroute_ClusterRerouteAction.java
|
42 |
public class StatsCommand extends AbstractTextCommand {
static final byte[] STAT = stringToBytes("STAT ");
static final byte[] UPTIME = stringToBytes("uptime ");
static final byte[] BYTES = stringToBytes("bytes ");
static final byte[] CMD_SET = stringToBytes("cmd_set ");
static final byte[] CMD_GET = stringToBytes("cmd_get ");
static final byte[] CMD_TOUCH = stringToBytes("cmd_touch ");
static final byte[] THREADS = stringToBytes("threads ");
static final byte[] WAITING_REQUESTS = stringToBytes("waiting_requests ");
static final byte[] GET_HITS = stringToBytes("get_hits ");
static final byte[] GET_MISSES = stringToBytes("get_misses ");
static final byte[] DELETE_HITS = stringToBytes("delete_hits ");
static final byte[] DELETE_MISSES = stringToBytes("delete_misses ");
static final byte[] INCR_HITS = stringToBytes("incr_hits ");
static final byte[] INCR_MISSES = stringToBytes("incr_misses ");
static final byte[] DECR_HITS = stringToBytes("decr_hits ");
static final byte[] DECR_MISSES = stringToBytes("decr_misses ");
static final byte[] CURR_CONNECTIONS = stringToBytes("curr_connections ");
static final byte[] TOTAL_CONNECTIONS = stringToBytes("total_connections ");
ByteBuffer response;
public StatsCommand() {
super(TextCommandType.STATS);
}
public boolean readFrom(ByteBuffer cb) {
return true;
}
public void setResponse(Stats stats) {
response = ByteBuffer.allocate(1000);
putInt(UPTIME, stats.uptime);
putInt(THREADS, stats.threads);
putInt(WAITING_REQUESTS, stats.waiting_requests);
putInt(CURR_CONNECTIONS, stats.curr_connections);
putInt(TOTAL_CONNECTIONS, stats.total_connections);
putLong(BYTES, stats.bytes);
putLong(CMD_GET, stats.cmd_get);
putLong(CMD_SET, stats.cmd_set);
putLong(CMD_TOUCH, stats.cmd_touch);
putLong(GET_HITS, stats.get_hits);
putLong(GET_MISSES, stats.get_misses);
putLong(DELETE_HITS, stats.delete_hits);
putLong(DELETE_MISSES, stats.delete_misses);
putLong(INCR_HITS, stats.incr_hits);
putLong(INCR_MISSES, stats.incr_misses);
putLong(DECR_HITS, stats.decr_hits);
putLong(DECR_MISSES, stats.decr_misses);
response.put(END);
response.flip();
}
private void putInt(byte[] name, int value) {
response.put(STAT);
response.put(name);
response.put(stringToBytes(String.valueOf(value)));
response.put(RETURN);
}
private void putLong(byte[] name, long value) {
response.put(STAT);
response.put(name);
response.put(stringToBytes(String.valueOf(value)));
response.put(RETURN);
}
public boolean writeTo(ByteBuffer bb) {
if (response == null) {
response = ByteBuffer.allocate(0);
}
IOUtil.copyToHeapBuffer(response, bb);
return !response.hasRemaining();
}
@Override
public String toString() {
return "StatsCommand{"
+ '}' + super.toString();
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_memcache_StatsCommand.java
|
479 |
public class TransportGetAliasesAction extends TransportMasterNodeReadOperationAction<GetAliasesRequest, GetAliasesResponse> {
@Inject
public TransportGetAliasesAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
super(settings, transportService, clusterService, threadPool);
}
@Override
protected String transportAction() {
return GetAliasesAction.NAME;
}
@Override
protected String executor() {
// very lightweight operation, all in memory, no need to fork to a thread pool
return ThreadPool.Names.SAME;
}
@Override
protected GetAliasesRequest newRequest() {
return new GetAliasesRequest();
}
@Override
protected GetAliasesResponse newResponse() {
return new GetAliasesResponse();
}
@Override
protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<GetAliasesResponse> listener) throws ElasticsearchException {
String[] concreteIndices = state.metaData().concreteIndices(request.indices(), request.indicesOptions());
request.indices(concreteIndices);
@SuppressWarnings("unchecked") // ImmutableList to List results incompatible type
ImmutableOpenMap<String, List<AliasMetaData>> result = (ImmutableOpenMap) state.metaData().findAliases(request.aliases(), request.indices());
listener.onResponse(new GetAliasesResponse(result));
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_admin_indices_alias_get_TransportGetAliasesAction.java
|
88 |
private enum State {
S, A, B, C, D, E, F
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_console_ODFACommandStream.java
|
613 |
public class CommonStatsFlags implements Streamable, Cloneable {
public final static CommonStatsFlags ALL = new CommonStatsFlags().all();
public final static CommonStatsFlags NONE = new CommonStatsFlags().clear();
private EnumSet<Flag> flags = EnumSet.allOf(Flag.class);
private String[] types = null;
private String[] groups = null;
private String[] fieldDataFields = null;
private String[] completionDataFields = null;
/**
* @param flags flags to set. If no flags are supplied, the default set (all flags) is kept.
*/
public CommonStatsFlags(Flag... flags) {
if (flags.length > 0) {
clear();
for (Flag f : flags) {
this.flags.add(f);
}
}
}
/**
* Sets all flags to return all stats.
*/
public CommonStatsFlags all() {
flags = EnumSet.allOf(Flag.class);
types = null;
groups = null;
fieldDataFields = null;
completionDataFields = null;
return this;
}
/**
* Clears all stats.
*/
public CommonStatsFlags clear() {
flags = EnumSet.noneOf(Flag.class);
types = null;
groups = null;
fieldDataFields = null;
completionDataFields = null;
return this;
}
public boolean anySet() {
return !flags.isEmpty();
}
public Flag[] getFlags() {
return flags.toArray(new Flag[flags.size()]);
}
/**
* Document types to return stats for. Mainly affects {@link Flag#Indexing} when
* enabled, returning specific indexing stats for those types.
*/
public CommonStatsFlags types(String... types) {
this.types = types;
return this;
}
/**
* Document types to return stats for. Mainly affects {@link Flag#Indexing} when
* enabled, returning specific indexing stats for those types.
*/
public String[] types() {
return this.types;
}
/**
* Sets specific search group stats to retrieve the stats for. Mainly affects search
* when enabled.
*/
public CommonStatsFlags groups(String... groups) {
this.groups = groups;
return this;
}
public String[] groups() {
return this.groups;
}
/**
* Sets the specific fields for which field data stats should be retrieved. Mainly affects the
* {@link Flag#FieldData} stats when enabled.
*/
public CommonStatsFlags fieldDataFields(String... fieldDataFields) {
this.fieldDataFields = fieldDataFields;
return this;
}
public String[] fieldDataFields() {
return this.fieldDataFields;
}
public CommonStatsFlags completionDataFields(String... completionDataFields) {
this.completionDataFields = completionDataFields;
return this;
}
public String[] completionDataFields() {
return this.completionDataFields;
}
public boolean isSet(Flag flag) {
return flags.contains(flag);
}
boolean unSet(Flag flag) {
return flags.remove(flag);
}
void set(Flag flag) {
flags.add(flag);
}
public CommonStatsFlags set(Flag flag, boolean add) {
if (add) {
set(flag);
} else {
unSet(flag);
}
return this;
}
public static CommonStatsFlags readCommonStatsFlags(StreamInput in) throws IOException {
CommonStatsFlags flags = new CommonStatsFlags();
flags.readFrom(in);
return flags;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
long longFlags = 0;
for (Flag flag : flags) {
longFlags |= (1 << flag.ordinal());
}
out.writeLong(longFlags);
out.writeStringArrayNullable(types);
out.writeStringArrayNullable(groups);
out.writeStringArrayNullable(fieldDataFields);
out.writeStringArrayNullable(completionDataFields);
}
@Override
public void readFrom(StreamInput in) throws IOException {
final long longFlags = in.readLong();
flags.clear();
for (Flag flag : Flag.values()) {
if ((longFlags & (1 << flag.ordinal())) != 0) {
flags.add(flag);
}
}
types = in.readStringArray();
groups = in.readStringArray();
fieldDataFields = in.readStringArray();
completionDataFields = in.readStringArray();
}
@Override
public CommonStatsFlags clone() {
try {
CommonStatsFlags cloned = (CommonStatsFlags) super.clone();
cloned.flags = flags.clone();
return cloned;
} catch (CloneNotSupportedException e) {
throw new AssertionError(e);
}
}
public static enum Flag {
        // Do not change the order of these flags; we use
        // the ordinal for encoding! Only append to the end!
Store("store"),
Indexing("indexing"),
Get("get"),
Search("search"),
Merge("merge"),
Flush("flush"),
Refresh("refresh"),
FilterCache("filter_cache"),
IdCache("id_cache"),
FieldData("fielddata"),
Docs("docs"),
Warmer("warmer"),
Percolate("percolate"),
Completion("completion"),
Segments("segments"),
Translog("translog");
private final String restName;
Flag(String restName) {
this.restName = restName;
}
public String getRestName() {
return restName;
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_stats_CommonStatsFlags.java
|
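The writeTo/readFrom pair above serializes the flag set as a single long bitmask keyed by each constant's ordinal, which is why the comment in Flag warns never to reorder the values. Below is a minimal standalone sketch of that round trip using a plain EnumSet instead of the Elasticsearch stream classes; the class, enum subset and method names are illustrative only.

import java.util.EnumSet;

public class FlagBitmaskSketch {
    enum Flag { Store, Indexing, Get, Search } // illustrative subset of the flags above

    // Encode an EnumSet as a long bitmask using each constant's ordinal.
    static long encode(EnumSet<Flag> flags) {
        long bits = 0;
        for (Flag f : flags) {
            bits |= (1L << f.ordinal());
        }
        return bits;
    }

    // Decode the bitmask back into an EnumSet by probing every constant.
    static EnumSet<Flag> decode(long bits) {
        EnumSet<Flag> flags = EnumSet.noneOf(Flag.class);
        for (Flag f : Flag.values()) {
            if ((bits & (1L << f.ordinal())) != 0) {
                flags.add(f);
            }
        }
        return flags;
    }

    public static void main(String[] args) {
        EnumSet<Flag> original = EnumSet.of(Flag.Store, Flag.Search);
        long wire = encode(original);
        System.out.println(decode(wire).equals(original)); // prints true
    }
}

The sketch shifts a long literal (1L); the class above shifts an int, which is fine for the sixteen flags it defines but would wrap around once ordinals reach 32.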
841 |
searchServiceTransportAction.sendClearAllScrollContexts(node, request, new ActionListener<Boolean>() {
@Override
public void onResponse(Boolean success) {
onFreedContext();
}
@Override
public void onFailure(Throwable e) {
onFailedFreedContext(e, node);
}
});
| 1no label
|
src_main_java_org_elasticsearch_action_search_TransportClearScrollAction.java
|
1,376 |
public class TitanCassandraHadoopGraph extends TitanHadoopGraph {
public TitanCassandraHadoopGraph(TitanHadoopSetup setup) {
super(setup);
}
public FaunusVertex readHadoopVertex(final Configuration configuration, final ByteBuffer key, final SortedMap<ByteBuffer, Column> value) {
return super.readHadoopVertex(configuration, StaticArrayBuffer.of(key), new CassandraMapIterable(value));
}
private static class CassandraMapIterable implements Iterable<Entry> {
private final SortedMap<ByteBuffer, Column> columnValues;
public CassandraMapIterable(final SortedMap<ByteBuffer, Column> columnValues) {
Preconditions.checkNotNull(columnValues);
this.columnValues = columnValues;
}
@Override
public Iterator<Entry> iterator() {
return new CassandraMapIterator(columnValues.entrySet().iterator());
}
}
private static class CassandraMapIterator implements Iterator<Entry> {
private final Iterator<Map.Entry<ByteBuffer, Column>> iterator;
public CassandraMapIterator(final Iterator<Map.Entry<ByteBuffer, Column>> iterator) {
this.iterator = iterator;
}
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public Entry next() {
final Map.Entry<ByteBuffer, Column> entry = iterator.next();
ByteBuffer col = entry.getKey();
ByteBuffer val = entry.getValue().value();
return StaticArrayEntry.of(StaticArrayBuffer.of(col), StaticArrayBuffer.of(val));
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_formats_cassandra_TitanCassandraHadoopGraph.java
|
1,057 |
public class TaxType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, TaxType> TYPES = new LinkedHashMap<String, TaxType>();
public static final TaxType CITY = new TaxType("CITY", "City");
public static final TaxType STATE = new TaxType("STATE", "State");
public static final TaxType DISTRICT = new TaxType("DISTRICT", "District");
public static final TaxType COUNTY = new TaxType("COUNTY", "County");
public static final TaxType COUNTRY = new TaxType("COUNTRY", "Country");
public static final TaxType SHIPPING = new TaxType("SHIPPING", "Shipping");
// Used by SimpleTaxModule to represent total taxes owed.
public static final TaxType COMBINED = new TaxType("COMBINED", "Combined");
public static TaxType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public TaxType() {
//do nothing
}
public TaxType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
private void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
TaxType other = (TaxType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_domain_TaxType.java
|
340 |
private static class IncrementorEntryProcessor extends AbstractEntryProcessor implements DataSerializable {
IncrementorEntryProcessor() {
super(true);
}
public Object process(Map.Entry entry) {
Integer value = (Integer) entry.getValue();
if (value == null) {
value = 0;
}
if (value == -1) {
entry.setValue(null);
return null;
}
value++;
entry.setValue(value);
return value;
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
}
@Override
public void readData(ObjectDataInput in) throws IOException {
}
public void processBackup(Map.Entry entry) {
entry.setValue((Integer) entry.getValue() + 1);
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTest.java
|
172 |
public abstract class OSoftThread extends Thread implements OService {
private volatile boolean shutdownFlag;
public OSoftThread() {
}
public OSoftThread(final ThreadGroup iThreadGroup) {
super(iThreadGroup, OSoftThread.class.getSimpleName());
setDaemon(true);
}
public OSoftThread(final String name) {
super(name);
setDaemon(true);
}
public OSoftThread(final ThreadGroup group, final String name) {
super(group, name);
setDaemon(true);
}
protected abstract void execute() throws Exception;
public void startup() {
}
public void shutdown() {
}
public void sendShutdown() {
shutdownFlag = true;
}
@Override
public void run() {
startup();
while (!shutdownFlag && !isInterrupted()) {
try {
beforeExecution();
execute();
afterExecution();
} catch (Throwable t) {
t.printStackTrace();
}
}
shutdown();
}
  /**
   * Pauses the current thread until the given timeout elapses or the thread is woken up by another thread.
   *
   * @param iTime timeout in milliseconds; values <= 0 mean wait indefinitely
   * @return true if the timeout elapsed, false if the thread was woken up (interrupted) by another thread
   */
public static boolean pauseCurrentThread(long iTime) {
try {
if (iTime <= 0)
iTime = Long.MAX_VALUE;
Thread.sleep(iTime);
return true;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return false;
}
}
protected void beforeExecution() throws InterruptedException {
return;
}
protected void afterExecution() throws InterruptedException {
return;
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_thread_OSoftThread.java
|
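OSoftThread above stops cooperatively: sendShutdown() flips a volatile flag that the run() loop re-checks between iterations, alongside the interrupt status. A self-contained sketch of the same pattern in plain Java follows; SoftWorker and doWork are invented names and not part of OrientDB.

public class SoftWorker extends Thread {
    private volatile boolean shutdownRequested;

    // Ask the worker to stop after its current iteration.
    public void sendShutdown() {
        shutdownRequested = true;
    }

    @Override
    public void run() {
        while (!shutdownRequested && !isInterrupted()) {
            try {
                doWork();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the flag so the loop condition sees it
            } catch (Exception e) {
                e.printStackTrace(); // ordinary failures keep the loop alive, as in the class above
            }
        }
    }

    private void doWork() throws InterruptedException {
        Thread.sleep(100); // placeholder for real work
    }

    public static void main(String[] args) throws InterruptedException {
        SoftWorker worker = new SoftWorker();
        worker.start();
        Thread.sleep(300);
        worker.sendShutdown();
        worker.join();
    }
}

The volatile write in sendShutdown() is guaranteed to become visible to the worker, but the loop only notices it between iterations, so long-running work should also poll the flag or respond to interruption.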
201 |
public class AuditableListener {
public static final String customerRequestAttributeName = "customer";
@PrePersist
public void setAuditCreatedBy(Object entity) throws Exception {
if (entity.getClass().isAnnotationPresent(Entity.class)) {
Field field = getSingleField(entity.getClass(), "auditable");
field.setAccessible(true);
if (field.isAnnotationPresent(Embedded.class)) {
Object auditable = field.get(entity);
if (auditable == null) {
field.set(entity, new Auditable());
auditable = field.get(entity);
}
Field temporalField = auditable.getClass().getDeclaredField("dateCreated");
Field agentField = auditable.getClass().getDeclaredField("createdBy");
setAuditValueTemporal(temporalField, auditable);
setAuditValueAgent(agentField, auditable);
}
}
}
@PreUpdate
public void setAuditUpdatedBy(Object entity) throws Exception {
if (entity.getClass().isAnnotationPresent(Entity.class)) {
Field field = getSingleField(entity.getClass(), "auditable");
field.setAccessible(true);
if (field.isAnnotationPresent(Embedded.class)) {
Object auditable = field.get(entity);
if (auditable == null) {
field.set(entity, new Auditable());
auditable = field.get(entity);
}
Field temporalField = auditable.getClass().getDeclaredField("dateUpdated");
Field agentField = auditable.getClass().getDeclaredField("updatedBy");
setAuditValueTemporal(temporalField, auditable);
setAuditValueAgent(agentField, auditable);
}
}
}
protected void setAuditValueTemporal(Field field, Object entity) throws IllegalArgumentException, IllegalAccessException {
Calendar cal = SystemTime.asCalendar();
field.setAccessible(true);
field.set(entity, cal.getTime());
}
protected void setAuditValueAgent(Field field, Object entity) throws IllegalArgumentException, IllegalAccessException {
Long customerId = 0L;
try {
BroadleafRequestContext requestContext = BroadleafRequestContext.getBroadleafRequestContext();
if (requestContext != null && requestContext.getWebRequest() != null) {
Object customer = requestContext.getWebRequest().getAttribute(customerRequestAttributeName, RequestAttributes.SCOPE_REQUEST);
if (customer != null) {
Class<?> customerClass = customer.getClass();
Field userNameField = getSingleField(customerClass, "username");
userNameField.setAccessible(true);
String username = (String) userNameField.get(customer);
if (username != null) {
//the customer has been persisted
Field idField = getSingleField(customerClass, "id");
idField.setAccessible(true);
customerId = (Long) idField.get(customer);
}
}
}
} catch (Exception e) {
e.printStackTrace();
}
field.setAccessible(true);
field.set(entity, customerId);
}
private Field getSingleField(Class<?> clazz, String fieldName) throws IllegalStateException {
try {
return clazz.getDeclaredField(fieldName);
} catch (NoSuchFieldException nsf) {
// Try superclass
if (clazz.getSuperclass() != null) {
return getSingleField(clazz.getSuperclass(), fieldName);
}
return null;
}
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_audit_AuditableListener.java
|
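The getSingleField() helper above resolves a declared field by climbing the superclass chain, so private fields inherited from a mapped superclass can still be reached reflectively. A standalone sketch of that lookup, with Base and Child invented purely for illustration:

import java.lang.reflect.Field;

public class FieldLookupSketch {
    static class Base { private String auditable = "base-value"; }
    static class Child extends Base { }

    // Find a declared field on the class or any superclass; null if it exists nowhere in the chain.
    static Field findField(Class<?> clazz, String name) {
        for (Class<?> c = clazz; c != null; c = c.getSuperclass()) {
            try {
                return c.getDeclaredField(name);
            } catch (NoSuchFieldException ignored) {
                // not declared here, keep climbing
            }
        }
        return null;
    }

    public static void main(String[] args) throws Exception {
        Field field = findField(Child.class, "auditable");
        field.setAccessible(true); // needed because the field is private on Base
        System.out.println(field.get(new Child())); // prints "base-value"
    }
}

Swallowing NoSuchFieldException at each level and returning null only after the chain is exhausted mirrors the listener's behaviour of leaving the miss to the caller.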
398 |
context.getExecutionService().execute(new Runnable() {
public void run() {
try {
lastCleanup = Clock.currentTimeMillis();
for (Map.Entry<K, CacheRecord<K>> entry : cache.entrySet()) {
if (entry.getValue().expired()) {
cache.remove(entry.getKey());
}
}
} finally {
canCleanUp.set(true);
}
}
});
| 0true
|
hazelcast-client_src_main_java_com_hazelcast_client_nearcache_ClientNearCache.java
|
2,237 |
public abstract class ScoreFunction {
private final CombineFunction scoreCombiner;
public abstract void setNextReader(AtomicReaderContext context);
public abstract double score(int docId, float subQueryScore);
public abstract Explanation explainScore(int docId, Explanation subQueryExpl);
public CombineFunction getDefaultScoreCombiner() {
return scoreCombiner;
}
protected ScoreFunction(CombineFunction scoreCombiner) {
this.scoreCombiner = scoreCombiner;
}
}
| 1no label
|
src_main_java_org_elasticsearch_common_lucene_search_function_ScoreFunction.java
|
1,354 |
public abstract class SourceFile extends CeylonUnit {
public SourceFile(IdePhasedUnit phasedUnit) {
createPhasedUnitRef(phasedUnit);
}
@Override
protected IdePhasedUnit setPhasedUnitIfNecessary() { return phasedUnitRef.get(); }
@Override
public String getSourceFullPath() {
return getFullPath();
}
@Override
public String getCeylonFileName() {
return getFilename();
}
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_core_model_SourceFile.java
|
3 |
{
@Override
public void enteredCluster( ClusterConfiguration clusterConfiguration )
{
clusterClient.performRoleElections();
clusterClient.removeClusterListener( this );
}
});
| 1no label
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_backup_HaBackupProvider.java
|
320 |
@RunWith(HazelcastSerialClassRunner.class)
@Category(SlowTest.class)
public class ClientMapStoreTest {
static final String MAP_NAME = "clientMapStoreLoad";
Config nodeConfig;
@Before
public void setup() {
nodeConfig = new Config();
MapConfig mapConfig = new MapConfig();
MapStoreConfig mapStoreConfig = new MapStoreConfig();
mapStoreConfig.setEnabled(true);
mapStoreConfig.setImplementation(new SimpleMapStore());
mapStoreConfig.setInitialLoadMode(MapStoreConfig.InitialLoadMode.EAGER);
mapConfig.setName(MAP_NAME);
mapConfig.setMapStoreConfig(mapStoreConfig);
nodeConfig.addMapConfig(mapConfig);
}
@After
public void tearDown() {
HazelcastClient.shutdownAll();
Hazelcast.shutdownAll();
}
@Test
public void testOneClient_KickOffMapStoreLoad() throws InterruptedException {
Hazelcast.newHazelcastInstance(nodeConfig);
ClientThread client1 = new ClientThread();
client1.start();
HazelcastTestSupport.assertJoinable(client1);
assertEquals(SimpleMapStore.MAX_KEYS, client1.mapSize);
}
@Test
public void testTwoClient_KickOffMapStoreLoad() throws InterruptedException {
Hazelcast.newHazelcastInstance(nodeConfig);
ClientThread[] clientThreads = new ClientThread[2];
for (int i = 0; i < clientThreads.length; i++) {
ClientThread client1 = new ClientThread();
client1.start();
clientThreads[i] = client1;
}
HazelcastTestSupport.assertJoinable(clientThreads);
for (ClientThread c : clientThreads) {
assertEquals(SimpleMapStore.MAX_KEYS, c.mapSize);
}
}
@Test
public void testOneClientKickOffMapStoreLoad_ThenNodeJoins() {
Hazelcast.newHazelcastInstance(nodeConfig);
ClientThread client1 = new ClientThread();
client1.start();
Hazelcast.newHazelcastInstance(nodeConfig);
HazelcastTestSupport.assertJoinable(client1);
assertEquals(SimpleMapStore.MAX_KEYS, client1.mapSize);
}
@Test
public void testForIssue2112() {
Hazelcast.newHazelcastInstance(nodeConfig);
ClientThread client1 = new ClientThread();
client1.start();
Hazelcast.newHazelcastInstance(nodeConfig);
ClientThread client2 = new ClientThread();
client2.start();
HazelcastTestSupport.assertJoinable(client1);
HazelcastTestSupport.assertJoinable(client2);
assertEquals(SimpleMapStore.MAX_KEYS, client1.mapSize);
assertEquals(SimpleMapStore.MAX_KEYS, client2.mapSize);
}
static class SimpleMapStore implements MapStore<String, String>, MapLoader<String, String> {
public static final int MAX_KEYS = 30;
public static final int DELAY_SECONDS_PER_KEY = 1;
@Override
public String load(String key) {
sleepSeconds(DELAY_SECONDS_PER_KEY);
return key + "value";
}
@Override
public Map<String, String> loadAll(Collection<String> keys) {
Map<String, String> map = new HashMap<String, String>();
for (String key : keys) {
map.put(key, load(key));
}
return map;
}
@Override
public Set<String> loadAllKeys() {
Set<String> keys = new HashSet<String>();
for (int k = 0; k < MAX_KEYS; k++) { keys.add("key" + k); }
return keys;
}
@Override
public void delete(String key) {
sleepSeconds(DELAY_SECONDS_PER_KEY);
}
@Override
public void deleteAll(Collection<String> keys) {
for (String key : keys) {
delete(key);
}
}
@Override
public void store(String key, String value) {
sleepSeconds(DELAY_SECONDS_PER_KEY);
}
@Override
public void storeAll(Map<String, String> entries) {
for (Map.Entry<String, String> e : entries.entrySet()) {
store(e.getKey(), e.getValue());
}
}
}
private class ClientThread extends Thread {
public volatile int mapSize = 0;
public void run() {
HazelcastInstance client = HazelcastClient.newHazelcastClient();
IMap<String, String> map = client.getMap(ClientMapStoreTest.MAP_NAME);
mapSize = map.size();
}
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapStoreTest.java
|
19 |
Collections.sort(results, new Comparator<DeclarationWithProximity>() {
public int compare(DeclarationWithProximity x, DeclarationWithProximity y) {
if (x.getProximity()<y.getProximity()) return -1;
if (x.getProximity()>y.getProximity()) return 1;
int c = x.getDeclaration().getName().compareTo(y.getDeclaration().getName());
if (c!=0) return c;
return x.getDeclaration().getQualifiedNameString()
.compareTo(y.getDeclaration().getQualifiedNameString());
}
});
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_CompletionUtil.java
|
111 |
@RunWith(HazelcastParallelClassRunner.class)
@Category(QuickTest.class)
public class ClientNearCacheConfigTest {
@Test
public void testSpecificNearCacheConfig_whenAsteriskAtTheEnd(){
final ClientConfig clientConfig = new ClientConfig();
final NearCacheConfig genericNearCacheConfig = new NearCacheConfig();
genericNearCacheConfig.setName("map*");
clientConfig.addNearCacheConfig(genericNearCacheConfig);
final NearCacheConfig specificNearCacheConfig = new NearCacheConfig();
specificNearCacheConfig.setName("mapStudent*");
clientConfig.addNearCacheConfig(specificNearCacheConfig);
final NearCacheConfig mapFoo = clientConfig.getNearCacheConfig("mapFoo");
final NearCacheConfig mapStudentFoo = clientConfig.getNearCacheConfig("mapStudentFoo");
assertEquals(genericNearCacheConfig, mapFoo);
assertEquals(specificNearCacheConfig, mapStudentFoo);
}
@Test
public void testSpecificNearCacheConfig_whenAsteriskAtTheBeginning(){
final ClientConfig clientConfig = new ClientConfig();
final NearCacheConfig genericNearCacheConfig = new NearCacheConfig();
genericNearCacheConfig.setName("*Map");
clientConfig.addNearCacheConfig(genericNearCacheConfig);
final NearCacheConfig specificNearCacheConfig = new NearCacheConfig();
specificNearCacheConfig.setName("*MapStudent");
clientConfig.addNearCacheConfig(specificNearCacheConfig);
final NearCacheConfig mapFoo = clientConfig.getNearCacheConfig("fooMap");
final NearCacheConfig mapStudentFoo = clientConfig.getNearCacheConfig("fooMapStudent");
assertEquals(genericNearCacheConfig, mapFoo);
assertEquals(specificNearCacheConfig, mapStudentFoo);
}
@Test
public void testSpecificNearCacheConfig_whenAsteriskInTheMiddle(){
final ClientConfig clientConfig = new ClientConfig();
final NearCacheConfig genericNearCacheConfig = new NearCacheConfig();
genericNearCacheConfig.setName("map*Bar");
clientConfig.addNearCacheConfig(genericNearCacheConfig);
final NearCacheConfig specificNearCacheConfig = new NearCacheConfig();
specificNearCacheConfig.setName("mapStudent*Bar");
clientConfig.addNearCacheConfig(specificNearCacheConfig);
final NearCacheConfig mapFoo = clientConfig.getNearCacheConfig("mapFooBar");
final NearCacheConfig mapStudentFoo = clientConfig.getNearCacheConfig("mapStudentFooBar");
assertEquals(genericNearCacheConfig, mapFoo);
assertEquals(specificNearCacheConfig, mapStudentFoo);
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_ClientNearCacheConfigTest.java
|
392 |
public class ClusterSearchShardsAction extends ClusterAction<ClusterSearchShardsRequest, ClusterSearchShardsResponse, ClusterSearchShardsRequestBuilder> {
public static final ClusterSearchShardsAction INSTANCE = new ClusterSearchShardsAction();
public static final String NAME = "cluster/shards/search_shards";
private ClusterSearchShardsAction() {
super(NAME);
}
@Override
public ClusterSearchShardsResponse newResponse() {
return new ClusterSearchShardsResponse();
}
@Override
public ClusterSearchShardsRequestBuilder newRequestBuilder(ClusterAdminClient client) {
return new ClusterSearchShardsRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_shards_ClusterSearchShardsAction.java
|
5,399 |
public class ValueCountParser implements Aggregator.Parser {
@Override
public String type() {
return InternalValueCount.TYPE.name();
}
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
ValuesSourceConfig<BytesValuesSource> config = new ValuesSourceConfig<BytesValuesSource>(BytesValuesSource.class);
String field = null;
XContentParser.Token token;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.VALUE_STRING) {
if ("field".equals(currentFieldName)) {
field = parser.text();
} else {
throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
}
} else {
throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
}
}
if (field == null) {
return new ValueCountAggregator.Factory(aggregationName, config);
}
FieldMapper<?> mapper = context.smartNameFieldMapper(field);
if (mapper == null) {
config.unmapped(true);
return new ValueCountAggregator.Factory(aggregationName, config);
}
IndexFieldData<?> indexFieldData = context.fieldData().getForField(mapper);
config.fieldContext(new FieldContext(field, indexFieldData));
return new ValueCountAggregator.Factory(aggregationName, config);
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_metrics_valuecount_ValueCountParser.java
|
26 |
public class GetCommand extends AbstractTextCommand {
final String key;
ByteBuffer value;
ByteBuffer lastOne;
public GetCommand(TextCommandType type, String key) {
super(type);
this.key = key;
}
public GetCommand(String key) {
this(TextCommandType.GET, key);
}
public String getKey() {
return key;
}
public boolean readFrom(ByteBuffer cb) {
return true;
}
public void setValue(MemcacheEntry entry, boolean singleGet) {
if (entry != null) {
value = entry.toNewBuffer();
}
lastOne = (singleGet) ? ByteBuffer.wrap(END) : null;
}
public boolean writeTo(ByteBuffer bb) {
if (value != null) {
IOUtil.copyToHeapBuffer(value, bb);
}
if (lastOne != null) {
IOUtil.copyToHeapBuffer(lastOne, bb);
}
return !((value != null && value.hasRemaining())
|| (lastOne != null && lastOne.hasRemaining()));
}
@Override
public String toString() {
return "GetCommand{"
+ "key='"
+ key
+ ", value="
+ value
+ '\''
+ "} "
+ super.toString();
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_memcache_GetCommand.java
|
307 |
public class ClusterHealthRequestBuilder extends MasterNodeReadOperationRequestBuilder<ClusterHealthRequest, ClusterHealthResponse, ClusterHealthRequestBuilder> {
public ClusterHealthRequestBuilder(ClusterAdminClient clusterClient) {
super((InternalClusterAdminClient) clusterClient, new ClusterHealthRequest());
}
public ClusterHealthRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}
public ClusterHealthRequestBuilder setTimeout(TimeValue timeout) {
request.timeout(timeout);
return this;
}
public ClusterHealthRequestBuilder setTimeout(String timeout) {
request.timeout(timeout);
return this;
}
public ClusterHealthRequestBuilder setWaitForStatus(ClusterHealthStatus waitForStatus) {
request.waitForStatus(waitForStatus);
return this;
}
public ClusterHealthRequestBuilder setWaitForGreenStatus() {
request.waitForGreenStatus();
return this;
}
public ClusterHealthRequestBuilder setWaitForYellowStatus() {
request.waitForYellowStatus();
return this;
}
public ClusterHealthRequestBuilder setWaitForRelocatingShards(int waitForRelocatingShards) {
request.waitForRelocatingShards(waitForRelocatingShards);
return this;
}
public ClusterHealthRequestBuilder setWaitForActiveShards(int waitForActiveShards) {
request.waitForActiveShards(waitForActiveShards);
return this;
}
    /**
     * Waits for the cluster to have the given number of nodes. Use "12" for an exact match, ">12" and "<12" for ranges.
     */
public ClusterHealthRequestBuilder setWaitForNodes(String waitForNodes) {
request.waitForNodes(waitForNodes);
return this;
}
public ClusterHealthRequestBuilder setWaitForEvents(Priority waitForEvents) {
request.waitForEvents(waitForEvents);
return this;
}
@Override
protected void doExecute(ActionListener<ClusterHealthResponse> listener) {
((ClusterAdminClient) client).health(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_health_ClusterHealthRequestBuilder.java
|
1,480 |
public class OSQLFunctionOutE extends OSQLFunctionMove {
public static final String NAME = "outE";
public OSQLFunctionOutE() {
super(NAME, 0, 1);
}
@Override
protected Object move(final OrientBaseGraph graph, final OIdentifiable iRecord, final String[] iLabels) {
return v2e(graph, iRecord, Direction.OUT, iLabels);
}
}
| 1no label
|
graphdb_src_main_java_com_orientechnologies_orient_graph_sql_functions_OSQLFunctionOutE.java
|
1,665 |
@Service("blPersistencePackageFactory")
public class PersistencePackageFactoryImpl implements PersistencePackageFactory {
@Override
public PersistencePackage create(PersistencePackageRequest request) {
PersistencePerspective persistencePerspective = new PersistencePerspective();
persistencePerspective.setAdditionalForeignKeys(request.getAdditionalForeignKeys());
persistencePerspective.setAdditionalNonPersistentProperties(new String[] {});
if (request.getForeignKey() != null) {
persistencePerspective.addPersistencePerspectiveItem(PersistencePerspectiveItemType.FOREIGNKEY,
request.getForeignKey());
}
switch (request.getType()) {
case STANDARD:
persistencePerspective.setOperationTypes(getDefaultOperationTypes());
break;
case ADORNED:
if (request.getAdornedList() == null) {
throw new IllegalArgumentException("ADORNED type requires the adornedList to be set");
}
persistencePerspective.setOperationTypes(getOperationTypes(OperationType.ADORNEDTARGETLIST));
persistencePerspective.addPersistencePerspectiveItem(PersistencePerspectiveItemType.ADORNEDTARGETLIST,
request.getAdornedList());
break;
case MAP:
if (request.getMapStructure() == null) {
throw new IllegalArgumentException("MAP type requires the mapStructure to be set");
}
persistencePerspective.setOperationTypes(getOperationTypes(OperationType.MAP));
persistencePerspective.addPersistencePerspectiveItem(PersistencePerspectiveItemType.MAPSTRUCTURE,
request.getMapStructure());
break;
}
if (request.getOperationTypesOverride() != null) {
persistencePerspective.setOperationTypes(request.getOperationTypesOverride());
}
PersistencePackage pp = new PersistencePackage();
pp.setCeilingEntityFullyQualifiedClassname(request.getCeilingEntityClassname());
pp.setFetchTypeFullyQualifiedClassname(null);
pp.setPersistencePerspective(persistencePerspective);
pp.setCustomCriteria(request.getCustomCriteria());
pp.setCsrfToken(null);
pp.setValidateUnsubmittedProperties(request.isValidateUnsubmittedProperties());
if (request.getEntity() != null) {
pp.setEntity(request.getEntity());
}
for (Map.Entry<String, PersistencePackageRequest> subRequest : request.getSubRequests().entrySet()) {
pp.getSubPackages().put(subRequest.getKey(), create(subRequest.getValue()));
}
return pp;
}
protected OperationTypes getDefaultOperationTypes() {
OperationTypes operationTypes = new OperationTypes();
operationTypes.setFetchType(OperationType.BASIC);
operationTypes.setRemoveType(OperationType.BASIC);
operationTypes.setAddType(OperationType.BASIC);
operationTypes.setUpdateType(OperationType.BASIC);
operationTypes.setInspectType(OperationType.BASIC);
return operationTypes;
}
protected OperationTypes getOperationTypes(OperationType nonInspectOperationType) {
OperationTypes operationTypes = new OperationTypes();
operationTypes.setFetchType(nonInspectOperationType);
operationTypes.setRemoveType(nonInspectOperationType);
operationTypes.setAddType(nonInspectOperationType);
operationTypes.setUpdateType(nonInspectOperationType);
operationTypes.setInspectType(OperationType.BASIC);
return operationTypes;
}
}
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_factory_PersistencePackageFactoryImpl.java
|
490 |
client.getClientExecutionService().executeInternal(new Runnable() {
public void run() {
for (MembershipListener listener : listeners.values()) {
if (event.getEventType() == MembershipEvent.MEMBER_ADDED) {
listener.memberAdded(event);
} else {
listener.memberRemoved(event);
}
}
}
});
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_spi_impl_ClientClusterServiceImpl.java
|
156 |
public class JMSArchivedStructuredContentSubscriber implements MessageListener {
@Resource(name = "blStructuredContentService")
private StructuredContentService structuredContentService;
/*
* (non-Javadoc)
* @see javax.jms.MessageListener#onMessage(javax.jms.Message)
*/
@SuppressWarnings("unchecked")
public void onMessage(Message message) {
String basePageCacheKey = null;
try {
HashMap<String,String> props = (HashMap<String,String>) ((ObjectMessage) message).getObject();
if (props != null) {
structuredContentService.removeItemFromCache(props.get("nameKey"), props.get("typeKey"));
}
} catch (JMSException e) {
throw new RuntimeException(e);
}
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_message_jms_JMSArchivedStructuredContentSubscriber.java
|
130 |
@Entity
@Table(name = "BLC_QUAL_CRIT_SC_XREF")
@Inheritance(strategy=InheritanceType.JOINED)
public class CriteriaStructuredContentXref {
/** The Constant serialVersionUID. */
private static final long serialVersionUID = 1L;
/** The category id. */
@EmbeddedId
CriteriaStructuredContentXrefPK criteriaStructuredContentXrefPK = new CriteriaStructuredContentXrefPK();
public CriteriaStructuredContentXrefPK getCriteriaStructuredContentXrefPK() {
return criteriaStructuredContentXrefPK;
}
public void setCriteriaStructuredContentXrefPK(final CriteriaStructuredContentXrefPK criteriaStructuredContentXrefPK) {
this.criteriaStructuredContentXrefPK = criteriaStructuredContentXrefPK;
}
public static class CriteriaStructuredContentXrefPK implements Serializable {
/** The Constant serialVersionUID. */
private static final long serialVersionUID = 1L;
@ManyToOne(targetEntity = StructuredContentImpl.class, optional=false)
@JoinColumn(name = "SC_ID")
protected StructuredContent structuredContent = new StructuredContentImpl();
@ManyToOne(targetEntity = StructuredContentItemCriteriaImpl.class, optional=false)
@JoinColumn(name = "SC_ITEM_CRITERIA_ID")
protected StructuredContentItemCriteria structuredContentItemCriteria = new StructuredContentItemCriteriaImpl();
public StructuredContent getStructuredContent() {
return structuredContent;
}
public void setStructuredContent(StructuredContent structuredContent) {
this.structuredContent = structuredContent;
}
public StructuredContentItemCriteria getStructuredContentItemCriteria() {
return structuredContentItemCriteria;
}
public void setStructuredContentItemCriteria(StructuredContentItemCriteria structuredContentItemCriteria) {
this.structuredContentItemCriteria = structuredContentItemCriteria;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((structuredContent == null) ? 0 : structuredContent.hashCode());
result = prime * result + ((structuredContentItemCriteria == null) ? 0 : structuredContentItemCriteria.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
CriteriaStructuredContentXrefPK other = (CriteriaStructuredContentXrefPK) obj;
if (structuredContent == null) {
if (other.structuredContent != null)
return false;
} else if (!structuredContent.equals(other.structuredContent))
return false;
if (structuredContentItemCriteria == null) {
if (other.structuredContentItemCriteria != null)
return false;
} else if (!structuredContentItemCriteria.equals(other.structuredContentItemCriteria))
return false;
return true;
}
}
}
| 1no label
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_domain_CriteriaStructuredContentXref.java
|
50 |
final class ProposalComparator
implements Comparator<DeclarationWithProximity> {
private final String prefix;
private final ProducedType type;
ProposalComparator(String prefix, ProducedType type) {
this.prefix = prefix;
this.type = type;
}
public int compare(DeclarationWithProximity x, DeclarationWithProximity y) {
try {
boolean xbt = x.getDeclaration() instanceof NothingType;
boolean ybt = y.getDeclaration() instanceof NothingType;
if (xbt&&ybt) {
return 0;
}
if (xbt&&!ybt) {
return 1;
}
if (ybt&&!xbt) {
return -1;
}
ProducedType xtype = getResultType(x.getDeclaration());
ProducedType ytype = getResultType(y.getDeclaration());
boolean xbottom = xtype!=null && xtype.isNothing();
boolean ybottom = ytype!=null && ytype.isNothing();
if (xbottom && !ybottom) {
return 1;
}
if (ybottom && !xbottom) {
return -1;
}
String xName = x.getName();
String yName = y.getName();
boolean yUpperCase = isUpperCase(yName.charAt(0));
boolean xUpperCase = isUpperCase(xName.charAt(0));
if (!prefix.isEmpty()) {
boolean upperCasePrefix = isUpperCase(prefix.charAt(0));
if (!xUpperCase && yUpperCase) {
return upperCasePrefix ? 1 : -1;
}
else if (xUpperCase && !yUpperCase) {
return upperCasePrefix ? -1 : 1;
}
}
if (type!=null) {
boolean xassigns = xtype!=null && xtype.isSubtypeOf(type);
boolean yassigns = ytype!=null && ytype.isSubtypeOf(type);
if (xassigns && !yassigns) {
return -1;
}
if (yassigns && !xassigns) {
return 1;
}
if (xassigns && yassigns) {
boolean xtd = x.getDeclaration() instanceof TypedDeclaration;
boolean ytd = y.getDeclaration() instanceof TypedDeclaration;
if (xtd && !ytd) {
return -1;
}
if (ytd && !xtd) {
return 1;
}
}
}
if (x.getProximity()!=y.getProximity()) {
return new Integer(x.getProximity()).compareTo(y.getProximity());
}
//if (!prefix.isEmpty() && isLowerCase(prefix.charAt(0))) {
if (!xUpperCase && yUpperCase) {
return -1;
}
else if (xUpperCase && !yUpperCase) {
return 1;
}
int nc = xName.compareTo(yName);
if (nc==0) {
String xqn = x.getDeclaration().getQualifiedNameString();
String yqn = y.getDeclaration().getQualifiedNameString();
return xqn.compareTo(yqn);
}
else {
return nc;
}
}
catch (Exception e) {
e.printStackTrace();
return 0;
}
}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_ProposalComparator.java
|
543 |
public class TransportDeleteMappingAction extends TransportMasterNodeOperationAction<DeleteMappingRequest, DeleteMappingResponse> {
private final MetaDataMappingService metaDataMappingService;
private final TransportFlushAction flushAction;
private final TransportDeleteByQueryAction deleteByQueryAction;
private final TransportRefreshAction refreshAction;
private final DestructiveOperations destructiveOperations;
@Inject
public TransportDeleteMappingAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, MetaDataMappingService metaDataMappingService,
TransportDeleteByQueryAction deleteByQueryAction, TransportRefreshAction refreshAction,
TransportFlushAction flushAction, NodeSettingsService nodeSettingsService) {
super(settings, transportService, clusterService, threadPool);
this.metaDataMappingService = metaDataMappingService;
this.deleteByQueryAction = deleteByQueryAction;
this.refreshAction = refreshAction;
this.flushAction = flushAction;
this.destructiveOperations = new DestructiveOperations(logger, settings, nodeSettingsService);
}
@Override
protected String executor() {
// no need for fork on another thread pool, we go async right away
return ThreadPool.Names.SAME;
}
@Override
protected String transportAction() {
return DeleteMappingAction.NAME;
}
@Override
protected DeleteMappingRequest newRequest() {
return new DeleteMappingRequest();
}
@Override
protected DeleteMappingResponse newResponse() {
return new DeleteMappingResponse();
}
@Override
protected void doExecute(DeleteMappingRequest request, ActionListener<DeleteMappingResponse> listener) {
destructiveOperations.failDestructive(request.indices());
super.doExecute(request, listener);
}
@Override
protected ClusterBlockException checkBlock(DeleteMappingRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, request.indices());
}
@Override
protected void masterOperation(final DeleteMappingRequest request, final ClusterState state, final ActionListener<DeleteMappingResponse> listener) throws ElasticsearchException {
request.indices(state.metaData().concreteIndices(request.indices(), request.indicesOptions()));
flushAction.execute(Requests.flushRequest(request.indices()), new ActionListener<FlushResponse>() {
@Override
public void onResponse(FlushResponse flushResponse) {
// get all types that need to be deleted.
ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> result = clusterService.state().metaData().findMappings(
request.indices(), request.types()
);
                // build a bool filter with a should clause (type filter) per type to account for the different types
BoolFilterBuilder filterBuilder = new BoolFilterBuilder();
Set<String> types = new HashSet<String>();
for (ObjectObjectCursor<String, ImmutableOpenMap<String, MappingMetaData>> typesMeta : result) {
for (ObjectObjectCursor<String, MappingMetaData> type : typesMeta.value) {
filterBuilder.should(new TypeFilterBuilder(type.key));
types.add(type.key);
}
}
if (types.size() == 0) {
throw new TypeMissingException(new Index("_all"), request.types(), "No index has the type.");
}
request.types(types.toArray(new String[types.size()]));
QuerySourceBuilder querySourceBuilder = new QuerySourceBuilder()
.setQuery(QueryBuilders.filteredQuery(QueryBuilders.matchAllQuery(), filterBuilder));
deleteByQueryAction.execute(Requests.deleteByQueryRequest(request.indices()).source(querySourceBuilder), new ActionListener<DeleteByQueryResponse>() {
@Override
public void onResponse(DeleteByQueryResponse deleteByQueryResponse) {
refreshAction.execute(Requests.refreshRequest(request.indices()), new ActionListener<RefreshResponse>() {
@Override
public void onResponse(RefreshResponse refreshResponse) {
removeMapping();
}
@Override
public void onFailure(Throwable e) {
removeMapping();
}
protected void removeMapping() {
DeleteMappingClusterStateUpdateRequest clusterStateUpdateRequest = new DeleteMappingClusterStateUpdateRequest()
.indices(request.indices()).types(request.types())
.ackTimeout(request.timeout())
.masterNodeTimeout(request.masterNodeTimeout());
metaDataMappingService.removeMapping(clusterStateUpdateRequest, new ClusterStateUpdateListener() {
@Override
public void onResponse(ClusterStateUpdateResponse response) {
listener.onResponse(new DeleteMappingResponse(response.isAcknowledged()));
}
@Override
public void onFailure(Throwable t) {
listener.onFailure(t);
}
});
}
});
}
@Override
public void onFailure(Throwable t) {
listener.onFailure(t);
}
});
}
@Override
public void onFailure(Throwable t) {
listener.onFailure(t);
}
});
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_admin_indices_mapping_delete_TransportDeleteMappingAction.java
|
83 |
removeListenerActions.add(new Runnable() {
@Override
public void run() {
clientEngine.getProxyService().removeProxyListener(id);
}
});
| 0true
|
hazelcast_src_main_java_com_hazelcast_client_ClientEndpoint.java
|
1,583 |
public abstract class CollectionMetadata extends FieldMetadata {
private PersistencePerspective persistencePerspective;
private String collectionCeilingEntity;
private boolean mutable = true;
private String[] customCriteria;
public PersistencePerspective getPersistencePerspective() {
return persistencePerspective;
}
public void setPersistencePerspective(PersistencePerspective persistencePerspective) {
this.persistencePerspective = persistencePerspective;
}
public String getCollectionCeilingEntity() {
return collectionCeilingEntity;
}
public void setCollectionCeilingEntity(String collectionCeilingEntity) {
this.collectionCeilingEntity = collectionCeilingEntity;
}
public boolean isMutable() {
return mutable;
}
public void setMutable(boolean mutable) {
this.mutable = mutable;
}
public String[] getCustomCriteria() {
return customCriteria;
}
public void setCustomCriteria(String[] customCriteria) {
this.customCriteria = customCriteria;
}
@Override
protected FieldMetadata populate(FieldMetadata metadata) {
super.populate(metadata);
((CollectionMetadata) metadata).setPersistencePerspective(persistencePerspective.clonePersistencePerspective());
((CollectionMetadata) metadata).setCollectionCeilingEntity(collectionCeilingEntity);
((CollectionMetadata) metadata).setMutable(mutable);
((CollectionMetadata) metadata).setCustomCriteria(customCriteria);
((CollectionMetadata) metadata).setTab(getTab());
((CollectionMetadata) metadata).setTabOrder(getTabOrder());
return metadata;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof CollectionMetadata)) return false;
CollectionMetadata metadata = (CollectionMetadata) o;
if (mutable != metadata.mutable) return false;
if (collectionCeilingEntity != null ? !collectionCeilingEntity.equals(metadata.collectionCeilingEntity) : metadata.collectionCeilingEntity != null)
return false;
if (!Arrays.equals(customCriteria, metadata.customCriteria)) return false;
if (persistencePerspective != null ? !persistencePerspective.equals(metadata.persistencePerspective) : metadata.persistencePerspective != null)
return false;
return true;
}
@Override
public int hashCode() {
int result = persistencePerspective != null ? persistencePerspective.hashCode() : 0;
result = 31 * result + (collectionCeilingEntity != null ? collectionCeilingEntity.hashCode() : 0);
result = 31 * result + (mutable ? 1 : 0);
result = 31 * result + (customCriteria != null ? Arrays.hashCode(customCriteria) : 0);
return result;
}
}
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_dto_CollectionMetadata.java
|
2 |
public abstract class AbstractTextCommandProcessor<T> implements TextCommandProcessor<T>, TextCommandConstants {
protected final TextCommandService textCommandService;
protected AbstractTextCommandProcessor(TextCommandService textCommandService) {
this.textCommandService = textCommandService;
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_AbstractTextCommandProcessor.java
|
41 |
static class BaseIterator<K,V> extends Traverser<K,V> {
final ConcurrentHashMapV8<K,V> map;
Node<K,V> lastReturned;
BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
ConcurrentHashMapV8<K,V> map) {
super(tab, size, index, limit);
this.map = map;
advance();
}
public final boolean hasNext() { return next != null; }
public final boolean hasMoreElements() { return next != null; }
public final void remove() {
Node<K,V> p;
if ((p = lastReturned) == null)
throw new IllegalStateException();
lastReturned = null;
map.replaceNode(p.key, null, null);
}
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
450 |
public class ClusterStatsResponse extends NodesOperationResponse<ClusterStatsNodeResponse> implements ToXContent {
ClusterStatsNodes nodesStats;
ClusterStatsIndices indicesStats;
String clusterUUID;
ClusterHealthStatus status;
long timestamp;
ClusterStatsResponse() {
}
public ClusterStatsResponse(long timestamp, ClusterName clusterName, String clusterUUID, ClusterStatsNodeResponse[] nodes) {
super(clusterName, null);
this.timestamp = timestamp;
this.clusterUUID = clusterUUID;
nodesStats = new ClusterStatsNodes(nodes);
indicesStats = new ClusterStatsIndices(nodes);
for (ClusterStatsNodeResponse response : nodes) {
// only the master node populates the status
if (response.clusterStatus() != null) {
status = response.clusterStatus();
break;
}
}
}
public long getTimestamp() {
return this.timestamp;
}
public ClusterHealthStatus getStatus() {
return this.status;
}
public ClusterStatsNodes getNodesStats() {
return nodesStats;
}
public ClusterStatsIndices getIndicesStats() {
return indicesStats;
}
@Override
public ClusterStatsNodeResponse[] getNodes() {
throw new UnsupportedOperationException();
}
@Override
public Map<String, ClusterStatsNodeResponse> getNodesMap() {
throw new UnsupportedOperationException();
}
@Override
public ClusterStatsNodeResponse getAt(int position) {
throw new UnsupportedOperationException();
}
@Override
public Iterator<ClusterStatsNodeResponse> iterator() {
throw new UnsupportedOperationException();
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
timestamp = in.readVLong();
status = null;
if (in.readBoolean()) {
// it may be that the master switched on us while doing the operation. In this case the status may be null.
status = ClusterHealthStatus.fromValue(in.readByte());
}
clusterUUID = in.readString();
nodesStats = ClusterStatsNodes.readNodeStats(in);
indicesStats = ClusterStatsIndices.readIndicesStats(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVLong(timestamp);
if (status == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeByte(status.value());
}
out.writeString(clusterUUID);
nodesStats.writeTo(out);
indicesStats.writeTo(out);
}
static final class Fields {
static final XContentBuilderString NODES = new XContentBuilderString("nodes");
static final XContentBuilderString INDICES = new XContentBuilderString("indices");
static final XContentBuilderString UUID = new XContentBuilderString("uuid");
static final XContentBuilderString CLUSTER_NAME = new XContentBuilderString("cluster_name");
static final XContentBuilderString STATUS = new XContentBuilderString("status");
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("timestamp", getTimestamp());
builder.field(Fields.CLUSTER_NAME, getClusterName().value());
if (params.paramAsBoolean("output_uuid", false)) {
builder.field(Fields.UUID, clusterUUID);
}
if (status != null) {
builder.field(Fields.STATUS, status.name().toLowerCase(Locale.ROOT));
}
builder.startObject(Fields.INDICES);
indicesStats.toXContent(builder, params);
builder.endObject();
builder.startObject(Fields.NODES);
nodesStats.toXContent(builder, params);
builder.endObject();
return builder;
}
@Override
public String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
builder.startObject();
toXContent(builder, EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (IOException e) {
return "{ \"error\" : \"" + e.getMessage() + "\"}";
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_stats_ClusterStatsResponse.java
|
271 |
public abstract class OCommandProcess<C extends OCommand, T, R> {
protected final C command;
protected T target;
/**
* Create the process defining command and target.
*/
public OCommandProcess(final C iCommand, final T iTarget) {
command = iCommand;
target = iTarget;
}
public abstract R process();
public T getTarget() {
return target;
}
@Override
public String toString() {
return target.toString();
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_command_OCommandProcess.java
|
6,067 |
public class TermSuggestion extends Suggestion<TermSuggestion.Entry> {
public static Comparator<Suggestion.Entry.Option> SCORE = new Score();
public static Comparator<Suggestion.Entry.Option> FREQUENCY = new Frequency();
// Same behaviour as comparators in suggest module, but for SuggestedWord
// Highest score first, then highest freq first, then lowest term first
public static class Score implements Comparator<Suggestion.Entry.Option> {
@Override
public int compare(Suggestion.Entry.Option first, Suggestion.Entry.Option second) {
// first criteria: the distance
int cmp = Float.compare(second.getScore(), first.getScore());
if (cmp != 0) {
return cmp;
}
return FREQUENCY.compare(first, second);
}
}
// Same behaviour as comparators in suggest module, but for SuggestedWord
// Highest freq first, then highest score first, then lowest term first
public static class Frequency implements Comparator<Suggestion.Entry.Option> {
@Override
public int compare(Suggestion.Entry.Option first, Suggestion.Entry.Option second) {
// first criteria: the popularity
int cmp = ((TermSuggestion.Entry.Option) second).getFreq() - ((TermSuggestion.Entry.Option) first).getFreq();
if (cmp != 0) {
return cmp;
}
// second criteria (if first criteria is equal): the distance
cmp = Float.compare(second.getScore(), first.getScore());
if (cmp != 0) {
return cmp;
}
// third criteria: term text
return first.getText().compareTo(second.getText());
}
}
public static final int TYPE = 1;
private Sort sort;
public TermSuggestion() {
}
public TermSuggestion(String name, int size, Sort sort) {
super(name, size);
this.sort = sort;
}
public int getType() {
return TYPE;
}
@Override
protected Comparator<Option> sortComparator() {
switch (sort) {
case SCORE:
return SCORE;
case FREQUENCY:
return FREQUENCY;
default:
throw new ElasticsearchException("Could not resolve comparator for sort key: [" + sort + "]");
}
}
@Override
protected void innerReadFrom(StreamInput in) throws IOException {
super.innerReadFrom(in);
sort = Sort.fromId(in.readByte());
}
@Override
public void innerWriteTo(StreamOutput out) throws IOException {
super.innerWriteTo(out);
out.writeByte(sort.id());
}
protected Entry newEntry() {
return new Entry();
}
/**
* Represents a part from the suggest text with suggested options.
*/
public static class Entry extends
org.elasticsearch.search.suggest.Suggest.Suggestion.Entry<TermSuggestion.Entry.Option> {
Entry(Text text, int offset, int length) {
super(text, offset, length);
}
Entry() {
}
@Override
protected Option newOption() {
return new Option();
}
/**
* Contains the suggested text with its document frequency and score.
*/
public static class Option extends org.elasticsearch.search.suggest.Suggest.Suggestion.Entry.Option {
static class Fields {
static final XContentBuilderString FREQ = new XContentBuilderString("freq");
}
private int freq;
protected Option(Text text, int freq, float score) {
super(text, score);
this.freq = freq;
}
@Override
protected void mergeInto(Suggestion.Entry.Option otherOption) {
super.mergeInto(otherOption);
freq += ((Option) otherOption).freq;
}
protected Option() {
super();
}
public void setFreq(int freq) {
this.freq = freq;
}
/**
* @return How often this suggested text appears in the index.
*/
public int getFreq() {
return freq;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
freq = in.readVInt();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(freq);
}
@Override
protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
builder = super.innerToXContent(builder, params);
builder.field(Fields.FREQ, freq);
return builder;
}
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_suggest_term_TermSuggestion.java
|
753 |
public class CopyFileRefactoringParticipant extends CopyParticipant {
private IFile file;
@Override
protected boolean initialize(Object element) {
file = (IFile) element;
return getProcessor() instanceof CopyProcessor &&
getProjectTypeChecker(file.getProject())!=null &&
file.getFileExtension()!=null &&
file.getFileExtension().equals("ceylon");
}
@Override
public String getName() {
return "Copy file participant for Ceylon source";
}
@Override
public RefactoringStatus checkConditions(IProgressMonitor pm,
CheckConditionsContext context) throws OperationCanceledException {
return new RefactoringStatus();
}
public Change createChange(IProgressMonitor pm) throws CoreException {
try {
IFolder dest = (IFolder) getArguments().getDestination();
final String newName = dest.getProjectRelativePath()
.removeFirstSegments(1).toPortableString()
.replace('/', '.');
IFile newFile = dest.getFile(file.getName());
String relFilePath = file.getProjectRelativePath()
.removeFirstSegments(1).toPortableString();
String relPath = file.getProjectRelativePath()
.removeFirstSegments(1).removeLastSegments(1)
.toPortableString();
final String oldName = relPath.replace('/', '.');
final IProject project = file.getProject();
TypeChecker tc = getProjectTypeChecker(project);
if (tc==null) return null;
PhasedUnit phasedUnit = tc.getPhasedUnitFromRelativePath(relFilePath);
if (phasedUnit==null) return null;
final List<ReplaceEdit> edits = new ArrayList<ReplaceEdit>();
final List<Declaration> declarations = phasedUnit.getDeclarations();
final Map<Declaration,String> imports = new HashMap<Declaration,String>();
phasedUnit.getCompilationUnit().visit(new Visitor() {
@Override
public void visit(ImportMemberOrType that) {
super.visit(that);
visitIt(that.getIdentifier(), that.getDeclarationModel());
}
@Override
public void visit(BaseMemberOrTypeExpression that) {
super.visit(that);
visitIt(that.getIdentifier(), that.getDeclaration());
}
@Override
public void visit(BaseType that) {
super.visit(that);
visitIt(that.getIdentifier(), that.getDeclarationModel());
}
@Override
public void visit(ModuleDescriptor that) {
super.visit(that);
visitIt(that.getImportPath());
}
@Override
public void visit(PackageDescriptor that) {
super.visit(that);
visitIt(that.getImportPath());
}
private void visitIt(Tree.ImportPath importPath) {
if (formatPath(importPath.getIdentifiers()).equals(oldName)) {
edits.add(new ReplaceEdit(importPath.getStartIndex(),
oldName.length(), newName));
}
}
private void visitIt(Tree.Identifier id, Declaration dec) {
if (dec!=null && !declarations.contains(dec)) {
String pn = dec.getUnit().getPackage().getNameAsString();
if (pn.equals(oldName) && !pn.isEmpty() &&
!pn.equals(Module.LANGUAGE_MODULE_NAME)) {
imports.put(dec, id.getText());
}
}
}
});
try {
TextFileChange change = new TextFileChange(file.getName(), newFile);
Tree.CompilationUnit cu = phasedUnit.getCompilationUnit();
change.setEdit(new MultiTextEdit());
for (ReplaceEdit edit: edits) {
change.addEdit(edit);
}
if (!imports.isEmpty()) {
List<InsertEdit> list = importEdits(cu,
imports.keySet(), imports.values(),
null, EditorUtil.getDocument(change));
for (TextEdit edit: list) {
change.addEdit(edit);
}
}
Tree.Import toDelete = findImportNode(cu, newName);
if (toDelete!=null) {
change.addEdit(new DeleteEdit(toDelete.getStartIndex(),
toDelete.getStopIndex()-toDelete.getStartIndex()+1));
}
if (change.getEdit().hasChildren()) {
return change;
}
}
catch (Exception e) {
e.printStackTrace();
}
return null;
}
catch (Exception e) {
e.printStackTrace();
return null;
}
}
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_refactor_CopyFileRefactoringParticipant.java
|
214 |
interface SocketChannelWrapperFactory {
SocketChannelWrapper wrapSocketChannel(SocketChannel socketChannel, boolean client) throws Exception;
}
| 0true
|
hazelcast-client_src_main_java_com_hazelcast_client_connection_nio_ClientConnectionManagerImpl.java
|
752 |
public class MultiGetItemResponse implements Streamable {
private GetResponse response;
private MultiGetResponse.Failure failure;
MultiGetItemResponse() {
}
public MultiGetItemResponse(GetResponse response, MultiGetResponse.Failure failure) {
this.response = response;
this.failure = failure;
}
/**
* The index name of the document.
*/
public String getIndex() {
if (failure != null) {
return failure.getIndex();
}
return response.getIndex();
}
/**
* The type of the document.
*/
public String getType() {
if (failure != null) {
return failure.getType();
}
return response.getType();
}
/**
* The id of the document.
*/
public String getId() {
if (failure != null) {
return failure.getId();
}
return response.getId();
}
/**
* Is this a failed execution?
*/
public boolean isFailed() {
return failure != null;
}
/**
     * The actual get response, <tt>null</tt> if it's a failure.
*/
public GetResponse getResponse() {
return this.response;
}
/**
* The failure if relevant.
*/
public MultiGetResponse.Failure getFailure() {
return this.failure;
}
public static MultiGetItemResponse readItemResponse(StreamInput in) throws IOException {
MultiGetItemResponse response = new MultiGetItemResponse();
response.readFrom(in);
return response;
}
@Override
public void readFrom(StreamInput in) throws IOException {
if (in.readBoolean()) {
failure = MultiGetResponse.Failure.readFailure(in);
} else {
response = new GetResponse();
response.readFrom(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
if (failure != null) {
out.writeBoolean(true);
failure.writeTo(out);
} else {
out.writeBoolean(false);
response.writeTo(out);
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_get_MultiGetItemResponse.java
|
258 |
public final class StoreUtils {
private StoreUtils() {
}
public static String toString(Directory directory) {
if (directory instanceof NIOFSDirectory) {
NIOFSDirectory niofsDirectory = (NIOFSDirectory)directory;
return "niofs(" + niofsDirectory.getDirectory() + ")";
}
if (directory instanceof MMapDirectory) {
MMapDirectory mMapDirectory = (MMapDirectory)directory;
return "mmapfs(" + mMapDirectory.getDirectory() + ")";
}
if (directory instanceof SimpleFSDirectory) {
SimpleFSDirectory simpleFSDirectory = (SimpleFSDirectory)directory;
return "simplefs(" + simpleFSDirectory.getDirectory() + ")";
}
return directory.toString();
}
}
| 0true
|
src_main_java_org_apache_lucene_store_StoreUtils.java
|
1,175 |
public class OQueryOperatorContainsKey extends OQueryOperatorEqualityNotNulls {
public OQueryOperatorContainsKey() {
super("CONTAINSKEY", 5, false);
}
@Override
@SuppressWarnings("unchecked")
protected boolean evaluateExpression(final OIdentifiable iRecord, final OSQLFilterCondition iCondition, final Object iLeft,
final Object iRight, OCommandContext iContext) {
if (iLeft instanceof Map<?, ?>) {
final Map<String, ?> map = (Map<String, ?>) iLeft;
return map.containsKey(iRight);
} else if (iRight instanceof Map<?, ?>) {
final Map<String, ?> map = (Map<String, ?>) iRight;
return map.containsKey(iLeft);
}
return false;
}
@Override
public OIndexReuseType getIndexReuseType(final Object iLeft, final Object iRight) {
return OIndexReuseType.INDEX_METHOD;
}
@Override
public Object executeIndexQuery(OCommandContext iContext, OIndex<?> index, INDEX_OPERATION_TYPE iOperationType,
List<Object> keyParams, IndexResultListener resultListener, int fetchLimit) {
final OIndexDefinition indexDefinition = index.getDefinition();
final OIndexInternal<?> internalIndex = index.getInternal();
if (!internalIndex.canBeUsedInEqualityOperators())
return null;
final Object result;
if (indexDefinition.getParamCount() == 1) {
if (!((indexDefinition instanceof OPropertyMapIndexDefinition) && ((OPropertyMapIndexDefinition) indexDefinition)
.getIndexBy() == OPropertyMapIndexDefinition.INDEX_BY.KEY))
return null;
final Object key = ((OIndexDefinitionMultiValue) indexDefinition).createSingleValue(keyParams.get(0));
if (key == null)
return null;
final Object indexResult = index.get(key);
result = convertIndexResult(indexResult);
} else {
      // In case of composite keys, several items can be returned if we perform a search
      // using only part of the composite key stored in the index.
final OCompositeIndexDefinition compositeIndexDefinition = (OCompositeIndexDefinition) indexDefinition;
if (!((compositeIndexDefinition.getMultiValueDefinition() instanceof OPropertyMapIndexDefinition) && ((OPropertyMapIndexDefinition) compositeIndexDefinition
.getMultiValueDefinition()).getIndexBy() == OPropertyMapIndexDefinition.INDEX_BY.KEY))
return null;
final Object keyOne = compositeIndexDefinition.createSingleValue(keyParams);
if (keyOne == null)
return null;
if (internalIndex.hasRangeQuerySupport()) {
final Object keyTwo = compositeIndexDefinition.createSingleValue(keyParams);
if (resultListener != null) {
index.getValuesBetween(keyOne, true, keyTwo, true, resultListener);
result = resultListener.getResult();
} else
result = index.getValuesBetween(keyOne, true, keyTwo, true);
} else {
if (indexDefinition.getParamCount() == keyParams.size()) {
final Object indexResult = index.get(keyOne);
result = convertIndexResult(indexResult);
} else
return null;
}
}
updateProfiler(iContext, index, keyParams, indexDefinition);
return result;
}
private Object convertIndexResult(Object indexResult) {
Object result;
if (indexResult instanceof Collection)
result = indexResult;
else if (indexResult == null)
result = Collections.emptyList();
else
result = Collections.singletonList((OIdentifiable) indexResult);
return result;
}
@Override
public ORID getBeginRidRange(Object iLeft, Object iRight) {
return null;
}
@Override
public ORID getEndRidRange(Object iLeft, Object iRight) {
return null;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_operator_OQueryOperatorContainsKey.java
|
345 |
static class MapTryLockTimeOutThread extends TestHelper {
public MapTryLockTimeOutThread(IMap map, String upKey, String downKey){
super(map, upKey, downKey);
}
public void doRun() throws Exception{
if(map.tryLock(upKey, 1, TimeUnit.MILLISECONDS)){
try{
if(map.tryLock(downKey, 1, TimeUnit.MILLISECONDS )){
try {
work();
}finally {
map.unlock(downKey);
}
}
}finally {
map.unlock(upKey);
}
}
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTryLockConcurrentTests.java
|
43 |
public class StatsCommandProcessor extends MemcacheCommandProcessor<StatsCommand> {
public StatsCommandProcessor(TextCommandService textCommandService) {
super(textCommandService);
}
public void handle(StatsCommand command) {
Stats stats = textCommandService.getStats();
command.setResponse(stats);
textCommandService.sendResponse(command);
}
public void handleRejection(StatsCommand command) {
handle(command);
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_memcache_StatsCommandProcessor.java
|
1,082 |
public class OSQLFilterItemField extends OSQLFilterItemAbstract {
protected Set<String> preLoadedFields;
protected String[] preLoadedFieldsArray;
protected String name;
protected OCollate collate;
public OSQLFilterItemField(final OBaseParser iQueryToParse, final String iName) {
super(iQueryToParse, iName);
}
public Object getValue(final OIdentifiable iRecord, OCommandContext iContext) {
if (iRecord == null)
throw new OCommandExecutionException("expression item '" + name + "' cannot be resolved");
final ODocument doc = (ODocument) iRecord.getRecord();
if (preLoadedFieldsArray == null && preLoadedFields != null && preLoadedFields.size() > 0 && preLoadedFields.size() < 5) {
      // TRANSFORM THE SET INTO AN ARRAY ONLY THE FIRST TIME, AND ONLY IF THERE IS MORE THAN ONE FIELD; OTHERWISE GO WITH THE DEFAULT BEHAVIOR
preLoadedFieldsArray = new String[preLoadedFields.size()];
preLoadedFields.toArray(preLoadedFieldsArray);
}
// UNMARSHALL THE SINGLE FIELD
if (doc.deserializeFields(preLoadedFieldsArray)) {
// FIELD FOUND
Object v = ODocumentHelper.getFieldValue(doc, name);
collate = getCollateForField(doc, name);
return transformValue(iRecord, iContext, v);
}
return null;
}
public String getRoot() {
return name;
}
public void setRoot(final OBaseParser iQueryToParse, final String iRoot) {
this.name = iRoot;
}
/**
   * Checks whether this filter item is a chain of fields (e.g. "field1.field2.field3"). Returns true if the filter item contains
   * only field projection operators; if it contains any other projection operator the method returns false. A filter item
   * without any chain operator is also a field chain, consisting of a single field.
   *
   * @return whether or not this filter item can be represented as a chain of fields.
*/
public boolean isFieldChain() {
if (operationsChain == null) {
return true;
}
for (OPair<OSQLMethod, Object[]> pair : operationsChain) {
if (!pair.getKey().getName().equals(OSQLMethodField.NAME)) {
return false;
}
}
return true;
}
/**
* Creates {@code FieldChain} in case when filter item can have such representation.
*
* @return {@code FieldChain} representation of this filter item.
* @throws IllegalStateException
   *           if this filter item cannot be represented as a {@code FieldChain}.
*/
public FieldChain getFieldChain() {
if (!isFieldChain()) {
throw new IllegalStateException("Filter item field contains not only field operators");
}
return new FieldChain();
}
/**
   * Represents the filter item as a chain of fields. Provides an interface to work with this chain as a sequence of field names.
*/
public class FieldChain {
private FieldChain() {
}
public String getItemName(int fieldIndex) {
if (fieldIndex == 0) {
return name;
} else {
return operationsChain.get(fieldIndex - 1).getValue()[0].toString();
}
}
public int getItemCount() {
if (operationsChain == null) {
return 1;
} else {
return operationsChain.size() + 1;
}
}
/**
     * A field chain is considered long if it contains more than one item.
     *
     * @return true if this chain is long, false otherwise.
*/
public boolean isLong() {
return operationsChain != null && operationsChain.size() > 0;
}
}
public void setPreLoadedFields(final Set<String> iPrefetchedFieldList) {
this.preLoadedFields = iPrefetchedFieldList;
}
public OCollate getCollate() {
return collate;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_filter_OSQLFilterItemField.java
|
757 |
public static class Failure implements Streamable {
private String index;
private String type;
private String id;
private String message;
Failure() {
}
public Failure(String index, String type, String id, String message) {
this.index = index;
this.type = type;
this.id = id;
this.message = message;
}
/**
* The index name of the action.
*/
public String getIndex() {
return this.index;
}
/**
* The type of the action.
*/
public String getType() {
return type;
}
/**
* The id of the action.
*/
public String getId() {
return id;
}
/**
* The failure message.
*/
public String getMessage() {
return this.message;
}
public static Failure readFailure(StreamInput in) throws IOException {
Failure failure = new Failure();
failure.readFrom(in);
return failure;
}
@Override
public void readFrom(StreamInput in) throws IOException {
index = in.readString();
type = in.readOptionalString();
id = in.readString();
message = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(index);
out.writeOptionalString(type);
out.writeString(id);
out.writeString(message);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_get_MultiGetResponse.java
|
935 |
public class OfferItemRestrictionRuleType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, OfferItemRestrictionRuleType> TYPES = new LinkedHashMap<String, OfferItemRestrictionRuleType>();
public static final OfferItemRestrictionRuleType NONE = new OfferItemRestrictionRuleType("NONE", "None");
public static final OfferItemRestrictionRuleType QUALIFIER = new OfferItemRestrictionRuleType("QUALIFIER", "Qualifier Only");
public static final OfferItemRestrictionRuleType TARGET = new OfferItemRestrictionRuleType("TARGET", "Target Only");
public static final OfferItemRestrictionRuleType QUALIFIER_TARGET = new OfferItemRestrictionRuleType("QUALIFIER_TARGET", "Qualifier And Target");
public static OfferItemRestrictionRuleType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public OfferItemRestrictionRuleType() {
//do nothing
}
public OfferItemRestrictionRuleType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
OfferItemRestrictionRuleType other = (OfferItemRestrictionRuleType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_service_type_OfferItemRestrictionRuleType.java
|
82 |
LESS_THAN_EQUAL {
@Override
public boolean isValidValueType(Class<?> clazz) {
Preconditions.checkNotNull(clazz);
return Comparable.class.isAssignableFrom(clazz);
}
@Override
public boolean isValidCondition(Object condition) {
return condition!=null && condition instanceof Comparable;
}
@Override
public boolean evaluate(Object value, Object condition) {
Integer cmp = AttributeUtil.compare(value,condition);
return cmp!=null?cmp<=0:false;
}
@Override
public String toString() {
return "<=";
}
@Override
public TitanPredicate negate() {
return GREATER_THAN;
}
},
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_attribute_Cmp.java
|
1,570 |
public abstract class Decision {
public static final Decision ALWAYS = new Single(Type.YES);
public static final Decision YES = new Single(Type.YES);
public static final Decision NO = new Single(Type.NO);
public static final Decision THROTTLE = new Single(Type.THROTTLE);
/**
* Creates a simple decision
* @param type {@link Type} of the decision
* @param explanation explanation of the decision
* @param explanationParams additional parameters for the decision
* @return new {@link Decision} instance
*/
public static Decision single(Type type, String explanation, Object... explanationParams) {
return new Single(type, explanation, explanationParams);
}
/**
* This enumeration defines the
* possible types of decisions
*/
public static enum Type {
YES,
NO,
THROTTLE
}
/**
* Get the {@link Type} of this decision
* @return {@link Type} of this decision
*/
public abstract Type type();
/**
* Simple class representing a single decision
*/
public static class Single extends Decision {
private final Type type;
private final String explanation;
private final Object[] explanationParams;
/**
* Creates a new {@link Single} decision of a given type
* @param type {@link Type} of the decision
*/
public Single(Type type) {
this(type, null, (Object[]) null);
}
/**
* Creates a new {@link Single} decision of a given type
*
* @param type {@link Type} of the decision
* @param explanation An explanation of this {@link Decision}
* @param explanationParams A set of additional parameters
*/
public Single(Type type, String explanation, Object... explanationParams) {
this.type = type;
this.explanation = explanation;
this.explanationParams = explanationParams;
}
@Override
public Type type() {
return this.type;
}
@Override
public String toString() {
if (explanation == null) {
return type + "()";
}
return type + "(" + String.format(Locale.ROOT, explanation, explanationParams) + ")";
}
}
/**
* Simple class representing a list of decisions
*/
public static class Multi extends Decision {
private final List<Decision> decisions = Lists.newArrayList();
/**
         * Add a decision to this {@link Multi}decision instance
* @param decision {@link Decision} to add
* @return {@link Multi}decision instance with the given decision added
*/
public Multi add(Decision decision) {
decisions.add(decision);
return this;
}
@Override
public Type type() {
Type ret = Type.YES;
for (int i = 0; i < decisions.size(); i++) {
Type type = decisions.get(i).type();
if (type == Type.NO) {
return type;
} else if (type == Type.THROTTLE) {
ret = type;
}
}
return ret;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
for (Decision decision : decisions) {
sb.append("[").append(decision.toString()).append("]");
}
return sb.toString();
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_cluster_routing_allocation_decider_Decision.java
|
1,096 |
public class BundleOrderItemRequest {
protected String name;
protected Category category;
protected int quantity;
protected Order order;
protected List<DiscreteOrderItemRequest> discreteOrderItems = new ArrayList<DiscreteOrderItemRequest>();
protected List<BundleOrderItemFeePrice> bundleOrderItemFeePrices = new ArrayList<BundleOrderItemFeePrice>();
protected Money salePriceOverride;
protected Money retailPriceOverride;
public Order getOrder() {
return order;
}
public void setOrder(Order order) {
this.order = order;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Category getCategory() {
return category;
}
public void setCategory(Category category) {
this.category = category;
}
public int getQuantity() {
return quantity;
}
public void setQuantity(int quantity) {
this.quantity = quantity;
}
public List<DiscreteOrderItemRequest> getDiscreteOrderItems() {
return discreteOrderItems;
}
public void setDiscreteOrderItems(List<DiscreteOrderItemRequest> discreteOrderItems) {
this.discreteOrderItems = discreteOrderItems;
}
public List<BundleOrderItemFeePrice> getBundleOrderItemFeePrices() {
return bundleOrderItemFeePrices;
}
public void setBundleOrderItemFeePrices(
List<BundleOrderItemFeePrice> bundleOrderItemFeePrices) {
this.bundleOrderItemFeePrices = bundleOrderItemFeePrices;
}
public Money getSalePriceOverride() {
return salePriceOverride;
}
public void setSalePriceOverride(Money salePriceOverride) {
this.salePriceOverride = salePriceOverride;
}
public Money getRetailPriceOverride() {
return retailPriceOverride;
}
public void setRetailPriceOverride(Money retailPriceOverride) {
this.retailPriceOverride = retailPriceOverride;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
BundleOrderItemRequest other = (BundleOrderItemRequest) obj;
if (category == null) {
if (other.category != null)
return false;
} else if (!category.equals(other.category))
return false;
if (discreteOrderItems == null) {
if (other.discreteOrderItems != null)
return false;
} else if (!discreteOrderItems.equals(other.discreteOrderItems))
return false;
if (name == null) {
if (other.name != null)
return false;
} else if (!name.equals(other.name))
return false;
if (quantity != other.quantity)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((category == null) ? 0 : category.hashCode());
result = prime * result + ((discreteOrderItems == null) ? 0 : discreteOrderItems.hashCode());
result = prime * result + ((name == null) ? 0 : name.hashCode());
result = prime * result + quantity;
return result;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_service_call_BundleOrderItemRequest.java
|
610 |
public class UpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder<UpdateSettingsRequest, UpdateSettingsResponse, UpdateSettingsRequestBuilder> {
public UpdateSettingsRequestBuilder(IndicesAdminClient indicesClient, String... indices) {
super((InternalIndicesAdminClient) indicesClient, new UpdateSettingsRequest(indices));
}
/**
* Sets the indices the update settings will execute on
*/
public UpdateSettingsRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}
/**
     * Specifies what types of requested indices to ignore and how to handle wildcard indices expressions.
     *
     * For example, indices that don't exist.
*/
public UpdateSettingsRequestBuilder setIndicesOptions(IndicesOptions options) {
request.indicesOptions(options);
return this;
}
/**
* Sets the settings to be updated
*/
public UpdateSettingsRequestBuilder setSettings(Settings settings) {
request.settings(settings);
return this;
}
/**
* Sets the settings to be updated
*/
public UpdateSettingsRequestBuilder setSettings(Settings.Builder settings) {
request.settings(settings);
return this;
}
/**
* Sets the settings to be updated (either json/yaml/properties format)
*/
public UpdateSettingsRequestBuilder setSettings(String source) {
request.settings(source);
return this;
}
/**
* Sets the settings to be updated (either json/yaml/properties format)
*/
public UpdateSettingsRequestBuilder setSettings(Map<String, Object> source) {
request.settings(source);
return this;
}
@Override
protected void doExecute(ActionListener<UpdateSettingsResponse> listener) {
((IndicesAdminClient) client).updateSettings(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_settings_put_UpdateSettingsRequestBuilder.java
|
1,464 |
public class BroadleafLoginController extends BroadleafAbstractController {
@Resource(name="blCustomerService")
protected CustomerService customerService;
@Resource(name="blResetPasswordValidator")
protected ResetPasswordValidator resetPasswordValidator;
@Resource(name="blLoginService")
protected LoginService loginService;
protected static String loginView = "authentication/login";
protected static String forgotPasswordView = "authentication/forgotPassword";
protected static String forgotUsernameView = "authentication/forgotUsername";
protected static String forgotPasswordSuccessView = "authentication/forgotPasswordSuccess";
protected static String resetPasswordView = "authentication/resetPassword";
protected static String resetPasswordErrorView = "authentication/resetPasswordError";
protected static String resetPasswordSuccessView = "redirect:/";
protected static String resetPasswordFormView = "authentication/resetPasswordForm";
/**
* Redirects to the login view.
*
* @param request
* @param response
* @param model
* @return the return view
*/
public String login(HttpServletRequest request, HttpServletResponse response, Model model) {
if (StringUtils.isNotBlank(request.getParameter("successUrl"))) {
model.addAttribute("successUrl", request.getParameter("successUrl"));
}
return getLoginView();
}
/**
     * Redirects to the forgot password view.
*
* @param request
* @param response
* @param model
* @return the return view
*/
public String forgotPassword(HttpServletRequest request, HttpServletResponse response, Model model) {
return getForgotPasswordView();
}
/**
* Looks up the passed in username and sends an email to the address on file with a
* reset password token.
*
* Returns error codes for invalid username.
*
* @param username
* @param request
* @param model
* @return the return view
*/
public String processForgotPassword(String username, HttpServletRequest request, Model model) {
GenericResponse errorResponse = customerService.sendForgotPasswordNotification(username, getResetPasswordUrl(request));
if (errorResponse.getHasErrors()) {
String errorCode = errorResponse.getErrorCodesList().get(0);
model.addAttribute("errorCode", errorCode);
return getForgotPasswordView();
} else {
request.getSession(true).setAttribute("forgot_password_username", username);
return getForgotPasswordSuccessView();
}
}
/**
* Returns the forgot username view.
*
* @param request
* @param response
* @param model
* @return the return view
*/
public String forgotUsername(HttpServletRequest request, HttpServletResponse response, Model model) {
return getForgotUsernameView();
}
/**
* Looks up an account by email address and if found, sends an email with the
* associated username.
*
* @param email
* @param request
* @param response
* @param model
* @return the return view
*/
public String processForgotUsername(String email, HttpServletRequest request, HttpServletResponse response, Model model) {
GenericResponse errorResponse = customerService.sendForgotUsernameNotification(email);
if (errorResponse.getHasErrors()) {
String errorCode = errorResponse.getErrorCodesList().get(0);
request.setAttribute("errorCode", errorCode);
return getForgotUsernameView();
} else {
return buildRedirectToLoginWithMessage("usernameSent");
}
}
/**
* Displays the reset password view. Expects a valid resetPasswordToken to exist
* that was generated by {@link processForgotPassword} or similar. Returns an error
* view if the token is invalid or expired.
*
* @param request
* @param response
* @param model
* @return the return view
*/
public String resetPassword(HttpServletRequest request, HttpServletResponse response, Model model) {
ResetPasswordForm resetPasswordForm = initResetPasswordForm(request);
model.addAttribute("resetPasswordForm", resetPasswordForm);
GenericResponse errorResponse = customerService.checkPasswordResetToken(resetPasswordForm.getToken());
if (errorResponse.getHasErrors()) {
String errorCode = errorResponse.getErrorCodesList().get(0);
request.setAttribute("errorCode", errorCode);
return getResetPasswordErrorView();
} else {
return getResetPasswordView();
}
}
/**
* Processes the reset password token and allows the user to change their password.
* Ensures that the password and confirm password match, that the token is valid,
* and that the token matches the provided email address.
*
* @param resetPasswordForm
* @param request
* @param response
* @param model
* @param errors
* @return the return view
* @throws ServiceException
*/
public String processResetPassword(ResetPasswordForm resetPasswordForm, HttpServletRequest request, HttpServletResponse response, Model model, BindingResult errors) throws ServiceException {
GenericResponse errorResponse = new GenericResponse();
resetPasswordValidator.validate(resetPasswordForm.getUsername(), resetPasswordForm.getPassword(), resetPasswordForm.getPasswordConfirm(), errors);
if (errorResponse.getHasErrors()) {
return getResetPasswordView();
}
errorResponse = customerService.resetPasswordUsingToken(
resetPasswordForm.getUsername(),
resetPasswordForm.getToken(),
resetPasswordForm.getPassword(),
resetPasswordForm.getPasswordConfirm());
if (errorResponse.getHasErrors()) {
String errorCode = errorResponse.getErrorCodesList().get(0);
request.setAttribute("errorCode", errorCode);
return getResetPasswordView();
} else {
// The reset password was successful, so log this customer in.
loginService.loginCustomer(resetPasswordForm.getUsername(), resetPasswordForm.getPassword());
return getResetPasswordSuccessView();
}
}
/**
* By default, redirects to the login page with a message.
*
* @param message
* @return the return view
*/
protected String buildRedirectToLoginWithMessage(String message) {
StringBuffer url = new StringBuffer("redirect:").append(getLoginView()).append("?messageCode=").append(message);
return url.toString();
}
/**
     * Initializes the reset password form by ensuring that the passed in token URL
     * parameter initializes the hidden form field.
     *
     * Also, if the reset password request is in the same session as the
     * forgotPassword request, the username will auto-populate.
*
* @param request
     * @return the initialized reset password form
*/
public ResetPasswordForm initResetPasswordForm(HttpServletRequest request) {
ResetPasswordForm resetPasswordForm = new ResetPasswordForm();
String username = (String) request.getSession(true).getAttribute("forgot_password_username");
String token = request.getParameter("token");
resetPasswordForm.setToken(token);
resetPasswordForm.setUsername(username);
return resetPasswordForm;
}
/**
* @return the view representing the login page.
*/
public String getLoginView() {
return loginView;
}
/**
* @return the view displayed for the forgot username form.
*/
public String getForgotUsernameView() {
return forgotUsernameView;
}
/**
* @return the view displayed for the forgot password form.
*/
public String getForgotPasswordView() {
return forgotPasswordView;
}
/**
* @return the view displayed for the reset password form.
*/
public String getResetPasswordView() {
return resetPasswordView;
}
/**
* @return the view returned after a successful forgotPassword email has been sent.
*/
public String getForgotPasswordSuccessView() {
return forgotPasswordSuccessView;
}
/**
     * @return the view name to use for the reset password form.
*/
public String getResetPasswordFormView() {
return resetPasswordFormView;
}
public String getResetPasswordScheme(HttpServletRequest request) {
return request.getScheme();
}
public String getResetPasswordPort(HttpServletRequest request, String scheme) {
if ("http".equalsIgnoreCase(scheme) && request.getServerPort() != 80) {
return ":" + request.getServerPort();
} else if ("https".equalsIgnoreCase(scheme) && request.getServerPort() != 443) {
return ":" + request.getServerPort();
}
return ""; // no port required
}
public String getResetPasswordUrl(HttpServletRequest request) {
String url = request.getScheme() + "://" + request.getServerName() + getResetPasswordPort(request, request.getScheme() + "/");
if (request.getContextPath() != null && ! "".equals(request.getContextPath())) {
url = url + request.getContextPath() + getResetPasswordView();
} else {
url = url + getResetPasswordView();
}
return url;
}
/**
     * View the user is directed to if they try to access the resetPasswordForm with an
* invalid token.
*
* @return the error view
*/
public String getResetPasswordErrorView() {
return resetPasswordErrorView;
}
/**
     * View that a user is sent to after a successful reset password operation.
     * Should be a redirect (e.g. start with "redirect:") since
     * this will cause the entire SpringSecurity pipeline to be fulfilled.
*/
public String getResetPasswordSuccessView() {
return resetPasswordSuccessView;
}
}
| 1no label
|
core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_controller_account_BroadleafLoginController.java
|
563 |
public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> {
private static ObjectOpenHashSet<String> RESERVED_FIELDS = ObjectOpenHashSet.from(
"_uid", "_id", "_type", "_source", "_all", "_analyzer", "_boost", "_parent", "_routing", "_index",
"_size", "_timestamp", "_ttl"
);
private String[] indices;
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, true);
private String type;
private String source;
private boolean ignoreConflicts = false;
PutMappingRequest() {
}
/**
* Constructs a new put mapping request against one or more indices. If nothing is set then
* it will be executed against all indices.
*/
public PutMappingRequest(String... indices) {
this.indices = indices;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (type == null) {
validationException = addValidationError("mapping type is missing", validationException);
}
if (source == null) {
validationException = addValidationError("mapping source is missing", validationException);
}
return validationException;
}
/**
* Sets the indices this put mapping operation will execute on.
*/
public PutMappingRequest indices(String[] indices) {
this.indices = indices;
return this;
}
/**
     * The indices the mapping will be put on.
*/
public String[] indices() {
return indices;
}
public IndicesOptions indicesOptions() {
return indicesOptions;
}
public PutMappingRequest indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
/**
* The mapping type.
*/
public String type() {
return type;
}
/**
* The type of the mappings.
*/
public PutMappingRequest type(String type) {
this.type = type;
return this;
}
/**
* The mapping source definition.
*/
public String source() {
return source;
}
/**
* A specialized simplified mapping source method, takes the form of simple properties definition:
* ("field1", "type=string,store=true").
*
     * Also supports metadata mapping fields such as `_all` and `_parent` as property definitions; these metadata
     * mapping fields will automatically be put on the top level mapping object.
*/
public PutMappingRequest source(Object... source) {
return source(buildFromSimplifiedDef(type, source));
}
public static XContentBuilder buildFromSimplifiedDef(String type, Object... source) {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.startObject();
if (type != null) {
builder.startObject(type);
}
for (int i = 0; i < source.length; i++) {
String fieldName = source[i++].toString();
if (RESERVED_FIELDS.contains(fieldName)) {
builder.startObject(fieldName);
String[] s1 = Strings.splitStringByCommaToArray(source[i].toString());
for (String s : s1) {
String[] s2 = Strings.split(s, "=");
if (s2.length != 2) {
throw new ElasticsearchIllegalArgumentException("malformed " + s);
}
builder.field(s2[0], s2[1]);
}
builder.endObject();
}
}
builder.startObject("properties");
for (int i = 0; i < source.length; i++) {
String fieldName = source[i++].toString();
if (RESERVED_FIELDS.contains(fieldName)) {
continue;
}
builder.startObject(fieldName);
String[] s1 = Strings.splitStringByCommaToArray(source[i].toString());
for (String s : s1) {
String[] s2 = Strings.split(s, "=");
if (s2.length != 2) {
throw new ElasticsearchIllegalArgumentException("malformed " + s);
}
builder.field(s2[0], s2[1]);
}
builder.endObject();
}
builder.endObject();
if (type != null) {
builder.endObject();
}
builder.endObject();
return builder;
} catch (Exception e) {
throw new ElasticsearchIllegalArgumentException("failed to generate simplified mapping definition", e);
}
}
/**
* The mapping source definition.
*/
public PutMappingRequest source(XContentBuilder mappingBuilder) {
try {
return source(mappingBuilder.string());
} catch (IOException e) {
throw new ElasticsearchIllegalArgumentException("Failed to build json for mapping request", e);
}
}
/**
* The mapping source definition.
*/
@SuppressWarnings("unchecked")
public PutMappingRequest source(Map mappingSource) {
try {
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
builder.map(mappingSource);
return source(builder.string());
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to generate [" + mappingSource + "]", e);
}
}
/**
* The mapping source definition.
*/
public PutMappingRequest source(String mappingSource) {
this.source = mappingSource;
return this;
}
/**
     * If there is already a mapping definition registered against the type, then it will be merged. If elements
     * that can't be merged are detected, the request will be rejected unless
     * {@link #ignoreConflicts(boolean)} is set. In such a case, the duplicate mappings will be rejected.
*/
public boolean ignoreConflicts() {
return ignoreConflicts;
}
/**
     * If there is already a mapping definition registered against the type, then it will be merged. If elements
     * that can't be merged are detected, the request will be rejected unless
     * {@link #ignoreConflicts(boolean)} is set. In such a case, the duplicate mappings will be rejected.
*/
public PutMappingRequest ignoreConflicts(boolean ignoreDuplicates) {
this.ignoreConflicts = ignoreDuplicates;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
type = in.readOptionalString();
source = in.readString();
readTimeout(in);
ignoreConflicts = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArrayNullable(indices);
indicesOptions.writeIndicesOptions(out);
out.writeOptionalString(type);
out.writeString(source);
writeTimeout(out);
out.writeBoolean(ignoreConflicts);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_mapping_put_PutMappingRequest.java
|
1,633 |
public static final Validator BOOLEAN = new Validator() {
@Override
public String validate(String setting, String value) {
if (value != null && (Booleans.isExplicitFalse(value) || Booleans.isExplicitTrue(value))) {
return null;
}
return "cannot parse value [" + value + "] as a boolean";
}
};
| 1no label
|
src_main_java_org_elasticsearch_cluster_settings_Validator.java
|
258 |
assertTrueEventually(new AssertTask() {
public void run() throws Exception {
assertTrue(service.isShutdown());
}
});
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_executor_ClientExecutorServiceTest.java
|
952 |
transportService.sendRequest(nodes.masterNode(), transportAction, request, new BaseTransportResponseHandler<Response>() {
@Override
public Response newInstance() {
return newResponse();
}
@Override
public void handleResponse(Response response) {
listener.onResponse(response);
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
@Override
public void handleException(final TransportException exp) {
if (exp.unwrapCause() instanceof ConnectTransportException) {
// we want to retry here a bit to see if a new master is elected
clusterService.add(request.masterNodeTimeout(), new TimeoutClusterStateListener() {
@Override
public void postAdded() {
ClusterState clusterStateV2 = clusterService.state();
if (!clusterState.nodes().masterNodeId().equals(clusterStateV2.nodes().masterNodeId())) {
                            // master changed while adding the listener, retry here
clusterService.remove(this);
innerExecute(request, listener, false);
}
}
@Override
public void onClose() {
clusterService.remove(this);
listener.onFailure(new NodeClosedException(clusterService.localNode()));
}
@Override
public void onTimeout(TimeValue timeout) {
clusterService.remove(this);
listener.onFailure(new MasterNotDiscoveredException());
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
if (event.nodesDelta().masterNodeChanged()) {
clusterService.remove(this);
innerExecute(request, listener, false);
}
}
});
} else {
listener.onFailure(exp);
}
}
});
| 1no label
|
src_main_java_org_elasticsearch_action_support_master_TransportMasterNodeOperationAction.java
|
821 |
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_OFFER_INFO")
public class OfferInfoImpl implements OfferInfo {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator= "OfferInfoId")
@GenericGenerator(
name="OfferInfoId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="OfferInfoImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.core.offer.domain.OfferInfoImpl")
}
)
@Column(name = "OFFER_INFO_ID")
protected Long id;
@ElementCollection
@MapKeyColumn(name="FIELD_NAME")
@Column(name="FIELD_VALUE")
@CollectionTable(name="BLC_OFFER_INFO_FIELDS", joinColumns=@JoinColumn(name="OFFER_INFO_FIELDS_ID"))
@BatchSize(size = 50)
protected Map<String, String> fieldValues = new HashMap<String, String>();
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public Map<String, String> getFieldValues() {
return fieldValues;
}
@Override
public void setFieldValues(Map<String, String> fieldValues) {
this.fieldValues = fieldValues;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((fieldValues == null) ? 0 : fieldValues.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
OfferInfoImpl other = (OfferInfoImpl) obj;
if (id != null && other.id != null) {
return id.equals(other.id);
}
if (fieldValues == null) {
if (other.fieldValues != null)
return false;
} else if (!fieldValues.equals(other.fieldValues))
return false;
return true;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_domain_OfferInfoImpl.java
|
884 |
public class PromotableCandidateItemOfferImpl extends AbstractPromotionRounding implements PromotableCandidateItemOffer, OfferHolder {
private static final long serialVersionUID = 1L;
protected Offer offer;
protected PromotableOrder promotableOrder;
protected Money potentialSavings;
protected int uses = 0;
protected HashMap<OfferItemCriteria, List<PromotableOrderItem>> candidateQualifiersMap =
new HashMap<OfferItemCriteria, List<PromotableOrderItem>>();
protected HashMap<OfferItemCriteria, List<PromotableOrderItem>> candidateTargetsMap =
new HashMap<OfferItemCriteria, List<PromotableOrderItem>>();
protected List<PromotableOrderItem> legacyCandidateTargets = new ArrayList<PromotableOrderItem>();
public PromotableCandidateItemOfferImpl(PromotableOrder promotableOrder, Offer offer) {
assert (offer != null);
assert (promotableOrder != null);
this.offer = offer;
this.promotableOrder = promotableOrder;
}
@Override
public BroadleafCurrency getCurrency() {
return promotableOrder.getOrderCurrency();
}
@Override
public Money calculateSavingsForOrderItem(PromotableOrderItem orderItem, int qtyToReceiveSavings) {
Money savings = new Money(promotableOrder.getOrderCurrency());
Money price = orderItem.getPriceBeforeAdjustments(getOffer().getApplyDiscountToSalePrice());
BigDecimal offerUnitValue = PromotableOfferUtility.determineOfferUnitValue(offer, this);
savings = PromotableOfferUtility.computeAdjustmentValue(price, offerUnitValue, this, this);
return savings.multiply(qtyToReceiveSavings);
}
/**
* Returns the number of items that potentially could be targets for the offer. Due to combination or bogo
* logic, they may not all get the tiered offer price.
*/
@Override
public int calculateTargetQuantityForTieredOffer() {
int returnQty = 0;
for (OfferItemCriteria itemCriteria : getCandidateQualifiersMap().keySet()) {
List<PromotableOrderItem> candidateTargets = getCandidateTargetsMap().get(itemCriteria);
for (PromotableOrderItem promotableOrderItem : candidateTargets) {
returnQty += promotableOrderItem.getQuantity();
}
}
return returnQty;
}
@Override
public Money getPotentialSavings() {
if (potentialSavings == null) {
return new Money(promotableOrder.getOrderCurrency());
}
return potentialSavings;
}
@Override
public void setPotentialSavings(Money potentialSavings) {
this.potentialSavings = potentialSavings;
}
@Override
public boolean hasQualifyingItemCriteria() {
return (offer.getQualifyingItemCriteria() != null && !offer.getQualifyingItemCriteria().isEmpty());
}
/**
* Determines the maximum number of times this promotion can be used based on the
* ItemCriteria and promotion's maxQty setting.
*/
@Override
public int calculateMaximumNumberOfUses() {
int maxMatchesFound = 9999; // set arbitrarily high / algorithm will adjust down
        // Iterate through the target criteria and find the smallest max-use count. This will be the overall
        // max usage, since the target criteria are grouped together in "and" style.
int numberOfUsesForThisItemCriteria = maxMatchesFound;
for (OfferItemCriteria targetCriteria : getOffer().getTargetItemCriteria()) {
int temp = calculateMaxUsesForItemCriteria(targetCriteria, getOffer());
numberOfUsesForThisItemCriteria = Math.min(numberOfUsesForThisItemCriteria, temp);
}
maxMatchesFound = Math.min(maxMatchesFound, numberOfUsesForThisItemCriteria);
int offerMaxUses = getOffer().isUnlimitedUsePerOrder() ? maxMatchesFound : getOffer().getMaxUsesPerOrder();
return Math.min(maxMatchesFound, offerMaxUses);
}
@Override
public int calculateMaxUsesForItemCriteria(OfferItemCriteria itemCriteria, Offer promotion) {
int numberOfTargets = 0;
int numberOfUsesForThisItemCriteria = 9999;
if (itemCriteria != null) {
List<PromotableOrderItem> candidateTargets = getCandidateTargetsMap().get(itemCriteria);
for(PromotableOrderItem potentialTarget : candidateTargets) {
numberOfTargets += potentialTarget.getQuantity();
}
numberOfUsesForThisItemCriteria = numberOfTargets / itemCriteria.getQuantity();
}
return numberOfUsesForThisItemCriteria;
}
@Override
public HashMap<OfferItemCriteria, List<PromotableOrderItem>> getCandidateQualifiersMap() {
return candidateQualifiersMap;
}
@Override
public void setCandidateQualifiersMap(HashMap<OfferItemCriteria, List<PromotableOrderItem>> candidateItemsMap) {
this.candidateQualifiersMap = candidateItemsMap;
}
@Override
public HashMap<OfferItemCriteria, List<PromotableOrderItem>> getCandidateTargetsMap() {
return candidateTargetsMap;
}
@Override
public void setCandidateTargetsMap(HashMap<OfferItemCriteria, List<PromotableOrderItem>> candidateItemsMap) {
this.candidateTargetsMap = candidateItemsMap;
}
@Override
public int getPriority() {
return offer.getPriority();
}
@Override
public Offer getOffer() {
return offer;
}
@Override
public int getUses() {
return uses;
}
@Override
public void addUse() {
uses++;
}
@Override
public void resetUses() {
uses = 0;
}
@Override
public boolean isLegacyOffer() {
return offer.getQualifyingItemCriteria().isEmpty() && offer.getTargetItemCriteria().isEmpty();
}
@Override
public List<PromotableOrderItem> getLegacyCandidateTargets() {
return legacyCandidateTargets;
}
@Override
public void setLegacyCandidateTargets(List<PromotableOrderItem> candidateTargets) {
this.legacyCandidateTargets = candidateTargets;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_service_discount_domain_PromotableCandidateItemOfferImpl.java
|
5,393 |
public class InternalValueCount extends MetricsAggregation implements ValueCount {
public static final Type TYPE = new Type("value_count", "vcount");
private static final AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
@Override
public InternalValueCount readResult(StreamInput in) throws IOException {
InternalValueCount count = new InternalValueCount();
count.readFrom(in);
return count;
}
};
public static void registerStreams() {
AggregationStreams.registerStream(STREAM, TYPE.stream());
}
private long value;
InternalValueCount() {} // for serialization
public InternalValueCount(String name, long value) {
super(name);
this.value = value;
}
@Override
public long getValue() {
return value;
}
@Override
public Type type() {
return TYPE;
}
@Override
public InternalAggregation reduce(ReduceContext reduceContext) {
List<InternalAggregation> aggregations = reduceContext.aggregations();
if (aggregations.size() == 1) {
return aggregations.get(0);
}
InternalValueCount reduced = null;
for (InternalAggregation aggregation : aggregations) {
if (reduced == null) {
reduced = (InternalValueCount) aggregation;
} else {
reduced.value += ((InternalValueCount) aggregation).value;
}
}
return reduced;
}
@Override
public void readFrom(StreamInput in) throws IOException {
name = in.readString();
value = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
out.writeVLong(value);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return builder.startObject(name)
.field(CommonFields.VALUE, value)
.endObject();
}
@Override
public String toString() {
return "count[" + value + "]";
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_metrics_valuecount_InternalValueCount.java
|
326 |
ExecutionCallback executionCallback = new ExecutionCallback() {
@Override
public void onResponse(Object response) {
latch.countDown();
}
@Override
public void onFailure(Throwable t) {
}
};
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTest.java
|
1,144 |
public class OSQLMethodFormat extends OAbstractSQLMethod {
public static final String NAME = "format";
public OSQLMethodFormat() {
super(NAME, 1, 2);
}
@Override
public Object execute(OIdentifiable iRecord, OCommandContext iContext, Object ioResult, Object[] iMethodParams) {
final Object v = getParameterValue(iRecord, iMethodParams[0].toString());
if (v != null) {
if (ioResult instanceof Date) {
final SimpleDateFormat format = new SimpleDateFormat(v.toString());
if (iMethodParams.length > 1)
format.setTimeZone(TimeZone.getTimeZone(iMethodParams[1].toString()));
else
format.setTimeZone(ODateHelper.getDatabaseTimeZone());
ioResult = format.format(ioResult);
} else {
ioResult = ioResult != null ? String.format(v.toString(), ioResult) : null;
}
}
return ioResult;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_method_misc_OSQLMethodFormat.java
|
143 |
public static class Name {
public static final String Rules = "StructuredContentImpl_Rules_Tab";
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_domain_StructuredContentImpl.java
|
261 |
assertTrueEventually(new AssertTask() {
@Override
public void run() {
assertEquals(executions, counter.get());
}
});
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_executor_ExecutionDelayTest.java
|
608 |
public class UpdateSettingsClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest<UpdateSettingsClusterStateUpdateRequest> {
private Settings settings;
public UpdateSettingsClusterStateUpdateRequest() {
}
/**
* Returns the {@link Settings} to update
*/
public Settings settings() {
return settings;
}
/**
* Sets the {@link Settings} to update
*/
public UpdateSettingsClusterStateUpdateRequest settings(Settings settings) {
this.settings = settings;
return this;
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_settings_put_UpdateSettingsClusterStateUpdateRequest.java
|
6,320 |
public static class Info implements Streamable, ToXContent {
private String name;
private String type;
private int min;
private int max;
private TimeValue keepAlive;
private SizeValue queueSize;
Info() {
}
public Info(String name, String type) {
this(name, type, -1);
}
public Info(String name, String type, int size) {
this(name, type, size, size, null, null);
}
public Info(String name, String type, int min, int max, @Nullable TimeValue keepAlive, @Nullable SizeValue queueSize) {
this.name = name;
this.type = type;
this.min = min;
this.max = max;
this.keepAlive = keepAlive;
this.queueSize = queueSize;
}
public String getName() {
return this.name;
}
public String getType() {
return this.type;
}
public int getMin() {
return this.min;
}
public int getMax() {
return this.max;
}
@Nullable
public TimeValue getKeepAlive() {
return this.keepAlive;
}
@Nullable
public SizeValue getQueueSize() {
return this.queueSize;
}
@Override
public void readFrom(StreamInput in) throws IOException {
name = in.readString();
type = in.readString();
min = in.readInt();
max = in.readInt();
if (in.readBoolean()) {
keepAlive = TimeValue.readTimeValue(in);
}
if (in.readBoolean()) {
queueSize = SizeValue.readSizeValue(in);
}
in.readBoolean(); // here to conform with removed waitTime
in.readBoolean(); // here to conform with removed rejected setting
in.readBoolean(); // here to conform with queue type
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
out.writeString(type);
out.writeInt(min);
out.writeInt(max);
if (keepAlive == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
keepAlive.writeTo(out);
}
if (queueSize == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
queueSize.writeTo(out);
}
out.writeBoolean(false); // here to conform with removed waitTime
out.writeBoolean(false); // here to conform with removed rejected setting
out.writeBoolean(false); // here to conform with queue type
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(name, XContentBuilder.FieldCaseConversion.NONE);
builder.field(Fields.TYPE, type);
if (min != -1) {
builder.field(Fields.MIN, min);
}
if (max != -1) {
builder.field(Fields.MAX, max);
}
if (keepAlive != null) {
builder.field(Fields.KEEP_ALIVE, keepAlive.toString());
}
if (queueSize != null) {
builder.field(Fields.QUEUE_SIZE, queueSize.toString());
}
builder.endObject();
return builder;
}
static final class Fields {
static final XContentBuilderString TYPE = new XContentBuilderString("type");
static final XContentBuilderString MIN = new XContentBuilderString("min");
static final XContentBuilderString MAX = new XContentBuilderString("max");
static final XContentBuilderString KEEP_ALIVE = new XContentBuilderString("keep_alive");
static final XContentBuilderString QUEUE_SIZE = new XContentBuilderString("queue_size");
}
}
| 1no label
|
src_main_java_org_elasticsearch_threadpool_ThreadPool.java
|
364 |
public static class TestKeyPredicate
implements KeyPredicate<Integer> {
@Override
public boolean evaluate(Integer key) {
return key == 50;
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_mapreduce_ClientMapReduceTest.java
|
18 |
@Component("blTargetItemRulesValidator")
public class TargetItemRulesValidator implements PropertyValidator {
@Override
public PropertyValidationResult validate(Entity entity, Serializable instance, Map<String, FieldMetadata> entityFieldMetadata, Map<String, String> validationConfiguration, BasicFieldMetadata propertyMetadata, String propertyName, String value) {
Offer offer = (Offer)instance;
if (OfferType.ORDER_ITEM.equals(offer.getType())) {
return new PropertyValidationResult(CollectionUtils.isNotEmpty(offer.getTargetItemCriteria()), RequiredPropertyValidator.ERROR_MESSAGE);
} else {
return new PropertyValidationResult(true);
}
}
}
| 0true
|
admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_server_service_persistence_validation_TargetItemRulesValidator.java
|
462 |
public class ODatabaseCompare extends ODatabaseImpExpAbstract {
private OStorage storage1;
private OStorage storage2;
private ODatabaseDocumentTx databaseDocumentTxOne;
private ODatabaseDocumentTx databaseDocumentTxTwo;
private boolean compareEntriesForAutomaticIndexes = false;
private boolean autoDetectExportImportMap = true;
private OIndex<OIdentifiable> exportImportHashTable = null;
private int differences = 0;
public ODatabaseCompare(String iDb1URL, String iDb2URL, final OCommandOutputListener iListener) throws IOException {
super(null, null, iListener);
listener.onMessage("\nComparing two local databases:\n1) " + iDb1URL + "\n2) " + iDb2URL + "\n");
storage1 = Orient.instance().loadStorage(iDb1URL);
storage1.open(null, null, null);
storage2 = Orient.instance().loadStorage(iDb2URL);
storage2.open(null, null, null);
}
public ODatabaseCompare(String iDb1URL, String iDb2URL, final String userName, final String userPassword,
final OCommandOutputListener iListener) throws IOException {
super(null, null, iListener);
listener.onMessage("\nComparing two local databases:\n1) " + iDb1URL + "\n2) " + iDb2URL + "\n");
databaseDocumentTxOne = new ODatabaseDocumentTx(iDb1URL);
databaseDocumentTxOne.open(userName, userPassword);
databaseDocumentTxTwo = new ODatabaseDocumentTx(iDb2URL);
databaseDocumentTxTwo.open(userName, userPassword);
storage1 = databaseDocumentTxOne.getStorage();
storage2 = databaseDocumentTxTwo.getStorage();
// exclude automatically generated clusters
excludeClusters.add("orids");
excludeClusters.add(OMetadataDefault.CLUSTER_INDEX_NAME);
excludeClusters.add(OMetadataDefault.CLUSTER_MANUAL_INDEX_NAME);
}
public boolean isCompareEntriesForAutomaticIndexes() {
return compareEntriesForAutomaticIndexes;
}
public void setAutoDetectExportImportMap(boolean autoDetectExportImportMap) {
this.autoDetectExportImportMap = autoDetectExportImportMap;
}
public void setCompareEntriesForAutomaticIndexes(boolean compareEntriesForAutomaticIndexes) {
this.compareEntriesForAutomaticIndexes = compareEntriesForAutomaticIndexes;
}
public boolean compare() {
if (isDocumentDatabases() && (databaseDocumentTxOne == null || databaseDocumentTxTwo == null)) {
listener.onMessage("\nPassed in URLs are related to document databases but credentials "
+ "were not provided to open them. Please provide user name + password for databases to compare");
return false;
}
if (!isDocumentDatabases() && (databaseDocumentTxOne != null || databaseDocumentTxTwo != null)) {
listener.onMessage("\nPassed in URLs are not related to document databases but credentials "
+ "were provided to open them. Please do not provide user name + password for databases to compare");
return false;
}
try {
ODocumentHelper.RIDMapper ridMapper = null;
if (autoDetectExportImportMap) {
listener
.onMessage("\nAuto discovery of mapping between RIDs of exported and imported records is switched on, try to discover mapping data on disk.");
exportImportHashTable = (OIndex<OIdentifiable>) databaseDocumentTxTwo.getMetadata().getIndexManager()
.getIndex(ODatabaseImport.EXPORT_IMPORT_MAP_NAME);
if (exportImportHashTable != null) {
listener.onMessage("\nMapping data were found and will be loaded.");
ridMapper = new ODocumentHelper.RIDMapper() {
@Override
public ORID map(ORID rid) {
if (rid == null)
return null;
if (!rid.isPersistent())
return null;
final OIdentifiable result = exportImportHashTable.get(rid);
if (result == null)
return null;
return result.getIdentity();
}
};
} else
listener.onMessage("\nMapping data were not found.");
}
compareClusters();
compareRecords(ridMapper);
if (isDocumentDatabases())
compareIndexes(ridMapper);
if (differences == 0) {
listener.onMessage("\n\nDatabases match.");
return true;
} else {
listener.onMessage("\n\nDatabases do not match. Found " + differences + " difference(s).");
return false;
}
} catch (Exception e) {
e.printStackTrace();
throw new ODatabaseExportException("Error on compare of database '" + storage1.getName() + "' against '" + storage2.getName()
+ "'", e);
} finally {
storage1.close();
storage2.close();
}
}
@SuppressWarnings({ "unchecked", "rawtypes" })
private void compareIndexes(ODocumentHelper.RIDMapper ridMapper) {
listener.onMessage("\nStarting index comparison:");
boolean ok = true;
final OIndexManager indexManagerOne = makeDbCall(databaseDocumentTxOne, new ODbRelatedCall<OIndexManager>() {
public OIndexManager call() {
return databaseDocumentTxOne.getMetadata().getIndexManager();
}
});
final OIndexManager indexManagerTwo = makeDbCall(databaseDocumentTxTwo, new ODbRelatedCall<OIndexManager>() {
public OIndexManager call() {
return databaseDocumentTxTwo.getMetadata().getIndexManager();
}
});
final Collection<? extends OIndex<?>> indexesOne = makeDbCall(databaseDocumentTxOne,
new ODbRelatedCall<Collection<? extends OIndex<?>>>() {
public Collection<? extends OIndex<?>> call() {
return indexManagerOne.getIndexes();
}
});
int indexesSizeOne = makeDbCall(databaseDocumentTxOne, new ODbRelatedCall<Integer>() {
public Integer call() {
return indexesOne.size();
}
});
int indexesSizeTwo = makeDbCall(databaseDocumentTxTwo, new ODbRelatedCall<Integer>() {
public Integer call() {
return indexManagerTwo.getIndexes().size();
}
});
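// the export/import RID mapping index exists only in DB2 as an artifact of the import process, so it is not counted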
if (exportImportHashTable != null)
indexesSizeTwo--;
if (indexesSizeOne != indexesSizeTwo) {
ok = false;
listener.onMessage("\n- ERR: Amount of indexes are different.");
listener.onMessage("\n--- DB1: " + indexesSizeOne);
listener.onMessage("\n--- DB2: " + indexesSizeTwo);
listener.onMessage("\n");
++differences;
}
final Iterator<? extends OIndex<?>> iteratorOne = makeDbCall(databaseDocumentTxOne,
new ODbRelatedCall<Iterator<? extends OIndex<?>>>() {
public Iterator<? extends OIndex<?>> call() {
return indexesOne.iterator();
}
});
while (makeDbCall(databaseDocumentTxOne, new ODbRelatedCall<Boolean>() {
public Boolean call() {
return iteratorOne.hasNext();
}
})) {
final OIndex indexOne = makeDbCall(databaseDocumentTxOne, new ODbRelatedCall<OIndex<?>>() {
public OIndex<?> call() {
return iteratorOne.next();
}
});
final OIndex<?> indexTwo = makeDbCall(databaseDocumentTxTwo, new ODbRelatedCall<OIndex<?>>() {
public OIndex<?> call() {
return indexManagerTwo.getIndex(indexOne.getName());
}
});
if (indexTwo == null) {
ok = false;
listener.onMessage("\n- ERR: Index " + indexOne.getName() + " is absent in DB2.");
++differences;
continue;
}
if (!indexOne.getType().equals(indexTwo.getType())) {
ok = false;
listener.onMessage("\n- ERR: Index types for index " + indexOne.getName() + " are different.");
listener.onMessage("\n--- DB1: " + indexOne.getType());
listener.onMessage("\n--- DB2: " + indexTwo.getType());
listener.onMessage("\n");
++differences;
continue;
}
if (!indexOne.getClusters().equals(indexTwo.getClusters())) {
ok = false;
listener.onMessage("\n- ERR: Clusters to index for index " + indexOne.getName() + " are different.");
listener.onMessage("\n--- DB1: " + indexOne.getClusters());
listener.onMessage("\n--- DB2: " + indexTwo.getClusters());
listener.onMessage("\n");
++differences;
continue;
}
if (indexOne.getDefinition() == null && indexTwo.getDefinition() != null) {
ok = false;
listener.onMessage("\n- ERR: Index definition for index " + indexOne.getName() + " for DB2 is not null.");
++differences;
continue;
} else if (indexOne.getDefinition() != null && indexTwo.getDefinition() == null) {
ok = false;
listener.onMessage("\n- ERR: Index definition for index " + indexOne.getName() + " for DB2 is null.");
++differences;
continue;
} else if (indexOne.getDefinition() != null && !indexOne.getDefinition().equals(indexTwo.getDefinition())) {
ok = false;
listener.onMessage("\n- ERR: Index definitions for index " + indexOne.getName() + " are different.");
listener.onMessage("\n--- DB1: " + indexOne.getDefinition());
listener.onMessage("\n--- DB2: " + indexTwo.getDefinition());
listener.onMessage("\n");
++differences;
continue;
}
final long indexOneSize = makeDbCall(databaseDocumentTxOne, new ODbRelatedCall<Long>() {
public Long call() {
return indexOne.getSize();
}
});
final long indexTwoSize = makeDbCall(databaseDocumentTxTwo, new ODbRelatedCall<Long>() {
public Long call() {
return indexTwo.getSize();
}
});
if (indexOneSize != indexTwoSize) {
ok = false;
listener.onMessage("\n- ERR: Amount of entries for index " + indexOne.getName() + " are different.");
listener.onMessage("\n--- DB1: " + indexOneSize);
listener.onMessage("\n--- DB2: " + indexTwoSize);
listener.onMessage("\n");
++differences;
}
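// compare entries one by one: always for manual indexes, and for automatic indexes only when requested (dictionary indexes are skipped)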
if (((compareEntriesForAutomaticIndexes && !indexOne.getType().equals("DICTIONARY")) || !indexOne.isAutomatic())) {
final Iterator<Map.Entry<Object, Object>> indexIteratorOne = makeDbCall(databaseDocumentTxOne,
new ODbRelatedCall<Iterator<Map.Entry<Object, Object>>>() {
public Iterator<Map.Entry<Object, Object>> call() {
return indexOne.iterator();
}
});
while (makeDbCall(databaseDocumentTxOne, new ODbRelatedCall<Boolean>() {
public Boolean call() {
return indexIteratorOne.hasNext();
}
})) {
final Map.Entry<Object, Object> indexOneEntry = makeDbCall(databaseDocumentTxOne,
new ODbRelatedCall<Map.Entry<Object, Object>>() {
public Map.Entry<Object, Object> call() {
return indexIteratorOne.next();
}
});
final Object key = makeDbCall(databaseDocumentTxOne, new ODbRelatedCall<Object>() {
public Object call() {
return indexOneEntry.getKey();
}
});
Object indexOneValue = makeDbCall(databaseDocumentTxOne, new ODbRelatedCall<Object>() {
public Object call() {
return indexOneEntry.getValue();
}
});
final Object indexTwoValue = makeDbCall(databaseDocumentTxTwo, new ODbRelatedCall<Object>() {
public Object call() {
return indexTwo.get(key);
}
});
if (indexTwoValue == null) {
ok = false;
listener.onMessage("\n- ERR: Entry with key " + key + " is absent in index " + indexOne.getName() + " for DB2.");
++differences;
continue;
}
if (indexOneValue instanceof Set && indexTwoValue instanceof Set) {
final Set<Object> indexOneValueSet = (Set<Object>) indexOneValue;
final Set<Object> indexTwoValueSet = (Set<Object>) indexTwoValue;
if (!ODocumentHelper.compareSets(databaseDocumentTxOne, indexOneValueSet, databaseDocumentTxTwo, indexTwoValueSet,
ridMapper)) {
ok = false;
reportIndexDiff(indexOne, key, indexOneValue, indexTwoValue);
}
} else if (indexOneValue instanceof ORID && indexTwoValue instanceof ORID) {
if (ridMapper != null && ((ORID) indexOneValue).isPersistent()) {
OIdentifiable identifiable = ridMapper.map((ORID) indexOneValue);
if (identifiable != null)
indexOneValue = identifiable.getIdentity();
}
if (!indexOneValue.equals(indexTwoValue)) {
ok = false;
reportIndexDiff(indexOne, key, indexOneValue, indexTwoValue);
}
} else if (!indexOneValue.equals(indexTwoValue)) {
ok = false;
reportIndexDiff(indexOne, key, indexOneValue, indexTwoValue);
}
}
}
}
if (ok)
listener.onMessage("OK");
}
private boolean compareClusters() {
listener.onMessage("\nStarting shallow comparison of clusters:");
listener.onMessage("\nChecking the number of clusters...");
if (storage1.getClusterNames().size() != storage2.getClusterNames().size()) {
listener.onMessage("ERR: cluster sizes are different: " + storage1.getClusterNames().size() + " <-> "
+ storage2.getClusterNames().size());
++differences;
}
int cluster2Id;
boolean ok;
for (String clusterName : storage1.getClusterNames()) {
// CHECK IF THE CLUSTER IS INCLUDED
if (includeClusters != null) {
if (!includeClusters.contains(clusterName))
continue;
} else if (excludeClusters != null) {
if (excludeClusters.contains(clusterName))
continue;
}
ok = true;
cluster2Id = storage2.getClusterIdByName(clusterName);
listener.onMessage("\n- Checking cluster " + String.format("%-25s: ", "'" + clusterName + "'"));
if (cluster2Id == -1) {
listener.onMessage("ERR: cluster name " + clusterName + " was not found on database " + storage2);
++differences;
ok = false;
}
if (cluster2Id != storage1.getClusterIdByName(clusterName)) {
listener.onMessage("ERR: cluster id is different for cluster " + clusterName + ": "
+ storage1.getClusterIdByName(clusterName) + " <-> " + cluster2Id);
++differences;
ok = false;
}
if (storage1.count(cluster2Id) != storage2.count(cluster2Id)) {
listener.onMessage("ERR: number of records different in cluster '" + clusterName + "' (id=" + cluster2Id + "): "
+ storage1.count(cluster2Id) + " <-> " + storage2.count(cluster2Id));
++differences;
ok = false;
}
if (ok)
listener.onMessage("OK");
}
listener.onMessage("\n\nShallow analysis done.");
return true;
}
private boolean compareRecords(ODocumentHelper.RIDMapper ridMapper) {
listener.onMessage("\nStarting deep comparison record by record. This may take a few minutes. Wait please...");
int clusterId;
for (String clusterName : storage1.getClusterNames()) {
// CHECK IF THE CLUSTER IS INCLUDED
if (includeClusters != null) {
if (!includeClusters.contains(clusterName))
continue;
} else if (excludeClusters != null) {
if (excludeClusters.contains(clusterName))
continue;
}
clusterId = storage1.getClusterIdByName(clusterName);
OClusterPosition[] db1Range = storage1.getClusterDataRange(clusterId);
OClusterPosition[] db2Range = storage2.getClusterDataRange(clusterId);
final OClusterPosition db1Max = db1Range[1];
final OClusterPosition db2Max = db2Range[1];
final ODocument doc1 = new ODocument();
final ODocument doc2 = new ODocument();
final ORecordId rid = new ORecordId(clusterId);
// TODO: why can these maximums be different?
final OClusterPosition clusterMax = db1Max.compareTo(db2Max) > 0 ? db1Max : db2Max;
final OStorage storage;
if (clusterMax.equals(db1Max))
storage = storage1;
else
storage = storage2;
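// page through the cluster of the storage with the wider data range, starting from position 0
// and advancing with higherPhysicalPositions(), so no record positions are skipped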
OPhysicalPosition[] physicalPositions = storage.ceilingPhysicalPositions(clusterId, new OPhysicalPosition(
OClusterPositionFactory.INSTANCE.valueOf(0)));
long recordsCounter = 0;
while (physicalPositions.length > 0) {
for (OPhysicalPosition physicalPosition : physicalPositions) {
recordsCounter++;
final OClusterPosition position = physicalPosition.clusterPosition;
rid.clusterPosition = position;
if (isDocumentDatabases() && rid.equals(new ORecordId(storage1.getConfiguration().indexMgrRecordId))
&& rid.equals(new ORecordId(storage2.getConfiguration().indexMgrRecordId)))
continue;
final ORawBuffer buffer1 = storage1.readRecord(rid, null, true, null, false).getResult();
final ORawBuffer buffer2;
if (ridMapper == null)
buffer2 = storage2.readRecord(rid, null, true, null, false).getResult();
else {
final ORID newRid = ridMapper.map(rid);
if (newRid == null)
buffer2 = storage2.readRecord(rid, null, true, null, false).getResult();
else
buffer2 = storage2.readRecord(new ORecordId(newRid), null, true, null, false).getResult();
}
if (buffer1 == null && buffer2 == null)
// BOTH RECORDS NULL, OK
continue;
else if (buffer1 == null && buffer2 != null) {
// REC1 NULL
listener.onMessage("\n- ERR: RID=" + clusterId + ":" + position + " is null in DB1");
++differences;
} else if (buffer1 != null && buffer2 == null) {
// REC2 NULL
listener.onMessage("\n- ERR: RID=" + clusterId + ":" + position + " is null in DB2");
++differences;
} else {
if (buffer1.recordType != buffer2.recordType) {
listener.onMessage("\n- ERR: RID=" + clusterId + ":" + position + " recordType is different: "
+ (char) buffer1.recordType + " <-> " + (char) buffer2.recordType);
++differences;
}
if (buffer1.buffer == null && buffer2.buffer == null) {
} else if (buffer1.buffer == null && buffer2.buffer != null) {
listener.onMessage("\n- ERR: RID=" + clusterId + ":" + position + " content is different: null <-> "
+ buffer2.buffer.length);
++differences;
} else if (buffer1.buffer != null && buffer2.buffer == null) {
listener.onMessage("\n- ERR: RID=" + clusterId + ":" + position + " content is different: " + buffer1.buffer.length
+ " <-> null");
++differences;
} else {
if (buffer1.recordType == ODocument.RECORD_TYPE) {
// DOCUMENT: TRY TO INSTANTIATE AND COMPARE
makeDbCall(databaseDocumentTxOne, new ODocumentHelper.ODbRelatedCall<Object>() {
public Object call() {
doc1.reset();
doc1.fromStream(buffer1.buffer);
return null;
}
});
makeDbCall(databaseDocumentTxTwo, new ODocumentHelper.ODbRelatedCall<Object>() {
public Object call() {
doc2.reset();
doc2.fromStream(buffer2.buffer);
return null;
}
});
if (rid.toString().equals(storage1.getConfiguration().schemaRecordId)
&& rid.toString().equals(storage2.getConfiguration().schemaRecordId)) {
makeDbCall(databaseDocumentTxOne, new ODocumentHelper.ODbRelatedCall<java.lang.Object>() {
public Object call() {
convertSchemaDoc(doc1);
return null;
}
});
makeDbCall(databaseDocumentTxTwo, new ODocumentHelper.ODbRelatedCall<java.lang.Object>() {
public Object call() {
convertSchemaDoc(doc2);
return null;
}
});
}
if (!ODocumentHelper.hasSameContentOf(doc1, databaseDocumentTxOne, doc2, databaseDocumentTxTwo, ridMapper)) {
listener.onMessage("\n- ERR: RID=" + clusterId + ":" + position + " document content is different");
listener.onMessage("\n--- REC1: " + new String(buffer1.buffer));
listener.onMessage("\n--- REC2: " + new String(buffer2.buffer));
listener.onMessage("\n");
++differences;
}
} else {
if (buffer1.buffer.length != buffer2.buffer.length) {
// CHECK IF THE TRIMMED SIZE IS THE SAME
final String rec1 = new String(buffer1.buffer).trim();
final String rec2 = new String(buffer2.buffer).trim();
if (rec1.length() != rec2.length()) {
listener.onMessage("\n- ERR: RID=" + clusterId + ":" + position + " content length is different: "
+ buffer1.buffer.length + " <-> " + buffer2.buffer.length);
if (buffer1.recordType == ODocument.RECORD_TYPE || buffer1.recordType == ORecordFlat.RECORD_TYPE)
listener.onMessage("\n--- REC1: " + rec1);
if (buffer2.recordType == ODocument.RECORD_TYPE || buffer2.recordType == ORecordFlat.RECORD_TYPE)
listener.onMessage("\n--- REC2: " + rec2);
listener.onMessage("\n");
++differences;
}
} else {
// CHECK BYTE PER BYTE
for (int b = 0; b < buffer1.buffer.length; ++b) {
if (buffer1.buffer[b] != buffer2.buffer[b]) {
listener.onMessage("\n- ERR: RID=" + clusterId + ":" + position + " content is different at byte #" + b
+ ": " + buffer1.buffer[b] + " <-> " + buffer2.buffer[b]);
listener.onMessage("\n--- REC1: " + new String(buffer1.buffer));
listener.onMessage("\n--- REC2: " + new String(buffer2.buffer));
listener.onMessage("\n");
++differences;
break;
}
}
}
}
}
}
}
physicalPositions = storage.higherPhysicalPositions(clusterId, physicalPositions[physicalPositions.length - 1]);
if (recordsCounter % 10000 == 0)
listener.onMessage("\n" + recordsCounter + " records were processed for cluster " + clusterName + " ...");
}
listener.onMessage("\nCluster comparison was finished, " + recordsCounter + " records were processed for cluster "
+ clusterName + " ...");
}
return true;
}
private void convertSchemaDoc(final ODocument document) {
if (document.field("classes") != null) {
document.setFieldType("classes", OType.EMBEDDEDSET);
for (ODocument classDoc : document.<Set<ODocument>> field("classes")) {
classDoc.setFieldType("properties", OType.EMBEDDEDSET);
}
}
}
private boolean isDocumentDatabases() {
return storage1.getConfiguration().schemaRecordId != null && storage2.getConfiguration().schemaRecordId != null;
}
private void reportIndexDiff(OIndex<?> indexOne, Object key, final Object indexOneValue, final Object indexTwoValue) {
listener.onMessage("\n- ERR: Entry values for key '" + key + "' are different for index " + indexOne.getName());
listener.onMessage("\n--- DB1: " + makeDbCall(databaseDocumentTxOne, new ODbRelatedCall<String>() {
public String call() {
return indexOneValue.toString();
}
}));
listener.onMessage("\n--- DB2: " + makeDbCall(databaseDocumentTxOne, new ODbRelatedCall<String>() {
public String call() {
return indexTwoValue.toString();
}
}));
listener.onMessage("\n");
++differences;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_db_tool_ODatabaseCompare.java
|
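A minimal caller sketch for the ODatabaseCompare class above; the database URLs, credentials, and the inline output listener are illustrative placeholders, not values taken from the source.

import java.io.IOException;
import com.orientechnologies.orient.core.command.OCommandOutputListener;
import com.orientechnologies.orient.core.db.tool.ODatabaseCompare;

public class CompareDatabasesExample {
  public static void main(String[] args) throws IOException {
    // Echo the progress messages emitted by the comparison.
    OCommandOutputListener listener = new OCommandOutputListener() {
      @Override
      public void onMessage(final String iText) {
        System.out.print(iText);
      }
    };
    ODatabaseCompare compare =
        new ODatabaseCompare("plocal:/tmp/db1", "plocal:/tmp/db2", "admin", "admin", listener);
    compare.setCompareEntriesForAutomaticIndexes(true); // also diff entries of automatic indexes
    compare.setAutoDetectExportImportMap(true);         // remap RIDs if an export/import mapping index exists
    boolean match = compare.compare();
    System.out.println(match ? "Databases match" : "Databases differ");
  }
}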
4,426 |
public class IndicesClusterStateService extends AbstractLifecycleComponent<IndicesClusterStateService> implements ClusterStateListener {
private final IndicesService indicesService;
private final ClusterService clusterService;
private final ThreadPool threadPool;
private final RecoveryTarget recoveryTarget;
private final ShardStateAction shardStateAction;
private final NodeIndexDeletedAction nodeIndexDeletedAction;
private final NodeMappingRefreshAction nodeMappingRefreshAction;
// a map of mappings type we have seen per index due to cluster state
// we need this so we won't remove types automatically created as part of the indexing process
private final ConcurrentMap<Tuple<String, String>, Boolean> seenMappings = ConcurrentCollections.newConcurrentMap();
// a list of shards that failed during recovery
// we keep track of these shards in order to prevent repeated recovery of these shards on each cluster state update
private final ConcurrentMap<ShardId, FailedShard> failedShards = ConcurrentCollections.newConcurrentMap();
static class FailedShard {
public final long version;
public final long timestamp;
FailedShard(long version) {
this.version = version;
this.timestamp = System.currentTimeMillis();
}
}
private final Object mutex = new Object();
private final FailedEngineHandler failedEngineHandler = new FailedEngineHandler();
private final boolean sendRefreshMapping;
@Inject
public IndicesClusterStateService(Settings settings, IndicesService indicesService, ClusterService clusterService,
ThreadPool threadPool, RecoveryTarget recoveryTarget,
ShardStateAction shardStateAction,
NodeIndexDeletedAction nodeIndexDeletedAction,
NodeMappingRefreshAction nodeMappingRefreshAction) {
super(settings);
this.indicesService = indicesService;
this.clusterService = clusterService;
this.threadPool = threadPool;
this.recoveryTarget = recoveryTarget;
this.shardStateAction = shardStateAction;
this.nodeIndexDeletedAction = nodeIndexDeletedAction;
this.nodeMappingRefreshAction = nodeMappingRefreshAction;
this.sendRefreshMapping = componentSettings.getAsBoolean("send_refresh_mapping", true);
}
@Override
protected void doStart() throws ElasticsearchException {
clusterService.addFirst(this);
}
@Override
protected void doStop() throws ElasticsearchException {
clusterService.remove(this);
}
@Override
protected void doClose() throws ElasticsearchException {
}
@Override
public void clusterChanged(final ClusterChangedEvent event) {
if (!indicesService.changesAllowed()) {
return;
}
if (!lifecycle.started()) {
return;
}
synchronized (mutex) {
// we need to clean the shards and indices we have on this node, since we
// are going to recover them again once state persistence is disabled (no master / not recovered)
// TODO: this feels a bit hacky here, a block disables state persistence, and then we clean the allocated shards, maybe another flag in blocks?
if (event.state().blocks().disableStatePersistence()) {
for (final String index : indicesService.indices()) {
IndexService indexService = indicesService.indexService(index);
for (Integer shardId : indexService.shardIds()) {
logger.debug("[{}][{}] removing shard (disabled block persistence)", index, shardId);
try {
indexService.removeShard(shardId, "removing shard (disabled block persistence)");
} catch (Throwable e) {
logger.warn("[{}] failed to remove shard (disabled block persistence)", e, index);
}
}
removeIndex(index, "cleaning index (disabled block persistence)");
}
return;
}
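// apply the new cluster state in a fixed order: clear stale failed-shard and UUID state,
// create/update indices, mappings, aliases and shards, then remove whatever is no longer assigned to this node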
cleanFailedShards(event);
cleanMismatchedIndexUUIDs(event);
applyNewIndices(event);
applyMappings(event);
applyAliases(event);
applyNewOrUpdatedShards(event);
applyDeletedIndices(event);
applyDeletedShards(event);
applyCleanedIndices(event);
applySettings(event);
sendIndexLifecycleEvents(event);
}
}
private void sendIndexLifecycleEvents(final ClusterChangedEvent event) {
String localNodeId = event.state().nodes().localNodeId();
assert localNodeId != null;
for (String index : event.indicesDeleted()) {
try {
nodeIndexDeletedAction.nodeIndexDeleted(event.state(), index, localNodeId);
} catch (Throwable e) {
logger.debug("failed to send to master index {} deleted event", e, index);
}
}
}
private void cleanMismatchedIndexUUIDs(final ClusterChangedEvent event) {
for (IndexService indexService : indicesService) {
IndexMetaData indexMetaData = event.state().metaData().index(indexService.index().name());
if (indexMetaData == null) {
// got deleted on us, will be deleted later
continue;
}
if (!indexMetaData.isSameUUID(indexService.indexUUID())) {
logger.debug("[{}] mismatch on index UUIDs between cluster state and local state, cleaning the index so it will be recreated", indexMetaData.index());
removeIndex(indexMetaData.index(), "mismatch on index UUIDs between cluster state and local state, cleaning the index so it will be recreated");
}
}
}
private void applyCleanedIndices(final ClusterChangedEvent event) {
// handle closed indices, since they are not allocated on a node once they are closed
// so applyDeletedIndices might not take them into account
for (final String index : indicesService.indices()) {
IndexMetaData indexMetaData = event.state().metaData().index(index);
if (indexMetaData != null && indexMetaData.state() == IndexMetaData.State.CLOSE) {
IndexService indexService = indicesService.indexService(index);
for (Integer shardId : indexService.shardIds()) {
logger.debug("[{}][{}] removing shard (index is closed)", index, shardId);
try {
indexService.removeShard(shardId, "removing shard (index is closed)");
} catch (Throwable e) {
logger.warn("[{}] failed to remove shard (index is closed)", e, index);
}
}
}
}
for (final String index : indicesService.indices()) {
if (indicesService.indexService(index).shardIds().isEmpty()) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] cleaning index (no shards allocated)", index);
}
// clean the index
removeIndex(index, "removing index (no shards allocated)");
}
}
}
private void applyDeletedIndices(final ClusterChangedEvent event) {
for (final String index : indicesService.indices()) {
if (!event.state().metaData().hasIndex(index)) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] cleaning index, no longer part of the metadata", index);
}
removeIndex(index, "index no longer part of the metadata");
}
}
}
private void applyDeletedShards(final ClusterChangedEvent event) {
RoutingNodes.RoutingNodeIterator routingNode = event.state().readOnlyRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
if (routingNode == null) {
return;
}
IntOpenHashSet newShardIds = new IntOpenHashSet();
for (IndexService indexService : indicesService) {
String index = indexService.index().name();
IndexMetaData indexMetaData = event.state().metaData().index(index);
if (indexMetaData == null) {
continue;
}
// now, go over and delete shards that needs to get deleted
newShardIds.clear();
for (MutableShardRouting shard : routingNode) {
if (shard.index().equals(index)) {
newShardIds.add(shard.id());
}
}
for (Integer existingShardId : indexService.shardIds()) {
if (!newShardIds.contains(existingShardId)) {
if (indexMetaData.state() == IndexMetaData.State.CLOSE) {
if (logger.isDebugEnabled()) {
logger.debug("[{}][{}] removing shard (index is closed)", index, existingShardId);
}
indexService.removeShard(existingShardId, "removing shard (index is closed)");
} else {
// we can just remove the shard, without cleaning it locally, since we will clean it
// when all shards are allocated in the IndicesStore
if (logger.isDebugEnabled()) {
logger.debug("[{}][{}] removing shard (not allocated)", index, existingShardId);
}
indexService.removeShard(existingShardId, "removing shard (not allocated)");
}
}
}
}
}
private void applyNewIndices(final ClusterChangedEvent event) {
// we only create indices for shards that are allocated
RoutingNodes.RoutingNodeIterator routingNode = event.state().readOnlyRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
if (routingNode == null) {
return;
}
for (MutableShardRouting shard : routingNode) {
if (!indicesService.hasIndex(shard.index())) {
final IndexMetaData indexMetaData = event.state().metaData().index(shard.index());
if (logger.isDebugEnabled()) {
logger.debug("[{}] creating index", indexMetaData.index());
}
indicesService.createIndex(indexMetaData.index(), indexMetaData.settings(), event.state().nodes().localNode().id());
}
}
}
private void applySettings(ClusterChangedEvent event) {
if (!event.metaDataChanged()) {
return;
}
for (IndexMetaData indexMetaData : event.state().metaData()) {
if (!indicesService.hasIndex(indexMetaData.index())) {
// we only create / update here
continue;
}
// if the index meta data didn't change, no need check for refreshed settings
if (!event.indexMetaDataChanged(indexMetaData)) {
continue;
}
String index = indexMetaData.index();
IndexService indexService = indicesService.indexServiceSafe(index);
IndexSettingsService indexSettingsService = indexService.injector().getInstance(IndexSettingsService.class);
indexSettingsService.refreshSettings(indexMetaData.settings());
}
}
private void applyMappings(ClusterChangedEvent event) {
// go over and update mappings
for (IndexMetaData indexMetaData : event.state().metaData()) {
if (!indicesService.hasIndex(indexMetaData.index())) {
// we only create / update here
continue;
}
List<String> typesToRefresh = null;
String index = indexMetaData.index();
IndexService indexService = indicesService.indexService(index);
if (indexService == null) {
// got deleted on us, ignore (closing the node)
return;
}
MapperService mapperService = indexService.mapperService();
// first, go over and update the _default_ mapping (if exists)
if (indexMetaData.mappings().containsKey(MapperService.DEFAULT_MAPPING)) {
processMapping(index, mapperService, MapperService.DEFAULT_MAPPING, indexMetaData.mapping(MapperService.DEFAULT_MAPPING).source());
}
// go over and add the relevant mappings (or update them)
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.mappings().values()) {
MappingMetaData mappingMd = cursor.value;
String mappingType = mappingMd.type();
CompressedString mappingSource = mappingMd.source();
if (mappingType.equals(MapperService.DEFAULT_MAPPING)) { // we processed _default_ first
continue;
}
boolean requireRefresh = processMapping(index, mapperService, mappingType, mappingSource);
if (requireRefresh) {
if (typesToRefresh == null) {
typesToRefresh = Lists.newArrayList();
}
typesToRefresh.add(mappingType);
}
}
if (typesToRefresh != null) {
if (sendRefreshMapping) {
nodeMappingRefreshAction.nodeMappingRefresh(event.state(),
new NodeMappingRefreshAction.NodeMappingRefreshRequest(index, indexMetaData.uuid(),
typesToRefresh.toArray(new String[typesToRefresh.size()]), event.state().nodes().localNodeId()));
}
}
// go over and remove mappings
for (DocumentMapper documentMapper : mapperService) {
if (seenMappings.containsKey(new Tuple<String, String>(index, documentMapper.type())) && !indexMetaData.mappings().containsKey(documentMapper.type())) {
// we have it in our mappings, but not in the metadata, and we have seen it in the cluster state, remove it
mapperService.remove(documentMapper.type());
seenMappings.remove(new Tuple<String, String>(index, documentMapper.type()));
}
}
}
}
private boolean processMapping(String index, MapperService mapperService, String mappingType, CompressedString mappingSource) {
if (!seenMappings.containsKey(new Tuple<String, String>(index, mappingType))) {
seenMappings.put(new Tuple<String, String>(index, mappingType), true);
}
// refresh mapping can happen for 2 reasons. The first is less urgent, and happens when the mapping on this
// node is ahead of what there is in the cluster state (yet an update-mapping has been sent to it already,
// it just hasn't been processed yet and published). Eventually, the mappings will converge, and the refresh
// mapping sent is more of a safekeeping (assuming the update mapping failed to reach the master, ...)
// the second case is where the parsing/merging of the mapping from the metadata doesn't result in the same
// mapping, in this case, we send to the master to refresh its own version of the mappings (to conform with the
// merge version of it, which it does when refreshing the mappings), and warn log it.
boolean requiresRefresh = false;
try {
if (!mapperService.hasMapping(mappingType)) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] adding mapping [{}], source [{}]", index, mappingType, mappingSource.string());
}
// we don't apply default, since it has been applied when the mappings were parsed initially
mapperService.merge(mappingType, mappingSource, false);
if (!mapperService.documentMapper(mappingType).mappingSource().equals(mappingSource)) {
logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}", index, mappingType, mappingSource, mapperService.documentMapper(mappingType).mappingSource());
requiresRefresh = true;
}
} else {
DocumentMapper existingMapper = mapperService.documentMapper(mappingType);
if (!mappingSource.equals(existingMapper.mappingSource())) {
// mapping changed, update it
if (logger.isDebugEnabled()) {
logger.debug("[{}] updating mapping [{}], source [{}]", index, mappingType, mappingSource.string());
}
// we don't apply default, since it has been applied when the mappings were parsed initially
mapperService.merge(mappingType, mappingSource, false);
if (!mapperService.documentMapper(mappingType).mappingSource().equals(mappingSource)) {
requiresRefresh = true;
logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}", index, mappingType, mappingSource, mapperService.documentMapper(mappingType).mappingSource());
}
}
}
} catch (Throwable e) {
logger.warn("[{}] failed to add mapping [{}], source [{}]", e, index, mappingType, mappingSource);
}
return requiresRefresh;
}
private boolean aliasesChanged(ClusterChangedEvent event) {
return !event.state().metaData().aliases().equals(event.previousState().metaData().aliases()) ||
!event.state().routingTable().equals(event.previousState().routingTable());
}
private void applyAliases(ClusterChangedEvent event) {
// check if aliases changed
if (aliasesChanged(event)) {
// go over and update aliases
for (IndexMetaData indexMetaData : event.state().metaData()) {
if (!indicesService.hasIndex(indexMetaData.index())) {
// we only create / update here
continue;
}
String index = indexMetaData.index();
IndexService indexService = indicesService.indexService(index);
IndexAliasesService indexAliasesService = indexService.aliasesService();
processAliases(index, indexMetaData.aliases().values(), indexAliasesService);
// go over and remove aliases
for (IndexAlias indexAlias : indexAliasesService) {
if (!indexMetaData.aliases().containsKey(indexAlias.alias())) {
// we have it in our aliases, but not in the metadata, remove it
indexAliasesService.remove(indexAlias.alias());
}
}
}
}
}
private void processAliases(String index, ObjectContainer<AliasMetaData> aliases, IndexAliasesService indexAliasesService) {
HashMap<String, IndexAlias> newAliases = newHashMap();
for (ObjectCursor<AliasMetaData> cursor : aliases) {
AliasMetaData aliasMd = cursor.value;
String alias = aliasMd.alias();
CompressedString filter = aliasMd.filter();
try {
if (!indexAliasesService.hasAlias(alias)) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] adding alias [{}], filter [{}]", index, alias, filter);
}
newAliases.put(alias, indexAliasesService.create(alias, filter));
} else {
if ((filter == null && indexAliasesService.alias(alias).filter() != null) ||
(filter != null && !filter.equals(indexAliasesService.alias(alias).filter()))) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] updating alias [{}], filter [{}]", index, alias, filter);
}
newAliases.put(alias, indexAliasesService.create(alias, filter));
}
}
} catch (Throwable e) {
logger.warn("[{}] failed to add alias [{}], filter [{}]", e, index, alias, filter);
}
}
indexAliasesService.addAll(newAliases);
}
private void applyNewOrUpdatedShards(final ClusterChangedEvent event) throws ElasticsearchException {
if (!indicesService.changesAllowed()) {
return;
}
RoutingTable routingTable = event.state().routingTable();
RoutingNodes.RoutingNodeIterator routingNode = event.state().readOnlyRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
if (routingNode == null) {
failedShards.clear();
return;
}
DiscoveryNodes nodes = event.state().nodes();
for (final ShardRouting shardRouting : routingNode) {
final IndexService indexService = indicesService.indexService(shardRouting.index());
if (indexService == null) {
// got deleted on us, ignore
continue;
}
final IndexMetaData indexMetaData = event.state().metaData().index(shardRouting.index());
if (indexMetaData == null) {
// the index got deleted on the metadata, we will clean it later in the apply deleted method call
continue;
}
final int shardId = shardRouting.id();
if (!indexService.hasShard(shardId) && shardRouting.started()) {
if (!failedShards.containsKey(shardRouting.shardId())) {
// the master thinks we are started, but we don't have this shard at all, mark it as failed
logger.warn("[{}][{}] master [{}] marked shard as started, but shard has not been created, mark shard as failed", shardRouting.index(), shardId, nodes.masterNode());
failedShards.put(shardRouting.shardId(), new FailedShard(shardRouting.version()));
shardStateAction.shardFailed(shardRouting, indexMetaData.getUUID(),
"master " + nodes.masterNode() + " marked shard as started, but shard has not been created, mark shard as failed");
}
continue;
}
if (indexService.hasShard(shardId)) {
InternalIndexShard indexShard = (InternalIndexShard) indexService.shard(shardId);
ShardRouting currentRoutingEntry = indexShard.routingEntry();
// if the current and global routing are initializing, but are still not the same, it's a different "shard" being allocated
// for example: a shard that recovers from one node and now needs to recover to another node,
// or a replica allocated and then allocating a primary because the primary failed on another node
if (currentRoutingEntry.initializing() && shardRouting.initializing() && !currentRoutingEntry.equals(shardRouting)) {
logger.debug("[{}][{}] removing shard (different instance of it allocated on this node, current [{}], global [{}])", shardRouting.index(), shardRouting.id(), currentRoutingEntry, shardRouting);
// cancel recovery just in case we are in recovery (it's fine if we are not in recovery, it will be a no-op).
recoveryTarget.cancelRecovery(indexShard);
indexService.removeShard(shardRouting.id(), "removing shard (different instance of it allocated on this node)");
}
}
if (indexService.hasShard(shardId)) {
InternalIndexShard indexShard = (InternalIndexShard) indexService.shard(shardId);
if (!shardRouting.equals(indexShard.routingEntry())) {
indexShard.routingEntry(shardRouting);
indexService.shardInjector(shardId).getInstance(IndexShardGatewayService.class).routingStateChanged();
}
}
if (shardRouting.initializing()) {
applyInitializingShard(routingTable, nodes, indexMetaData, routingTable.index(shardRouting.index()).shard(shardRouting.id()), shardRouting);
}
}
}
private void cleanFailedShards(final ClusterChangedEvent event) {
RoutingTable routingTable = event.state().routingTable();
RoutingNodes.RoutingNodeIterator routingNode = event.state().readOnlyRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
if (routingNode == null) {
failedShards.clear();
return;
}
DiscoveryNodes nodes = event.state().nodes();
long now = System.currentTimeMillis();
String localNodeId = nodes.localNodeId();
Iterator<Map.Entry<ShardId, FailedShard>> iterator = failedShards.entrySet().iterator();
shards:
while (iterator.hasNext()) {
Map.Entry<ShardId, FailedShard> entry = iterator.next();
FailedShard failedShard = entry.getValue();
IndexRoutingTable indexRoutingTable = routingTable.index(entry.getKey().getIndex());
if (indexRoutingTable != null) {
IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(entry.getKey().id());
if (shardRoutingTable != null) {
for (ShardRouting shardRouting : shardRoutingTable.assignedShards()) {
if (localNodeId.equals(shardRouting.currentNodeId())) {
// we have a timeout here just to make sure we don't have dangling failed shards for some reason
// it's just another safety layer
if (shardRouting.version() == failedShard.version && ((now - failedShard.timestamp) < TimeValue.timeValueMinutes(60).millis())) {
// It's the same failed shard - keep it if it hasn't timed out
continue shards;
} else {
// Different version or expired, remove it
break;
}
}
}
}
}
iterator.remove();
}
}
private void applyInitializingShard(final RoutingTable routingTable, final DiscoveryNodes nodes, final IndexMetaData indexMetaData, final IndexShardRoutingTable indexShardRouting, final ShardRouting shardRouting) throws ElasticsearchException {
final IndexService indexService = indicesService.indexService(shardRouting.index());
if (indexService == null) {
// got deleted on us, ignore
return;
}
final int shardId = shardRouting.id();
if (indexService.hasShard(shardId)) {
IndexShard indexShard = indexService.shardSafe(shardId);
if (indexShard.state() == IndexShardState.STARTED || indexShard.state() == IndexShardState.POST_RECOVERY) {
// the master thinks we are initializing, but we are already started or on POST_RECOVERY and waiting
// for master to confirm a shard started message (either master failover, or a cluster event before
// we managed to tell the master we started), mark us as started
if (logger.isTraceEnabled()) {
logger.trace("{} master marked shard as initializing, but shard has state [{}], resending shard started",
indexShard.shardId(), indexShard.state());
}
shardStateAction.shardStarted(shardRouting, indexMetaData.getUUID(),
"master " + nodes.masterNode() + " marked shard as initializing, but shard state is [" + indexShard.state() + "], mark shard as started");
return;
} else {
if (indexShard.ignoreRecoveryAttempt()) {
return;
}
}
}
// if there is no shard, create it
if (!indexService.hasShard(shardId)) {
if (failedShards.containsKey(shardRouting.shardId())) {
// already tried to create this shard but it failed - ignore
logger.trace("[{}][{}] not initializing, this shards failed to recover on this node before, waiting for reassignment", shardRouting.index(), shardRouting.id());
return;
}
try {
if (logger.isDebugEnabled()) {
logger.debug("[{}][{}] creating shard", shardRouting.index(), shardId);
}
InternalIndexShard indexShard = (InternalIndexShard) indexService.createShard(shardId);
indexShard.routingEntry(shardRouting);
indexShard.engine().addFailedEngineListener(failedEngineHandler);
} catch (IndexShardAlreadyExistsException e) {
// ignore this, the method call can happen several times
} catch (Throwable e) {
logger.warn("[{}][{}] failed to create shard", e, shardRouting.index(), shardRouting.id());
try {
indexService.removeShard(shardId, "failed to create [" + ExceptionsHelper.detailedMessage(e) + "]");
} catch (IndexShardMissingException e1) {
// ignore
} catch (Throwable e1) {
logger.warn("[{}][{}] failed to remove shard after failed creation", e1, shardRouting.index(), shardRouting.id());
}
failedShards.put(shardRouting.shardId(), new FailedShard(shardRouting.version()));
shardStateAction.shardFailed(shardRouting, indexMetaData.getUUID(), "Failed to create shard, message [" + detailedMessage(e) + "]");
return;
}
}
final InternalIndexShard indexShard = (InternalIndexShard) indexService.shardSafe(shardId);
if (indexShard.ignoreRecoveryAttempt()) {
// we are already recovering (we can get to this state since the cluster event can happen several
// times while we recover)
return;
}
if (!shardRouting.primary()) {
// recovery from primary
IndexShardRoutingTable shardRoutingTable = routingTable.index(shardRouting.index()).shard(shardRouting.id());
for (ShardRouting entry : shardRoutingTable) {
if (entry.primary() && entry.started()) {
// only recover from started primary, if we can't find one, we will do it next round
final DiscoveryNode sourceNode = nodes.get(entry.currentNodeId());
try {
// we are recovering a backup from a primary, so no need to mark it as relocated
final StartRecoveryRequest request = new StartRecoveryRequest(indexShard.shardId(), sourceNode, nodes.localNode(), false, indexShard.store().list());
recoveryTarget.startRecovery(request, indexShard, new PeerRecoveryListener(request, shardRouting, indexService, indexMetaData));
} catch (Throwable e) {
handleRecoveryFailure(indexService, indexMetaData, shardRouting, true, e);
break;
}
break;
}
}
} else {
if (shardRouting.relocatingNodeId() == null) {
// we are the first primary, recover from the gateway
// if it's post-API allocation, the index should exist
boolean indexShouldExists = indexShardRouting.primaryAllocatedPostApi();
IndexShardGatewayService shardGatewayService = indexService.shardInjector(shardId).getInstance(IndexShardGatewayService.class);
shardGatewayService.recover(indexShouldExists, new IndexShardGatewayService.RecoveryListener() {
@Override
public void onRecoveryDone() {
shardStateAction.shardStarted(shardRouting, indexMetaData.getUUID(), "after recovery from gateway");
}
@Override
public void onIgnoreRecovery(String reason) {
}
@Override
public void onRecoveryFailed(IndexShardGatewayRecoveryException e) {
handleRecoveryFailure(indexService, indexMetaData, shardRouting, true, e);
}
});
} else {
// relocating primaries, recovery from the relocating shard
final DiscoveryNode sourceNode = nodes.get(shardRouting.relocatingNodeId());
try {
// we don't mark this one as relocated at the end; requests in any case are routed to both when it's relocating,
// and that way we handle the edge case where it's marked as relocated and we might need to roll it back...
final StartRecoveryRequest request = new StartRecoveryRequest(indexShard.shardId(), sourceNode, nodes.localNode(), false, indexShard.store().list());
recoveryTarget.startRecovery(request, indexShard, new PeerRecoveryListener(request, shardRouting, indexService, indexMetaData));
} catch (Throwable e) {
handleRecoveryFailure(indexService, indexMetaData, shardRouting, true, e);
}
}
}
}
private class PeerRecoveryListener implements RecoveryTarget.RecoveryListener {
private final StartRecoveryRequest request;
private final ShardRouting shardRouting;
private final IndexService indexService;
private final IndexMetaData indexMetaData;
private PeerRecoveryListener(StartRecoveryRequest request, ShardRouting shardRouting, IndexService indexService, IndexMetaData indexMetaData) {
this.request = request;
this.shardRouting = shardRouting;
this.indexService = indexService;
this.indexMetaData = indexMetaData;
}
@Override
public void onRecoveryDone() {
shardStateAction.shardStarted(shardRouting, indexMetaData.getUUID(), "after recovery (replica) from node [" + request.sourceNode() + "]");
}
@Override
public void onRetryRecovery(TimeValue retryAfter, RecoveryStatus recoveryStatus) {
recoveryTarget.retryRecovery(request, recoveryStatus, PeerRecoveryListener.this);
}
@Override
public void onIgnoreRecovery(boolean removeShard, String reason) {
if (!removeShard) {
return;
}
synchronized (mutex) {
if (indexService.hasShard(shardRouting.shardId().id())) {
if (logger.isDebugEnabled()) {
logger.debug("[{}][{}] removing shard on ignored recovery, reason [{}]", shardRouting.index(), shardRouting.shardId().id(), reason);
}
try {
indexService.removeShard(shardRouting.shardId().id(), "ignore recovery: " + reason);
} catch (IndexShardMissingException e) {
// the node got closed on us, ignore it
} catch (Throwable e1) {
logger.warn("[{}][{}] failed to delete shard after ignore recovery", e1, indexService.index().name(), shardRouting.shardId().id());
}
}
}
}
@Override
public void onRecoveryFailure(RecoveryFailedException e, boolean sendShardFailure) {
handleRecoveryFailure(indexService, indexMetaData, shardRouting, sendShardFailure, e);
}
}
private void handleRecoveryFailure(IndexService indexService, IndexMetaData indexMetaData, ShardRouting shardRouting, boolean sendShardFailure, Throwable failure) {
logger.warn("[{}][{}] failed to start shard", failure, indexService.index().name(), shardRouting.shardId().id());
synchronized (mutex) {
if (indexService.hasShard(shardRouting.shardId().id())) {
try {
indexService.removeShard(shardRouting.shardId().id(), "recovery failure [" + ExceptionsHelper.detailedMessage(failure) + "]");
} catch (IndexShardMissingException e) {
// the node got closed on us, ignore it
} catch (Throwable e1) {
logger.warn("[{}][{}] failed to delete shard after failed startup", e1, indexService.index().name(), shardRouting.shardId().id());
}
}
if (sendShardFailure) {
try {
failedShards.put(shardRouting.shardId(), new FailedShard(shardRouting.version()));
shardStateAction.shardFailed(shardRouting, indexMetaData.getUUID(), "Failed to start shard, message [" + detailedMessage(failure) + "]");
} catch (Throwable e1) {
logger.warn("[{}][{}] failed to mark shard as failed after a failed start", e1, indexService.index().name(), shardRouting.id());
}
}
}
}
private void removeIndex(String index, String reason) {
try {
indicesService.removeIndex(index, reason);
} catch (Throwable e) {
logger.warn("failed to clean index ({})", e, reason);
}
// clear seen mappings as well
for (Tuple<String, String> tuple : seenMappings.keySet()) {
if (tuple.v1().equals(index)) {
seenMappings.remove(tuple);
}
}
}
private class FailedEngineHandler implements Engine.FailedEngineListener {
@Override
public void onFailedEngine(final ShardId shardId, final Throwable failure) {
ShardRouting shardRouting = null;
final IndexService indexService = indicesService.indexService(shardId.index().name());
if (indexService != null) {
IndexShard indexShard = indexService.shard(shardId.id());
if (indexShard != null) {
shardRouting = indexShard.routingEntry();
}
}
if (shardRouting == null) {
logger.warn("[{}][{}] engine failed, but can't find index shard", shardId.index().name(), shardId.id());
return;
}
final ShardRouting fShardRouting = shardRouting;
final String indexUUID = indexService.indexUUID(); // we know indexService is not null here.
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
synchronized (mutex) {
if (indexService.hasShard(shardId.id())) {
try {
indexService.removeShard(shardId.id(), "engine failure [" + ExceptionsHelper.detailedMessage(failure) + "]");
} catch (IndexShardMissingException e) {
// the node got closed on us, ignore it
} catch (Throwable e1) {
logger.warn("[{}][{}] failed to delete shard after failed engine", e1, indexService.index().name(), shardId.id());
}
}
try {
failedShards.put(fShardRouting.shardId(), new FailedShard(fShardRouting.version()));
shardStateAction.shardFailed(fShardRouting, indexUUID, "engine failure, message [" + detailedMessage(failure) + "]");
} catch (Throwable e1) {
logger.warn("[{}][{}] failed to mark shard as failed after a failed engine", e1, indexService.index().name(), shardId.id());
}
}
}
});
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_indices_cluster_IndicesClusterStateService.java
|
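For context, a stripped-down sketch (not part of the Elasticsearch source above) of how a component can register itself as a ClusterStateListener with ClusterService, mirroring the addFirst()/remove() lifecycle used by IndicesClusterStateService; the class name and log output are illustrative only.

import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterStateListener;

public class LoggingClusterStateListener implements ClusterStateListener {
    private final ClusterService clusterService;

    public LoggingClusterStateListener(ClusterService clusterService) {
        this.clusterService = clusterService;
    }

    public void start() {
        clusterService.addFirst(this); // receive events before later-registered listeners
    }

    public void stop() {
        clusterService.remove(this);
    }

    @Override
    public void clusterChanged(ClusterChangedEvent event) {
        // react only when shard allocation actually changed
        if (event.routingTableChanged()) {
            System.out.println("routing table changed, cluster state version " + event.state().version());
        }
    }
}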