conflict_resolution (string, 27 to 16k characters)

Each record below pairs a Git merge conflict with its resolution: the hunk between `<<<<<<<` and `=======` is one side of the merge ("ours"), the hunk between `=======` and `>>>>>>>` is the other ("theirs"), and the code following `>>>>>>>` is the resolved result. A trailing `|` closes each record, and a single record may contain several conflicts.
<<<<<<<
public ScoreFunction parse(QueryShardContext context, XContentParser parser) throws IOException, QueryParsingException {
QueryParseContext parseContext = context.parseContext();
=======
public ScoreFunction parse(QueryParseContext parseContext, XContentParser parser) throws IOException, ParsingException {
>>>>>>>
public ScoreFunction parse(QueryShardContext context, XContentParser parser) throws IOException, ParsingException {
QueryParseContext parseContext = context.parseContext(); |
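This first record shows the most common resolution shape in the set: each branch changed a different aspect of the same method, and the resolution keeps both changes ("ours" contributes the QueryShardContext parameter, "theirs" the ParsingException rename). A minimal self-contained sketch of that shape; every type below is a simplified stand-in for its Elasticsearch namesake, not the real class:

```java
import java.io.IOException;

// Stand-ins for the Elasticsearch types involved in the record above.
class ParsingException extends RuntimeException {}
class QueryParseContext {}
class QueryShardContext {
    QueryParseContext parseContext() { return new QueryParseContext(); }
}
class ScoreFunction {}

class ScoreFunctionParserSketch {
    // Merged signature: parameter type from "ours", exception rename from "theirs".
    public ScoreFunction parse(QueryShardContext context) throws IOException, ParsingException {
        QueryParseContext parseContext = context.parseContext();
        return new ScoreFunction();
    }
}
```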
<<<<<<<
import com.google.common.collect.Lists;
=======
import com.google.common.collect.ImmutableList;
>>>>>>>
<<<<<<<
import java.util.Collections;
=======
import java.util.ArrayList;
>>>>>>>
import java.util.ArrayList;
import java.util.Collections; |
<<<<<<<
public static class ExtractFieldScriptPlugin extends Plugin {
public void onModule(ScriptModule module) {
module.addScriptEngine(new ScriptEngineRegistry.ScriptEngineRegistration(ExtractFieldScriptEngine.class, ExtractFieldScriptEngine.NAME, true));
=======
public static class ExtractFieldScriptPlugin extends Plugin implements ScriptPlugin {
@Override
public String name() {
return ExtractFieldScriptEngine.NAME;
}
@Override
public String description() {
return "Mock script engine for " + AvgIT.class;
}
@Override
public ScriptEngineService getScriptEngineService(Settings settings) {
return new ExtractFieldScriptEngine();
>>>>>>>
public static class ExtractFieldScriptPlugin extends Plugin implements ScriptPlugin {
@Override
public ScriptEngineService getScriptEngineService(Settings settings) {
return new ExtractFieldScriptEngine();
<<<<<<<
public static class FieldValueScriptPlugin extends Plugin {
public void onModule(ScriptModule module) {
module.addScriptEngine(new ScriptEngineRegistry.ScriptEngineRegistration(FieldValueScriptEngine.class, FieldValueScriptEngine.NAME, true));
=======
public static class FieldValueScriptPlugin extends Plugin implements ScriptPlugin {
@Override
public String name() {
return FieldValueScriptEngine.NAME;
}
@Override
public String description() {
return "Mock script engine for " + AvgIT.class;
}
@Override
public ScriptEngineService getScriptEngineService(Settings settings) {
return new FieldValueScriptEngine();
>>>>>>>
public static class FieldValueScriptPlugin extends Plugin implements ScriptPlugin {
@Override
public ScriptEngineService getScriptEngineService(Settings settings) {
return new FieldValueScriptEngine(); |
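Both plugin records in this group follow the same migration: the old onModule(ScriptModule) registration hook becomes an implementation of the ScriptPlugin interface, and the name()/description() overrides present on "theirs" are dropped in the resolution, evidently because the final API no longer requires them. A sketch with stand-in types (none of these are the real Elasticsearch SPI interfaces):

```java
// Simplified stand-ins for the plugin SPI.
class Settings {}
interface ScriptEngineService {}
interface ScriptPlugin {
    ScriptEngineService getScriptEngineService(Settings settings);
}
class Plugin {}

// Resolution shape shared by the plugin records: implement the new interface
// and override only getScriptEngineService.
class MockScriptPluginSketch extends Plugin implements ScriptPlugin {
    @Override
    public ScriptEngineService getScriptEngineService(Settings settings) {
        return new ScriptEngineService() {};
    }
}
```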
<<<<<<<
public synchronized AmazonS3 client() {
String endpoint = getDefaultEndpoint();
String account = settings.get(CLOUD_S3.KEY, settings.get(CLOUD_AWS.KEY));
String key = settings.get(CLOUD_S3.SECRET, settings.get(CLOUD_AWS.SECRET));
return getClient(endpoint, null, account, key, null, null);
}
@Override
public AmazonS3 client(String endpoint, String protocol, String region, String account, String key) {
return client(endpoint, protocol, region, account, key, null, null);
}
@Override
public synchronized AmazonS3 client(String endpoint, String protocol, String region, String account, String key, Integer maxRetries, Boolean pathStyleAccess) {
if (region != null && endpoint == null) {
endpoint = getEndpoint(region);
logger.debug("using s3 region [{}], with endpoint [{}]", region, endpoint);
} else if (endpoint == null) {
endpoint = getDefaultEndpoint();
}
if (account == null || key == null) {
account = settings.get(CLOUD_S3.KEY, settings.get(CLOUD_AWS.KEY));
key = settings.get(CLOUD_S3.SECRET, settings.get(CLOUD_AWS.SECRET));
=======
public synchronized AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries) {
if (Strings.isNullOrEmpty(endpoint)) {
// We need to set the endpoint based on the region
if (region != null) {
endpoint = getEndpoint(region);
logger.debug("using s3 region [{}], with endpoint [{}]", region, endpoint);
} else {
// No region has been set so we will use the default endpoint
endpoint = getDefaultEndpoint();
}
>>>>>>>
public synchronized AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries, Boolean pathStyleAccess) {
if (Strings.isNullOrEmpty(endpoint)) {
// We need to set the endpoint based on the region
if (region != null) {
endpoint = getEndpoint(region);
logger.debug("using s3 region [{}], with endpoint [{}]", region, endpoint);
} else {
// No region has been set so we will use the default endpoint
endpoint = getDefaultEndpoint();
}
<<<<<<<
private synchronized AmazonS3 getClient(String endpoint, String protocol, String account, String key, Integer maxRetries, Boolean pathStyleAccess) {
Tuple<String, String> clientDescriptor = new Tuple<String, String>(endpoint, account);
=======
private synchronized AmazonS3 getClient(String endpoint, Protocol protocol, String account, String key, Integer maxRetries) {
Tuple<String, String> clientDescriptor = new Tuple<>(endpoint, account);
>>>>>>>
private synchronized AmazonS3 getClient(String endpoint, Protocol protocol, String account, String key, Integer maxRetries, Boolean pathStyleAccess) {
Tuple<String, String> clientDescriptor = new Tuple<>(endpoint, account); |
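The S3 record merges three independent changes: "theirs" rewrote the null checks around Strings.isNullOrEmpty and typed the protocol as Protocol, "ours" kept the pathStyleAccess parameter, and the diamond operator (new Tuple<>) replaces the explicit type arguments. A runnable sketch of the merged endpoint-fallback logic, with local helpers standing in for the real Strings utility and endpoint lookups:

```java
// Minimal sketch of the merged endpoint resolution; the helpers below are
// stand-ins, not the actual AWS or Elasticsearch methods.
final class S3EndpointSketch {
    static boolean isNullOrEmpty(String s) { return s == null || s.isEmpty(); }
    static String getEndpoint(String region) { return "s3-" + region + ".amazonaws.com"; }
    static String getDefaultEndpoint() { return "s3.amazonaws.com"; }

    static String resolveEndpoint(String endpoint, String region) {
        if (isNullOrEmpty(endpoint)) {
            // We need to set the endpoint based on the region, falling back to
            // the default endpoint when no region has been set.
            return region != null ? getEndpoint(region) : getDefaultEndpoint();
        }
        return endpoint;
    }

    public static void main(String[] args) {
        System.out.println(resolveEndpoint(null, "eu-west-1")); // s3-eu-west-1.amazonaws.com
        System.out.println(resolveEndpoint(null, null));        // s3.amazonaws.com
    }
}
```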
<<<<<<<
import org.apache.lucene.store.OutputStreamDataOutput;
import org.elasticsearch.common.lucene.Lucene;
=======
>>>>>>>
import org.elasticsearch.common.lucene.Lucene; |
<<<<<<<
import org.elasticsearch.index.percolator.PercolatorFieldMapper;
import org.elasticsearch.index.seqno.LocalCheckpointService;
=======
>>>>>>>
import org.elasticsearch.index.seqno.LocalCheckpointService; |
<<<<<<<
AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode,
boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
return new StringTermsAggregator(name, factories, valuesSource, order, bucketCountThresholds, includeExclude,
aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData);
=======
AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, Map<String, Object> metaData) throws IOException {
final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter();
return new StringTermsAggregator(name, factories, valuesSource, order, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError, metaData);
>>>>>>>
AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode,
boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter();
return new StringTermsAggregator(name, factories, valuesSource, order, bucketCountThresholds, filter,
aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData);
<<<<<<<
AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
return new GlobalOrdinalsStringTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData);
=======
AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, Map<String, Object> metaData) throws IOException {
final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
return new GlobalOrdinalsStringTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError, metaData);
>>>>>>>
AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
return new GlobalOrdinalsStringTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData);
<<<<<<<
AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
return new GlobalOrdinalsStringTermsAggregator.WithHash(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData);
=======
AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, Map<String, Object> metaData) throws IOException {
final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
return new GlobalOrdinalsStringTermsAggregator.WithHash(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError, metaData);
>>>>>>>
AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
return new GlobalOrdinalsStringTermsAggregator.WithHash(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData); |
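All three aggregator conflicts resolve the same way: the reducers list threaded through on "ours" is kept, and the includeExclude-to-filter conversion introduced on "theirs" is adopted before constructing the aggregator. A compact sketch of that shape (all types are stand-ins):

```java
import java.util.List;

// Stand-ins for the Elasticsearch aggregation types.
class IncludeExclude {
    StringFilter convertToStringFilter() { return new StringFilter(); }
    static class StringFilter {}
}
class Reducer {}

class StringTermsAggregatorSketch {
    final IncludeExclude.StringFilter filter;
    final List<Reducer> reducers;

    StringTermsAggregatorSketch(IncludeExclude includeExclude, List<Reducer> reducers) {
        // conversion from "theirs"; null includeExclude means no filtering
        this.filter = includeExclude == null ? null : includeExclude.convertToStringFilter();
        // reducers threaded through from "ours"
        this.reducers = reducers;
    }
}
```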
<<<<<<<
import org.elasticsearch.search.searchafter.SearchAfterBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorFactory;
=======
import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
>>>>>>>
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorFactory; |
<<<<<<<
import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregationTestCase;
import org.elasticsearch.search.aggregations.bucket.ParsedSingleBucketAggregation;
=======
import org.elasticsearch.search.aggregations.InternalSingleBucketAggregationTestCase;
>>>>>>>
import org.elasticsearch.search.aggregations.InternalSingleBucketAggregationTestCase;
import org.elasticsearch.search.aggregations.bucket.ParsedSingleBucketAggregation; |
<<<<<<<
List<ZenPing.PingResponse> pingResponses = new ArrayList<>();
for (ZenPing.PingResponse pingResponse : fullPingResponses) {
DiscoveryNode node = pingResponse.node();
//TODO we should rename this and its setting, also we ignore node.ingest, but maybe it's ok here
if (masterElectionFilterClientNodes && node.masterNode() == false && node.dataNode() == false) {
// filter out nodes that don't hold data and are not master eligible
} else if (masterElectionFilterDataNodes && node.masterNode() == false && node.dataNode()) {
// filter out dedicated data nodes
} else {
pingResponses.add(pingResponse);
}
}
if (logger.isDebugEnabled()) {
StringBuilder sb = new StringBuilder();
if (pingResponses.isEmpty()) {
sb.append(" {none}");
} else {
for (ZenPing.PingResponse pingResponse : pingResponses) {
sb.append("\n\t--> ").append(pingResponse);
}
}
logger.debug("filtered ping responses: (filter_client[{}], filter_data[{}]){}", masterElectionFilterClientNodes,
masterElectionFilterDataNodes, sb);
}
=======
final List<ZenPing.PingResponse> pingResponses;
pingResponses = filterPingResponses(fullPingResponses, masterElectionIgnoreNonMasters, logger);
>>>>>>>
final List<ZenPing.PingResponse> pingResponses = filterPingResponses(fullPingResponses, masterElectionIgnoreNonMasters, logger); |
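Here the resolution adopts the extract-method refactoring from "theirs", replacing the inline client/data-node filtering loop and its debug logging with a single filterPingResponses call, and further collapses the declaration and assignment into one statement. A generic sketch of that extract-method shape; the helper below is a simplified stand-in, not the real ZenDiscovery method:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

// Generic extract-method sketch: the filtering policy moves behind one call.
final class PingFilterSketch {
    static <T> List<T> filterPingResponses(List<T> fullPingResponses, Predicate<T> keep) {
        List<T> pingResponses = new ArrayList<>();
        for (T response : fullPingResponses) {
            if (keep.test(response)) {
                pingResponses.add(response);
            }
        }
        return pingResponses;
    }
}
```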
<<<<<<<
promote = AnalyzerCaster.promoteNumeric(last.after, expression.actual, true, true);
=======
promote = AnalyzerCaster.promoteNumeric(definition, last.after, expression.actual, true);
>>>>>>>
promote = AnalyzerCaster.promoteNumeric(last.after, expression.actual, true);
<<<<<<<
promote = AnalyzerCaster.promoteNumeric(last.after, expression.actual, true, true);
=======
promote = AnalyzerCaster.promoteNumeric(definition, last.after, expression.actual, true);
>>>>>>>
promote = AnalyzerCaster.promoteNumeric(last.after, expression.actual, true);
<<<<<<<
promote = AnalyzerCaster.promoteNumeric(last.after, expression.actual, true, true);
=======
promote = AnalyzerCaster.promoteNumeric(definition, last.after, expression.actual, true);
>>>>>>>
promote = AnalyzerCaster.promoteNumeric(last.after, expression.actual, true);
<<<<<<<
promote = AnalyzerCaster.promoteNumeric(last.after, expression.actual, true, true);
=======
promote = AnalyzerCaster.promoteNumeric(definition, last.after, expression.actual, true);
>>>>>>>
promote = AnalyzerCaster.promoteNumeric(last.after, expression.actual, true);
<<<<<<<
promote = AnalyzerCaster.promoteNumeric(last.after, false, true);
=======
promote = AnalyzerCaster.promoteNumeric(definition, last.after, false);
>>>>>>>
promote = AnalyzerCaster.promoteNumeric(last.after, false);
<<<<<<<
promote = AnalyzerCaster.promoteNumeric(last.after, false, true);
=======
promote = AnalyzerCaster.promoteNumeric(definition, last.after, false);
>>>>>>>
promote = AnalyzerCaster.promoteNumeric(last.after, false);
<<<<<<<
promote = AnalyzerCaster.promoteNumeric(last.after, false, true);
=======
promote = AnalyzerCaster.promoteNumeric(definition, last.after, false);
>>>>>>>
promote = AnalyzerCaster.promoteNumeric(last.after, false);
<<<<<<<
there = AnalyzerCaster.getLegalCast(location, last.after, promote, false);
back = AnalyzerCaster.getLegalCast(location, promote, last.after, true);
=======
there = AnalyzerCaster.getLegalCast(definition, location, last.after, promote, false, false);
back = AnalyzerCaster.getLegalCast(definition, location, promote, last.after, true, false);
>>>>>>>
there = AnalyzerCaster.getLegalCast(location, last.after, promote, false, false);
back = AnalyzerCaster.getLegalCast(location, promote, last.after, true, false);
<<<<<<<
expression.write(adapter);
adapter.writeBinaryInstruction(location, promote, operation);
=======
expression.write(settings, definition, adapter);
adapter.writeBinaryInstruction(location, promote, operation);
>>>>>>>
expression.write(adapter);
adapter.writeBinaryInstruction(location, promote, operation); |
<<<<<<<
doc.seqNo().setLongValue(seqNo);
return new Engine.Index(uid, doc, seqNo, version, versionType, origin, startTime);
=======
return new Engine.Index(uid, doc, version, versionType, origin, startTime, autoGeneratedIdTimestamp, isRetry);
>>>>>>>
return new Engine.Index(uid, doc, seqNo, version, versionType, origin, startTime, autoGeneratedIdTimestamp, isRetry); |
<<<<<<<
return tags.get() == null
&& forceLogging.get() == null
&& performanceLogging.get() == null
&& performanceLogRecords.get() == null
&& aclLogging.get() == null
&& aclLogRecords.get() == null;
=======
return tags.get() == null && forceLogging.get() == null && performanceLogging.get() == null;
>>>>>>>
return tags.get() == null
&& forceLogging.get() == null
&& performanceLogging.get() == null
&& aclLogging.get() == null
&& aclLogRecords.get() == null; |
<<<<<<<
public static void parseRestSearchSource(SearchSourceBuilder source, BytesReference sourceBytes, IndicesQueriesRegistry queryRegistry,
ParseFieldMatcher parseFieldMatcher)
=======
public static SearchSourceBuilder getRestSearchSource(BytesReference sourceBytes, IndicesQueriesRegistry queryRegistry,
ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers)
>>>>>>>
public static void parseRestSearchSource(SearchSourceBuilder source, BytesReference sourceBytes, IndicesQueriesRegistry queryRegistry,
ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers)
<<<<<<<
source.parseXContent(parser, queryParseContext);
=======
return SearchSourceBuilder.parseSearchSource(parser, queryParseContext, aggParsers);
>>>>>>>
source.parseXContent(parser, queryParseContext, aggParsers); |
<<<<<<<
ElasticTestUtils.configure(
elasticsearchConfig, nodeInfo.hostname, nodeInfo.port, indicesPrefix);
return Guice.createInjector(new InMemoryModule(elasticsearchConfig));
=======
ElasticTestUtils.configure(elasticsearchConfig, container, indicesPrefix);
return Guice.createInjector(new InMemoryModule(elasticsearchConfig, notesMigration));
>>>>>>>
ElasticTestUtils.configure(elasticsearchConfig, container, indicesPrefix);
return Guice.createInjector(new InMemoryModule(elasticsearchConfig)); |
<<<<<<<
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.index.query.support.QueryInnerHits;
import org.elasticsearch.index.query.QueryParsingException;
=======
import org.elasticsearch.common.ParsingException;
>>>>>>>
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.index.query.support.QueryInnerHits;
import org.elasticsearch.common.ParsingException;
<<<<<<<
assertThat(e.getRootCause(), instanceOf(QueryShardException.class));
=======
assertThat(e.getRootCause(), instanceOf(ParsingException.class));
>>>>>>>
assertThat(e.getRootCause(), instanceOf(QueryShardException.class));
<<<<<<<
assertThat(e.getRootCause(), instanceOf(QueryShardException.class));
=======
assertThat(e.getRootCause(), instanceOf(ParsingException.class));
>>>>>>>
assertThat(e.getRootCause(), instanceOf(QueryShardException.class));
<<<<<<<
assertThat(e.getCause(), instanceOf(QueryShardException.class));
=======
assertThat(e.getCause(), instanceOf(ParsingException.class));
>>>>>>>
assertThat(e.getCause(), instanceOf(QueryShardException.class)); |
<<<<<<<
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.gateway.CorruptStateException;
import org.elasticsearch.index.IndexService;
=======
>>>>>>> |
<<<<<<<
public static class PutFieldValuesScriptPlugin extends Plugin {
public void onModule(ScriptModule module) {
module.addScriptEngine(new ScriptEngineRegistry.ScriptEngineRegistration(PutFieldValuesScriptEngine.class, PutFieldValuesScriptEngine.NAME, true));
=======
public static class PutFieldValuesScriptPlugin extends Plugin implements ScriptPlugin {
public PutFieldValuesScriptPlugin() {
}
@Override
public String name() {
return PutFieldValuesScriptEngine.NAME;
}
@Override
public String description() {
return "Mock script engine for " + UpdateIT.class;
}
@Override
public ScriptEngineService getScriptEngineService(Settings settings) {
return new PutFieldValuesScriptEngine();
>>>>>>>
public static class PutFieldValuesScriptPlugin extends Plugin implements ScriptPlugin {
@Override
public ScriptEngineService getScriptEngineService(Settings settings) {
return new PutFieldValuesScriptEngine();
<<<<<<<
public static class FieldIncrementScriptPlugin extends Plugin {
public void onModule(ScriptModule module) {
module.addScriptEngine(new ScriptEngineRegistry.ScriptEngineRegistration(FieldIncrementScriptEngine.class, FieldIncrementScriptEngine.NAME, true));
=======
public static class FieldIncrementScriptPlugin extends Plugin implements ScriptPlugin {
public FieldIncrementScriptPlugin() {
}
@Override
public String name() {
return FieldIncrementScriptEngine.NAME;
}
@Override
public String description() {
return "Mock script engine for " + UpdateIT.class;
}
@Override
public ScriptEngineService getScriptEngineService(Settings settings) {
return new FieldIncrementScriptEngine();
>>>>>>>
public static class FieldIncrementScriptPlugin extends Plugin implements ScriptPlugin {
@Override
public ScriptEngineService getScriptEngineService(Settings settings) {
return new FieldIncrementScriptEngine();
<<<<<<<
public static class ScriptedUpsertScriptPlugin extends Plugin {
public void onModule(ScriptModule module) {
module.addScriptEngine(new ScriptEngineRegistry.ScriptEngineRegistration(ScriptedUpsertScriptEngine.class, ScriptedUpsertScriptEngine.NAME, true));
=======
public static class ScriptedUpsertScriptPlugin extends Plugin implements ScriptPlugin {
public ScriptedUpsertScriptPlugin() {
}
@Override
public String name() {
return ScriptedUpsertScriptEngine.NAME;
}
@Override
public String description() {
return "Mock script engine for " + UpdateIT.class + ".testScriptedUpsert";
}
@Override
public ScriptEngineService getScriptEngineService(Settings settings) {
return new ScriptedUpsertScriptEngine();
>>>>>>>
public static class ScriptedUpsertScriptPlugin extends Plugin implements ScriptPlugin {
@Override
public ScriptEngineService getScriptEngineService(Settings settings) {
return new ScriptedUpsertScriptEngine();
<<<<<<<
public static class ExtractContextInSourceScriptPlugin extends Plugin {
public void onModule(ScriptModule module) {
module.addScriptEngine(new ScriptEngineRegistry.ScriptEngineRegistration(ExtractContextInSourceScriptEngine.class, ExtractContextInSourceScriptEngine.NAME, true));
=======
public static class ExtractContextInSourceScriptPlugin extends Plugin implements ScriptPlugin {
public ExtractContextInSourceScriptPlugin() {
}
@Override
public String name() {
return ExtractContextInSourceScriptEngine.NAME;
}
@Override
public String description() {
return "Mock script engine for " + UpdateIT.class;
}
@Override
public ScriptEngineService getScriptEngineService(Settings settings) {
return new ExtractContextInSourceScriptEngine();
>>>>>>>
public static class ExtractContextInSourceScriptPlugin extends Plugin implements ScriptPlugin {
@Override
public ScriptEngineService getScriptEngineService(Settings settings) {
return new ExtractContextInSourceScriptEngine(); |
<<<<<<<
String relocatingNodeId, RestoreSource restoreSource, long primaryTerm, boolean primary,
ShardRoutingState state, long version, UnassignedInfo unassignedInfo) {
return new ShardRouting(index, shardId, currentNodeId, relocatingNodeId, restoreSource, primaryTerm, primary, state, version, unassignedInfo, buildAllocationId(state), true, -1);
=======
String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state,
UnassignedInfo unassignedInfo) {
return newShardRouting(new Index(index, IndexMetaData.INDEX_UUID_NA_VALUE), shardId, currentNodeId, relocatingNodeId, restoreSource, primary, state, unassignedInfo);
}
public static ShardRouting newShardRouting(Index index, int shardId, String currentNodeId,
String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state,
UnassignedInfo unassignedInfo) {
return new ShardRouting(index, shardId, currentNodeId, relocatingNodeId, restoreSource, primary, state, unassignedInfo, buildAllocationId(state), true, -1);
}
public static void relocate(ShardRouting shardRouting, String relocatingNodeId, long expectedShardSize) {
shardRouting.relocate(relocatingNodeId, expectedShardSize);
>>>>>>>
String relocatingNodeId, RestoreSource restoreSource, long primaryTerm, boolean primary,
ShardRoutingState state, UnassignedInfo unassignedInfo) {
return newShardRouting(new Index(index, IndexMetaData.INDEX_UUID_NA_VALUE), shardId, currentNodeId, relocatingNodeId, restoreSource, primaryTerm, primary, state, unassignedInfo);
}
public static ShardRouting newShardRouting(Index index, int shardId, String currentNodeId,
String relocatingNodeId, RestoreSource restoreSource, long primaryTerm, boolean primary,
ShardRoutingState state, UnassignedInfo unassignedInfo) {
return new ShardRouting(index, shardId, currentNodeId, relocatingNodeId, restoreSource, primaryTerm, primary, state, unassignedInfo, buildAllocationId(state), true, -1);
}
public static void relocate(ShardRouting shardRouting, String relocatingNodeId, long expectedShardSize) {
shardRouting.relocate(relocatingNodeId, expectedShardSize); |
<<<<<<<
import com.google.common.io.Resources;
=======
>>>>>>>
<<<<<<<
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
=======
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.*;
>>>>>>>
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.*;
<<<<<<<
=======
private List<String> readMarvelHeroNames() throws IOException, URISyntaxException {
return Files.readAllLines(PathUtils.get(SuggestSearchIT.class.getResource("/config/names.txt").toURI()), StandardCharsets.UTF_8);
}
>>>>>>>
private List<String> readMarvelHeroNames() throws IOException, URISyntaxException {
return Files.readAllLines(PathUtils.get(SuggestSearchIT.class.getResource("/config/names.txt").toURI()), StandardCharsets.UTF_8);
} |
<<<<<<<
final long localCheckpoint = seqNoService().getLocalCheckpoint();
try (Translog.Snapshot snapshot = getTranslog().newSnapshotFrom(localCheckpoint + 1)) {
=======
final long localCheckpoint = seqNoService.getLocalCheckpoint();
try (Translog.Snapshot snapshot = getTranslog().newSnapshotFromMinSeqNo(localCheckpoint + 1)) {
>>>>>>>
final long localCheckpoint = seqNoService.getLocalCheckpoint();
try (Translog.Snapshot snapshot = getTranslog().newSnapshotFrom(localCheckpoint + 1)) { |
<<<<<<<
import org.elasticsearch.index.query.QueryParsingException;
import org.elasticsearch.index.query.QueryShardException;
=======
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.index.query.TestParsingException;
>>>>>>>
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.index.query.QueryShardException; |
<<<<<<<
=======
import org.apache.lucene.search.Query;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.elasticsearch.common.ParsingException;
>>>>>>>
import org.elasticsearch.common.ParsingException;
<<<<<<<
public SpanNearQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException, QueryParsingException {
=======
public Query parse(QueryParseContext parseContext) throws IOException, ParsingException {
>>>>>>>
public SpanNearQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
<<<<<<<
QueryBuilder query = parseContext.parseInnerQueryBuilder();
if (!(query instanceof SpanQueryBuilder)) {
throw new QueryParsingException(parseContext, "spanNear [clauses] must be of type span query");
=======
Query query = parseContext.parseInnerQuery();
if (!(query instanceof SpanQuery)) {
throw new ParsingException(parseContext, "spanNear [clauses] must be of type span query");
>>>>>>>
QueryBuilder query = parseContext.parseInnerQueryBuilder();
if (!(query instanceof SpanQueryBuilder)) {
throw new ParsingException(parseContext, "spanNear [clauses] must be of type span query"); |
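The merged fromXContent signature declares only IOException; ParsingException can be dropped from the throws clause because, in this codebase, it is unchecked (a RuntimeException subclass). A stand-in illustration:

```java
import java.io.IOException;

// ParsingException stands in for org.elasticsearch.common.ParsingException,
// which is a RuntimeException and therefore needs no throws declaration.
class ParsingException extends RuntimeException {}

interface SpanNearParserSketch {
    Object fromXContent(Object parseContext) throws IOException; // ParsingException omitted
}
```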
<<<<<<<
long term = randomInt(200);
ShardRouting routing = TestShardRouting.newShardRouting("foo", 1, "node_1", null, null, term, false, ShardRoutingState.INITIALIZING, 1);
=======
ShardRouting routing = TestShardRouting.newShardRouting("foo", 1, "node_1", null, null, false, ShardRoutingState.INITIALIZING);
>>>>>>>
long term = randomInt(200);
ShardRouting routing = TestShardRouting.newShardRouting("foo", 1, "node_1", null, null, term, false, ShardRoutingState.INITIALIZING);
<<<<<<<
long term = randomInt(200);
ShardRouting unassignedShard0 = TestShardRouting.newShardRouting("test", 0, null, term, false, ShardRoutingState.UNASSIGNED, 1);
ShardRouting unassignedShard1 = TestShardRouting.newShardRouting("test", 1, null, term, false, ShardRoutingState.UNASSIGNED, 1);
ShardRouting initializingShard0 = TestShardRouting.newShardRouting("test", 0, "1", term, randomBoolean(), ShardRoutingState.INITIALIZING, 1);
ShardRouting initializingShard1 = TestShardRouting.newShardRouting("test", 1, "1", term, randomBoolean(), ShardRoutingState.INITIALIZING, 1);
=======
ShardRouting unassignedShard0 = TestShardRouting.newShardRouting("test", 0, null, false, ShardRoutingState.UNASSIGNED);
ShardRouting unassignedShard1 = TestShardRouting.newShardRouting("test", 1, null, false, ShardRoutingState.UNASSIGNED);
ShardRouting initializingShard0 = TestShardRouting.newShardRouting("test", 0, "1", randomBoolean(), ShardRoutingState.INITIALIZING);
ShardRouting initializingShard1 = TestShardRouting.newShardRouting("test", 1, "1", randomBoolean(), ShardRoutingState.INITIALIZING);
>>>>>>>
long term = randomInt(200);
ShardRouting unassignedShard0 = TestShardRouting.newShardRouting("test", 0, null, term, false, ShardRoutingState.UNASSIGNED);
ShardRouting unassignedShard1 = TestShardRouting.newShardRouting("test", 1, null, term, false, ShardRoutingState.UNASSIGNED);
ShardRouting initializingShard0 = TestShardRouting.newShardRouting("test", 0, "1", term, randomBoolean(), ShardRoutingState.INITIALIZING);
ShardRouting initializingShard1 = TestShardRouting.newShardRouting("test", 1, "1", term, randomBoolean(), ShardRoutingState.INITIALIZING);
<<<<<<<
return TestShardRouting.newShardRouting(index, shard, state == ShardRoutingState.UNASSIGNED ? null : "1", randomInt(200),
state != ShardRoutingState.UNASSIGNED && randomBoolean(), state, randomInt(5));
=======
return TestShardRouting.newShardRouting(index, shard, state == ShardRoutingState.UNASSIGNED ? null : "1", state != ShardRoutingState.UNASSIGNED && randomBoolean(), state);
>>>>>>>
return TestShardRouting.newShardRouting(index, shard, state == ShardRoutingState.UNASSIGNED ? null : "1",
randomInt(200), state != ShardRoutingState.UNASSIGNED && randomBoolean(), state);
<<<<<<<
ShardRouting unassignedShard0 = TestShardRouting.newShardRouting("test", 0, null, randomInt(200), false, ShardRoutingState.UNASSIGNED, 1);
ShardRouting initializingShard0 = TestShardRouting.newShardRouting("test", 0, "node1", randomInt(200), randomBoolean(), ShardRoutingState.INITIALIZING, 1);
ShardRouting initializingShard1 = TestShardRouting.newShardRouting("test", 1, "node1", randomInt(200), randomBoolean(), ShardRoutingState.INITIALIZING, 1);
=======
ShardRouting unassignedShard0 = TestShardRouting.newShardRouting("test", 0, null, false, ShardRoutingState.UNASSIGNED);
ShardRouting initializingShard0 = TestShardRouting.newShardRouting("test", 0, "node1", randomBoolean(), ShardRoutingState.INITIALIZING);
ShardRouting initializingShard1 = TestShardRouting.newShardRouting("test", 1, "node1", randomBoolean(), ShardRoutingState.INITIALIZING);
>>>>>>>
ShardRouting unassignedShard0 = TestShardRouting.newShardRouting("test", 0, null, randomInt(200),false, ShardRoutingState.UNASSIGNED);
ShardRouting initializingShard0 = TestShardRouting.newShardRouting("test", 0, "node1", randomInt(200), randomBoolean(), ShardRoutingState.INITIALIZING);
ShardRouting initializingShard1 = TestShardRouting.newShardRouting("test", 1, "node1", randomInt(200), randomBoolean(), ShardRoutingState.INITIALIZING);
<<<<<<<
otherRouting = new ShardRouting(routing,
randomBoolean() ? routing.version() : routing.version() + 1,
randomBoolean() ? routing.primaryTerm() : routing.primaryTerm() + 1);
=======
otherRouting = new ShardRouting(routing);
>>>>>>>
otherRouting = new ShardRouting(routing, randomBoolean() ? routing.primaryTerm() : routing.primaryTerm() + 1);
<<<<<<<
otherRouting = TestShardRouting.newShardRouting(otherRouting.index() + "a", otherRouting.id(), otherRouting.currentNodeId(),
otherRouting.relocatingNodeId(), otherRouting.restoreSource(), otherRouting.primaryTerm(), otherRouting.primary(),
otherRouting.state(), otherRouting.version(), otherRouting.unassignedInfo());
=======
otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName() + "a", otherRouting.id(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(),
otherRouting.restoreSource(), otherRouting.primary(), otherRouting.state(), otherRouting.unassignedInfo());
>>>>>>>
otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName() + "a", otherRouting.id(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(),
otherRouting.restoreSource(), otherRouting.primaryTerm(), otherRouting.primary(), otherRouting.state(), otherRouting.unassignedInfo());
<<<<<<<
otherRouting = TestShardRouting.newShardRouting(otherRouting.index(), otherRouting.id() + 1, otherRouting.currentNodeId(),
otherRouting.relocatingNodeId(), otherRouting.restoreSource(), otherRouting.primaryTerm(), otherRouting.primary(),
otherRouting.state(), otherRouting.version(), otherRouting.unassignedInfo());
=======
otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id() + 1, otherRouting.currentNodeId(), otherRouting.relocatingNodeId(),
otherRouting.restoreSource(), otherRouting.primary(), otherRouting.state(), otherRouting.unassignedInfo());
>>>>>>>
otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id() + 1, otherRouting.currentNodeId(), otherRouting.relocatingNodeId(),
otherRouting.restoreSource(), otherRouting.primaryTerm(), otherRouting.primary(), otherRouting.state(), otherRouting.unassignedInfo());
<<<<<<<
otherRouting = TestShardRouting.newShardRouting(otherRouting.index(), otherRouting.id(), otherRouting.currentNodeId() == null ? "1" : otherRouting.currentNodeId() + "_1", otherRouting.relocatingNodeId(),
otherRouting.restoreSource(), otherRouting.primaryTerm(), otherRouting.primary(), otherRouting.state(),
otherRouting.version(), otherRouting.unassignedInfo());
=======
otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), otherRouting.currentNodeId() == null ? "1" : otherRouting.currentNodeId() + "_1", otherRouting.relocatingNodeId(),
otherRouting.restoreSource(), otherRouting.primary(), otherRouting.state(), otherRouting.unassignedInfo());
>>>>>>>
otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), otherRouting.currentNodeId() == null ? "1" : otherRouting.currentNodeId() + "_1", otherRouting.relocatingNodeId(),
otherRouting.restoreSource(), otherRouting.primaryTerm(), otherRouting.primary(), otherRouting.state(), otherRouting.unassignedInfo());
<<<<<<<
otherRouting.restoreSource(), otherRouting.primaryTerm(), otherRouting.primary(), otherRouting.state(),
otherRouting.version(), otherRouting.unassignedInfo());
=======
otherRouting.restoreSource(), otherRouting.primary(), otherRouting.state(), otherRouting.unassignedInfo());
>>>>>>>
otherRouting.restoreSource(), otherRouting.primaryTerm(), otherRouting.primary(), otherRouting.state(), otherRouting.unassignedInfo());
<<<<<<<
otherRouting.primaryTerm(), otherRouting.primary(), otherRouting.state(), otherRouting.version(),
otherRouting.unassignedInfo());
=======
otherRouting.primary(), otherRouting.state(), otherRouting.unassignedInfo());
>>>>>>>
otherRouting.primaryTerm(), otherRouting.primary(), otherRouting.state(), otherRouting.unassignedInfo());
<<<<<<<
otherRouting = TestShardRouting.newShardRouting(otherRouting.index(), otherRouting.id(), otherRouting.currentNodeId(),
otherRouting.relocatingNodeId(), otherRouting.restoreSource(), otherRouting.primaryTerm(),
otherRouting.primary() == false, otherRouting.state(), otherRouting.version(), otherRouting.unassignedInfo());
=======
otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(),
otherRouting.restoreSource(), otherRouting.primary() == false, otherRouting.state(), otherRouting.unassignedInfo());
>>>>>>>
otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(),
otherRouting.restoreSource(), otherRouting.primaryTerm(), otherRouting.primary() == false, otherRouting.state(), otherRouting.unassignedInfo());
<<<<<<<
otherRouting = TestShardRouting.newShardRouting(otherRouting.index(), otherRouting.id(), otherRouting.currentNodeId(),
otherRouting.relocatingNodeId(), otherRouting.restoreSource(), otherRouting.primaryTerm(), otherRouting.primary(),
newState, otherRouting.version(), unassignedInfo);
=======
otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(),
otherRouting.restoreSource(), otherRouting.primary(), newState, unassignedInfo);
>>>>>>>
otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(),
otherRouting.restoreSource(), otherRouting.primaryTerm(), otherRouting.primary(), newState, unassignedInfo);
<<<<<<<
// change version
otherRouting = new ShardRouting(otherRouting, otherRouting.version() + 1, otherRouting.primaryTerm());
}
if (randomBoolean()) {
// increase term
otherRouting = new ShardRouting(otherRouting, otherRouting.version(), otherRouting.primaryTerm() + 1);
}
if (randomBoolean()) {
=======
>>>>>>>
// increase term
otherRouting = new ShardRouting(otherRouting, otherRouting.primaryTerm() + 1);
}
if (randomBoolean()) {
<<<<<<<
otherRouting = TestShardRouting.newShardRouting(otherRouting.index(), otherRouting.id(), otherRouting.currentNodeId(),
otherRouting.relocatingNodeId(), otherRouting.restoreSource(), otherRouting.primaryTerm(), otherRouting.primary(),
otherRouting.state(), otherRouting.version(),
=======
otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(),
otherRouting.restoreSource(), otherRouting.primary(), otherRouting.state(),
>>>>>>>
otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(),
otherRouting.restoreSource(), otherRouting.primaryTerm(), otherRouting.primary(), otherRouting.state(), |
<<<<<<<
public class TopChildrenQueryParser extends BaseQueryParserTemp {
=======
@Deprecated
public class TopChildrenQueryParser implements QueryParser {
>>>>>>>
@Deprecated
public class TopChildrenQueryParser extends BaseQueryParserTemp { |
<<<<<<<
import java.io.Closeable;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Base64;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.LongSupplier;
import java.util.function.Supplier;
import java.util.function.ToLongBiFunction;
import java.util.stream.Collectors;
import java.util.stream.LongStream;
=======
>>>>>>> |
<<<<<<<
UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getSeqNo(), indexResponse.getVersion(), indexResponse.isCreated());
if (updateRequest.fields() != null && updateRequest.fields().length > 0) {
=======
UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.getResult());
if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) ||
(updateRequest.fields() != null && updateRequest.fields().length > 0)) {
>>>>>>>
UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getSeqNo(), indexResponse.getVersion(), indexResponse.getResult());
if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) ||
(updateRequest.fields() != null && updateRequest.fields().length > 0)) {
<<<<<<<
updateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getVersion(), false);
=======
updateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult());
>>>>>>>
updateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getVersion(), response.getResult()); |
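The UpdateResponse records merge two constructor changes: the sequence number added on "ours" and the Result value that replaced the old isCreated boolean on "theirs". A stand-in sketch of the merged shape (the real constructor takes several more arguments):

```java
// Simplified stand-ins; Result loosely mirrors the DocWriteResponse results.
enum Result { CREATED, UPDATED, DELETED, NOOP }

class UpdateResponseSketch {
    final long seqNo;    // from "ours"
    final long version;
    final Result result; // from "theirs", replacing the old boolean isCreated

    UpdateResponseSketch(long seqNo, long version, Result result) {
        this.seqNo = seqNo;
        this.version = version;
        this.result = result;
    }
}
```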
<<<<<<<
}, new TranslogHandler(shardId.index().getName(), logger), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(),
translogConfig, TimeValue.timeValueMinutes(5));
=======
}, new TranslogHandler(shardId.getIndexName(), logger), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5));
>>>>>>>
}, new TranslogHandler(shardId.getIndexName(), logger), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5));
<<<<<<<
assertThat(create.version(), equalTo(1l));
create = new Engine.Index(newUid("1"), doc, create.seqNo(), create.version(), create.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0);
=======
assertThat(create.version(), equalTo(1L));
create = new Engine.Index(newUid("1"), doc, create.version(), create.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0);
>>>>>>>
assertThat(create.version(), equalTo(1L));
create = new Engine.Index(newUid("1"), doc, create.seqNo(), create.version(), create.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0);
<<<<<<<
delete = new Engine.Delete("test", "1", newUid("1"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 2l, VersionType.INTERNAL, PRIMARY, 0, false);
=======
delete = new Engine.Delete("test", "1", newUid("1"), 2L, VersionType.INTERNAL, PRIMARY, 0, false);
>>>>>>>
delete = new Engine.Delete("test", "1", newUid("1"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 2L, VersionType.INTERNAL, PRIMARY, 0, false);
<<<<<<<
index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 2l, VersionType.INTERNAL, PRIMARY, 0);
=======
index = new Engine.Index(newUid("1"), doc, 2L, VersionType.INTERNAL, PRIMARY, 0);
>>>>>>>
index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 2L, VersionType.INTERNAL, PRIMARY, 0);
<<<<<<<
delete = new Engine.Delete("test", "1", newUid("1"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 2l, VersionType.INTERNAL, PRIMARY, 0, false);
=======
delete = new Engine.Delete("test", "1", newUid("1"), 2L, VersionType.INTERNAL, PRIMARY, 0, false);
>>>>>>>
delete = new Engine.Delete("test", "1", newUid("1"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 2L, VersionType.INTERNAL, PRIMARY, 0, false);
<<<<<<<
index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 2l, VersionType.INTERNAL, PRIMARY, 0);
=======
index = new Engine.Index(newUid("1"), doc, 2L, VersionType.INTERNAL, PRIMARY, 0);
>>>>>>>
index = new Engine.Index(newUid("1"), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 2L, VersionType.INTERNAL, PRIMARY, 0);
<<<<<<<
index = new Engine.Index(newUid("1"), doc, index.seqNo(), 1l, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0);
=======
index = new Engine.Index(newUid("1"), doc, 1L, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0);
>>>>>>>
index = new Engine.Index(newUid("1"), doc, index.seqNo(), 1L, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0);
<<<<<<<
index = new Engine.Index(newUid("1"), doc, index.seqNo(), 2l
=======
index = new Engine.Index(newUid("1"), doc, 2L
>>>>>>>
index = new Engine.Index(newUid("1"), doc, index.seqNo(), 2L
<<<<<<<
index = new Engine.Index(newUid("1"), doc, index.seqNo(), 1l
=======
index = new Engine.Index(newUid("1"), doc, 1L
>>>>>>>
index = new Engine.Index(newUid("1"), doc, index.seqNo(), 1L
<<<<<<<
delete = new Engine.Delete("test", "1", newUid("1"), delete.seqNo(), 3l
=======
delete = new Engine.Delete("test", "1", newUid("1"), 3L
>>>>>>>
delete = new Engine.Delete("test", "1", newUid("1"), delete.seqNo(), 3L
<<<<<<<
delete = new Engine.Delete("test", "1", newUid("1"), delete.seqNo(), 3l
=======
delete = new Engine.Delete("test", "1", newUid("1"), 3L
>>>>>>>
delete = new Engine.Delete("test", "1", newUid("1"), delete.seqNo(), 3L
<<<<<<<
index = new Engine.Index(newUid("1"), doc, index.seqNo(), 2l, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0);
=======
index = new Engine.Index(newUid("1"), doc, 2L, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0);
>>>>>>>
index = new Engine.Index(newUid("1"), doc, index.seqNo(), 2L, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0);
<<<<<<<
// #10312
public void testDeletesAloneCanTriggerRefresh() throws Exception {
try (Store store = createStore();
Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), new MergeSchedulerConfig(defaultSettings), newMergePolicy()),
false)) {
engine.config().setIndexingBufferSize(new ByteSizeValue(1, ByteSizeUnit.KB));
for (int i = 0; i < 100; i++) {
String id = Integer.toString(i);
ParsedDocument doc = testParsedDocument(id, id, "test", null, -1, -1, testDocument(), B_1, null);
engine.index(new Engine.Index(newUid(id), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime()));
}
// Force merge so we know all merges are done before we start deleting:
engine.forceMerge(true, 1, false, false, false);
Searcher s = engine.acquireSearcher("test");
final long version1 = ((DirectoryReader) s.reader()).getVersion();
s.close();
for (int i = 0; i < 100; i++) {
String id = Integer.toString(i);
engine.delete(new Engine.Delete("test", id, newUid(id), SequenceNumbersService.UNASSIGNED_SEQ_NO, 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), false));
}
// We must assertBusy because refresh due to version map being full is done in background (REFRESH) thread pool:
assertBusy(() -> {
Searcher s2 = engine.acquireSearcher("test");
long version2 = ((DirectoryReader) s2.reader()).getVersion();
s2.close();
// 100 buffered deletes will easily exceed 25% of our 1 KB indexing buffer so it should have forced a refresh:
assertThat(version2, greaterThan(version1));
});
}
}
=======
>>>>>>> |
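Beyond re-adding the seqNo arguments, these engine-test resolutions consistently adopt the uppercase long suffix from "theirs"; the lowercase l is easily confused with the digit 1:

```java
class LongLiteralSketch {
    long discouraged = 2l; // "2l" reads like "21" in many fonts
    long preferred = 2L;   // same value, unambiguous
}
```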
<<<<<<<
public QueryBuilder fromXContent(QueryParseContext parseContext) throws IOException, QueryParsingException {
=======
public Query parse(QueryParseContext parseContext) throws IOException, ParsingException {
>>>>>>>
public QueryBuilder fromXContent(QueryParseContext parseContext) throws IOException, ParsingException {
<<<<<<<
if (indices.isEmpty() == false) {
throw new QueryParsingException(parseContext, "[indices] indices or index already specified");
=======
if (indicesFound) {
throw new ParsingException(parseContext, "[indices] indices or index already specified");
>>>>>>>
if (indices.isEmpty() == false) {
throw new ParsingException(parseContext, "[indices] indices or index already specified");
<<<<<<<
if (indices.isEmpty() == false) {
throw new QueryParsingException(parseContext, "[indices] indices or index already specified");
=======
if (indicesFound) {
throw new ParsingException(parseContext, "[indices] indices or index already specified");
>>>>>>>
if (indices.isEmpty() == false) {
throw new ParsingException(parseContext, "[indices] indices or index already specified"); |
<<<<<<<
import java.io.IOException;
import java.io.PrintStream;
import java.nio.channels.ClosedByInterruptException;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.lucene.index.*;
=======
import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.index.Term;
>>>>>>>
import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.index.Term;
<<<<<<<
=======
import java.io.IOException;
import java.io.PrintStream;
import java.nio.channels.ClosedByInterruptException;
import java.nio.charset.StandardCharsets;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
>>>>>>>
import java.io.IOException;
import java.io.PrintStream;
import java.nio.channels.ClosedByInterruptException;
import java.nio.charset.StandardCharsets;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
<<<<<<<
public static final String INDEX_TRANSLOG_DISABLE_FLUSH = "index.translog.disable_flush";
public static final String INDEX_REFRESH_INTERVAL = "index.refresh_interval";
=======
/** If we see no indexing operations after this much time for a given shard, we consider that shard inactive (default: 5 minutes). */
public static final String INDEX_SHARD_INACTIVE_TIME_SETTING = "index.shard.inactive_time";
private static final String INDICES_INACTIVE_TIME_SETTING = "indices.memory.shard_inactive_time";
>>>>>>>
public static final String INDEX_REFRESH_INTERVAL = "index.refresh_interval";
<<<<<<<
public long getIndexBufferRAMBytesUsed() {
=======
public static final String INDEX_REFRESH_INTERVAL = "index.refresh_interval";
public void addShardFailureCallback(Callback<ShardFailure> onShardFailure) {
this.shardEventListener.delegates.add(onShardFailure);
}
/**
* Change the indexing and translog buffer sizes. If {@code IndexWriter} is currently using more than
* the new buffering indexing size then we do a refresh to free up the heap.
*/
public void updateBufferSize(ByteSizeValue shardIndexingBufferSize) {
final EngineConfig config = engineConfig;
final ByteSizeValue preValue = config.getIndexingBufferSize();
config.setIndexingBufferSize(shardIndexingBufferSize);
>>>>>>>
public long getIndexBufferRAMBytesUsed() {
<<<<<<<
logger.debug("shard is now inactive");
=======
updateBufferSize(IndexingMemoryController.INACTIVE_SHARD_INDEXING_BUFFER);
logger.debug("marking shard as inactive (inactive_time=[{}]) indexing wise", inactiveTime);
>>>>>>>
logger.debug("shard is now inactive"); |
<<<<<<<
AmazonS3 client(String endpoint, String region, String account, String key);
=======
AmazonS3 client(String region, String account, String key);
AmazonS3 client(String region, String account, String key, Integer maxRetries);
>>>>>>>
AmazonS3 client(String endpoint, String region, String account, String key);
AmazonS3 client(String endpoint, String region, String account, String key, Integer maxRetries); |
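The interface record merges two overload sets: "ours" added an endpoint parameter to the existing method, "theirs" added a maxRetries overload, and the resolution carries both through. A sketch with AmazonS3 replaced by a placeholder return type:

```java
// Stand-in for the merged AwsS3Service surface; Object replaces AmazonS3 here.
interface S3ClientFactorySketch {
    Object client(String endpoint, String region, String account, String key);
    Object client(String endpoint, String region, String account, String key, Integer maxRetries);
}
```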
<<<<<<<
import org.elasticsearch.index.seqno.SequenceNumbersService;
=======
import org.elasticsearch.index.IndexSettings;
>>>>>>>
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.seqno.SequenceNumbersService;
<<<<<<<
private long seqNo;
=======
private boolean forcedRefresh;
>>>>>>>
private long seqNo;
private boolean forcedRefresh;
<<<<<<<
/**
* Returns the sequence number assigned for this change. Returns {@link SequenceNumbersService#UNASSIGNED_SEQ_NO} if the operation
* wasn't performed (i.e., an update operation that resulted in a NOOP).
*/
public long getSeqNo() {
return seqNo;
}
=======
/**
* Did this request force a refresh? Requests that set {@link WriteRequest#setRefreshPolicy(RefreshPolicy)} to
* {@link RefreshPolicy#IMMEDIATE} will always return true for this. Requests that set it to {@link RefreshPolicy#WAIT_UNTIL} will
* only return true here if they run out of refresh listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}).
*/
public boolean forcedRefresh() {
return forcedRefresh;
}
@Override
public void setForcedRefresh(boolean forcedRefresh) {
this.forcedRefresh = forcedRefresh;
}
>>>>>>>
/**
* Returns the sequence number assigned for this change. Returns {@link SequenceNumbersService#UNASSIGNED_SEQ_NO} if the operation
* wasn't performed (i.e., an update operation that resulted in a NOOP).
*/
public long getSeqNo() {
return seqNo;
}
/**
* Did this request force a refresh? Requests that set {@link WriteRequest#setRefreshPolicy(RefreshPolicy)} to
* {@link RefreshPolicy#IMMEDIATE} will always return true for this. Requests that set it to {@link RefreshPolicy#WAIT_UNTIL} will
* only return true here if they run out of refresh listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}).
*/
public boolean forcedRefresh() {
return forcedRefresh;
}
@Override
public void setForcedRefresh(boolean forcedRefresh) {
this.forcedRefresh = forcedRefresh;
}
<<<<<<<
seqNo = in.readZLong();
=======
forcedRefresh = in.readBoolean();
>>>>>>>
seqNo = in.readZLong();
forcedRefresh = in.readBoolean();
<<<<<<<
out.writeZLong(seqNo);
=======
out.writeBoolean(forcedRefresh);
>>>>>>>
out.writeZLong(seqNo);
out.writeBoolean(forcedRefresh);
<<<<<<<
.field(Fields._TYPE, type)
.field(Fields._ID, id)
.field(Fields._VERSION, version);
=======
.field(Fields._TYPE, type)
.field(Fields._ID, id)
.field(Fields._VERSION, version)
.field("forced_refresh", forcedRefresh);
>>>>>>>
.field(Fields._TYPE, type)
.field(Fields._ID, id)
.field(Fields._VERSION, version)
.field("forced_refresh", forcedRefresh); |
<<<<<<<
expression.analyze(variables);
arguments.set(argument, expression.cast(variables));
=======
expression.internal = true;
expression.analyze(settings, definition, variables);
arguments.set(argument, expression.cast(settings, definition, variables));
>>>>>>>
expression.internal = true;
expression.analyze(variables);
arguments.set(argument, expression.cast(variables)); |
<<<<<<<
import org.elasticsearch.xpack.scheduler.SchedulerEngine;
import org.elasticsearch.xpack.watcher.support.clock.Clock;
=======
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.xpack.support.clock.Clock;
>>>>>>>
import org.elasticsearch.xpack.scheduler.SchedulerEngine;
import org.elasticsearch.xpack.support.clock.Clock; |
<<<<<<<
// test if we don't write it if the shard is not active
ShardRouting inactiveRouting = TestShardRouting.newShardRouting(shard.shardRouting.index(), shard.shardRouting.shardId().id(),
shard.shardRouting.currentNodeId(), null, null, shard.shardRouting.primaryTerm(), true, ShardRoutingState.INITIALIZING,
shard.shardRouting.version() + 1);
=======
// test if we still write it even if the shard is not active
ShardRouting inactiveRouting = TestShardRouting.newShardRouting(shard.shardRouting.index(), shard.shardRouting.shardId().id(), shard.shardRouting.currentNodeId(), null, null, true, ShardRoutingState.INITIALIZING);
>>>>>>>
// test if we still write it even if the shard is not active
ShardRouting inactiveRouting = TestShardRouting.newShardRouting(shard.shardRouting.index(), shard.shardRouting.shardId().id(),
shard.shardRouting.currentNodeId(), null, null, shard.shardRouting.primaryTerm(), true, ShardRoutingState.INITIALIZING);
<<<<<<<
routing = TestShardRouting.newShardRouting(shard.shardId.index().getName(), shard.shardId.id(), routing.currentNodeId(), null,
shard.shardRouting.primaryTerm(), routing.primary(), ShardRoutingState.INITIALIZING, shard.shardRouting.allocationId(),
shard.shardRouting.version() + 1);
=======
routing = TestShardRouting.newShardRouting(shard.shardId.getIndex(), shard.shardId.id(), routing.currentNodeId(), null, routing.primary(), ShardRoutingState.INITIALIZING, shard.shardRouting.allocationId());
>>>>>>>
routing = TestShardRouting.newShardRouting(shard.shardId.getIndex(), shard.shardId.id(), routing.currentNodeId(), null,
shard.shardRouting.primaryTerm(), routing.primary(), ShardRoutingState.INITIALIZING, shard.shardRouting.allocationId());
<<<<<<<
indexShard.incrementOperationCounterOnPrimary();
=======
indexShard.acquireReplicaOperationLock();
>>>>>>>
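// the replica lock API takes the requesting operation's primary term; here it is read off the shard's own routing entry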
indexShard.acquireReplicaOperationLock(indexShard.routingEntry().primaryTerm());
<<<<<<<
final long primaryTerm = indexShard.shardRouting.primaryTerm();
// ugly hack to allow the shard to operate both as a replica and a primary
ShardRouting temp = indexShard.routingEntry();
ShardRouting newShardRouting = TestShardRouting.newShardRouting(temp.getIndex(), temp.id(), temp.currentNodeId(), "BLA!", temp.primaryTerm(),
temp.primary(), ShardRoutingState.INITIALIZING, AllocationId.newRelocation(temp.allocationId()), temp.version() + 1);
indexShard.updateRoutingEntry(newShardRouting, false);
assertEquals(0, indexShard.getOperationsCount());
if (randomBoolean()) {
indexShard.incrementOperationCounterOnPrimary();
} else {
indexShard.incrementOperationCounterOnReplica(primaryTerm);
}
assertEquals(1, indexShard.getOperationsCount());
if (randomBoolean()) {
indexShard.incrementOperationCounterOnPrimary();
} else {
indexShard.incrementOperationCounterOnReplica(primaryTerm);
}
assertEquals(2, indexShard.getOperationsCount());
try {
indexShard.incrementOperationCounterOnReplica(primaryTerm - 1);
fail("you can not increment the operation counter with an older primary term");
} catch (IllegalIndexShardStateException e) {
assertThat(e.getMessage(), containsString("operation term"));
assertThat(e.getMessage(), containsString("too old"));
}
// but you can increment with a newer one..
indexShard.incrementOperationCounterOnReplica(primaryTerm + 1 + randomInt(20));
indexShard.decrementOperationCounter();
indexShard.decrementOperationCounter();
indexShard.decrementOperationCounter();
assertEquals(0, indexShard.getOperationsCount());
=======
assertEquals(0, indexShard.getActiveOperationsCount());
Releasable operation1 = indexShard.acquirePrimaryOperationLock();
assertEquals(1, indexShard.getActiveOperationsCount());
Releasable operation2 = indexShard.acquirePrimaryOperationLock();
assertEquals(2, indexShard.getActiveOperationsCount());
Releasables.close(operation1, operation2);
assertEquals(0, indexShard.getActiveOperationsCount());
>>>>>>>
final long primaryTerm = indexShard.shardRouting.primaryTerm();
// ugly hack to allow the shard to operate both as a replica and a primary
ShardRouting temp = indexShard.routingEntry();
ShardRouting newShardRouting = TestShardRouting.newShardRouting(temp.index(), temp.id(), temp.currentNodeId(), "BLA!", temp.primaryTerm(),
temp.primary(), ShardRoutingState.INITIALIZING, AllocationId.newRelocation(temp.allocationId()));
indexShard.updateRoutingEntry(newShardRouting, false);
assertEquals(0, indexShard.getActiveOperationsCount());
Releasable operation1;
if (randomBoolean()) {
operation1 = indexShard.acquirePrimaryOperationLock();
} else {
operation1 = indexShard.acquireReplicaOperationLock(primaryTerm);
}
assertEquals(1, indexShard.getActiveOperationsCount());
Releasable operation2;
if (randomBoolean()) {
operation2 = indexShard.acquirePrimaryOperationLock();
} else {
operation2 = indexShard.acquireReplicaOperationLock(primaryTerm);
}
assertEquals(2, indexShard.getActiveOperationsCount());
try {
indexShard.acquireReplicaOperationLock(primaryTerm - 1);
fail("you can not increment the operation counter with an older primary term");
} catch (IllegalIndexShardStateException e) {
assertThat(e.getMessage(), containsString("operation term"));
assertThat(e.getMessage(), containsString("too old"));
}
// but you can increment with a newer one..
indexShard.acquireReplicaOperationLock(primaryTerm + 1 + randomInt(20)).close();
Releasables.close(operation1, operation2);
assertEquals(0, indexShard.getActiveOperationsCount());
<<<<<<<
ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(), new CommonStats(shard, new CommonStatsFlags()),
shard.commitStats(), shard.seqNoStats());
=======
ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), shard, new CommonStatsFlags()), shard.commitStats());
>>>>>>>
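// ShardStats is built against the node-level indices query cache and additionally reports the shard's sequence-number stats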
ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), shard, new CommonStatsFlags()), shard.commitStats(), shard.seqNoStats());
<<<<<<<
public void testFailIfIndexNotPresentInRecoverFromStore() throws Throwable {
=======
public void testRecoverFromCleanStore() throws IOException {
createIndex("test");
ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test");
final IndexShard shard = test.getShardOrNull(0);
client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get();
if (randomBoolean()) {
client().admin().indices().prepareFlush().get();
}
ShardRouting routing = new ShardRouting(shard.routingEntry());
test.removeShard(0, "b/c simon says so");
ShardRoutingHelper.reinit(routing, UnassignedInfo.Reason.INDEX_CREATED);
IndexShard newShard = test.createShard(routing);
newShard.updateRoutingEntry(routing, false);
DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, Version.CURRENT);
newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.STORE, localNode,
localNode));
assertTrue(newShard.recoverFromStore(localNode));
assertEquals(0, newShard.recoveryState().getTranslog().recoveredOperations());
assertEquals(0, newShard.recoveryState().getTranslog().totalOperations());
assertEquals(0, newShard.recoveryState().getTranslog().totalOperationsOnStart());
assertEquals(100.0f, newShard.recoveryState().getTranslog().recoveredPercent(), 0.01f);
routing = new ShardRouting(routing);
ShardRoutingHelper.moveToStarted(routing);
newShard.updateRoutingEntry(routing, true);
SearchResponse response = client().prepareSearch().get();
assertHitCount(response, 0);
}
public void testFailIfIndexNotPresentInRecoverFromStore() throws IOException {
>>>>>>>
public void testRecoverFromCleanStore() throws IOException {
createIndex("test");
ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test");
final IndexShard shard = test.getShardOrNull(0);
client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get();
if (randomBoolean()) {
client().admin().indices().prepareFlush().get();
}
ShardRouting routing = new ShardRouting(shard.routingEntry());
test.removeShard(0, "b/c simon says so");
ShardRoutingHelper.reinit(routing, UnassignedInfo.Reason.INDEX_CREATED);
IndexShard newShard = test.createShard(routing);
newShard.updateRoutingEntry(routing, false);
DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, Version.CURRENT);
newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.STORE, localNode,
localNode));
assertTrue(newShard.recoverFromStore(localNode));
assertEquals(0, newShard.recoveryState().getTranslog().recoveredOperations());
assertEquals(0, newShard.recoveryState().getTranslog().totalOperations());
assertEquals(0, newShard.recoveryState().getTranslog().totalOperationsOnStart());
assertEquals(100.0f, newShard.recoveryState().getTranslog().recoveredPercent(), 0.01f);
routing = new ShardRouting(routing);
ShardRoutingHelper.moveToStarted(routing);
newShard.updateRoutingEntry(routing, true);
SearchResponse response = client().prepareSearch().get();
assertHitCount(response, 0);
}
public void testFailIfIndexNotPresentInRecoverFromStore() throws Exception { |
<<<<<<<
import java.sql.Timestamp;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
=======
import org.eclipse.jgit.lib.Config;
import org.eclipse.jgit.storage.file.FileBasedConfig;
import org.eclipse.jgit.util.FS;
import org.eclipse.jgit.util.SystemReader;
>>>>>>>
import java.sql.Timestamp;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.eclipse.jgit.lib.Config;
import org.eclipse.jgit.storage.file.FileBasedConfig;
import org.eclipse.jgit.util.FS;
import org.eclipse.jgit.util.SystemReader;
<<<<<<<
DateTimeUtils.setCurrentMillisProvider(
new MillisProvider() {
@Override
public long getMillis() {
return clockMs.getAndAdd(clockStepMs);
}
});
}
/**
* Set the clock to a specific timestamp.
*
* @param ts time to set
*/
public static synchronized void setClock(Timestamp ts) {
checkState(clockMs != null, "call resetWithClockStep first");
clockMs.set(ts.getTime());
}
/**
* Increment the clock once by a given amount.
*
* @param clockStep amount to increment clock by.
* @param clockStepUnit time unit for {@code clockStep}.
*/
public static synchronized void incrementClock(long clockStep, TimeUnit clockStepUnit) {
checkState(clockMs != null, "call resetWithClockStep first");
clockMs.addAndGet(clockStepUnit.toMillis(clockStep));
=======
DateTimeUtils.setCurrentMillisProvider(new MillisProvider() {
@Override
public long getMillis() {
return clockMs.getAndAdd(clockStepMs);
}
});
SystemReader.setInstance(null);
final SystemReader defaultReader = SystemReader.getInstance();
SystemReader r = new SystemReader() {
@Override
public String getHostname() {
return defaultReader.getHostname();
}
@Override
public String getenv(String variable) {
return defaultReader.getenv(variable);
}
@Override
public String getProperty(String key) {
return defaultReader.getProperty(key);
}
@Override
public FileBasedConfig openUserConfig(Config parent, FS fs) {
return defaultReader.openUserConfig(parent, fs);
}
@Override
public FileBasedConfig openSystemConfig(Config parent, FS fs) {
return defaultReader.openSystemConfig(parent, fs);
}
@Override
public long getCurrentTime() {
return clockMs.getAndAdd(clockStepMs);
}
@Override
public int getTimezone(long when) {
return defaultReader.getTimezone(when);
}
};
SystemReader.setInstance(r);
>>>>>>>
DateTimeUtils.setCurrentMillisProvider(
new MillisProvider() {
@Override
public long getMillis() {
return clockMs.getAndAdd(clockStepMs);
}
});
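// also route JGit's clock through the same stepping millis source, so Git commit timestamps advance in lockstep with Joda time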
SystemReader.setInstance(null);
final SystemReader defaultReader = SystemReader.getInstance();
SystemReader r =
new SystemReader() {
@Override
public String getHostname() {
return defaultReader.getHostname();
}
@Override
public String getenv(String variable) {
return defaultReader.getenv(variable);
}
@Override
public String getProperty(String key) {
return defaultReader.getProperty(key);
}
@Override
public FileBasedConfig openUserConfig(Config parent, FS fs) {
return defaultReader.openUserConfig(parent, fs);
}
@Override
public FileBasedConfig openSystemConfig(Config parent, FS fs) {
return defaultReader.openSystemConfig(parent, fs);
}
@Override
public long getCurrentTime() {
return clockMs.getAndAdd(clockStepMs);
}
@Override
public int getTimezone(long when) {
return defaultReader.getTimezone(when);
}
};
SystemReader.setInstance(r);
}
/**
* Set the clock to a specific timestamp.
*
* @param ts time to set
*/
public static synchronized void setClock(Timestamp ts) {
checkState(clockMs != null, "call resetWithClockStep first");
clockMs.set(ts.getTime());
}
/**
* Increment the clock once by a given amount.
*
* @param clockStep amount to increment clock by.
* @param clockStepUnit time unit for {@code clockStep}.
*/
public static synchronized void incrementClock(long clockStep, TimeUnit clockStepUnit) {
checkState(clockMs != null, "call resetWithClockStep first");
clockMs.addAndGet(clockStepUnit.toMillis(clockStep)); |
<<<<<<<
this.globalCheckpointSyncer = globalCheckpointSyncer;
this.indexAnalyzers = registry.build(indexSettings);
=======
>>>>>>>
this.globalCheckpointSyncer = globalCheckpointSyncer;
<<<<<<<
IOUtils.close(bitsetFilterCache, indexCache, indexFieldData, indexAnalyzers, refreshTask, fsyncTask, globalCheckpointTask);
=======
IOUtils.close(bitsetFilterCache, indexCache, indexFieldData, mapperService, refreshTask, fsyncTask);
>>>>>>>
IOUtils.close(bitsetFilterCache, indexCache, indexFieldData, mapperService, refreshTask, fsyncTask, globalCheckpointTask); |
<<<<<<<
private TaskThunk(CommandRunnable thunk) {
maskSensitiveParameters();
=======
private TaskThunk(final CommandRunnable thunk) {
>>>>>>>
private TaskThunk(CommandRunnable thunk) { |
<<<<<<<
@Source("listAdd.png")
public ImageResource listAdd();
@Source("dashboard.png")
public ImageResource dashboard();
=======
>>>>>>>
@Source("listAdd.png")
public ImageResource listAdd(); |
<<<<<<<
public void onModule(ScriptModule module) {
module.addScriptEngine(new ScriptEngineRegistry.ScriptEngineRegistration(JavaScriptScriptEngineService.class, JavaScriptScriptEngineService.NAME));
=======
@Override
public String name() {
return "lang-javascript";
}
@Override
public String description() {
return "JavaScript plugin allowing to add javascript scripting support";
}
@Override
public ScriptEngineService getScriptEngineService(Settings settings) {
return new JavaScriptScriptEngineService(settings);
>>>>>>>
@Override
public ScriptEngineService getScriptEngineService(Settings settings) {
return new JavaScriptScriptEngineService(settings); |
<<<<<<<
clearRollupState();
clearILMState();
=======
>>>>>>>
clearILMState();
<<<<<<<
protected boolean isRollupTest() {
String testName = getTestName();
return testName != null && (testName.contains("=rollup/") || testName.contains("=rollup\\"));
}
protected boolean isILMTest() {
String testName = getTestName();
return testName != null && (testName.contains("=ilm/") || testName.contains("=ilm\\"))
|| (testName.contains("/ilm/") || testName.contains("\\ilm\\"));
}
=======
>>>>>>>
protected boolean isILMTest() {
String testName = getTestName();
return testName != null && (testName.contains("=ilm/") || testName.contains("=ilm\\"))
|| (testName.contains("/ilm/") || testName.contains("\\ilm\\"));
} |
<<<<<<<
/**
* Create a new SearchSourceBuilder with attributes set by an xContent.
*/
public SearchSourceBuilder fromXContent(XContentParser parser, QueryParseContext context) throws IOException {
=======
public SearchSourceBuilder fromXContent(XContentParser parser, QueryParseContext context, AggregatorParsers aggParsers)
throws IOException {
>>>>>>>
/**
* Create a new SearchSourceBuilder with attributes set by an xContent.
*/
public SearchSourceBuilder fromXContent(XContentParser parser, QueryParseContext context, AggregatorParsers aggParsers)
throws IOException {
<<<<<<<
aggregations = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
currentFieldName = parser.currentName();
token = parser.nextToken();
if (token == XContentParser.Token.START_OBJECT) {
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder();
xContentBuilder.startObject();
xContentBuilder.field(currentFieldName);
xContentBuilder.copyCurrentStructure(parser);
xContentBuilder.endObject();
aggregations.add(xContentBuilder.bytes());
} else {
throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
parser.getTokenLocation());
}
}
=======
builder.aggregations = aggParsers.parseAggregators(parser, context);
>>>>>>>
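// sub-aggregation parsing is delegated to AggregatorParsers rather than copying each sub-tree around as raw XContent bytes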
aggregations = aggParsers.parseAggregators(parser, context); |
<<<<<<<
import org.elasticsearch.common.network.NetworkModule;
=======
import org.elasticsearch.common.settings.ClusterSettings;
>>>>>>>
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.ClusterSettings; |
<<<<<<<
assertCompileAccepted(lang, script, scriptType, scriptContext);
=======
assertCompileAccepted(lang, script, scriptType, scriptContext, contextAndHeaders);
>>>>>>>
assertCompileAccepted(lang, script, scriptType, scriptContext);
<<<<<<<
assertCompileRejected(lang, script, scriptType, scriptContext);
=======
assertCompileRejected(lang, script, scriptType, scriptContext, contextAndHeaders);
>>>>>>>
assertCompileRejected(lang, script, scriptType, scriptContext);
<<<<<<<
if (scriptEngineService.sandboxed()) {
assertCompileAccepted(lang, script, scriptType, scriptContext);
=======
if (scriptEngineService.isSandboxed()) {
assertCompileAccepted(lang, script, scriptType, scriptContext, contextAndHeaders);
>>>>>>>
if (scriptEngineService.isSandboxed()) {
assertCompileAccepted(lang, script, scriptType, scriptContext);
<<<<<<<
assertCompileRejected(lang, script, scriptType, scriptContext);
=======
assertCompileRejected(lang, script, scriptType, scriptContext, contextAndHeaders);
>>>>>>>
assertCompileRejected(lang, script, scriptType, scriptContext);
<<<<<<<
private void assertCompileAccepted(String lang, String script, ScriptType scriptType, ScriptContext scriptContext) {
assertThat(scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, Collections.emptyMap()), notNullValue());
=======
private void assertCompileAccepted(String lang, String script, ScriptType scriptType, ScriptContext scriptContext, HasContextAndHeaders contextAndHeaders) {
assertThat(scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, contextAndHeaders, Collections.emptyMap()), notNullValue());
>>>>>>>
private void assertCompileAccepted(String lang, String script, ScriptType scriptType, ScriptContext scriptContext) {
assertThat(scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, Collections.emptyMap()), notNullValue()); |
<<<<<<<
import org.elasticsearch.license.plugin.core.XPackLicenseState;
=======
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.xpack.security.SecurityLicenseState;
>>>>>>>
import org.elasticsearch.license.plugin.core.XPackLicenseState;
import org.elasticsearch.script.ScriptService; |
<<<<<<<
public synchronized AmazonS3 client() {
return cachedWrapper(super.client());
}
@Override
public synchronized AmazonS3 client(String endpoint, String protocol, String region, String account, String key) {
return cachedWrapper(super.client(endpoint, protocol, region, account, key));
}
@Override
public synchronized AmazonS3 client(String endpoint, String protocol, String region, String account, String key, Integer maxRetries, Boolean pathStyleAccess) {
return cachedWrapper(super.client(endpoint, protocol, region, account, key, maxRetries, pathStyleAccess));
=======
public synchronized AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries) {
return cachedWrapper(super.client(endpoint, protocol, region, account, key, maxRetries));
>>>>>>>
public synchronized AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries, Boolean pathStyleAccess) {
return cachedWrapper(super.client(endpoint, protocol, region, account, key, maxRetries, pathStyleAccess)); |
<<<<<<<
public void testGetEngineFactory() throws IOException {
final IndicesService indicesService = getIndicesService();
final Boolean[] values = new Boolean[] { true, false, null };
for (final Boolean value : values) {
final String indexName = "foo-" + value;
final Index index = new Index(indexName, UUIDs.randomBase64UUID());
final Settings.Builder builder = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID());
if (value != null) {
builder.put(FooEnginePlugin.FOO_INDEX_SETTING.getKey(), value);
}
final IndexMetaData indexMetaData = new IndexMetaData.Builder(index.getName())
.settings(builder.build())
.numberOfShards(1)
.numberOfReplicas(0)
.build();
final IndexService indexService = indicesService.createIndex(indexMetaData, Collections.emptyList());
if (value != null && value) {
assertThat(indexService.getEngineFactory(), instanceOf(FooEnginePlugin.FooEngineFactory.class));
} else {
assertThat(indexService.getEngineFactory(), instanceOf(InternalEngineFactory.class));
}
}
}
public void testConflictingEngineFactories() throws IOException {
final String indexName = "foobar";
final Index index = new Index(indexName, UUIDs.randomBase64UUID());
final Settings settings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID())
.put(FooEnginePlugin.FOO_INDEX_SETTING.getKey(), true)
.put(BarEnginePlugin.BAR_INDEX_SETTING.getKey(), true)
.build();
final IndexMetaData indexMetaData = new IndexMetaData.Builder(index.getName())
.settings(settings)
.numberOfShards(1)
.numberOfReplicas(0)
.build();
final IndicesService indicesService = getIndicesService();
final IllegalStateException e =
expectThrows(IllegalStateException.class, () -> indicesService.createIndex(indexMetaData, Collections.emptyList()));
final String pattern =
".*multiple plugins provided engine factories for \\[foobar/.*\\]: "
+ "\\[.*FooEnginePlugin/.*FooEngineFactory\\],\\[.*BarEnginePlugin/.*BarEngineFactory\\].*";
assertThat(e, hasToString(new RegexMatcher(pattern)));
}
=======
public void testIsMetaDataField() {
IndicesService indicesService = getIndicesService();
assertFalse(indicesService.isMetaDataField(randomAlphaOfLengthBetween(10, 15)));
for (String builtIn : IndicesModule.getBuiltInMetaDataFields()) {
assertTrue(indicesService.isMetaDataField(builtIn));
}
}
>>>>>>>
public void testIsMetaDataField() {
IndicesService indicesService = getIndicesService();
assertFalse(indicesService.isMetaDataField(randomAlphaOfLengthBetween(10, 15)));
for (String builtIn : IndicesModule.getBuiltInMetaDataFields()) {
assertTrue(indicesService.isMetaDataField(builtIn));
}
}
public void testGetEngineFactory() throws IOException {
final IndicesService indicesService = getIndicesService();
final Boolean[] values = new Boolean[] { true, false, null };
for (final Boolean value : values) {
final String indexName = "foo-" + value;
final Index index = new Index(indexName, UUIDs.randomBase64UUID());
final Settings.Builder builder = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID());
if (value != null) {
builder.put(FooEnginePlugin.FOO_INDEX_SETTING.getKey(), value);
}
final IndexMetaData indexMetaData = new IndexMetaData.Builder(index.getName())
.settings(builder.build())
.numberOfShards(1)
.numberOfReplicas(0)
.build();
final IndexService indexService = indicesService.createIndex(indexMetaData, Collections.emptyList());
if (value != null && value) {
assertThat(indexService.getEngineFactory(), instanceOf(FooEnginePlugin.FooEngineFactory.class));
} else {
assertThat(indexService.getEngineFactory(), instanceOf(InternalEngineFactory.class));
}
}
}
public void testConflictingEngineFactories() throws IOException {
final String indexName = "foobar";
final Index index = new Index(indexName, UUIDs.randomBase64UUID());
final Settings settings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID())
.put(FooEnginePlugin.FOO_INDEX_SETTING.getKey(), true)
.put(BarEnginePlugin.BAR_INDEX_SETTING.getKey(), true)
.build();
final IndexMetaData indexMetaData = new IndexMetaData.Builder(index.getName())
.settings(settings)
.numberOfShards(1)
.numberOfReplicas(0)
.build();
final IndicesService indicesService = getIndicesService();
final IllegalStateException e =
expectThrows(IllegalStateException.class, () -> indicesService.createIndex(indexMetaData, Collections.emptyList()));
final String pattern =
".*multiple plugins provided engine factories for \\[foobar/.*\\]: "
+ "\\[.*FooEnginePlugin/.*FooEngineFactory\\],\\[.*BarEnginePlugin/.*BarEngineFactory\\].*";
assertThat(e, hasToString(new RegexMatcher(pattern)));
} |
<<<<<<<
new EnvironmentModule(new Environment(settings)), settingsModule,
new ThreadPoolModule(threadPool),
scriptModule, new IndicesModule(namedWriteableRegistry) {
=======
new EnvironmentModule(new Environment(settings), threadPool), settingsModule,
scriptModule, new IndicesModule() {
>>>>>>>
new EnvironmentModule(new Environment(settings), threadPool), settingsModule,
scriptModule, new IndicesModule(namedWriteableRegistry) { |
<<<<<<<
if (aggFactory != null) {
throw new SearchParseException(context, "Found two aggregation type definitions in [" + aggregationName + "]: ["
+ aggFactory.type + "] and [" + fieldName + "]");
=======
if (factory != null) {
throw new SearchParseException(context, "Found two aggregation type definitions in [" + aggregationName + "]: ["
+ factory.type + "] and [" + fieldName + "]", parser.getTokenLocation());
>>>>>>>
if (aggFactory != null) {
throw new SearchParseException(context, "Found two aggregation type definitions in [" + aggregationName + "]: ["
+ aggFactory.type + "] and [" + fieldName + "]", parser.getTokenLocation());
<<<<<<<
Reducer.Parser reducerParser = reducer(fieldName);
if (reducerParser == null) {
throw new SearchParseException(context, "Could not find aggregator type [" + fieldName + "] in ["
+ aggregationName + "]");
} else {
reducerFactory = reducerParser.parse(aggregationName, parser, context);
}
} else {
aggFactory = aggregatorParser.parse(aggregationName, parser, context);
=======
throw new SearchParseException(context, "Could not find aggregator type [" + fieldName + "] in [" + aggregationName
+ "]", parser.getTokenLocation());
>>>>>>>
Reducer.Parser reducerParser = reducer(fieldName);
if (reducerParser == null) {
throw new SearchParseException(context, "Could not find aggregator type [" + fieldName + "] in ["
+ aggregationName + "]", parser.getTokenLocation());
} else {
reducerFactory = reducerParser.parse(aggregationName, parser, context);
}
} else {
aggFactory = aggregatorParser.parse(aggregationName, parser, context);
<<<<<<<
if (aggFactory == null && reducerFactory == null) {
throw new SearchParseException(context, "Missing definition for aggregation [" + aggregationName + "]");
} else if (aggFactory != null) {
assert reducerFactory == null;
if (metaData != null) {
aggFactory.setMetaData(metaData);
}
=======
if (factory == null) {
throw new SearchParseException(context, "Missing definition for aggregation [" + aggregationName + "]",
parser.getTokenLocation());
}
>>>>>>>
if (aggFactory == null && reducerFactory == null) {
throw new SearchParseException(context, "Missing definition for aggregation [" + aggregationName + "]",
parser.getTokenLocation());
} else if (aggFactory != null) {
assert reducerFactory == null;
if (metaData != null) {
aggFactory.setMetaData(metaData);
}
if (subFactories != null) {
aggFactory.subFactories(subFactories);
}
if (level == 0) {
aggFactory.validate();
}
factories.addAggregator(aggFactory);
} else {
assert reducerFactory != null; |
<<<<<<<
=======
import org.apache.lucene.queries.BoostingQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.ParsingException;
>>>>>>>
import org.elasticsearch.common.ParsingException;
<<<<<<<
public BoostingQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException, QueryParsingException {
=======
public Query parse(QueryParseContext parseContext) throws IOException, ParsingException {
>>>>>>>
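// parsing now produces an intermediate QueryBuilder; turning it into a Lucene Query is deferred to the builder's toQuery() phase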
public BoostingQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException { |
<<<<<<<
import org.elasticsearch.common.ParseField;
=======
import org.apache.lucene.search.Query;
import org.elasticsearch.Version;
import org.elasticsearch.common.ParsingException;
>>>>>>>
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
<<<<<<<
public GeoPolygonQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException, QueryParsingException {
=======
public Query parse(QueryParseContext parseContext) throws IOException, ParsingException {
>>>>>>>
public GeoPolygonQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
<<<<<<<
GeoPolygonQueryBuilder builder = new GeoPolygonQueryBuilder(fieldName, shell);
if (coerce != null) {
builder.coerce(coerce);
=======
if (shell.isEmpty()) {
throw new ParsingException(parseContext, "no points defined for geo_polygon query");
} else {
if (shell.size() < 3) {
throw new ParsingException(parseContext, "too few points defined for geo_polygon query");
}
GeoPoint start = shell.get(0);
if (!start.equals(shell.get(shell.size() - 1))) {
shell.add(start);
}
if (shell.size() < 4) {
throw new ParsingException(parseContext, "too few points defined for geo_polygon query");
}
}
// validation was not available prior to 2.x, so to support bwc percolation queries we only ignore_malformed on 2.x created indexes
if (!indexCreatedBeforeV2_0 && !ignoreMalformed) {
for (GeoPoint point : shell) {
if (point.lat() > 90.0 || point.lat() < -90.0) {
throw new ParsingException(parseContext, "illegal latitude value [{}] for [{}]", point.lat(), NAME);
}
if (point.lon() > 180.0 || point.lon() < -180) {
throw new ParsingException(parseContext, "illegal longitude value [{}] for [{}]", point.lon(), NAME);
}
}
>>>>>>>
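// NOTE: the shell-closure and lat/lon range checks that the old parser performed are assumed to be enforced by GeoPolygonQueryBuilder itself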
GeoPolygonQueryBuilder builder = new GeoPolygonQueryBuilder(fieldName, shell);
if (coerce != null) {
builder.coerce(coerce); |
<<<<<<<
IndexService dummyIndexService = indicesService.createIndex(nodeServicesProvider, tmpIndexMetadata, Collections.emptyList(), shardId -> {});
=======
IndexService dummyIndexService = indicesService.createIndex(tmpIndexMetadata, Collections.emptyList());
>>>>>>>
IndexService dummyIndexService = indicesService.createIndex(tmpIndexMetadata, Collections.emptyList(), shardId -> {}); |
<<<<<<<
public DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, Engine.Searcher engineSearcher,
IndexService indexService, IndexShard indexShard, ScriptService scriptService, PageCacheRecycler pageCacheRecycler,
BigArrays bigArrays, Counter timeEstimateCounter, ParseFieldMatcher parseFieldMatcher, TimeValue timeout,
FetchPhase fetchPhase) {
super(parseFieldMatcher, request);
=======
public DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget,
Engine.Searcher engineSearcher, IndexService indexService, IndexShard indexShard,
ScriptService scriptService, PageCacheRecycler pageCacheRecycler,
BigArrays bigArrays, Counter timeEstimateCounter, ParseFieldMatcher parseFieldMatcher,
TimeValue timeout
) {
super(parseFieldMatcher);
>>>>>>>
public DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, Engine.Searcher engineSearcher,
IndexService indexService, IndexShard indexShard, ScriptService scriptService, PageCacheRecycler pageCacheRecycler,
BigArrays bigArrays, Counter timeEstimateCounter, ParseFieldMatcher parseFieldMatcher, TimeValue timeout,
FetchPhase fetchPhase) {
super(parseFieldMatcher); |
<<<<<<<
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.shard.*;
=======
import org.elasticsearch.index.shard.IndexSearcherWrapper;
import org.elasticsearch.index.shard.MergeSchedulerConfig;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardUtils;
import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
>>>>>>>
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.shard.IndexSearcherWrapper;
import org.elasticsearch.index.shard.MergeSchedulerConfig;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardUtils;
import org.elasticsearch.index.shard.TranslogRecoveryPerformer; |
<<<<<<<
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregationTestCase;
=======
>>>>>>>
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregationTestCase;
<<<<<<<
import org.junit.Before;
=======
import org.elasticsearch.test.InternalAggregationTestCase;
>>>>>>>
import org.elasticsearch.test.InternalAggregationTestCase;
import org.junit.Before;
<<<<<<<
import java.util.Collections;
=======
import java.util.ArrayList;
>>>>>>>
import java.util.ArrayList;
import java.util.Collections; |
<<<<<<<
import org.elasticsearch.common.util.concurrent.ThreadContext;
=======
import org.elasticsearch.env.Environment;
>>>>>>>
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.env.Environment;
<<<<<<<
.put("path.home", createTempDir().toString())
.put("name", "test-" + getTestName())
=======
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
>>>>>>>
.put("path.home", createTempDir().toString())
.put("name", "test-" + getTestName())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) |
<<<<<<<
pendingShards.incrementAndGet(); // increase by 1 until we finish all primary coordination
=======
pendingActions.incrementAndGet();
>>>>>>>
pendingActions.incrementAndGet(); // increase by 1 until we finish all primary coordination
<<<<<<<
pendingShards.incrementAndGet();
replicasProxy.performOn(shard, replicaRequest, new ActionListener<ReplicaResponse>() {
=======
pendingActions.incrementAndGet();
replicasProxy.performOn(shard, replicaRequest, new ActionListener<TransportResponse.Empty>() {
>>>>>>>
pendingActions.incrementAndGet();
replicasProxy.performOn(shard, replicaRequest, new ActionListener<ReplicaResponse>() { |
<<<<<<<
import com.google.gerrit.server.index.group.GroupIndexer;
import com.google.gerrit.server.notedb.AbstractChangeNotes;
=======
>>>>>>>
import com.google.gerrit.server.notedb.AbstractChangeNotes;
<<<<<<<
@Inject private ChangeIndexCollection changeIndexes;
@Inject private EventRecorder.Factory eventRecorderFactory;
@Inject private GroupIndexer groupIndexer;
@Inject private Groups groups;
@Inject private InProcessProtocol inProcessProtocol;
@Inject private ProjectIndexCollection projectIndexes;
@Inject private ProjectOperations projectOperations;
@Inject private RequestScopeOperations requestScopeOperations;
=======
>>>>>>>
@Inject private ChangeIndexCollection changeIndexes;
@Inject private EventRecorder.Factory eventRecorderFactory;
@Inject private InProcessProtocol inProcessProtocol;
@Inject private ProjectIndexCollection projectIndexes;
@Inject private ProjectOperations projectOperations;
@Inject private RequestScopeOperations requestScopeOperations;
<<<<<<<
toClose = Collections.synchronizedList(new ArrayList<>());
// All groups which were added during the server start (e.g. in SchemaCreatorImpl) aren't
// contained in the instance of the group index which is available here and in tests. There are
// two reasons:
// 1) No group index is available in SchemaCreatorImpl when using an in-memory database.
// (This could be fixed by using the IndexManagerOnInit in InMemoryTestingDatabaseModule similar
// to how BaseInit uses it.)
// 2) During the on-init part of the server start, we use another instance of the index than
// later on. As test indexes are non-permanent, closing an instance and opening another one
// removes all indexed data.
// As a workaround, we simply reindex all available groups here.
reindexAllGroups();
=======
toClose = Collections.synchronizedList(new ArrayList<Repository>());
db = reviewDbProvider.open();
>>>>>>>
toClose = Collections.synchronizedList(new ArrayList<>()); |
<<<<<<<
} else if ("boost".equals(currentFieldName)) {
boost = parser.floatValue();
} else if (!scriptParameterParser.token(currentFieldName, token, parser)) {
=======
} else if (!scriptParameterParser.token(currentFieldName, token, parser, parseContext.parseFieldMatcher())) {
>>>>>>>
} else if ("boost".equals(currentFieldName)) {
boost = parser.floatValue();
} else if (!scriptParameterParser.token(currentFieldName, token, parser, parseContext.parseFieldMatcher())) { |
<<<<<<<
import com.google.gerrit.common.data.GroupDetail;
=======
import com.google.gerrit.common.Nullable;
>>>>>>>
<<<<<<<
import com.google.gerrit.server.account.VersionedAccountDestinations;
import com.google.gerrit.server.account.VersionedAccountQueries;
import com.google.gerrit.server.account.GroupCache;
import com.google.gerrit.server.account.GroupDetailFactory;
=======
>>>>>>>
import com.google.gerrit.server.account.VersionedAccountDestinations;
import com.google.gerrit.server.account.VersionedAccountQueries;
<<<<<<<
final boolean allowsDrafts;
final ChangeIndex index;
=======
>>>>>>>
final ChangeIndex index;
<<<<<<<
allProjectsName, allUsersName, patchListCache, repoManager,
projectCache, groupCache, listChildProjects, submitStrategyFactory,
conflictsCache, trackingFooters,
indexes != null ? indexes.getSearchIndex() : null,
indexConfig,
=======
allProjectsName, patchListCache, repoManager, projectCache,
listChildProjects, indexes, submitStrategyFactory,
conflictsCache, trackingFooters, indexConfig, listMembers,
>>>>>>>
allProjectsName, allUsersName, patchListCache, repoManager,
projectCache, listChildProjects, submitStrategyFactory,
conflictsCache, trackingFooters,
indexes != null ? indexes.getSearchIndex() : null,
indexConfig, listMembers,
<<<<<<<
this.allowsDrafts = allowsDrafts;
this.index = index;
=======
>>>>>>>
this.index = index;
<<<<<<<
allProjectsName, allUsersName, patchListCache, repoManager,
projectCache, groupCache, listChildProjects, submitStrategyFactory,
conflictsCache, trackingFooters, index, indexConfig, allowsDrafts);
=======
allProjectsName, patchListCache, repoManager, projectCache,
listChildProjects, indexes, submitStrategyFactory, conflictsCache,
trackingFooters, indexConfig, listMembers, allowsDrafts);
>>>>>>>
allProjectsName, allUsersName, patchListCache, repoManager,
projectCache, listChildProjects, submitStrategyFactory,
conflictsCache, trackingFooters, index, indexConfig, listMembers,
allowsDrafts); |
<<<<<<<
try (Releasable ignored = getIndexShardReferenceOnReplica(request.shardId(), request)) {
=======
try (Releasable ignored = getIndexShardReferenceOnReplica(request.shardId(), request.primaryTerm())) {
>>>>>>>
try (Releasable ignored = getIndexShardReferenceOnReplica(request.shardId(), request.primaryTerm())) {
<<<<<<<
IndexShardReference ref = new IndexShardReferenceImpl(indexShard, true);
assert addShardReference(ref, "primary: " + request.toString() + " " + ref.routingEntry());
return ref;
=======
return IndexShardReferenceImpl.createOnPrimary(indexShard);
>>>>>>>
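// the assert only runs when assertions are enabled (-ea) and registers the reference for leak tracking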
IndexShardReference ref = IndexShardReferenceImpl.createOnPrimary(indexShard);
assert addShardReference(ref, "primary: " + request.toString() + " " + ref.routingEntry());
return ref; |
<<<<<<<
shard = newShard(shardRouting, shard.shardPath(), shard.indexSettings().getIndexMetaData(), null,
new InternalEngineFactory(), () -> {});
=======
shard = newShard(shardRouting, shard.shardPath(), shard.indexSettings().getIndexMetaData(), null, null, () -> {},
EMPTY_EVENT_LISTENER);
>>>>>>>
shard = newShard(shardRouting, shard.shardPath(), shard.indexSettings().getIndexMetaData(), null,
new InternalEngineFactory(), () -> {},
EMPTY_EVENT_LISTENER); |
<<<<<<<
import java.util.Collection;
=======
import java.util.ArrayList;
>>>>>>>
import java.util.ArrayList;
import java.util.Collection;
<<<<<<<
=======
import org.apache.lucene.search.Query;
import org.elasticsearch.common.ParsingException;
>>>>>>>
import org.elasticsearch.common.ParsingException;
<<<<<<<
public SimpleQueryStringBuilder fromXContent(QueryParseContext parseContext) throws IOException, QueryParsingException {
=======
public Query parse(QueryParseContext parseContext) throws IOException, ParsingException {
>>>>>>>
public SimpleQueryStringBuilder fromXContent(QueryParseContext parseContext) throws IOException {
<<<<<<<
throw new QueryParsingException(parseContext, "[" + SimpleQueryStringBuilder.NAME + "] query does not support [" + currentFieldName + "]");
=======
throw new ParsingException(parseContext, "[" + NAME + "] query does not support [" + currentFieldName + "]");
>>>>>>>
throw new ParsingException(parseContext, "[" + SimpleQueryStringBuilder.NAME + "] query does not support [" + currentFieldName + "]");
<<<<<<<
analyzerName = parser.text();
=======
analyzer = parseContext.analysisService().analyzer(parser.text());
if (analyzer == null) {
throw new ParsingException(parseContext, "[" + NAME + "] analyzer [" + parser.text() + "] not found");
}
>>>>>>>
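// only the analyzer name is recorded while parsing; looking it up against the analysis service is deferred until the query is built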
analyzerName = parser.text();
<<<<<<<
defaultOperator = Operator.fromString(parser.text());
=======
String op = parser.text();
if ("or".equalsIgnoreCase(op)) {
defaultOperator = BooleanClause.Occur.SHOULD;
} else if ("and".equalsIgnoreCase(op)) {
defaultOperator = BooleanClause.Occur.MUST;
} else {
throw new ParsingException(parseContext, "[" + NAME + "] default operator [" + op + "] is not allowed");
}
>>>>>>>
defaultOperator = Operator.fromString(parser.text());
<<<<<<<
throw new QueryParsingException(parseContext, "[" + SimpleQueryStringBuilder.NAME + "] unsupported field [" + parser.currentName() + "]");
=======
throw new ParsingException(parseContext, "[" + NAME + "] unsupported field [" + parser.currentName() + "]");
>>>>>>>
throw new ParsingException(parseContext, "[" + SimpleQueryStringBuilder.NAME + "] unsupported field [" + parser.currentName() + "]");
<<<<<<<
throw new QueryParsingException(parseContext, "[" + SimpleQueryStringBuilder.NAME + "] query text missing");
=======
throw new ParsingException(parseContext, "[" + NAME + "] query text missing");
>>>>>>>
throw new ParsingException(parseContext, "[" + SimpleQueryStringBuilder.NAME + "] query text missing"); |
<<<<<<<
public static final String NAME = "script";
private final String script;
=======
private Script script;
@Deprecated
private String scriptString;
>>>>>>>
private Script script;
public static final String NAME = "script";
@Deprecated
private String scriptString;
<<<<<<<
static final ScriptQueryBuilder PROTOTYPE = new ScriptQueryBuilder(null);
public ScriptQueryBuilder(String script) {
=======
public ScriptQueryBuilder(Script script) {
>>>>>>>
static final ScriptQueryBuilder PROTOTYPE = new ScriptQueryBuilder((Script) null);
public ScriptQueryBuilder(Script script) {
<<<<<<<
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(NAME);
builder.field("script", script);
if (this.params != null) {
builder.field("params", this.params);
}
if (this.lang != null) {
builder.field("lang", lang);
=======
protected void doXContent(XContentBuilder builder, Params builderParams) throws IOException {
builder.startObject(ScriptQueryParser.NAME);
if (script != null) {
builder.field(ScriptField.SCRIPT.getPreferredName(), script);
} else {
if (this.scriptString != null) {
builder.field("script", scriptString);
}
if (this.params != null) {
builder.field("params", this.params);
}
if (this.lang != null) {
builder.field("lang", lang);
}
>>>>>>>
protected void doXContent(XContentBuilder builder, Params builderParams) throws IOException {
builder.startObject(NAME);
if (script != null) {
builder.field(ScriptField.SCRIPT.getPreferredName(), script);
} else {
if (this.scriptString != null) {
builder.field("script", scriptString);
}
if (this.params != null) {
builder.field("params", this.params);
}
if (this.lang != null) {
builder.field("lang", lang);
} |
<<<<<<<
assertAcked(prepareCreate("test-idx", 2, ImmutableSettings.builder()
.put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, between(0, 1)).put("refresh_interval", 10, TimeUnit.SECONDS)));
=======
assertAcked(prepareCreate("test-idx", 2, Settings.builder()
.put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, between(0, 1)).put("refresh_interval", 10)));
>>>>>>>
assertAcked(prepareCreate("test-idx", 2, Settings.builder()
.put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, between(0, 1)).put("refresh_interval", 10, TimeUnit.SECONDS)));
<<<<<<<
assertAcked(prepareCreate("test-idx", 2, ImmutableSettings.builder()
.put(SETTING_NUMBER_OF_SHARDS, numShards.numPrimaries).put(SETTING_NUMBER_OF_REPLICAS, between(0, 1)).put("refresh_interval", 5, TimeUnit.SECONDS)));
=======
assertAcked(prepareCreate("test-idx", 2, Settings.builder()
.put(SETTING_NUMBER_OF_SHARDS, numShards.numPrimaries).put(SETTING_NUMBER_OF_REPLICAS, between(0, 1)).put("refresh_interval", 5)));
>>>>>>>
assertAcked(prepareCreate("test-idx", 2, Settings.builder()
.put(SETTING_NUMBER_OF_SHARDS, numShards.numPrimaries).put(SETTING_NUMBER_OF_REPLICAS, between(0, 1)).put("refresh_interval", 5, TimeUnit.SECONDS))); |
<<<<<<<
IndexService indexService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ScriptService scriptService,
Query aliasFilter, ParseFieldMatcher parseFieldMatcher, FetchPhase fetchPhase) {
super(parseFieldMatcher, request);
=======
IndexService indexService, PageCacheRecycler pageCacheRecycler,
BigArrays bigArrays, ScriptService scriptService, Query aliasFilter, ParseFieldMatcher parseFieldMatcher) {
super(parseFieldMatcher);
>>>>>>>
IndexService indexService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ScriptService scriptService,
Query aliasFilter, ParseFieldMatcher parseFieldMatcher, FetchPhase fetchPhase) {
super(parseFieldMatcher); |
<<<<<<<
=======
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentLocation;
>>>>>>>
import org.elasticsearch.common.util.set.Sets;
<<<<<<<
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.index.shard.IllegalIndexShardStateException;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
=======
import org.elasticsearch.index.shard.IllegalIndexShardStateException;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
>>>>>>>
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.index.shard.IllegalIndexShardStateException;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.TranslogRecoveryPerformer; |
<<<<<<<
public QueryBuilder fromXContent(QueryParseContext parseContext) throws IOException, QueryParsingException {
=======
public Query parse(QueryParseContext parseContext) throws IOException, ParsingException {
>>>>>>>
public QueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
<<<<<<<
throw new QueryParsingException(parseContext, "no value specified for fuzzy query");
=======
throw new ParsingException(parseContext, "No value specified for fuzzy query");
}
Query query = null;
MappedFieldType fieldType = parseContext.fieldMapper(fieldName);
if (fieldType != null) {
query = fieldType.fuzzyQuery(value, fuzziness, prefixLength, maxExpansions, transpositions);
}
if (query == null) {
int maxEdits = fuzziness.asDistance(BytesRefs.toString(value));
query = new FuzzyQuery(new Term(fieldName, BytesRefs.toBytesRef(value)), maxEdits, prefixLength, maxExpansions, transpositions);
>>>>>>>
throw new ParsingException(parseContext, "no value specified for fuzzy query"); |
<<<<<<<
import static com.google.gerrit.server.project.testing.TestLabels.label;
import static com.google.gerrit.server.project.testing.TestLabels.value;
=======
import static com.google.gerrit.reviewdb.client.Patch.COMMIT_MSG;
import static com.google.gerrit.reviewdb.client.Patch.MERGE_LIST;
import static com.google.gerrit.server.group.SystemGroupBackend.ANONYMOUS_USERS;
import static com.google.gerrit.server.group.SystemGroupBackend.REGISTERED_USERS;
import static com.google.gerrit.server.project.testing.Util.category;
import static com.google.gerrit.server.project.testing.Util.value;
>>>>>>>
import static com.google.gerrit.server.group.SystemGroupBackend.ANONYMOUS_USERS;
import static com.google.gerrit.server.group.SystemGroupBackend.REGISTERED_USERS;
import static com.google.gerrit.server.project.testing.TestLabels.label;
import static com.google.gerrit.server.project.testing.TestLabels.value;
<<<<<<<
=======
protected void deny(String ref, String permission, AccountGroup.UUID id) throws Exception {
deny(project, ref, permission, id);
}
protected void deny(Project.NameKey p, String ref, String permission, AccountGroup.UUID id)
throws Exception {
try (ProjectConfigUpdate u = updateProject(p)) {
Util.deny(u.getConfig(), permission, id, ref);
u.save();
}
}
protected PermissionRule block(String ref, String permission, AccountGroup.UUID id)
throws Exception {
return block(project, ref, permission, id);
}
protected PermissionRule block(
Project.NameKey project, String ref, String permission, AccountGroup.UUID id)
throws Exception {
try (ProjectConfigUpdate u = updateProject(project)) {
PermissionRule rule = Util.block(u.getConfig(), permission, id, ref);
u.save();
return rule;
}
}
protected void blockLabel(
String label, int min, int max, AccountGroup.UUID id, String ref, Project.NameKey project)
throws Exception {
try (ProjectConfigUpdate u = updateProject(project)) {
Util.block(u.getConfig(), Permission.LABEL + label, min, max, id, ref);
u.save();
}
}
protected void grant(Project.NameKey project, String ref, String permission)
throws RepositoryNotFoundException, IOException, ConfigInvalidException {
grant(project, ref, permission, false);
}
protected void grant(Project.NameKey project, String ref, String permission, boolean force)
throws RepositoryNotFoundException, IOException, ConfigInvalidException {
grant(project, ref, permission, force, adminGroupUuid());
}
protected void grant(
Project.NameKey project,
String ref,
String permission,
boolean force,
AccountGroup.UUID groupUUID)
throws RepositoryNotFoundException, IOException, ConfigInvalidException {
try (MetaDataUpdate md = metaDataUpdateFactory.create(project)) {
md.setMessage(String.format("Grant %s on %s", permission, ref));
ProjectConfig config = projectConfigFactory.read(md);
AccessSection s = config.getAccessSection(ref, true);
Permission p = s.getPermission(permission, true);
PermissionRule rule = Util.newRule(config, groupUUID);
rule.setForce(force);
p.add(rule);
config.commit(md);
projectCache.evict(config.getProject());
}
}
protected void grantLabel(
String label,
int min,
int max,
Project.NameKey project,
String ref,
boolean force,
AccountGroup.UUID groupUUID,
boolean exclusive)
throws RepositoryNotFoundException, IOException, ConfigInvalidException {
String permission = Permission.LABEL + label;
try (MetaDataUpdate md = metaDataUpdateFactory.create(project)) {
md.setMessage(String.format("Grant %s on %s", permission, ref));
ProjectConfig config = projectConfigFactory.read(md);
AccessSection s = config.getAccessSection(ref, true);
Permission p = s.getPermission(permission, true);
p.setExclusiveGroup(exclusive);
PermissionRule rule = Util.newRule(config, groupUUID);
rule.setForce(force);
rule.setMin(min);
rule.setMax(max);
p.add(rule);
config.commit(md);
projectCache.evict(config.getProject());
}
}
protected void removePermission(Project.NameKey project, String ref, String permission)
throws IOException, ConfigInvalidException {
try (MetaDataUpdate md = metaDataUpdateFactory.create(project)) {
md.setMessage(String.format("Remove %s on %s", permission, ref));
ProjectConfig config = projectConfigFactory.read(md);
AccessSection s = config.getAccessSection(ref, true);
Permission p = s.getPermission(permission, true);
p.clearRules();
config.commit(md);
projectCache.evict(config.getProject());
}
}
protected void blockRead(String ref) throws Exception {
block(ref, Permission.READ, REGISTERED_USERS);
}
protected void blockAnonymousRead() throws Exception {
AccountGroup.UUID anonymous = systemGroupBackend.getGroup(ANONYMOUS_USERS).getUUID();
AccountGroup.UUID registered = systemGroupBackend.getGroup(REGISTERED_USERS).getUUID();
String allRefs = RefNames.REFS + "*";
try (ProjectConfigUpdate u = updateProject(project)) {
Util.block(u.getConfig(), Permission.READ, anonymous, allRefs);
Util.allow(u.getConfig(), Permission.READ, registered, allRefs);
u.save();
}
}
>>>>>>>
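// expressed via the ProjectOperations fluent API: READ on all refs is blocked for anonymous users but still allowed for registered users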
protected void blockAnonymousRead() throws Exception {
String allRefs = RefNames.REFS + "*";
projectOperations
.project(project)
.forUpdate()
.add(block(Permission.READ).ref(allRefs).group(ANONYMOUS_USERS))
.add(allow(Permission.READ).ref(allRefs).group(REGISTERED_USERS))
.update();
} |
<<<<<<<
public static class ExtractFieldScriptPlugin extends Plugin {
public void onModule(ScriptModule module) {
module.addScriptEngine(new ScriptEngineRegistry.ScriptEngineRegistration(ExtractFieldScriptEngine.class, ExtractFieldScriptEngine.NAME, true));
=======
public static class ExtractFieldScriptPlugin extends Plugin implements ScriptPlugin {
@Override
public String name() {
return ExtractFieldScriptEngine.NAME;
}
@Override
public String description() {
return "Mock script engine for " + SumIT.class;
}
@Override
public ScriptEngineService getScriptEngineService(Settings settings) {
return new ExtractFieldScriptEngine();
>>>>>>>
public static class ExtractFieldScriptPlugin extends Plugin implements ScriptPlugin {
@Override
public ScriptEngineService getScriptEngineService(Settings settings) {
return new ExtractFieldScriptEngine();
<<<<<<<
public static class FieldValueScriptPlugin extends Plugin {
public void onModule(ScriptModule module) {
module.addScriptEngine(new ScriptEngineRegistry.ScriptEngineRegistration(FieldValueScriptEngine.class, FieldValueScriptEngine.NAME, true));
=======
public static class FieldValueScriptPlugin extends Plugin implements ScriptPlugin {
@Override
public String name() {
return FieldValueScriptEngine.NAME;
}
@Override
public String description() {
return "Mock script engine for " + SumIT.class;
}
@Override
public ScriptEngineService getScriptEngineService(Settings settings) {
return new FieldValueScriptEngine();
>>>>>>>
public static class FieldValueScriptPlugin extends Plugin implements ScriptPlugin {
@Override
public ScriptEngineService getScriptEngineService(Settings settings) {
return new FieldValueScriptEngine(); |
<<<<<<<
import org.apache.lucene.search.FuzzyQuery;
=======
import org.apache.lucene.queries.ExtendedCommonTermsQuery;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.ParsingException;
>>>>>>>
import org.apache.lucene.search.FuzzyQuery;
import org.elasticsearch.common.ParsingException;
<<<<<<<
public MatchQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException, QueryParsingException {
=======
public Query parse(QueryParseContext parseContext) throws IOException, ParsingException {
>>>>>>>
public MatchQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
<<<<<<<
analyzer = parser.text();
=======
String analyzer = parser.text();
if (parseContext.analysisService().analyzer(analyzer) == null) {
throw new ParsingException(parseContext, "[match] analyzer [" + parser.text() + "] not found");
}
matchQuery.setAnalyzer(analyzer);
>>>>>>>
analyzer = parser.text();
<<<<<<<
operator = Operator.fromString(parser.text());
=======
String op = parser.text();
if ("or".equalsIgnoreCase(op)) {
matchQuery.setOccur(BooleanClause.Occur.SHOULD);
} else if ("and".equalsIgnoreCase(op)) {
matchQuery.setOccur(BooleanClause.Occur.MUST);
} else {
throw new ParsingException(parseContext, "text query requires operator to be either 'and' or 'or', not ["
+ op + "]");
}
>>>>>>>
operator = Operator.fromString(parser.text()); |
<<<<<<<
import org.elasticsearch.action.fieldstats.FieldStats;
=======
import org.elasticsearch.Version;
import org.elasticsearch.action.index.IndexRequest;
>>>>>>>
import org.elasticsearch.Version;
import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.action.index.IndexRequest;
<<<<<<<
import static org.elasticsearch.index.seqno.SequenceNumbersService.NO_OPS_PERFORMED;
/**
*
*/
=======
>>>>>>>
import static org.elasticsearch.index.seqno.SequenceNumbersService.NO_OPS_PERFORMED;
<<<<<<<
private void maybeUpdateSequenceNumber(Engine.Operation op) {
if (op.origin() == Operation.Origin.PRIMARY) {
op.updateSeqNo(seqNoService.generateSeqNo());
}
}
private static VersionValueSupplier NEW_VERSION_VALUE = (u, t, l) -> new VersionValue(u, l);
=======
private static VersionValueSupplier NEW_VERSION_VALUE = (u, t) -> new VersionValue(u);
>>>>>>>
private void maybeUpdateSequenceNumber(Engine.Operation op) {
if (op.origin() == Operation.Origin.PRIMARY) {
op.updateSeqNo(seqNoService.generateSeqNo());
}
}
private static VersionValueSupplier NEW_VERSION_VALUE = (u, t) -> new VersionValue(u);
<<<<<<<
if (checkVersionConflict(index, currentVersion, expectedVersion, deleted)) return false;
maybeUpdateSequenceNumber(index);
=======
if (checkVersionConflict(index, currentVersion, expectedVersion, deleted)) {
index.setCreated(false);
return;
}
>>>>>>>
if (checkVersionConflict(index, currentVersion, expectedVersion, deleted)) {
index.setCreated(false);
return;
}
maybeUpdateSequenceNumber(index);
<<<<<<<
final boolean created = indexOrUpdate(index, currentVersion, versionValue);
=======
index.setCreated(deleted);
if (currentVersion == Versions.NOT_FOUND && forceUpdateDocument == false) {
// document does not exists, we can optimize for create
index(index, indexWriter);
} else {
update(index, indexWriter);
}
>>>>>>>
index.setCreated(deleted);
if (currentVersion == Versions.NOT_FOUND && forceUpdateDocument == false) {
// document does not exists, we can optimize for create
index(index, indexWriter);
} else {
update(index, indexWriter);
} |
<<<<<<<
import com.amazonaws.services.s3.AmazonS3;
=======
import com.amazonaws.Protocol;
>>>>>>>
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.Protocol;
<<<<<<<
String storageClass = repositorySettings.settings().get("storage_class", settings.get(REPOSITORY_S3.STORAGE_CLASS, null));
String cannedACL = repositorySettings.settings().get("canned_acl", settings.get(REPOSITORY_S3.CANNED_ACL, null));
Boolean pathStyleAccess = repositorySettings.settings().getAsBoolean("path_style_access", settings.getAsBoolean(REPOSITORY_S3.PATH_STYLE_ACCESS, null));
=======
String storageClass = getValue(repositorySettings, Repository.STORAGE_CLASS_SETTING, Repositories.STORAGE_CLASS_SETTING);
String cannedACL = getValue(repositorySettings, Repository.CANNED_ACL_SETTING, Repositories.CANNED_ACL_SETTING);
>>>>>>>
String storageClass = getValue(repositorySettings, Repository.STORAGE_CLASS_SETTING, Repositories.STORAGE_CLASS_SETTING);
String cannedACL = getValue(repositorySettings, Repository.CANNED_ACL_SETTING, Repositories.CANNED_ACL_SETTING);
Boolean pathStyleAccess = repositorySettings.settings().getAsBoolean("path_style_access", settings.getAsBoolean(REPOSITORY_S3.PATH_STYLE_ACCESS, null));
<<<<<<<
AmazonS3 client = s3Service.client(endpoint, protocol, region, repositorySettings.settings().get("access_key"),
repositorySettings.settings().get("secret_key"), maxRetries, pathStyleAccess);
blobStore = new S3BlobStore(settings, client, bucket, region, serverSideEncryption, bufferSize, maxRetries, cannedACL, storageClass);
=======
String key = getValue(repositorySettings, Repository.KEY_SETTING, Repositories.KEY_SETTING);
String secret = getValue(repositorySettings, Repository.SECRET_SETTING, Repositories.SECRET_SETTING);
blobStore = new S3BlobStore(settings, s3Service.client(endpoint, protocol, region, key, secret, maxRetries),
bucket, region, serverSideEncryption, bufferSize, maxRetries, cannedACL, storageClass);
>>>>>>>
String key = getValue(repositorySettings, Repository.KEY_SETTING, Repositories.KEY_SETTING);
String secret = getValue(repositorySettings, Repository.SECRET_SETTING, Repositories.SECRET_SETTING);
blobStore = new S3BlobStore(settings, s3Service.client(endpoint, protocol, region, key, secret, maxRetries),
bucket, region, serverSideEncryption, bufferSize, maxRetries, cannedACL, storageClass); |
<<<<<<<
public IndexResponse(ShardId shardId, String type, String id, long seqNo, long version, boolean created) {
super(shardId, type, id, seqNo, version);
this.created = created;
}
/**
* Returns true if the document was created, false if updated.
*/
public boolean isCreated() {
return this.created;
=======
public IndexResponse(ShardId shardId, String type, String id, long version, boolean created) {
super(shardId, type, id, version, created ? Result.CREATED : Result.UPDATED);
>>>>>>>
public IndexResponse(ShardId shardId, String type, String id, long seqNo, long version, boolean created) {
super(shardId, type, id, seqNo, version, created ? Result.CREATED : Result.UPDATED);
<<<<<<<
builder.append(",created=").append(created);
builder.append(",seqNo=").append(getSeqNo());
builder.append(",shards=").append(getShardInfo());
=======
builder.append(",result=").append(getResult().getLowercase());
builder.append(",shards=").append(Strings.toString(getShardInfo(), true));
>>>>>>>
builder.append(",result=").append(getResult().getLowercase());
builder.append(",seqNo=").append(getSeqNo());
builder.append(",shards=").append(Strings.toString(getShardInfo(), true)); |
<<<<<<<
ShardRouting shard = ShardRouting.newUnassigned("test", 0, null, 1, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
=======
ShardRouting shard = ShardRouting.newUnassigned(new Index("test","_na_"), 0, null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
>>>>>>>
ShardRouting shard = ShardRouting.newUnassigned(new Index("test","_na_"), 0, null, 1, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
<<<<<<<
ShardRouting shard = ShardRouting.newUnassigned("test", 0, null, 1, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
shard.initialize("node1", -1);
=======
ShardRouting shard = ShardRouting.newUnassigned(new Index("test","_na_"), 0, null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
shard.initialize("node1", null, -1);
>>>>>>>
ShardRouting shard = ShardRouting.newUnassigned(new Index("test","_na_"), 0, null, 1, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
shard.initialize("node1", null, -1);
<<<<<<<
ShardRouting shard = ShardRouting.newUnassigned("test", 0, null, 1, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
shard.initialize("node1", -1);
=======
ShardRouting shard = ShardRouting.newUnassigned(new Index("test","_na_"), 0, null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
shard.initialize("node1", null, -1);
>>>>>>>
ShardRouting shard = ShardRouting.newUnassigned(new Index("test","_na_"), 0, null, 1, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
shard.initialize("node1", null, -1);
<<<<<<<
ShardRouting shard = ShardRouting.newUnassigned("test", 0, null, 1, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
shard.initialize("node1", -1);
=======
ShardRouting shard = ShardRouting.newUnassigned(new Index("test","_na_"), 0, null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
shard.initialize("node1", null, -1);
>>>>>>>
ShardRouting shard = ShardRouting.newUnassigned(new Index("test","_na_"), 0, null, 1, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
shard.initialize("node1", null, -1);
<<<<<<<
ShardRouting shard = ShardRouting.newUnassigned("test", 0, null, 1, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
shard.initialize("node1", -1);
=======
ShardRouting shard = ShardRouting.newUnassigned(new Index("test","_na_"), 0, null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
shard.initialize("node1", null, -1);
>>>>>>>
ShardRouting shard = ShardRouting.newUnassigned(new Index("test","_na_"), 0, null, 1, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
shard.initialize("node1", null, -1); |
<<<<<<<
import org.elasticsearch.cluster.routing.*;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
=======
import org.elasticsearch.cluster.routing.AllocationId;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.index.Index;
>>>>>>>
import org.elasticsearch.cluster.routing.AllocationId;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.index.Index;
<<<<<<<
final ShardRouting initShard;
final ShardRouting startedShard;
final ShardRouting relocatingShard;
final IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder("test");
if (randomBoolean()) {
initShard = TestShardRouting.newShardRouting("test", 0, "node1", 1, true, ShardRoutingState.INITIALIZING, 1);
ShardRouting replica = TestShardRouting.newShardRouting("test", 0, null, 1, false, ShardRoutingState.UNASSIGNED, 1);
indexRoutingTable.addIndexShard(new IndexShardRoutingTable.Builder(initShard.shardId()).addShard(initShard).addShard(replica).build());
} else {
ShardRouting primaryShard = TestShardRouting.newShardRouting("test", 0, "node2", 1, true, ShardRoutingState.STARTED, 1);
initShard = TestShardRouting.newShardRouting("test", 0, "node1", 1, false, ShardRoutingState.INITIALIZING, 1);
indexRoutingTable.addIndexShard(new IndexShardRoutingTable.Builder(initShard.shardId()).addShard(primaryShard).addShard(initShard).build());
}
if (randomBoolean()) {
startedShard = TestShardRouting.newShardRouting("test", 1, "node2", 1, true, ShardRoutingState.STARTED, 1);
ShardRouting replica = TestShardRouting.newShardRouting("test", 1, null, 1, false, ShardRoutingState.UNASSIGNED, 1);
indexRoutingTable.addIndexShard(new IndexShardRoutingTable.Builder(startedShard.shardId()).addShard(startedShard).addShard(replica).build());
} else {
ShardRouting primaryShard = TestShardRouting.newShardRouting("test", 1, "node1", 1, true, ShardRoutingState.STARTED, 1);
startedShard = TestShardRouting.newShardRouting("test", 1, "node2", 1, false, ShardRoutingState.STARTED, 1);
indexRoutingTable.addIndexShard(new IndexShardRoutingTable.Builder(startedShard.shardId()).addShard(primaryShard).addShard(startedShard).build());
}
if (randomBoolean()) {
relocatingShard = TestShardRouting.newShardRouting("test", 2, "node1", "node2", 1, true, ShardRoutingState.RELOCATING, 1);
ShardRouting replica = TestShardRouting.newShardRouting("test", 2, null, 1, false, ShardRoutingState.UNASSIGNED, 1);
indexRoutingTable.addIndexShard(new IndexShardRoutingTable.Builder(relocatingShard.shardId()).addShard(relocatingShard).addShard(replica).build());
} else {
ShardRouting primaryShard = TestShardRouting.newShardRouting("test", 2, "node3", 1, true, ShardRoutingState.STARTED, 1);
relocatingShard = TestShardRouting.newShardRouting("test", 2, "node1", "node2", 1, false, ShardRoutingState.RELOCATING, 1);
indexRoutingTable.addIndexShard(new IndexShardRoutingTable.Builder(relocatingShard.shardId())
.addShard(primaryShard).addShard(relocatingShard).build());
}
stateBuilder.routingTable(RoutingTable.builder().add(indexRoutingTable).build());
=======
final ShardRouting initShard = TestShardRouting.newShardRouting(index, 0, "node1", true, ShardRoutingState.INITIALIZING);
final ShardRouting startedShard = TestShardRouting.newShardRouting(index, 1, "node2", true, ShardRoutingState.STARTED);
final ShardRouting relocatingShard = TestShardRouting.newShardRouting(index, 2, "node1", "node2", true, ShardRoutingState.RELOCATING);
stateBuilder.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(index)
.addIndexShard(new IndexShardRoutingTable.Builder(initShard.shardId()).addShard(initShard).build())
.addIndexShard(new IndexShardRoutingTable.Builder(startedShard.shardId()).addShard(startedShard).build())
.addIndexShard(new IndexShardRoutingTable.Builder(relocatingShard.shardId()).addShard(relocatingShard).build())).build());
>>>>>>>
final ShardRouting initShard = TestShardRouting.newShardRouting(index, 0, "node1", 1, true, ShardRoutingState.INITIALIZING);
final ShardRouting startedShard = TestShardRouting.newShardRouting(index, 1, "node2", 1, true, ShardRoutingState.STARTED);
final ShardRouting relocatingShard = TestShardRouting.newShardRouting(index, 2, "node1", "node2", 1, true, ShardRoutingState.RELOCATING);
stateBuilder.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(index)
.addIndexShard(new IndexShardRoutingTable.Builder(initShard.shardId()).addShard(initShard).build())
.addIndexShard(new IndexShardRoutingTable.Builder(startedShard.shardId()).addShard(startedShard).build())
.addIndexShard(new IndexShardRoutingTable.Builder(relocatingShard.shardId()).addShard(relocatingShard).build())).build());
<<<<<<<
TestShardRouting.newShardRouting(initShard.index(), initShard.id(), initShard.currentNodeId(), initShard.relocatingNodeId(),
initShard.primaryTerm(), initShard.primary(),
ShardRoutingState.INITIALIZING, initShard.allocationId(), randomInt())), false);
=======
TestShardRouting.newShardRouting(initShard.index(), initShard.id(), initShard.currentNodeId(), initShard.relocatingNodeId(), initShard.primary(),
ShardRoutingState.INITIALIZING, initShard.allocationId())), false);
>>>>>>>
TestShardRouting.newShardRouting(initShard.index(), initShard.id(), initShard.currentNodeId(), initShard.relocatingNodeId(),
initShard.primaryTerm(), initShard.primary(),
ShardRoutingState.INITIALIZING, initShard.allocationId())), false);
<<<<<<<
TestShardRouting.newShardRouting(initShard.index(), initShard.id(), initShard.currentNodeId(), initShard.relocatingNodeId(),
initShard.primaryTerm(), initShard.primary(),
ShardRoutingState.INITIALIZING, 1)), false);
=======
TestShardRouting.newShardRouting(initShard.index(), initShard.id(), initShard.currentNodeId(), initShard.relocatingNodeId(), initShard.primary(),
ShardRoutingState.INITIALIZING)), false);
>>>>>>>
TestShardRouting.newShardRouting(initShard.index(), initShard.id(), initShard.currentNodeId(), initShard.relocatingNodeId(),
initShard.primaryTerm(), initShard.primary(),
ShardRoutingState.INITIALIZING)), false);
<<<<<<<
TestShardRouting.newShardRouting(initShard.index(), initShard.id(), "some_node", initShard.currentNodeId(),
initShard.primaryTerm(), initShard.primary(),
ShardRoutingState.INITIALIZING, AllocationId.newTargetRelocation(AllocationId.newRelocation(initShard.allocationId()))
, 1)), false);
=======
TestShardRouting.newShardRouting(initShard.index(), initShard.id(), "some_node", initShard.currentNodeId(), initShard.primary(),
ShardRoutingState.INITIALIZING, AllocationId.newTargetRelocation(AllocationId.newRelocation(initShard.allocationId())))), false);
>>>>>>>
TestShardRouting.newShardRouting(initShard.index(), initShard.id(), "some_node", initShard.currentNodeId(),
initShard.primaryTerm(), initShard.primary(),
ShardRoutingState.INITIALIZING, AllocationId.newTargetRelocation(AllocationId.newRelocation(initShard.allocationId())))), false);
<<<<<<<
TestShardRouting.newShardRouting(startedShard.index(), startedShard.id(), startedShard.currentNodeId(), startedShard.relocatingNodeId(),
startedShard.primaryTerm(), startedShard.primary(),
ShardRoutingState.INITIALIZING, startedShard.allocationId(), 1)), false);
=======
TestShardRouting.newShardRouting(startedShard.index(), startedShard.id(), startedShard.currentNodeId(), startedShard.relocatingNodeId(), startedShard.primary(),
ShardRoutingState.INITIALIZING, startedShard.allocationId())), false);
>>>>>>>
TestShardRouting.newShardRouting(startedShard.index(), startedShard.id(), startedShard.currentNodeId(),
startedShard.relocatingNodeId(), startedShard.primaryTerm(), startedShard.primary(),
ShardRoutingState.INITIALIZING, startedShard.allocationId())), false);
<<<<<<<
TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(),
relocatingShard.currentNodeId(), relocatingShard.primaryTerm(), relocatingShard.primary(),
ShardRoutingState.INITIALIZING, targetAllocationId, randomInt())), false);
=======
TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(), relocatingShard.currentNodeId(), relocatingShard.primary(),
ShardRoutingState.INITIALIZING, targetAllocationId)), false);
>>>>>>>
TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(),
relocatingShard.currentNodeId(), relocatingShard.primaryTerm(), relocatingShard.primary(),
ShardRoutingState.INITIALIZING, targetAllocationId)), false);
<<<<<<<
TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(),
relocatingShard.currentNodeId(), relocatingShard.primaryTerm(), relocatingShard.primary(),
ShardRoutingState.INITIALIZING, relocatingShard.version())));
=======
TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(), relocatingShard.currentNodeId(), relocatingShard.primary(),
ShardRoutingState.INITIALIZING)));
>>>>>>>
TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(),
relocatingShard.currentNodeId(), relocatingShard.primaryTerm(), relocatingShard.primary(),
ShardRoutingState.INITIALIZING)));
<<<<<<<
TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(),
relocatingShard.currentNodeId(), relocatingShard.primaryTerm(), relocatingShard.primary(),
ShardRoutingState.INITIALIZING, relocatingShard.allocationId(), randomInt())), false);
=======
TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(), relocatingShard.currentNodeId(), relocatingShard.primary(),
ShardRoutingState.INITIALIZING, relocatingShard.allocationId())), false);
>>>>>>>
TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(),
relocatingShard.currentNodeId(), relocatingShard.primaryTerm(), relocatingShard.primary(),
ShardRoutingState.INITIALIZING, relocatingShard.allocationId())), false); |
<<<<<<<
public RoutingAllocation.Result reroute(ClusterState clusterState, boolean debug) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
=======
protected RoutingAllocation.Result reroute(ClusterState clusterState, String reason, boolean debug) {
return new RoutingAllocation.Result(false, clusterState.routingTable());
>>>>>>>
protected RoutingAllocation.Result reroute(ClusterState clusterState, String reason, boolean debug) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData()); |
<<<<<<<
import org.apache.lucene.search.Query;
import org.elasticsearch.Version;
import org.elasticsearch.common.geo.GeoHashUtils;
=======
>>>>>>>
import org.apache.lucene.search.Query;
import org.elasticsearch.Version; |
<<<<<<<
protected InternalTerms<?, ?> createTestInstance(String name,
List<PipelineAggregator> pipelineAggregators,
Map<String, Object> metaData,
InternalAggregations aggregations,
boolean showTermDocCountError,
long docCountError) {
Terms.Order order = Terms.Order.count(false);
=======
protected InternalTerms<?, ?> createTestInstance(
String name,
List<PipelineAggregator> pipelineAggregators,
Map<String, Object> metaData) {
BucketOrder order = BucketOrder.count(false);
>>>>>>>
protected InternalTerms<?, ?> createTestInstance(String name,
List<PipelineAggregator> pipelineAggregators,
Map<String, Object> metaData,
InternalAggregations aggregations,
boolean showTermDocCountError,
long docCountError) {
BucketOrder order = BucketOrder.count(false); |
<<<<<<<
import org.elasticsearch.node.Node;
=======
import org.elasticsearch.common.transport.TransportAddress;
>>>>>>>
import org.elasticsearch.node.Node;
import org.elasticsearch.common.transport.TransportAddress;
<<<<<<<
public Map<String, String> buildAttributes() {
Map<String, String> attributes = new HashMap<>(Node.NODE_ATTRIBUTES.get(this.settings).getAsMap());
=======
public DiscoveryNode buildLocalNode(TransportAddress publishAddress) {
Map<String, String> attributes = new HashMap<>(settings.getByPrefix("node.").getAsMap());
>>>>>>>
public DiscoveryNode buildLocalNode(TransportAddress publishAddress) {
Map<String, String> attributes = new HashMap<>(Node.NODE_ATTRIBUTES.get(this.settings).getAsMap()); |
<<<<<<<
return prepareIndex(docMapper(source.type()), source, SequenceNumbersService.UNASSIGNED_SEQ_NO, version, versionType, Engine.Operation.Origin.PRIMARY);
} catch (Throwable t) {
verifyNotClosed(t);
throw t;
=======
return prepareIndex(docMapper(source.type()), source, version, versionType, Engine.Operation.Origin.PRIMARY);
} catch (Exception e) {
verifyNotClosed(e);
throw e;
>>>>>>>
return prepareIndex(docMapper(source.type()), source, SequenceNumbersService.UNASSIGNED_SEQ_NO, version, versionType, Engine.Operation.Origin.PRIMARY);
} catch (Exception e) {
verifyNotClosed(e);
throw e;
<<<<<<<
return prepareIndex(docMapper(source.type()), source, seqNo, version, versionType, Engine.Operation.Origin.REPLICA);
} catch (Throwable t) {
verifyNotClosed(t);
throw t;
=======
return prepareIndex(docMapper(source.type()), source, version, versionType, Engine.Operation.Origin.REPLICA);
} catch (Exception e) {
verifyNotClosed(e);
throw e;
>>>>>>>
return prepareIndex(docMapper(source.type()), source, seqNo, version, versionType, Engine.Operation.Origin.REPLICA);
} catch (Exception e) {
verifyNotClosed(e);
throw e; |
<<<<<<<
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.InternalAggregationTestCase;
import org.elasticsearch.search.aggregations.ParsedAggregation;
=======
>>>>>>>
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.ParsedAggregation; |
<<<<<<<
0,
DefBootstrap.METHOD_CALL, 0L);
=======
DefBootstrap.METHOD_CALL, "");
>>>>>>>
0,
DefBootstrap.METHOD_CALL, "");
<<<<<<<
0,
DefBootstrap.METHOD_CALL, 0L);
=======
DefBootstrap.METHOD_CALL, "");
>>>>>>>
0,
DefBootstrap.METHOD_CALL, "");
<<<<<<<
0,
DefBootstrap.METHOD_CALL, 0L);
=======
DefBootstrap.METHOD_CALL, "");
>>>>>>>
0,
DefBootstrap.METHOD_CALL, "");
<<<<<<<
0,
DefBootstrap.METHOD_CALL, 0L);
=======
DefBootstrap.METHOD_CALL, "");
>>>>>>>
0,
DefBootstrap.METHOD_CALL, ""); |
<<<<<<<
assertTrue(documentMapper.timestampFieldMapper().fieldType().hasDocValues());
documentMapper = indexService.mapperService().parse("type", new CompressedString(documentMapper.mappingSource().string()), true);
=======
assertTrue(documentMapper.timestampFieldMapper().hasDocValues());
documentMapper = indexService.mapperService().parse("type", new CompressedXContent(documentMapper.mappingSource().string()), true);
>>>>>>>
assertTrue(documentMapper.timestampFieldMapper().fieldType().hasDocValues());
documentMapper = indexService.mapperService().parse("type", new CompressedXContent(documentMapper.mappingSource().string()), true); |
<<<<<<<
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.index.query.functionscore.factor.FactorBuilder;
import org.elasticsearch.index.query.support.QueryInnerHits;
=======
import org.elasticsearch.index.query.QueryParsingException;
import org.elasticsearch.index.query.functionscore.weight.WeightBuilder;
import org.elasticsearch.index.query.support.QueryInnerHitBuilder;
>>>>>>>
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.index.query.support.QueryInnerHits;
import org.elasticsearch.index.query.QueryParsingException;
import org.elasticsearch.index.query.functionscore.weight.WeightBuilder; |
<<<<<<<
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorFactory;
=======
import org.elasticsearch.search.rescore.RescoreBuilder.Rescorer;
>>>>>>>
import org.elasticsearch.search.rescore.RescoreBuilder.Rescorer;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorFactory; |
<<<<<<<
shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(),
new CommonStats(indexShard, SHARD_STATS_FLAGS), indexShard.commitStats(), indexShard.seqNoStats()));
=======
shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, SHARD_STATS_FLAGS), indexShard.commitStats()));
>>>>>>>
shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(),
new CommonStats(indicesService.getIndicesQueryCache(), indexShard, SHARD_STATS_FLAGS), indexShard.commitStats(),
indexShard.seqNoStats())); |
<<<<<<<
=======
private float boost = -1;
>>>>>>>
<<<<<<<
static final QueryStringQueryBuilder PROTOTYPE = new QueryStringQueryBuilder(null);
=======
private Boolean escape;
>>>>>>>
static final QueryStringQueryBuilder PROTOTYPE = new QueryStringQueryBuilder(null);
<<<<<<<
printBoostAndQueryName(builder);
=======
if (escape != null) {
builder.field("escape", escape);
}
>>>>>>>
if (escape != null) {
builder.field("escape", escape);
}
printBoostAndQueryName(builder); |
<<<<<<<
TestShardRouting.newShardRouting(index, i, randomFrom(nodeIds), null, null, term, j == 0,
ShardRoutingState.fromValue((byte) randomIntBetween(2, 4)), 1, unassignedInfo));
=======
TestShardRouting.newShardRouting(index, i, randomFrom(nodeIds), null, null, j == 0,
ShardRoutingState.fromValue((byte) randomIntBetween(2, 4)), unassignedInfo));
>>>>>>>
TestShardRouting.newShardRouting(index, i, randomFrom(nodeIds), null, null, term, j == 0,
ShardRoutingState.fromValue((byte) randomIntBetween(2, 4)), unassignedInfo)); |
<<<<<<<
private final SequenceNumbersService seqNoService;
=======
// How many callers are currently requesting index throttling. Currently there are only two situations where we do this: when merges
// are falling behind and when writing indexing buffer to disk is too slow. When this is 0, there is no throttling, else we throttling
// incoming indexing ops to a single thread:
private final AtomicInteger throttleRequestCount = new AtomicInteger();
>>>>>>>
private final SequenceNumbersService seqNoService;
// How many callers are currently requesting index throttling. Currently there are only two situations where we do this: when merges
// are falling behind and when writing indexing buffer to disk is too slow. When this is 0, there is no throttling, else we throttling
// incoming indexing ops to a single thread:
private final AtomicInteger throttleRequestCount = new AtomicInteger();
<<<<<<<
seqNoService = new SequenceNumbersService(shardId, engineConfig.getIndexSettings());
mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings(), engineConfig.getMergeSchedulerConfig());
=======
mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings());
>>>>>>>
seqNoService = new SequenceNumbersService(shardId, engineConfig.getIndexSettings());
mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings()); |
<<<<<<<
=======
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
>>>>>>>
import java.util.ArrayList;
import java.util.Collection; |
<<<<<<<
public final <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
=======
public final <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
headers.applyTo(request);
>>>>>>>
public final <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) { |
<<<<<<<
import org.elasticsearch.common.settings.ClusterSettings;
=======
import org.elasticsearch.common.Randomness;
>>>>>>>
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.Randomness; |
<<<<<<<
=======
import org.apache.lucene.search.Query;
import org.apache.lucene.search.spans.FieldMaskingSpanQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.elasticsearch.common.ParsingException;
>>>>>>>
import org.elasticsearch.common.ParsingException;
<<<<<<<
=======
import org.elasticsearch.index.mapper.MappedFieldType;
>>>>>>>
<<<<<<<
public FieldMaskingSpanQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException, QueryParsingException {
=======
public Query parse(QueryParseContext parseContext) throws IOException, ParsingException {
>>>>>>>
public FieldMaskingSpanQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
<<<<<<<
QueryBuilder query = parseContext.parseInnerQueryBuilder();
if (!(query instanceof SpanQueryBuilder)) {
throw new QueryParsingException(parseContext, "[field_masking_span] query must be of type span query");
=======
Query query = parseContext.parseInnerQuery();
if (!(query instanceof SpanQuery)) {
throw new ParsingException(parseContext, "[field_masking_span] query] must be of type span query");
>>>>>>>
QueryBuilder query = parseContext.parseInnerQueryBuilder();
if (!(query instanceof SpanQueryBuilder)) {
throw new ParsingException(parseContext, "[field_masking_span] query must be of type span query"); |
<<<<<<<
import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregationTestCase;
import org.elasticsearch.search.aggregations.bucket.ParsedSingleBucketAggregation;
=======
import org.elasticsearch.search.aggregations.InternalSingleBucketAggregationTestCase;
>>>>>>>
import org.elasticsearch.search.aggregations.InternalSingleBucketAggregationTestCase;
import org.elasticsearch.search.aggregations.bucket.ParsedSingleBucketAggregation; |
<<<<<<<
=======
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.ExtendedCommonTermsQuery;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRefBuilder;
import org.elasticsearch.common.ParsingException;
>>>>>>>
import org.elasticsearch.common.ParsingException;
<<<<<<<
public CommonTermsQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException, QueryParsingException {
=======
public Query parse(QueryParseContext parseContext) throws IOException, ParsingException {
>>>>>>>
public CommonTermsQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
<<<<<<<
analyzer = parser.text();
=======
String analyzer = parser.text();
if (parseContext.analysisService().analyzer(analyzer) == null) {
throw new ParsingException(parseContext, "[common] analyzer [" + parser.text() + "] not found");
}
queryAnalyzer = analyzer;
>>>>>>>
analyzer = parser.text();
<<<<<<<
highFreqOperator = Operator.fromString(parser.text());
=======
String op = parser.text();
if ("or".equalsIgnoreCase(op)) {
highFreqOccur = BooleanClause.Occur.SHOULD;
} else if ("and".equalsIgnoreCase(op)) {
highFreqOccur = BooleanClause.Occur.MUST;
} else {
throw new ParsingException(parseContext,
"[common] query requires operator to be either 'and' or 'or', not [" + op + "]");
}
>>>>>>>
highFreqOperator = Operator.fromString(parser.text());
<<<<<<<
lowFreqOperator = Operator.fromString(parser.text());
=======
String op = parser.text();
if ("or".equalsIgnoreCase(op)) {
lowFreqOccur = BooleanClause.Occur.SHOULD;
} else if ("and".equalsIgnoreCase(op)) {
lowFreqOccur = BooleanClause.Occur.MUST;
} else {
throw new ParsingException(parseContext,
"[common] query requires operator to be either 'and' or 'or', not [" + op + "]");
}
>>>>>>>
lowFreqOperator = Operator.fromString(parser.text());
<<<<<<<
if (text == null) {
throw new QueryParsingException(parseContext, "No text specified for text query");
=======
if (value == null) {
throw new ParsingException(parseContext, "No text specified for text query");
>>>>>>>
if (text == null) {
throw new ParsingException(parseContext, "No text specified for text query"); |
<<<<<<<
modules.add(new ClusterModule(this.settings));
modules.add(new IndicesModule(namedWriteableRegistry));
=======
modules.add(new ClusterModule(this.settings, clusterService));
modules.add(new IndicesModule());
>>>>>>>
modules.add(new ClusterModule(this.settings, clusterService));
modules.add(new IndicesModule(namedWriteableRegistry)); |
<<<<<<<
import com.carrotsearch.hppc.ObjectObjectAssociativeContainer;
=======
>>>>>>>
<<<<<<<
private final FetchPhase fetchPhase;
=======
private SearchLookup searchLookup;
>>>>>>>
private SearchLookup searchLookup;
private final FetchPhase fetchPhase;
<<<<<<<
this.fetchPhase = null;
=======
this.queryShardContext = queryShardContext;
>>>>>>>
this.queryShardContext = queryShardContext;
this.fetchPhase = null; |